code stringlengths 5 1M | repo_name stringlengths 5 109 | path stringlengths 6 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 1M |
|---|---|---|---|---|---|
package edu.mit.cryptdb.tpch
import java.io.{ File, PrintWriter }
// TODO: take data from the same run of ./qgen (instead of mixing runs together)
// this way, we can say we used random seed X if asked
object Queries {
val q1 = """
select
l_returnflag,
l_linestatus,
sum(l_quantity) as sum_qty,
sum(l_extendedprice) as sum_base_price,
sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
avg(l_quantity) as avg_qty,
avg(l_extendedprice) as avg_price,
avg(l_discount) as avg_disc,
count(*) as count_order
from
lineitem
where
l_shipdate <= date '1998-01-01'
group by
l_returnflag,
l_linestatus
order by
l_returnflag,
l_linestatus;"""
val q2 = """
select
s_acctbal,
s_name,
n_name,
p_partkey,
p_mfgr,
s_address,
s_phone,
s_comment
from
part,
supplier,
partsupp,
nation,
region
where
p_partkey = ps_partkey
and s_suppkey = ps_suppkey
and p_size = 36
and p_type like '%STEEL'
and s_nationkey = n_nationkey
and n_regionkey = r_regionkey
and r_name = 'ASIA'
and ps_supplycost = (
select
min(ps_supplycost)
from
partsupp,
supplier,
nation,
region
where
p_partkey = ps_partkey
and s_suppkey = ps_suppkey
and s_nationkey = n_nationkey
and n_regionkey = r_regionkey
and r_name = 'ASIA'
)
order by
s_acctbal desc,
n_name,
s_name,
p_partkey
limit 100;
"""
val q3 = """
select
l_orderkey,
sum(l_extendedprice * (1 - l_discount)) as revenue,
o_orderdate,
o_shippriority
from
customer,
orders,
lineitem
where
c_mktsegment = 'FURNITURE'
and c_custkey = o_custkey
and l_orderkey = o_orderkey
and o_orderdate < date '1995-03-12'
and l_shipdate > date '1995-03-12'
group by
l_orderkey,
o_orderdate,
o_shippriority
order by
revenue desc,
o_orderdate
limit 10;
"""
val q4 = """
select
o_orderpriority,
count(*) as order_count
from
orders
where
o_orderdate >= date '1994-11-01'
and o_orderdate < date '1994-11-01' + interval '3' month
and exists (
select
*
from
lineitem
where
l_orderkey = o_orderkey
and l_commitdate < l_receiptdate
)
group by
o_orderpriority
order by
o_orderpriority;
"""
val q5 = """
select
n_name,
sum(l_extendedprice * (1 - l_discount)) as revenue
from
customer,
orders,
lineitem,
supplier,
nation,
region
where
c_custkey = o_custkey
and l_orderkey = o_orderkey
and l_suppkey = s_suppkey
and c_nationkey = s_nationkey
and s_nationkey = n_nationkey
and n_regionkey = r_regionkey
and r_name = 'MIDDLE EAST'
and o_orderdate >= date '1993-01-01'
and o_orderdate < date '1993-01-01' + interval '1' year
group by
n_name
order by
revenue desc;
"""
val q6 = """
select
sum(l_extendedprice * l_discount) as revenue
from
lineitem
where
l_shipdate >= date '1993-01-01'
and l_shipdate < date '1993-01-01' + interval '1' year
and l_discount between 0.05 - 0.01 and 0.05 + 0.01
and l_quantity < 25;
"""
val q7 = """
select
supp_nation,
cust_nation,
l_year,
sum(volume) as revenue
from
(
select
n1.n_name as supp_nation,
n2.n_name as cust_nation,
extract(year from l_shipdate) as l_year,
l_extendedprice * (1 - l_discount) as volume
from
supplier,
lineitem,
orders,
customer,
nation n1,
nation n2
where
s_suppkey = l_suppkey
and o_orderkey = l_orderkey
and c_custkey = o_custkey
and s_nationkey = n1.n_nationkey
and c_nationkey = n2.n_nationkey
and (
(n1.n_name = 'SAUDI ARABIA' and n2.n_name = 'ARGENTINA')
or (n1.n_name = 'ARGENTINA' and n2.n_name = 'SAUDI ARABIA')
)
and l_shipdate between date '1995-01-01' and date '1996-12-31'
) as shipping
group by
supp_nation,
cust_nation,
l_year
order by
supp_nation,
cust_nation,
l_year;
"""
val q8 = """
select
o_year,
sum(case
when nation = 'ARGENTINA' then volume
else 0
end) / sum(volume) as mkt_share
from
(
select
extract(year from o_orderdate) as o_year,
l_extendedprice * (1 - l_discount) as volume,
n2.n_name as nation
from
part,
supplier,
lineitem,
orders,
customer,
nation n1,
nation n2,
region
where
p_partkey = l_partkey
and s_suppkey = l_suppkey
and l_orderkey = o_orderkey
and o_custkey = c_custkey
and c_nationkey = n1.n_nationkey
and n1.n_regionkey = r_regionkey
and r_name = 'AMERICA'
and s_nationkey = n2.n_nationkey
and o_orderdate between date '1995-01-01' and date '1996-12-31'
and p_type = 'PROMO ANODIZED BRASS'
) as all_nations
group by
o_year
order by
o_year;
"""
val q9 = """
select
nation,
o_year,
sum(amount) as sum_profit
from
(
select
n_name as nation,
extract(year from o_orderdate) as o_year,
l_extendedprice * (1 - l_discount) - ps_supplycost * l_quantity as amount
from
part,
supplier,
lineitem,
partsupp,
orders,
nation
where
s_suppkey = l_suppkey
and ps_suppkey = l_suppkey
and ps_partkey = l_partkey
and p_partkey = l_partkey
and o_orderkey = l_orderkey
and s_nationkey = n_nationkey
and p_name like '%sky%'
) as profit
group by
nation,
o_year
order by
nation,
o_year desc;
"""
val q10 = """
select
c_custkey,
c_name,
sum(l_extendedprice * (1 - l_discount)) as revenue,
c_acctbal,
n_name,
c_address,
c_phone,
c_comment
from
customer,
orders,
lineitem,
nation
where
c_custkey = o_custkey
and l_orderkey = o_orderkey
and o_orderdate >= date '1994-08-01'
and o_orderdate < date '1994-08-01' + interval '3' month
and l_returnflag = 'R'
and c_nationkey = n_nationkey
group by
c_custkey,
c_name,
c_acctbal,
c_phone,
n_name,
c_address,
c_comment
order by
revenue desc
limit 20;
"""
val q11 = """
select
ps_partkey,
sum(ps_supplycost * ps_availqty) as value
from
partsupp,
supplier,
nation
where
ps_suppkey = s_suppkey
and s_nationkey = n_nationkey
and n_name = 'ARGENTINA'
group by
ps_partkey having
sum(ps_supplycost * ps_availqty) > (
select
sum(ps_supplycost * ps_availqty) * 0.0001
from
partsupp,
supplier,
nation
where
ps_suppkey = s_suppkey
and s_nationkey = n_nationkey
and n_name = 'ARGENTINA'
)
order by
value desc;
"""
val q12 = """
select
l_shipmode,
sum(case
when o_orderpriority = '1-URGENT'
or o_orderpriority = '2-HIGH'
then 1
else 0
end) as high_line_count,
sum(case
when o_orderpriority <> '1-URGENT'
and o_orderpriority <> '2-HIGH'
then 1
else 0
end) as low_line_count
from
orders,
lineitem
where
o_orderkey = l_orderkey
and l_shipmode in ('AIR', 'FOB')
and l_commitdate < l_receiptdate
and l_shipdate < l_commitdate
and l_receiptdate >= date '1993-01-01'
and l_receiptdate < date '1993-01-01' + interval '1' year
group by
l_shipmode
order by
l_shipmode;
"""
val q13 = """
select
c_count,
count(*) as custdist
from
(
select
c_custkey,
count(o_orderkey) as c_count
from
customer left outer join orders on
c_custkey = o_custkey
and o_comment not like '%:1%:2%'
group by
c_custkey
) as c_orders
group by
c_count
order by
custdist desc,
c_count desc;
"""
val q14 = """
select
100.00 * sum(case
when p_type like 'PROMO%'
then l_extendedprice * (1 - l_discount)
else 0
end) / sum(l_extendedprice * (1 - l_discount)) as promo_revenue
from
lineitem,
part
where
l_partkey = p_partkey
and l_shipdate >= date '1996-07-01'
and l_shipdate < date '1996-07-01' + interval '1' month;
"""
val q16 = """
select
p_brand,
p_type,
p_size,
count(distinct ps_suppkey) as supplier_cnt
from
partsupp,
part
where
p_partkey = ps_partkey
and p_brand <> 'Brand#43'
and p_type not like 'PROMO ANODIZED%'
and p_size in (15, 12, 33, 48, 41, 9, 2, 18)
and ps_suppkey not in (
select
s_suppkey
from
supplier
where
s_comment like '%Customer%Complaints%'
)
group by
p_brand,
p_type,
p_size
order by
supplier_cnt desc,
p_brand,
p_type,
p_size;
"""
// val q17 = """
//select
// sum(l_extendedprice) / 7.0 as avg_yearly
//from
// lineitem,
// part
//where
// p_partkey = l_partkey
// and p_brand = 'Brand#45'
// and p_container = 'LG BOX'
// and l_quantity < (
// select
// 0.2 * avg(l_quantity)
// from
// lineitem
// where
// l_partkey = p_partkey
// );
//"""
// proposed rewrite- it's missing:
// group_concat(l_quantity)
// group_concat(l_extendedprice)
//
// but forms the basis for a reasonable comparison. This is b/c
// in scale 10 there are only 58850 rows (before group by)
// which match the WHERE clause, so if we had to send 2 more
// 8-byte ints back, that would be 58850 * 2 * 8 = 0.90MB (uncompressed), which
// even on our modest 10mbps network link would be less than 1 second of xfer time.
// Furthermore, to decrypt 58850 * 2 ints in DET, it would take about
// 58850 * 2 * (0.0173 / 1000.0) = 2.04 sec (0.25 sec for 8-way parallelism). So
// in the worst case this would add ~3 seconds to query execution time
val q17 = """
select
l_partkey, 0.2 * avg(l_quantity)
from
lineitem,
part
where
p_partkey = l_partkey
and p_brand = 'Brand#45'
and p_container = 'LG BOX'
group by l_partkey;
"""
val q18 = """
select
c_name,
c_custkey,
o_orderkey,
o_orderdate,
o_totalprice,
sum(l_quantity)
from
customer,
orders,
lineitem
where
o_orderkey in (
select
l_orderkey
from
lineitem
group by
l_orderkey having
sum(l_quantity) > 315
)
and c_custkey = o_custkey
and o_orderkey = l_orderkey
group by
c_name,
c_custkey,
o_orderkey,
o_orderdate,
o_totalprice
order by
o_totalprice desc,
o_orderdate
limit 100;
"""
val q19 = """
select
sum(l_extendedprice* (1 - l_discount)) as revenue
from
lineitem,
part
where
(
p_partkey = l_partkey
and p_brand = 'Brand#23'
and p_container in ('SM CASE', 'SM BOX', 'SM PACK', 'SM PKG')
and l_quantity >= 10 and l_quantity <= 10 + 10
and p_size between 1 and 5
and l_shipmode in ('AIR', 'AIR REG')
and l_shipinstruct = 'DELIVER IN PERSON'
)
or
(
p_partkey = l_partkey
and p_brand = 'Brand#32'
and p_container in ('MED BAG', 'MED BOX', 'MED PKG', 'MED PACK')
and l_quantity >= 20 and l_quantity <= 20 + 10
and p_size between 1 and 10
and l_shipmode in ('AIR', 'AIR REG')
and l_shipinstruct = 'DELIVER IN PERSON'
)
or
(
p_partkey = l_partkey
and p_brand = 'Brand#34'
and p_container in ('LG CASE', 'LG BOX', 'LG PACK', 'LG PKG')
and l_quantity >= 20 and l_quantity <= 20 + 10
and p_size between 1 and 15
and l_shipmode in ('AIR', 'AIR REG')
and l_shipinstruct = 'DELIVER IN PERSON'
);
"""
val q20 = """
select
s_name,
s_address
from
supplier,
nation
where
s_suppkey in (
select
ps_suppkey
from
partsupp
where
ps_partkey in (
select
p_partkey
from
part
where
p_name like 'khaki%'
)
and ps_availqty > (
select
0.5 * sum(l_quantity)
from
lineitem
where
l_partkey = ps_partkey
and l_suppkey = ps_suppkey
and l_shipdate >= date '1997-01-01'
and l_shipdate < date '1997-01-01' + interval '1' year
)
)
and s_nationkey = n_nationkey
and n_name = 'ALGERIA'
order by
s_name;
"""
val q20Rewritten = """
select s_name, s_address
from supplier, nation
where s_suppkey in (
select ps_suppkey from (
select ps_suppkey, min(ps_availqty) as x, sum(l_quantity) as y
from
partsupp, lineitem
where
l_partkey = ps_partkey
and l_suppkey = ps_suppkey
and l_shipdate >= date '1997-01-01'
and l_shipdate < date '1997-01-01' + interval '1' year
and ps_partkey in ( select p_partkey from part where p_name like '%khaki%' )
group by ps_partkey, ps_suppkey
) as anon1
where x > 0.5 * y
)
and s_nationkey = n_nationkey
and n_name = 'ALGERIA'
order by s_name;
"""
val q21 = """
select
s_name,
count(*) as numwait
from
supplier,
lineitem l1,
orders,
nation
where
s_suppkey = l1.l_suppkey
and o_orderkey = l1.l_orderkey
and o_orderstatus = 'F'
and l1.l_receiptdate > l1.l_commitdate
and exists (
select
*
from
lineitem l2
where
l2.l_orderkey = l1.l_orderkey
and l2.l_suppkey <> l1.l_suppkey
)
and not exists (
select
*
from
lineitem l3
where
l3.l_orderkey = l1.l_orderkey
and l3.l_suppkey <> l1.l_suppkey
and l3.l_receiptdate > l3.l_commitdate
)
and s_nationkey = n_nationkey
and n_name = 'IRAN'
group by
s_name
order by
numwait desc,
s_name
limit 100;
"""
val q22 = """
select
cntrycode,
count(*) as numcust,
sum(abal) as totacctbal
from
(
select
substring(c_phone from 1 for 2) as cntrycode,
c_acctbal as abal
from
customer
where
substring(c_phone from 1 for 2) in
('41', '26', '36', '27', '38', '37', '22')
and c_acctbal > (
select
avg(c_acctbal)
from
customer
where
c_acctbal > 0.00
and substring(c_phone from 1 for 2) in
('41', '26', '36', '27', '38', '37', '22')
)
and not exists (
select
*
from
orders
where
o_custkey = c_custkey
)
) as custsale
group by
cntrycode
order by
cntrycode;
"""
val q22Inner = """
select
substring(c_phone from 1 for 2) as cntrycode,
c_acctbal as abal
from
customer
where
substring(c_phone from 1 for 2) in
('41', '26', '36', '27', '38', '37', '22')
and c_acctbal > (
select
avg(c_acctbal)
from
customer
where
c_acctbal > 0.00
and substring(c_phone from 1 for 2) in
('41', '26', '36', '27', '38', '37', '22')
)
and not exists (
select
*
from
orders
where
o_custkey = c_custkey
)
"""
// Lookup table from TPC-H query number to its SQL text.
// NOTE: q15 is intentionally absent (it is commented out elsewhere in this
// object), and this map holds the original q20, not q20Rewritten.
val QueriesById = Map(
1 -> Queries.q1,
2 -> Queries.q2,
3 -> Queries.q3,
4 -> Queries.q4,
5 -> Queries.q5,
6 -> Queries.q6,
7 -> Queries.q7,
8 -> Queries.q8,
9 -> Queries.q9,
10 -> Queries.q10,
11 -> Queries.q11,
12 -> Queries.q12,
13 -> Queries.q13,
14 -> Queries.q14,
16 -> Queries.q16,
17 -> Queries.q17,
18 -> Queries.q18,
19 -> Queries.q19,
20 -> Queries.q20,
21 -> Queries.q21,
22 -> Queries.q22)
// makes a bunch of qN.sql files in dir. helpful for debugging
/** Writes each query to a `qN.sql` file under `dir`. Helpful for debugging.
 *
 *  @param queries map from TPC-H query number to its SQL text
 *  @param dir     target directory; created (with parents) when absent
 */
def makeSqlFiles(queries: Map[Int, String], dir: File): Unit = {
  dir.mkdirs()
  queries.foreach { case (id, sql) =>
    val p = new PrintWriter(new File(dir, "q%d.sql".format(id)))
    // try/finally guarantees the file handle is released even if println
    // throws; PrintWriter.close() flushes, so no separate flush() is needed.
    try p.println(sql)
    finally p.close()
  }
}
// Every runnable TPC-H query in numeric order; q15 is omitted (commented out)
// and q17 is the rewritten aggregate form defined above.
val AllQueries = Seq(Queries.q1, Queries.q2, Queries.q3, Queries.q4, Queries.q5, Queries.q6, Queries.q7, Queries.q8, Queries.q9, Queries.q10, Queries.q11, Queries.q12, Queries.q13, Queries.q14, /*Queries.q15,*/ Queries.q16, Queries.q17, Queries.q18, Queries.q19, Queries.q20, Queries.q21, Queries.q22)
// Query set used for the VLDB training runs. Differs from AllQueries:
// q13 and q16 are excluded, and q20 is replaced by q20Rewritten.
val VldbTrainQueries =
Seq(Queries.q1, Queries.q2, Queries.q3, Queries.q4, Queries.q5,
Queries.q6, Queries.q7, Queries.q8, Queries.q9, Queries.q10,
Queries.q11, Queries.q12, Queries.q14, Queries.q17, Queries.q18,
Queries.q19, Queries.q20Rewritten, Queries.q21, Queries.q22)
// TPC-H query numbers for the entries of VldbTrainQueries, position by position.
val VldbTrainQueriesIndices =
// corresponds to VldbTrainQueries- don't modify one w/o modifying the other
Seq(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 17, 18, 19, 20, 21, 22)
// simulated using our cost optimizer, using edu.mit.cryptdb.tpch.Runner
// the first entry is n = 1, next is n = 2, and so on...
//
// if you modify VldbTrainQueries, must modify this
// Each inner Seq holds 0-based positions into VldbTrainQueries; entry k
// (0-based) is the query subset selected when the budget is n = k + 1.
val VldbSimulationIdxs =
Seq(
Seq(15),
Seq(11, 15),
Seq(0, 3, 15),
Seq(0, 3, 8, 15),
Seq(0, 3, 5, 8, 15),
Seq(0, 3, 5, 8, 15, 18),
Seq(0, 3, 5, 8, 14, 15, 18),
Seq(0, 3, 5, 8, 12, 14, 15, 18),
Seq(0, 3, 5, 8, 10, 12, 14, 15, 18),
Seq(0, 3, 5, 6, 8, 10, 12, 14, 15, 18),
Seq(0, 1, 3, 5, 6, 8, 10, 12, 14, 15, 18),
Seq(0, 1, 3, 5, 6, 7, 8, 10, 12, 14, 15, 18),
Seq(0, 1, 2, 3, 5, 6, 7, 8, 10, 12, 14, 15, 18),
Seq(0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 15, 18),
Seq(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 14, 15, 18),
Seq(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 18),
Seq(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18),
Seq(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18),
Seq(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18))
}
| tristartom/monomi-optimizer | src/main/scala/tpch/queries.scala | Scala | mit | 17,369 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package doc
package html
import model._
import java.io.{ File => JFile }
import io.{ Streamable, Directory }
import scala.collection._
import page.diagram._
import scala.reflect.internal.Reporter
/** A class that can generate Scaladoc sites to some fixed root folder.
* @author David Bernard
* @author Gilles Dubochet */
class HtmlFactory(val universe: doc.Universe, val reporter: Reporter) {
import page.IndexScript
/** The character encoding to be used for generated Scaladoc sites.
* This value is currently always UTF-8. */
def encoding: String = "UTF-8"
/** Root output folder, taken from the compiler's -d/outdir setting. */
def siteRoot: JFile = new JFile(universe.settings.outdir.value)
// Static assets (icons, fonts, scripts, stylesheets) bundled on the classpath
// and copied verbatim into <siteRoot>/lib by generate().
def libResources = List(
"class.svg",
"annotation.svg",
"object.svg",
"trait.svg",
"package.svg",
"class_comp.svg",
"annotation_comp.svg",
"object_comp.svg",
"trait_comp.svg",
"object_comp_trait.svg",
"object_comp_annotation.svg",
"abstract_type.svg",
"lato-v11-latin-100.eot",
"lato-v11-latin-100.ttf",
"lato-v11-latin-100.woff",
"lato-v11-latin-regular.eot",
"lato-v11-latin-regular.ttf",
"lato-v11-latin-regular.woff",
"open-sans-v13-latin-regular.eot",
"open-sans-v13-latin-regular.ttf",
"open-sans-v13-latin-regular.woff",
"open-sans-v13-latin-400i.eot",
"open-sans-v13-latin-400i.ttf",
"open-sans-v13-latin-400i.woff",
"open-sans-v13-latin-700.eot",
"open-sans-v13-latin-700.ttf",
"open-sans-v13-latin-700.woff",
"open-sans-v13-latin-700i.eot",
"open-sans-v13-latin-700i.ttf",
"open-sans-v13-latin-700i.woff",
"source-code-pro-v6-latin-700.eot",
"source-code-pro-v6-latin-700.ttf",
"source-code-pro-v6-latin-700.woff",
"source-code-pro-v6-latin-regular.eot",
"source-code-pro-v6-latin-regular.ttf",
"source-code-pro-v6-latin-regular.woff",
"MaterialIcons-Regular.eot",
"MaterialIcons-Regular.ttf",
"MaterialIcons-Regular.woff",
"index.js",
"scheduler.js",
"template.js",
"index.css",
"ref-index.css",
"template.css",
"diagrams.css",
"print.css",
"class_diagram.png",
"object_diagram.png",
"trait_diagram.png",
"type_diagram.png",
"ownderbg2.gif",
"ownerbg.gif",
"ownerbg2.gif"
)
// Third-party webjar assets copied with a Subresource Integrity check:
// (resource name, expected base64-encoded SHA-256 digest).
final def webjarResources = List(
("jquery.min.js", "9/aliU8dGd2tb6OSsuzixeV4y/faTqgFtohetphbbj0=")
)
/** Generates the Scaladoc site for a model into the site root.
* A scaladoc site is a set of HTML and related files
* that document a model extracted from a compiler run.
*/
def generate(): Unit = {
// Copies one classpath resource to <siteRoot>/<subPath>, creating parent dirs.
def copyResource(subPath: String): Unit = {
val bytes = new Streamable.Bytes {
val p = "/scala/tools/nsc/doc/html/resource/" + subPath
val inputStream = getClass.getResourceAsStream(p)
assert(inputStream != null, p)
}.toByteArray()
val dest = Directory(siteRoot) / subPath
dest.parent.createDirectory()
val out = dest.toFile.bufferedOutput()
try out.write(bytes, 0, bytes.length)
finally out.close()
}
// Copies a webjar resource to <siteRoot>/lib/<resourceName>, first verifying
// its SHA-256 Subresource Integrity digest; throws on mismatch.
def copyWebjarResource(resourceName: String, expectedSRI: String): Unit = {
import java.security.MessageDigest
import java.util.Base64
val md = MessageDigest.getInstance("SHA-256")
val base64encoder = Base64.getEncoder
// https://developer.mozilla.org/en-US/docs/Web/Security/Subresource_Integrity
// Base64(SHA-256(input)) of the resource's string content.
// NOTE(review): `input.getBytes` below uses the platform default charset, so
// the digest (and the round-trip through String) is charset-dependent —
// confirm the expected SRI constants were produced the same way.
def calsSubResourceIntegrity(input: String): String = {
val messageDigest = md.digest(input.getBytes)
val base64encoded = base64encoder.encode(messageDigest)
new String(base64encoded, "UTF-8")
}
val bytes = new Streamable.Bytes {
val p = "/" + resourceName
val inputStream = getClass.getResourceAsStream(p)
assert(inputStream != null, p)
}.toByteArray()
val fileContent = new String(bytes)
val actualSRI = calsSubResourceIntegrity(fileContent)
if (expectedSRI != actualSRI)
throw new Exception(s"Subresource Integrity unmatched on ${resourceName}. Could be wrong webjar or hijacked: $actualSRI")
val dest = Directory(siteRoot) / "lib" / resourceName
dest.parent.createDirectory()
dest.toFile.writeAll(fileContent)
}
libResources foreach (s => copyResource("lib/" + s))
webjarResources foreach { case (resourceName, integrity) =>
copyWebjarResource(resourceName, integrity)
}
IndexScript(universe) writeFor this
// Always print diagram statistics, even if template generation fails.
try {
writeTemplates(_ writeFor this)
} finally {
DiagramStats.printStats(universe.settings)
}
}
/** Walks the documentation-template tree from the root package, writing each
* entity page exactly once (the `written` set guards against revisits). */
def writeTemplates(writeForThis: HtmlPage => Unit): Unit = {
val written = mutable.HashSet.empty[DocTemplateEntity]
def writeTemplate(tpl: DocTemplateEntity): Unit = {
if (!(written contains tpl)) {
val diagramGenerator: DiagramGenerator = new DotDiagramGenerator(universe.settings)
writeForThis(page.EntityPage(universe, diagramGenerator, tpl, reporter))
written += tpl
tpl.templates collect { case d: DocTemplateEntity => d } map writeTemplate
}
}
writeTemplate(universe.rootPackage)
}
}
| lrytz/scala | src/scaladoc/scala/tools/nsc/doc/html/HtmlFactory.scala | Scala | apache-2.0 | 5,444 |
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scalanlp;
package io;
import java.io.File;
import scalanlp.pipes.Pipes;
import scalanlp.collection.LazyIterable;
import scalanlp.serialization._;
/**
* A TXTFile acts as a source of Strings.
*
* @author dramage
*/
// Extends java.io.File for path semantics; lines are read lazily through the
// Pipes-provided getLines enrichment.
class TXTFile(path : String) extends File(path) with LazyIterable[String] {
import Pipes.global._;
// Each iteration re-opens the file and yields its lines.
override def iterator = this.getLines;
override def toString =
"TXTFile(" + TextSerialization.toString(path) + ")";
}
/**
* Companion object for TXTFiles that includes an implicit conversion
* to a parcel.
*
* @author dramage
*/
object TXTFile {
/** Named file resolved against the given (default: global) Pipes context. */
def apply(name : String)(implicit pipes : Pipes = Pipes.global) =
new TXTFile(pipes.file(name).getPath);
/** From file. */
def apply(file : File) =
new TXTFile(file.getPath);
/** From file in directory. */
def apply(file : File, name : String) =
new TXTFile(new File(file, name).getPath);
/** Implicitly converts the file into a stage Parcel whose data is the file's
* lines wrapped as Items keyed by 0-based line number. */
implicit def TXTFileAsParcel(file : TXTFile) =
scalanlp.stage.Parcel(
history = scalanlp.stage.History.Origin(file.toString),
meta = scalanlp.collection.immutable.DHMap() + (file : File),
data = file.zipWithIndex.map(tup => scalanlp.stage.Item(tup._2, tup._1)));
}
| MLnick/scalanlp-core | data/src/main/scala/scalanlp/io/TXTFile.scala | Scala | apache-2.0 | 1,849 |
/** Dict.scala -> This file manages the dictionary for OLK
Copyright (C) 2015 Stephen Tridgell
This file is part of a pipelined OLK application.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this code. If not, see <http://www.gnu.org/licenses/>.
*/
package OLK.Dict
import Chisel._
import scala.collection.mutable.ArrayBuffer
/** Dict
This file manages the Dictionary
parameter d : dictionary size
parameter f : no features
parameter p : pipeline stages
input alpha = SFix
input 1/(1+r) = Constant
input example = SFix[F]
input addToDict = 1 Bit
output currentDict = SFix[f][d]
output currentAlpha = SFix[d]
output currentPipeline = SFix[f][p]
Registers:
pipelineEx = SFix[f][p]
dict = SFix[f][d]
weights = SFix[d]
Logic:
pipelineEx = [example pipelineEx[0:end-1]]
dict = (addToDict) ? [pipelineEx[end] dict[0:end-1]] : dict
weights = (addToDict) ? [alpha (1/(1+r))*weights[0:end-1]] : weights
currentPipeline = pipelineEx
currentAlpha = weights
currentDict = dict
*/
// Hardware module (Chisel) holding the kernel dictionary, its weights, and the
// pipeline of in-flight examples. On addToDict the oldest pipelined example is
// shifted into the dictionary, all weights shift down scaled by `forget`, and
// the new weight `alpha` enters at position 0. See the file header comment for
// the register-transfer description.
class Dict(val bitWidth : Int, val fracWidth : Int, val dictSize : Int,
val features : Int, val pipelineStages : Int, val isNORMA : Boolean) extends Module {
val io = new Bundle {
val forceNA = Bool(INPUT)
val alpha = Fixed(INPUT, bitWidth, fracWidth)
val forget = Fixed(INPUT, bitWidth, fracWidth)
val example = Vec.fill(features){Fixed(INPUT, bitWidth, fracWidth)}
val addToDict = Bool(INPUT)
val currentDict = Vec.fill(dictSize){Vec.fill(features){Fixed(OUTPUT, bitWidth, fracWidth)}}
val currentAlpha = Vec.fill(dictSize){Fixed(OUTPUT, bitWidth, fracWidth)}
val currentPipeline = Vec.fill(pipelineStages){Vec.fill(features){Fixed(OUTPUT, bitWidth, fracWidth)}}
}
// Registers
val pipelinedEx = RegInit( Vec.fill( pipelineStages ){Vec.fill( features ){ Fixed(0.0, bitWidth, fracWidth)}})
val dict = RegInit( Vec.fill( dictSize ){ Vec.fill( features ){ Fixed(0.0, bitWidth, fracWidth) }})
val weights = RegInit( Vec.fill( dictSize ){ Fixed(0.0, bitWidth, fracWidth) } )
// Combinational products forget * weights(i), consumed by the shift logic below.
val forgetWeights = weights.toList.map(x => { io.forget*%x })
val ZERO = Fixed(0, bitWidth, fracWidth)
// Outputs expose the current register state directly.
io.currentDict := dict
io.currentPipeline := pipelinedEx
io.currentAlpha := weights
for (f <- 0 until features) {
// Pipeline
// New example enters stage 0; each stage shifts toward the dictionary.
pipelinedEx(0)(f) := io.example(f)
for (p <- 0 until (pipelineStages - 1))
pipelinedEx(p+1)(f) := pipelinedEx(p)(f)
// Dictionary
// On addToDict the oldest pipelined example becomes dict(0) and entries shift;
// otherwise the dictionary holds its value.
when (io.addToDict) {
dict(0)(f) := pipelinedEx(pipelineStages - 1)(f)
for (d <- 0 until (dictSize - 1))
dict(d+1)(f) := dict(d)(f)
} .otherwise {
for (d <- 0 until (dictSize))
dict(d)(f) := dict(d)(f)
}
}
// Weight 0: takes alpha on insertion; for NORMA it otherwise decays by
// `forget` unless forceNA holds it constant.
when (io.addToDict) {
weights(0) := io.alpha
} .otherwise {
if (isNORMA)
weights(0) := Mux(io.forceNA, weights(0), forgetWeights(0))
else
weights(0) := weights(0)
}
// Remaining weights: shift-and-forget on insertion; for NORMA they also decay
// in place (unless forceNA) when nothing is inserted.
for (d <- 0 until (dictSize - 1)) {
when (io.addToDict) {
weights(d+1) := forgetWeights(d)
} .otherwise {
if (isNORMA)
weights(d+1) := Mux(io.forceNA, weights(d+1), forgetWeights(d+1))
else
weights(d+1) := weights(d+1)
}
}
}
| da-steve101/chisel-pipelined-olk | src/main/scala/Dict/Dict.scala | Scala | gpl-2.0 | 3,702 |
package io.argos.agent.sentinels
import akka.actor.ActorRef
import io.argos.agent.{Messages, SentinelConfiguration}
import io.argos.agent.bean.{MetricsRequest, MetricsResponse, ThreadPoolStats}
import io.argos.agent.util.HostnameProvider
import io.argos.agent.bean._
/** Base sentinel that watches one Cassandra thread pool and publishes a
 *  notification when the pool currently has blocked tasks.
 *  Subclasses only choose which pool to query via getThreadPoolStats.
 */
abstract class BlockedSentinel(val metricsProvider: ActorRef, val conf: SentinelConfiguration) extends Sentinel {

  /** The metrics request identifying the thread pool to monitor. */
  def getThreadPoolStats : MetricsRequest

  override def processProtocolElement: Receive = {
    // Poll the metrics provider only once the reaction window has elapsed.
    case CheckMetrics() => if (System.currentTimeMillis >= nextReact) metricsProvider ! getThreadPoolStats
    // NOTE: the ThreadPoolStats type parameter is erased at runtime; the guard
    // on metrics.value keeps this safe in practice.
    case metrics: MetricsResponse[ThreadPoolStats] if metrics.value.isDefined => {
      val threadPool = metrics.value.get
      if (log.isDebugEnabled) {
        log.debug("BlockedSentinel : ThreadPool=<{}>, currentlyBlockedTasks=<{}>", threadPool.`type`, threadPool.currentBlockedTasks.toString)
      }
      if (threadPool.currentBlockedTasks > 0 && System.currentTimeMillis >= nextReact) {
        react(threadPool)
      }
    }
  }

  /** Publishes an overload notification built from the pool stats, then arms
   *  the next reaction deadline so alerts are rate-limited.
   */
  def react(info: ThreadPoolStats): Unit = {
    val message =
      s"""Cassandra Node ${HostnameProvider.hostname} may be overloaded.
|
|Some actions are blocked for the Type '${info.`type`}'
|
|Currently blocked tasks : ${info.currentBlockedTasks}
|Pending tasks : ${info.pendingTasks}
|Active Tasks : ${info.activeTasks}
|Available executors : ${info.maxPoolSize}
|
|Total blocked tasks since node startup : ${info.totalBlockedTasks}
|
|Something wrong may append on this node...
""".stripMargin
    context.system.eventStream.publish(buildNotification(conf.messageHeader.map(h => h + " \\n\\n--####--\\n\\n" + message).getOrElse(message)))
    updateNextReact()
    // (a dead trailing `{ }` empty block was removed; it had no effect)
  }
}
// --------- BlockedSentinel implementations
// Monitors the internal compaction-executor pool.
class CompactionExecBlockedSentinel(override val metricsProvider : ActorRef, override val conf: SentinelConfiguration) extends BlockedSentinel(metricsProvider, conf) {
override def getThreadPoolStats: MetricsRequest = MetricsRequest(ActorProtocol.ACTION_CHECK_INTERNAL_STAGE, Messages.INTERNAL_STAGE_COMPACTION_EXEC)
}
// Monitors the counter-mutation stage.
class CounterMutationBlockedSentinel(override val metricsProvider : ActorRef, override val conf: SentinelConfiguration) extends BlockedSentinel(metricsProvider, conf) {
override def getThreadPoolStats: MetricsRequest = MetricsRequest(ActorProtocol.ACTION_CHECK_STAGE, Messages.STAGE_COUNTER_MUTATION)
}
// Monitors the internal gossip pool.
class GossipBlockedSentinel(override val metricsProvider : ActorRef, override val conf: SentinelConfiguration) extends BlockedSentinel(metricsProvider, conf) {
override def getThreadPoolStats: MetricsRequest = MetricsRequest(ActorProtocol.ACTION_CHECK_INTERNAL_STAGE, Messages.INTERNAL_STAGE_GOSSIP)
}
// Monitors the internal-response pool.
class InternalResponseBlockedSentinel(override val metricsProvider : ActorRef, override val conf: SentinelConfiguration) extends BlockedSentinel(metricsProvider, conf) {
override def getThreadPoolStats: MetricsRequest = MetricsRequest(ActorProtocol.ACTION_CHECK_INTERNAL_STAGE, Messages.INTERNAL_STAGE_INTERNAL_RESPONSE)
}
// Monitors the memtable-flush-writer pool.
class MemtableFlusherBlockedSentinel(override val metricsProvider : ActorRef, override val conf: SentinelConfiguration) extends BlockedSentinel(metricsProvider, conf) {
override def getThreadPoolStats: MetricsRequest = MetricsRequest(ActorProtocol.ACTION_CHECK_INTERNAL_STAGE, Messages.INTERNAL_STAGE_MEMTABLE_FLUSHER)
}
// Monitors the (write) mutation stage.
class MutationBlockedSentinel( override val metricsProvider : ActorRef, override val conf: SentinelConfiguration) extends BlockedSentinel(metricsProvider, conf) {
override def getThreadPoolStats: MetricsRequest = MetricsRequest(ActorProtocol.ACTION_CHECK_STAGE, Messages.STAGE_MUTATION)
}
// Monitors the read stage.
class ReadBlockedSentinel(override val metricsProvider : ActorRef, override val conf: SentinelConfiguration) extends BlockedSentinel(metricsProvider, conf) {
override def getThreadPoolStats: MetricsRequest = MetricsRequest(ActorProtocol.ACTION_CHECK_STAGE, Messages.STAGE_READ)
}
// Monitors the read-repair stage.
class ReadRepairBlockedSentinel(override val metricsProvider : ActorRef, override val conf: SentinelConfiguration) extends BlockedSentinel(metricsProvider, conf) {
override def getThreadPoolStats: MetricsRequest = MetricsRequest(ActorProtocol.ACTION_CHECK_STAGE, Messages.STAGE_READ_REPAIR)
}
// Monitors the request-response stage.
class RequestResponseBlockedSentinel(override val metricsProvider : ActorRef, override val conf: SentinelConfiguration) extends BlockedSentinel(metricsProvider, conf) {
override def getThreadPoolStats: MetricsRequest = MetricsRequest(ActorProtocol.ACTION_CHECK_STAGE, Messages.STAGE_REQUEST_RESPONSE)
}
| leleueri/argos | argos-agent/src/main/scala/io/argos/agent/sentinels/BlockedSentinel.scala | Scala | apache-2.0 | 4,633 |
/*
* Copyright 2014-16 Intelix Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package au.com.intelix.rs.core.services
import au.com.intelix.rs.core.Ser
import scala.language.implicitConversions
/** Identifier of a service stream; serializable via Ser. */
trait StreamId extends Ser
/** Stream identified by a plain string; renders as the id itself. */
case class SimpleStreamId(id: String) extends StreamId {
override lazy val toString: String = id
}
/** Stream identified by a string id plus a qualifying value; renders as "id#v". */
case class CompoundStreamId[T](id: String, v: T) extends StreamId {
override lazy val toString: String = id + "#" + v
}
object StreamId {
// NOTE(review): implicit conversions from plain String / (String, Any) keep
// call sites terse but can obscure where StreamIds are created; prefer the
// explicit constructors when readability matters.
implicit def toStreamId(id: String): StreamId = SimpleStreamId(id)
implicit def toStreamId(id: (String, Any)): StreamId = CompoundStreamId(id._1, id._2)
}
/** Constructor/extractor helper for a family of compound stream ids that all
 *  share the fixed string id supplied at construction.
 */
abstract class CompoundStreamIdTemplate[T](id: String) {
  /** Builds the compound id carrying the given value. */
  def apply(v: T): CompoundStreamId[T] = CompoundStreamId(id, v)

  /** Matches only CompoundStreamIds whose string id equals this template's id,
   *  extracting the carried value.
   */
  def unapply(s: StreamId): Option[T] = s match {
    case CompoundStreamId(`id`, v) => Some(v.asInstanceOf[T])
    case _ => None
  }
}
/** Constructor/extractor helper for one fixed simple stream id. */
abstract class SimpleStreamIdTemplate(id: String) {
  /** Builds the simple id. */
  def apply(): SimpleStreamId = SimpleStreamId(id)

  /** True iff `s` is a SimpleStreamId carrying exactly this template's id. */
  def unapply(s: StreamId): Boolean = s match {
    case SimpleStreamId(`id`) => true
    case _ => false
  }
}
| intelix/reactiveservices | platform/core/src/main/scala/au/com/intelix/rs/core/services/StreamId.scala | Scala | apache-2.0 | 1,600 |
/*
* copy from https://github.com/jrudolph/sbt-dependency-graph/blob/master/src/main/scala/net/virtualvoid/sbt/graph/IvyGraphMLDependencies.scala
* and delete some unneeded function
*/
package com.zavakid.sbt
import sbinary.{DefaultProtocol, Format}
import scala.collection.mutable
import scala.collection.mutable.{Set => MSet}
import scala.xml.parsing.ConstructingParser
import scala.xml.{Document, Node, NodeSeq}
object IvyGraphMLDependencies extends App {
/** Coordinates identifying one resolved artifact: organisation, name, version. */
case class ModuleId(organisation: String,
                    name: String,
                    version: String) {
  /** Canonical "organisation:name:version" rendering. */
  def idString: String = s"$organisation:$name:$version"
}
case class Module(id: ModuleId,
license: Option[String] = None,
extraInfo: String = "",
evictedByVersion: Option[String] = None,
error: Option[String] = None) {
def hadError: Boolean = error.isDefined
def isUsed: Boolean = !evictedByVersion.isDefined
}
type Edge = (ModuleId, ModuleId)
case class ModuleGraph(nodes: Seq[Module], edges: Seq[Edge]) {
lazy val modules: Map[ModuleId, Module] =
nodes.map(n => (n.id, n)).toMap
def module(id: ModuleId): Module = modules(id)
lazy val dependencyMap: Map[ModuleId, Seq[Module]] =
createMap(identity)
lazy val reverseDependencyMap: Map[ModuleId, Seq[Module]] =
createMap { case (a, b) => (b, a)}
def createMap(bindingFor: ((ModuleId, ModuleId)) => (ModuleId, ModuleId)): Map[ModuleId, Seq[Module]] = {
val m = new mutable.HashMap[ModuleId, MSet[Module]] with mutable.MultiMap[ModuleId, Module]
edges.foreach { entry =>
val (f, t) = bindingFor(entry)
m.addBinding(f, module(t))
}
m.toMap.mapValues(_.toSeq.sortBy(_.id.idString)).withDefaultValue(Nil)
}
}
def graph(ivyReportFile: String): ModuleGraph =
buildGraph(buildDoc(ivyReportFile))
def buildGraph(doc: Document): ModuleGraph = {
def edgesForModule(id: ModuleId, revision: NodeSeq): Seq[Edge] =
for {
caller <- revision \\ "caller"
callerModule = moduleIdFromElement(caller, caller.attribute("callerrev").get.text)
} yield (moduleIdFromElement(caller, caller.attribute("callerrev").get.text), id)
val moduleEdges: Seq[(Module, Seq[Edge])] = for {
mod <- doc \\ "dependencies" \\ "module"
revision <- mod \\ "revision"
rev = revision.attribute("name").get.text
moduleId = moduleIdFromElement(mod, rev)
module = Module(moduleId,
(revision \\ "license").headOption.flatMap(_.attribute("name")).map(_.text),
evictedByVersion = (revision \\ "evicted-by").headOption.flatMap(_.attribute("rev").map(_.text)),
error = revision.attribute("error").map(_.text))
} yield (module, edgesForModule(moduleId, revision))
val (nodes, edges) = moduleEdges.unzip
val info = (doc \\ "info").head
def infoAttr(name: String): String =
info.attribute(name).getOrElse(throw new IllegalArgumentException("Missing attribute " + name)).text
val rootModule = Module(ModuleId(infoAttr("organisation"), infoAttr("module"), infoAttr("revision")))
ModuleGraph(rootModule +: nodes, edges.flatten)
}
def reverseGraphStartingAt(graph: ModuleGraph, root: ModuleId): ModuleGraph = {
val depsMap = graph.reverseDependencyMap
def visit(module: ModuleId, visited: Set[ModuleId]): Seq[(ModuleId, ModuleId)] =
if (visited(module))
Nil
else
depsMap.get(module) match {
case Some(deps) =>
deps.flatMap { to =>
(module, to.id) +: visit(to.id, visited + module)
}
case None => Nil
}
val edges = visit(root, Set.empty)
val nodes = edges.foldLeft(Set.empty[ModuleId])((set, edge) => set + edge._1 + edge._2).map(graph.module)
ModuleGraph(nodes.toSeq, edges)
}
def ignoreScalaLibrary(scalaVersion: String, graph: ModuleGraph): ModuleGraph = {
def isScalaLibrary(m: Module) = isScalaLibraryId(m.id)
def isScalaLibraryId(id: ModuleId) = id.organisation == "org.scala-lang" && id.name == "scala-library"
def dependsOnScalaLibrary(m: Module): Boolean =
graph.dependencyMap(m.id).exists(isScalaLibrary)
def addScalaLibraryAnnotation(m: Module): Module = {
if (dependsOnScalaLibrary(m))
m.copy(extraInfo = m.extraInfo + " [S]")
else
m
}
val newNodes = graph.nodes.map(addScalaLibraryAnnotation).filterNot(isScalaLibrary)
val newEdges = graph.edges.filterNot(e => isScalaLibraryId(e._2))
ModuleGraph(newNodes, newEdges)
}
def moduleIdFromElement(element: Node, version: String): ModuleId =
ModuleId(element.attribute("organisation").get.text, element.attribute("name").get.text, version)
private def buildDoc(ivyReportFile: String) =
ConstructingParser.fromSource(io.Source.fromFile(ivyReportFile), preserveWS = false).document()
}
/** SBinary serialization formats for the dependency-graph model types. */
object ModuleGraphProtocol extends DefaultProtocol {
  import com.zavakid.sbt.IvyGraphMLDependencies._
  // Seq is persisted by round-tripping through List, the collection sbinary supports.
  implicit def seqFormat[T: Format]: Format[Seq[T]] = wrap[Seq[T], List[T]](_.toList, _.toSeq)
  implicit val ModuleIdFormat: Format[ModuleId] = asProduct3(ModuleId)(ModuleId.unapply(_).get)
  implicit val ModuleFormat: Format[Module] = asProduct5(Module)(Module.unapply(_).get)
  implicit val ModuleGraphFormat: Format[ModuleGraph] = asProduct2(ModuleGraph)(ModuleGraph.unapply(_).get)
}
| CSUG/sbt-one-log | src/main/scala/com/zavakid/sbt/Util.scala | Scala | apache-2.0 | 5,456 |
/*
* Code Pulse: A real-time code coverage testing tool. For more information
* see http://code-pulse.com
*
* Copyright (C) 2014 Applied Visions - http://securedecisions.avi.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package bootstrap.liftweb
import net.liftweb.util.StringHelpers
import net.liftweb.http.S
object SnippetRequest {
  /** Matches a snippet class name and the current LocParam, e.g.
   * {{{
   * case ("HelloWorld", Full(p: MyLocParam)) => ...
   * }}}
   */
  def unapply(snippetName: String): Option[(String, Any)] =
    Some((StringHelpers.camelify(snippetName), S.location.flatMap(_.currentValue)))
}
package nl.dekkr.hoppr.model
/**
* The different loglevels used in the FetchLog
*/
// `sealed` keeps all levels in this file so the compiler can check match exhaustiveness.
sealed trait LogLevel
// Levels are declared from most to least severe.
case object Critical extends LogLevel
case object Error extends LogLevel
case object Warning extends LogLevel
case object Info extends LogLevel
case object Debug extends LogLevel
| plamola/hoppR | src/main/scala/nl/dekkr/hoppr/model/LogLevel.scala | Scala | mit | 289 |
package kogu.spark.distribution
import kogu.spark.JdbcDetails
import org.apache.spark.ml.feature._
import org.apache.spark.sql.DataFrame
/** Ad-hoc exploration of a text column's term distribution using Spark ML feature
  * transformers (tokenize -> stop-word removal -> TF / IDF) over a JDBC-sourced table.
  */
object TextDist {
  val defaultStorage = org.apache.spark.storage.StorageLevel.MEMORY_ONLY
  // Entry point: loads GBM_DATA over JDBC, caches the DESCRIPTION column and runs topTerms.
  def main(args: Array[String]): Unit = {
    val columnName = "DESCRIPTION"
    val df = createDF(JdbcDetails(), "GBM_DATA").select(columnName)
    df.persist(defaultStorage)
    df.show()
    // val frame = ngrams(df)
    // frame.show(truncate = false)
    topTerms(df, columnName)
  }
  // Builds a local[*] HiveContext with the Spark UI disabled and a single shuffle partition.
  private def context = {
    val conf = new org.apache.spark.SparkConf()
      .setAppName("test")
      .setMaster("local[*]")
      .set("spark.ui.enabled", "false")
    val sc = new org.apache.spark.SparkContext(conf)
    sc.setLocalProperty("spark.ui.enabled", "false")
    val sqlContext = new org.apache.spark.sql.hive.HiveContext(sc)
    sqlContext.setConf("spark.sql.shuffle.partitions", "1")
    sqlContext
  }
  // Reads `table` over JDBC and prints its schema as a side effect.
  private def createDF(details: JdbcDetails, table: String): DataFrame = {
    val sqlContext = context
    val df = sqlContext.read.jdbc(details.url, table, details.props)
    df.printSchema()
    df
  }
  /** Tokenizes the first column of `df` and appends unigram/bigram/trigram columns.
    * NOTE(review): the bigram stage relies on NGram's default n — confirm it is 2. */
  def ngrams(df: DataFrame): DataFrame = {
    df.show(truncate = false)
    val colName = df.columns(0)
    val temp1 = "tokenized_" + colName
    val temp2 = "filtered_" + colName
    val tokenizer = new RegexTokenizer()
      .setGaps(true)
      .setPattern("[\\\\W]+")
      .setToLowercase(true)
      .setInputCol(colName)
      .setOutputCol(temp1)
    val remover = new StopWordsRemover()
      .setInputCol(temp1)
      .setOutputCol(temp2)
    val unigram = new NGram().setInputCol(temp2).setOutputCol("unigram").setN(1)
    val bigram = new NGram().setInputCol(temp2).setOutputCol("bigram")
    val trigram = new NGram().setInputCol(temp2).setOutputCol("trigram").setN(3)
    val stages = Array(tokenizer, remover, unigram, bigram, trigram)
    // Apply each transformer in sequence, threading the frame through.
    val frame = stages.foldLeft(df)((acc, tr) => tr.transform(acc))
    frame
  }
  /** Runs tokenize -> stop words -> hashing TF / CountVectorizer -> IDF over `colName`.
    *
    * NOTE(review): this currently only prints intermediate results and ALWAYS returns
    * an empty Array — the declared Array[(String, Double)] is never populated, and the
    * `n` parameter is unused (take(20)/setNumFeatures(20) are hard-coded). TODO: either
    * complete the implementation or change the signature.
    * NOTE(review): `column.notNullOrEmpty` is presumably a project implicit enrichment
    * on Column — confirm where it is defined.
    */
  def topTerms(df: DataFrame, colName: String, n: Int = 20): Array[(String, Double)] = {
    val column = df(colName)
    import org.apache.spark.sql.functions.length
    val dataFrame = df.select(colName).filter(column.notNullOrEmpty and length(column) <= 200)
    val tokenized = "tokenized"
    val filtered = "filtered"
    val rawFeatures = "rawFeatures"
    val tokenizer = new RegexTokenizer()
      .setGaps(true)
      .setPattern("[\\\\W]+")
      .setToLowercase(true)
      .setInputCol(colName)
      .setOutputCol(tokenized)
    val remover = new StopWordsRemover()
      .setInputCol(tokenized)
      .setOutputCol(filtered)
    val hashingTF = new HashingTF()
      .setInputCol(filtered)
      .setOutputCol(rawFeatures)
      .setNumFeatures(20)
    val frame2 = Array(tokenizer, remover, hashingTF).foldLeft(dataFrame)((acc, tr) => tr.transform(acc))
    val model: CountVectorizerModel = new CountVectorizer()
      .setInputCol(filtered)
      .setOutputCol("vectorized")
      .setMinDF(2)
      .fit(frame2)
    val frame3 = model.transform(frame2)
    model.vocabulary.take(20).foreach(println)
    val idfModel = new IDF()
      .setInputCol(rawFeatures)
      .setOutputCol("idf")
      .fit(frame3)
    val rescaledData = idfModel.transform(frame3)
    rescaledData.show(truncate = false)
    Array()
  }
}
| kogupta/SparkPlayground | src/main/scala/kogu/spark/distribution/TextDist.scala | Scala | apache-2.0 | 3,329 |
// The natural recursive solution: prepend the head and recurse on the tail.
// Not stack-safe for long streams.
def toListRecursive: List[A] = uncons match {
  case None    => Nil
  case Some(c) => c.head :: c.tail.toList
}
/*
The above solution will stack overflow for large streams, since it's
not tail-recursive. Here is a tail-recursive implementation. At each
step we cons onto the front of the `acc` list, which will result in the
reverse of the stream. Then at the end we reverse the result to get the
correct order again.
*/
/** Tail-recursive conversion: cons each element onto an accumulator (building the
  * reverse of the stream), then reverse once at the end. Constant stack usage
  * (checked by @tailrec). Fix: use `s.uncons` instead of the postfix form
  * `s uncons`, which is deprecated and requires `scala.language.postfixOps`. */
def toList: List[A] = {
  @annotation.tailrec
  def go(s: Stream[A], acc: List[A]): List[A] = s.uncons match {
    case Some(c) =>
      go(c.tail, c.head :: acc)
    case _ => acc
  }
  go(this, List()).reverse
}
/*
In order to avoid the `reverse` at the end, we could write it using a
mutable list buffer and an explicit loop instead. Note that the mutable
list buffer never escapes our `toList` method, so this function is
still _pure_.
*/
/** Single-pass conversion using a local mutable buffer to avoid the final
  * `reverse`. The buffer never escapes this method, so the function stays pure.
  * Fix: use `s.uncons` instead of the postfix form `s uncons`, which is
  * deprecated and requires `scala.language.postfixOps`. */
def toListFast: List[A] = {
  val buf = new collection.mutable.ListBuffer[A]
  @annotation.tailrec
  def go(s: Stream[A]): List[A] = s.uncons match {
    case Some(c) =>
      buf += c.head
      go(c.tail)
    case _ => buf.toList
  }
  go(this)
}
package skinny.view.velocity
import javax.servlet.ServletContext
import org.apache.velocity.tools.view.VelocityView
/**
* VelocityView configuration factory.
*/
object VelocityViewConfig {
  /** Builds a [[ScalaVelocityView]] bound to the given servlet context;
    * `sbtProjectPath` is passed straight through to the view. */
  def viewWithServletContext(ctx: ServletContext, sbtProjectPath: Option[String] = None): VelocityView =
    new ScalaVelocityView(ctx, sbtProjectPath)
}
| Kuchitama/skinny-framework | velocity/src/main/scala/skinny/view/velocity/VelocityViewConfig.scala | Scala | mit | 355 |
/*
* Copyright (c) 2014 Dufresne Management Consulting LLC.
*/
package com.nickelsoftware.bettercare4me.models;
import org.joda.time.LocalDate
import org.scalatestplus.play.OneAppPerSuite
import org.scalatestplus.play.PlaySpec
/** Unit tests for Provider list round-tripping and SimplePersistenceLayer key generation. */
class ProviderTestSpec extends PlaySpec {
  "The Provider class" must {
    "be created with valid arguments" in {
      val provider = Provider("key1", "Michel", "Dufresne")
      provider.providerID mustBe "key1"
      provider.firstName mustBe "Michel"
      provider.lastName mustBe "Dufresne"
    }
    "put all atributes into a List" in {
      val provider = Provider("key1", "Michel", "Dufresne")
      provider.toList mustBe List("key1", "Michel", "Dufresne")
    }
    "create a Provider from a list of attributes" in {
      // Round-trip: toList followed by fromList yields an equal Provider.
      val provider = Provider("key1", "Michel", "Dufresne")
      ProviderParser.fromList(provider.toList) mustBe provider
    }
  }
  "The SimplePersistenceLayer class" must {
    "create Provider with sequential keys" in {
      // Keys embed the layer's seed (99) plus a per-entity counter.
      val persistenceLayer = new SimplePersistenceLayer(99)
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-0", "Michel", "Dufresne")
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-1", "Michel", "Dufresne")
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-2", "Michel", "Dufresne")
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-3", "Michel", "Dufresne")
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-4", "Michel", "Dufresne")
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-5", "Michel", "Dufresne")
    }
    "create Provider with sequential keys (independent of Patient and Claim keys)" in {
      // Provider and Patient counters advance independently; all claim kinds
      // (med/rx/lab) share a single claim counter (c-md/c-rx/c-lc prefixes).
      val persistenceLayer = new SimplePersistenceLayer(99)
      val dob = new LocalDate(1962, 7, 27).toDateTimeAtStartOfDay()
      val dos = new LocalDate(2014, 9, 5).toDateTimeAtStartOfDay()
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-0", "Michel", "Dufresne")
      persistenceLayer.createPatient("Michel", "Dufresne", "M", dob) mustBe Patient("patient-99-0", "Michel", "Dufresne", "M", dob)
      persistenceLayer.createMedClaim("patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos, dos) mustBe MedClaim("c-md-99-0", "patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos, dos)
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-1", "Michel", "Dufresne")
      persistenceLayer.createPatient("Michel", "Dufresne", "M", dob) mustBe Patient("patient-99-1", "Michel", "Dufresne", "M", dob)
      persistenceLayer.createMedClaim("patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos, dos) mustBe MedClaim("c-md-99-1", "patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos, dos)
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-2", "Michel", "Dufresne")
      persistenceLayer.createPatient("Michel", "Dufresne", "M", dob) mustBe Patient("patient-99-2", "Michel", "Dufresne", "M", dob)
      persistenceLayer.createMedClaim("patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos, dos) mustBe MedClaim("c-md-99-2", "patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos, dos)
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-3", "Michel", "Dufresne")
      persistenceLayer.createPatient("Michel", "Dufresne", "M", dob) mustBe Patient("patient-99-3", "Michel", "Dufresne", "M", dob)
      persistenceLayer.createMedClaim("patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos, dos) mustBe MedClaim("c-md-99-3", "patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos, dos)
      persistenceLayer.createRxClaim("patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos) mustBe RxClaim("c-rx-99-4", "patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos)
      persistenceLayer.createLabClaim("patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos) mustBe LabClaim("c-lc-99-5", "patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos)
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-4", "Michel", "Dufresne")
      persistenceLayer.createPatient("Michel", "Dufresne", "M", dob) mustBe Patient("patient-99-4", "Michel", "Dufresne", "M", dob)
      persistenceLayer.createMedClaim("patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos, dos) mustBe MedClaim("c-md-99-6", "patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos, dos)
      persistenceLayer.createProvider("Michel", "Dufresne") mustBe Provider("provider-99-5", "Michel", "Dufresne")
      persistenceLayer.createRxClaim("patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos) mustBe RxClaim("c-rx-99-7", "patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos)
      persistenceLayer.createLabClaim("patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos) mustBe LabClaim("c-lc-99-8", "patient.uuid", "patient.first", "patient.last", "provider.uuid", "provider.first", "provider.last", dos)
    }
  }
}
| reactivecore01/bettercare4.me | play/test/com/nickelsoftware/bettercare4me/models/ProviderTestSpec.scala | Scala | apache-2.0 | 5,986 |
package com.github.mrpowers.spark.spec.sql
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
import org.apache.spark.sql.Row
import org.scalatest._
import com.github.mrpowers.spark.daria.sql.SparkSessionExt._
import com.github.mrpowers.spark.spec.SparkSessionTestWrapper
import com.github.mrpowers.spark.fast.tests.DatasetComparer
/** Specs for DataFrame.na (drop / fill / replace) using spark-daria's createDF
  * helper and spark-fast-tests' dataset comparison. */
class DataFrameNaFunctionsSpec extends FunSpec with SparkSessionTestWrapper with DatasetComparer {
  import spark.implicits._
  describe("#drop") {
    it("drops rows that contains null values") {
      // Built via the verbose createDataFrame API because Rows carry nulls.
      val sourceData = List(
        Row(1, null),
        Row(null, null),
        Row(3, 30),
        Row(10, 20)
      )
      val sourceSchema = List(
        StructField("num1", IntegerType, true),
        StructField("num2", IntegerType, true)
      )
      val sourceDF = spark.createDataFrame(
        spark.sparkContext.parallelize(sourceData),
        StructType(sourceSchema)
      )
      val actualDF = sourceDF.na.drop()
      // Only the rows with no nulls in any column survive.
      val expectedData = List(
        Row(3, 30),
        Row(10, 20)
      )
      val expectedSchema = List(
        StructField("num1", IntegerType, true),
        StructField("num2", IntegerType, true)
      )
      val expectedDF = spark.createDataFrame(
        spark.sparkContext.parallelize(expectedData),
        StructType(expectedSchema)
      )
      assertSmallDatasetEquality(actualDF, expectedDF)
    }
  }
  describe("#fill") {
    it("Returns a new DataFrame that replaces null or NaN values in numeric columns with value") {
      val sourceDF = spark.createDF(
        List(
          (1, null),
          (null, null),
          (3, 30),
          (10, 20)
        ), List(
          ("num1", IntegerType, true),
          ("num2", IntegerType, true)
        )
      )
      val actualDF = sourceDF.na.fill(77)
      // After fill the columns contain no nulls, hence nullable = false below.
      val expectedDF = spark.createDF(
        List(
          (1, 77),
          (77, 77),
          (3, 30),
          (10, 20)
        ), List(
          ("num1", IntegerType, false),
          ("num2", IntegerType, false)
        )
      )
      assertSmallDatasetEquality(actualDF, expectedDF)
    }
  }
  describe("#replace") {
    pending
  }
}
| MrPowers/spark-spec | src/test/scala/com/github/mrpowers/spark/spec/sql/DataFrameNaFunctionsSpec.scala | Scala | mit | 2,197 |
/*
* Copyright 2012 ζ¨ε (Yang Bo)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dongxiguo.commons.continuations
/** IllegalStateException subclass presumably raised when an operation is attempted
  * after shutdown (TODO confirm against callers). The misspelt name ("Shutted") is
  * part of the public API and kept for source compatibility. */
class ShuttedDownException(message: String)
  extends IllegalStateException(message)
// vim: set ts=2 sw=2 et:
| Atry/commons-continuations | src/main/scala/com/dongxiguo/commons/continuations/ShuttedDownException.scala | Scala | apache-2.0 | 759 |
package org.jetbrains.plugins.scala.debugger.positionManager
import org.jetbrains.plugins.scala.debugger.{Loc, ScalaVersion_2_11, ScalaVersion_2_12}
/**
* @author Nikolay.Tropin
*/
// Scala 2.11 flavour of the suite: inherits all expectations from the base class unchanged.
class LocationOfLineTest extends LocationsOfLineTestBase with ScalaVersion_2_11
// Scala 2.12 flavour: overrides the lambda-related expectations because 2.12 compiles
// lambdas to methods of the enclosing class (e.g. "Lambdas$$$anonfun$1" on "Lambdas$")
// rather than separate anonymous-function classes as in 2.11.
class LocationOfLineTest_212 extends LocationsOfLineTestBase with ScalaVersion_2_12 {
  override def testLambdas(): Unit = {
    checkLocationsOfLine(
      Set(Loc("Lambdas$", "main", 4), Loc("Lambdas$", "Lambdas$$$anonfun$1", 4)),
      Set(Loc("Lambdas$", "main", 5), Loc("Lambdas$", "Lambdas$$$anonfun$2", 5), Loc("Lambdas$", "Lambdas$$$anonfun$3", 5)),
      Set(Loc("Lambdas$", "Lambdas$$$anonfun$4", 6))
    )
  }
  override def testMultilevel(): Unit = {
    checkLocationsOfLine(
      Set(Loc("Multilevel$This$1", "<init>", 18)), //location for constructor is customized
      Set(Loc("Multilevel$This$1", "<init>", 4)),
      Set(Loc("Multilevel$This$1", "foo", 6)),
      Set(Loc("Multilevel$This$1$$anon$1", "<init>", 6)),
      Set(Loc("Multilevel$This$1$$anon$1", "run", 8)),
      Set(Loc("Multilevel$This$1$$anon$1", "run", 8)),
      Set(Loc("Multilevel$This$1$$anon$1", "Multilevel$This$1$$anon$1$$$anonfun$1", 9)),
      Set(Loc("Multilevel$", "main", 18))
    )
  }
}
/** Shared fixtures and expectations: each setupFile registers a source with
  * $offsetMarker (a line whose locations are checked) and $bp (breakpoint)
  * markers; each test asserts the (class, method, line) locations the debugger
  * resolves for the marked lines, in marker order. */
abstract class LocationsOfLineTestBase extends PositionManagerTestBase {
  // Expected result for markers that should resolve to no location at all.
  val noLocations = Set.empty[Loc]
  setupFile("Simple.scala",
    s"""
      |object Simple {
      |  ${offsetMarker}val z = 1
      |  def main(args: Array[String]) {
      |    ${offsetMarker}val i = 1
      |    $offsetMarker"asd".substring(i)
      |    ${offsetMarker}foo()
      |  }$offsetMarker
      |
      |  def foo(): Unit = {
      |    $offsetMarker$bp""
      |  }$offsetMarker
      |}$offsetMarker
      |""".stripMargin.trim)
  def testSimple(): Unit = {
    checkLocationsOfLine(
      // Field initializer appears both in the constructor and the accessor.
      Set(Loc("Simple$", "<init>", 2), Loc("Simple$", "z", 2)),
      Set(Loc("Simple$", "main", 4)),
      Set(Loc("Simple$", "main", 5)),
      Set(Loc("Simple$", "main", 6)),
      noLocations,
      Set(Loc("Simple$", "foo", 10)),
      noLocations,
      noLocations
    )
  }
  setupFile("SimpleClass.scala",
    s"""
      |object SimpleClass {
      |  def main(args: Array[String]) {
      |    val b = new Bar(1)
      |    b.foo()$bp
      |  }
      |}
      |
      |${offsetMarker}class Bar(i: Int) {
      |  ${offsetMarker}val s = ""
      |
      |  def foo(): Unit = {
      |    $offsetMarker""
      |  }
      |}
      |
      |""".stripMargin.trim)
  def testSimpleClass(): Unit = {
    checkLocationsOfLine(
      Set(Loc("Bar", "<init>", 14)), //location for constructor is customized
      Set(Loc("Bar", "<init>", 9), Loc("Bar", "s", 9)),
      Set(Loc("Bar", "foo", 12))
    )
  }
  setupFile("Lambdas.scala",
    s"""
      |object Lambdas {
      |  def main(args: Array[String]): Unit = {
      |    val list = List(1, 2)
      |    ${offsetMarker}Some(1).getOrElse(2)
      |    ${offsetMarker}list.filter(_ < 10).map(x => "aaa" + x)
      |        .foreach(${offsetMarker}println)
      |    ""$bp
      |  }
      |}
      |""".stripMargin.trim)
  def testLambdas(): Unit = {
    // 2.11 encoding: each lambda compiles to its own anonfun class (overridden for 2.12).
    checkLocationsOfLine(
      Set(Loc("Lambdas$", "main", 4), Loc("Lambdas$$anonfun$main$1", "apply$mcI$sp", 4)),
      Set(Loc("Lambdas$", "main", 5), Loc("Lambdas$$anonfun$main$2", "apply$mcZI$sp", 5), Loc("Lambdas$$anonfun$main$3", "apply", 5)),
      Set(Loc("Lambdas$$anonfun$main$4", "apply", 6))
    )
  }
  setupFile("LocalFunction.scala",
    s"""
      |object LocalFunction {
      |
      |  def main(args: Array[String]) {
      |    def foo(s: String): Unit = {
      |      def bar() = {
      |        $offsetMarker"bar"
      |      }
      |
      |      ${offsetMarker}println(bar())
      |
      |    }
      |
      |    ${offsetMarker}foo("aaa") $bp
      |  }
      |}
      |""".stripMargin.trim)
  def testLocalFunction(): Unit = {
    // Nested local functions are lifted to name-mangled methods (bar$1, foo$1).
    checkLocationsOfLine(
      Set(Loc("LocalFunction$", "bar$1", 6)),
      Set(Loc("LocalFunction$", "foo$1", 9)),
      Set(Loc("LocalFunction$", "main", 13))
    )
  }
  setupFile("Multilevel.scala",
    s"""
      |object Multilevel {
      |  def main(args: Array[String]) {
      |    ${offsetMarker}class This {
      |      ${offsetMarker}val x = 1
      |      def foo() {
      |        ${offsetMarker}val runnable = ${offsetMarker}new Runnable {
      |          def run() {
      |            ${offsetMarker}val x = $offsetMarker() => {
      |              ${offsetMarker}This.this.x
      |              "stop here"$bp
      |            }
      |            x()
      |          }
      |        }
      |        runnable.run()
      |      }
      |    }
      |    ${offsetMarker}new This().foo()
      |  }
      |}""".stripMargin.trim)
  def testMultilevel(): Unit = {
    checkLocationsOfLine(
      Set(Loc("Multilevel$This$1", "<init>", 18)), //location for constructor is customized
      Set(Loc("Multilevel$This$1", "<init>", 4)),
      Set(Loc("Multilevel$This$1", "foo", 6)),
      Set(Loc("Multilevel$This$1$$anon$1", "<init>", 6)),
      Set(Loc("Multilevel$This$1$$anon$1", "run", 8)),
      Set(Loc("Multilevel$This$1$$anon$1", "run", 8)),
      Set(Loc("Multilevel$This$1$$anon$1$$anonfun$1", "apply", 9)),
      Set(Loc("Multilevel$", "main", 18))
    )
  }
}
| katejim/intellij-scala | test/org/jetbrains/plugins/scala/debugger/positionManager/LocationsOfLineTest.scala | Scala | apache-2.0 | 5,458 |
package in.suhj.eridown.option
import in.suhj.eridown.core.Generator
import in.suhj.eridown.elements.block._
import in.suhj.eridown.elements.inline._
object Constants {
    // Block-level element generators.
    // NOTE(review): the list order presumably defines matching precedence when several
    // generators could match the same input — confirm in Generator before reordering.
    val eridownBlocks: List[Generator] = List(
        CodeLineGenerator,
        ThematicBreakGenerator,
        CodeFenceGenerator,
        HeadingGenerator,
        BlockquoteLineGenerator,
        ListItemGenerator,
        TableGenerator,
        DefinitionListGenerator,
        NoFormatGenerator,
        HtmlCommentBlockGenerator
    )
    // Inline (span-level) element generators; same ordering caveat as above.
    val eridownInlines: List[Generator] = List(
        BoldGenerator,
        EmphasisGenerator,
        StrikeGenerator,
        CodeInlineGenerator,
        LinkGenerator,
        ImageGenerator,
        DefaultHtmlTagGenerator,
        NoFormatInlineGenerator
    )
}
| raon0211/eridown | src/main/scala/in/suhj/eridown/option/Constants.scala | Scala | mit | 787 |
package demo
import google.maps.Data.Feature
import google.maps.LatLng
import org.scalajs.dom._
import scala.scalajs.js
import js.annotation.JSExport
import org.scalajs.dom
/** Scala.js demo exercising the google.maps facade: builds a map, a Data layer
  * with two features and styles, a marker with an info window, and listeners. */
object ScalaJSGMapExample extends js.JSApp {
  def main(): Unit = {
    println("About to init map...")
    // NOTE(review): `js.Function { ... }` wraps the whole block; the block runs when
    // the "load" DOM listener below fires — confirm this evaluates lazily as intended.
    def initialize() = js.Function {
      val opts = google.maps.MapOptions(
        center = new LatLng(51.201203, -1.724370),
        zoom = 8,
        panControl = false,
        streetViewControl = false,
        mapTypeControl = false)
      val gmap = new google.maps.Map(document.getElementById("map-canvas"), opts)
      val data = new google.maps.Data(google.maps.Data.DataOptions(gmap))
      val latLng = new google.maps.LatLng(52.201203, -1.724370)
      val feature = new google.maps.Data.Feature(google.maps.Data.FeatureOptions(
        geometry = latLng,
        id = "Test feature",
        properties = null
      ))
      val feature2 = new google.maps.Data.Feature(google.maps.Data.FeatureOptions(
        geometry = new google.maps.LatLng(52.301203, -1.724470),
        id = "Test feature2",
        properties = null
      ))
      data.add(feature)
      // Logs the id of every feature currently on the data layer.
      data.forEach((feature: google.maps.Data.Feature) => {
        println(feature.getId());
      })
      val style = google.maps.Data.StyleOptions(
        clickable= true,
        cursor= "pointer",
        fillColor= "#79B55B",
        fillOpacity= 1,
        icon = null,
        shape = google.maps.MarkerShape(coords = js.Array(1, 2, 3), shape = "circle" ),
        strokeColor = "#79B55B",
        strokeOpacity = 1,
        strokeWeight= 1,
        title = "string",
        visible = true,
        zIndex= 1
      )
      // Exercises per-feature override, layer-wide style, and revert.
      data.overrideStyle(feature2, style)
      data.setStyle(style)
      println(s"Added new style", style)
      data.add(feature2)
      data.revertStyle(feature)
      def featureToGeoJson(f:Object):Unit = {
        println("featureToGeoJson")
        println(f)
      }
      data.toGeoJson(featureToGeoJson(_:Object))
      val marker = new google.maps.Marker(google.maps.MarkerOptions(
        position = gmap.getCenter(),
        map = gmap,
        title = "Click to zoom"
      ))
      google.maps.event.addListener(gmap, "center_changed", () => {
        // 3 seconds after the center of the map has changed, pan back to the
        // marker.
        window.setTimeout(() => {
          gmap.panTo(marker.getPosition());
        }, 3000)
      })
      val contentString = """<div id="content">
    <div id="siteNotice">
    </div>
    <h1 id="firstHeading" class="firstHeading">Uluru</h1>
    <div id="bodyContent">
    <p><b>Uluru</b>, also referred to as <b>Ayers Rock</b>, is a large sandstone rock formation in the southern part of the '+
        Northern Territory, central Australia. It lies 335 km (208 mi)
        south west of the nearest large town, Alice Springs; 450 km
        (280 mi) by road. Kata Tjuta and Uluru are the two major
        features of the Uluru - Kata Tjuta National Park. Uluru is
        sacred to the Pitjantjatjara and Yankunytjatjara, the
        Aboriginal people of the area. It has many springs, waterholes,
        rock caves and ancient paintings. Uluru is listed as a World
        Heritage Site.</p>
    <p>Attribution: Uluru, <a href="https://en.wikipedia.org/w/index.php?title=Uluru&oldid=297882194">
    https://en.wikipedia.org/w/index.php?title=Uluru</a>
    (last visited June 22, 2009).</p>
    </div>
    </div>"""
      val infowindow = new google.maps.InfoWindow(google.maps.InfoWindowOptions(
        content=contentString
      ))
      google.maps.event.addListener(marker, "click", () => {
        println("Marker click !")
        infowindow.open(gmap,marker)
      })
      ""
    }
    // Defer map construction until the page has loaded.
    google.maps.event.addDomListener(window, "load", initialize)
  }
}
| coreyauger/scala-js-marked | demo/src/main/scala/ScalaJSGMapExample.scala | Scala | mit | 3,859 |
package org.scalaide.core.internal.lexical
import org.eclipse.jdt.ui.text.IJavaPartitions._
import org.eclipse.jface.text._
import org.eclipse.jface.text.IDocument.DEFAULT_CONTENT_TYPE
import scala.annotation.switch
import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer
import org.scalaide.core.lexical.ScalaPartitions._
import scala.xml.parsing.TokenTests
object ScalaPartitionTokeniser {
  /** Splits `text` into its typed partitions, dropping any zero-length tokens
    * the tokeniser may produce. */
  def tokenise(text: String): List[ITypedRegion] = {
    val tokeniser = new ScalaPartitionTokeniser(text)
    // Adapt the tokeniser to an Iterator so filtering and collection are declarative;
    // `hasNext` is consulted before every `next()`, preserving the original call order.
    val allTokens = new Iterator[ITypedRegion] {
      def hasNext: Boolean = tokeniser.tokensRemain
      def next(): ITypedRegion = tokeniser.nextToken()
    }
    allTokens.filter(_.getLength > 0).toList
  }
}
/** @see org.scalaide.core.lexical.ScalaCodePartitioner
*/
class ScalaPartitionTokeniser(text: String) extends TokenTests {
  import ScalaDocumentPartitioner.EOF
  private val length = text.length
  // Current scan position (index into `text`).
  private var pos = 0
  // Index of the last character of the previously emitted token; -1 before any token.
  private var previousTokenEnd = -1
  // Content type of the token being scanned; set by the get* methods, consumed by nextToken.
  private var contentTypeOpt: Option[String] = None
  // Character at the current position, or EOF past the end.
  private def ch = if (pos >= length) EOF else text.charAt(pos)
  // Character `lookahead` positions away (may be negative); EOF outside the text.
  private def ch(lookahead: Int) = {
    val offset = pos + lookahead
    if (offset >= length || offset < 0)
      EOF
    else
      text.charAt(offset)
  }
  // Advance the scan position by one / by `n` characters.
  private def accept(): Unit = { pos += 1 }
  private def accept(n: Int): Unit = { pos += n }
  private def setContentType(contentType: String): Unit = { contentTypeOpt = Some(contentType) }
  def tokensRemain = pos < length
  /** Scans and returns the next partition token starting at `pos`.
    * Dispatches on the current lexical mode (Scala, XML, string interpolation,
    * Scaladoc / Scaladoc code block); the chosen scanner advances `pos` and sets
    * `contentTypeOpt`, which this method then packages into a TypedRegion.
    * Precondition: `tokensRemain`. May return a zero-length region (callers filter). */
  def nextToken(): ITypedRegion = {
    require(tokensRemain)
    modeStack.head match {
      case ScalaState(_) =>
        getScalaToken()
      case XmlState(_, _) =>
        getXmlToken()
      case StringInterpolationState(multiline, embeddedIdentifierNext) =>
        getStringInterpolationToken(multiline, embeddedIdentifierNext)
      case ScaladocCodeBlockState(nesting) =>
        // Consume the "{{{" that opened the code block, then scan its body.
        accept(3)
        modeStack = modeStack.tail
        setContentType(SCALADOC_CODE_BLOCK)
        getCodeBlockComment(nesting)
      case ScaladocState(nesting) =>
        modeStack = modeStack.tail
        setContentType(JAVA_DOC)
        getMultiLineComment(nesting)
    }
    // Package [previousTokenEnd + 1, pos) as the emitted region and reset per-token state.
    val contentType = contentTypeOpt.get
    val tokenStart = previousTokenEnd + 1
    val tokenLength = pos - tokenStart
    previousTokenEnd = pos - 1
    contentTypeOpt = None
    new TypedRegion(tokenStart, tokenLength, contentType)
  }
  /** Scans one token in plain Scala mode, branching on the current character:
    * XML start, string literal (single or triple quoted, with interpolation
    * detection), comment (line, block, Scaladoc), char literal, back-quoted
    * identifier, or ordinary code. */
  private def getScalaToken(): Unit = {
    (ch: @switch) match {
      case EOF => require(false)
      case '<' => ch(-1) match {
        // '<' only opens XML when preceded by a delimiter and followed by a
        // name start, '!' (comment/CDATA) or '?' (processing instruction).
        case EOF | ' ' | '\\t' | '\\n' | '{' | '(' | '>' if (isNameStart(ch(1)) || ch(1) == '!' || ch(1) == '?') =>
          nestIntoXmlMode()
          getXmlToken()
        case _ =>
          accept()
          setContentType(DEFAULT_CONTENT_TYPE)
          getOrdinaryScala()
      }
      case '"' =>
        val multiline = ch(1) == '"' && ch(2) == '"'
        // An identifier character immediately before '"' is taken as an interpolator id.
        val isInterpolation = Character.isUnicodeIdentifierPart(ch(-1)) // TODO: More precise detection
        if (isInterpolation)
          nestIntoStringInterpolationMode(multiline)
        if (multiline) {
          setContentType(SCALA_MULTI_LINE_STRING)
          accept(3)
          getMultiLineStringLit(quotesRequired = 3, isInterpolation)
        } else {
          setContentType(JAVA_STRING)
          accept()
          getStringLit(isInterpolation)
        }
      case '/' =>
        (ch(1): @switch) match {
          case '/' =>
            accept(2)
            setContentType(JAVA_SINGLE_LINE_COMMENT)
            getSingleLineComment()
          case '*' =>
            accept(2)
            // "/**" (but not "/**/") starts Scaladoc rather than a plain block comment.
            if (ch == '*' && ch(1) != '/') {
              accept()
              setContentType(JAVA_DOC)
            } else
              setContentType(JAVA_MULTI_LINE_COMMENT)
            getMultiLineComment(nesting = 1)
          case _ =>
            accept()
            setContentType(DEFAULT_CONTENT_TYPE)
            getOrdinaryScala()
        }
      case '\\'' => scanForCharLit() match {
        case Some(offset) =>
          // Consume through the closing quote of the character literal.
          accept(offset + 1)
          setContentType(JAVA_CHARACTER)
        case None =>
          accept()
          setContentType(DEFAULT_CONTENT_TYPE)
          getOrdinaryScala()
      }
      case '`' =>
        accept()
        setContentType(DEFAULT_CONTENT_TYPE)
        getBackQuotedIdent()
      case _ =>
        setContentType(DEFAULT_CONTENT_TYPE)
        getOrdinaryScala()
    }
  }
  /** Looks ahead from a `'` at the current position to decide whether it begins a
    * character literal. Returns Some(offset of the closing quote, relative to `pos`)
    * for a plain char, an escape, a unicode escape (`\\uXXXX`, with a variable number
    * of `u`s/digits) or an octal escape; None when it is not a well-formed char
    * literal (presumably a symbol literal or lone quote). Does not advance `pos`. */
  private def scanForCharLit(): Option[Int] =
    if (ch(1) == '\\\\')
      if (ch(2) == 'u')
        if (ch(3) == '\\'') Some(3)
        else if (ch(4) == '\\'') Some(4)
        else if (ch(5) == '\\'') Some(5)
        else if (ch(6) == '\\'') Some(6)
        else if (ch(7) == '\\'') Some(7)
        else None
      else if (isOctalDigit(ch(2)))
        // Octal escapes: one to three octal digits.
        if (ch(3) == '\\'') Some(3)
        else if (ch(4) == '\\'') Some(4)
        else if (ch(5) == '\\'') Some(5)
        else None
      else if (ch(3) == '\\'')
        Some(3)
      else
        None
    else if (ch(2) == '\\'')
      Some(2)
    else
      None
private def isOctalDigit(c: Char): Boolean =
c >= '0' && c <= '7'
  /** Scans the body of a single-line string literal, one character per iteration.
    * Terminates at the closing quote, EOF, or an (unescaped) end of line; when the
    * string is an interpolation, termination also pops the interpolation mode, and
    * `$`-forms hand control back to Scala mode (`${`) or flag an embedded
    * identifier (`$id`) for the next token. */
  @tailrec
  private def getStringLit(isInterpolation: Boolean): Unit =
    (ch: @switch) match {
      case '"' =>
        accept()
        if (isInterpolation)
          modeStack = modeStack.tail
      case EOF =>
        if (isInterpolation)
          modeStack = modeStack.tail
      case '\\n' =>
        accept()
        if (isInterpolation)
          modeStack = modeStack.tail
      case '\\r' if ch(1) != '\\n' =>
        if (isInterpolation)
          modeStack = modeStack.tail
      case '\\\\' if ch(1) == '"' || ch(1) == '\\\\' =>
        // Escaped quote or backslash stays inside the literal.
        accept(2)
        getStringLit(isInterpolation)
      case '$' if ch(1) == '$' =>
        // "$$" is an escaped dollar in interpolated strings.
        accept(2)
        getStringLit(isInterpolation)
      case '$' if isInterpolation && ch(1) == '{' =>
        accept()
        nestIntoScalaMode()
      case '$' if isInterpolation =>
        accept()
        stringInterpolationState.embeddedIdentifierNext = true
      case _ =>
        accept()
        getStringLit(isInterpolation)
    }
  /**
   * Consumes a back-quoted identifier (opening backquote already accepted),
   * stopping at the closing backquote, EOF, or an end-of-line.
   */
  @tailrec
  private def getBackQuotedIdent(): Unit =
    (ch: @switch) match {
      case '`' => accept()
      case EOF =>
      case '\\n' => accept()
      // A bare CR (not part of CRLF) terminates without being consumed.
      case '\\r' if ch(1) != '\\n' =>
      case _ =>
        accept()
        getBackQuotedIdent()
    }
  /**
   * Consumes a triple-quoted string literal.
   *
   * `quotesRequired` counts how many consecutive closing quotes are still needed; each
   * `"` decrements it and any other character resets it to 3. When it reaches 0 the
   * literal is closed. Interpolation escapes (`$$`, `${`, `$ident`) behave as in
   * [[getStringLit]], and the interpolation mode is popped at EOF or on completion.
   */
  @tailrec
  private def getMultiLineStringLit(quotesRequired: Int, isInterpolation: Boolean): Unit =
    (ch: @switch) match {
      case '"' =>
        accept()
        getMultiLineStringLit(quotesRequired - 1, isInterpolation)
      case EOF =>
        if (isInterpolation)
          modeStack = modeStack.tail
      case '$' if ch(1) == '$' =>
        accept(2)
        getMultiLineStringLit(quotesRequired, isInterpolation)
      case '$' if isInterpolation && ch(1) == '{' =>
        accept()
        nestIntoScalaMode()
      case '$' if isInterpolation =>
        accept()
        stringInterpolationState.embeddedIdentifierNext = true
      case _ =>
        if (quotesRequired > 0) {
          // Still inside the literal: consume and reset the closing-quote counter.
          accept()
          getMultiLineStringLit(3, isInterpolation)
        } else if (isInterpolation)
          modeStack = modeStack.tail
    }
  /**
   * Consumes ordinary Scala text until a character that begins a different token type
   * (quote, backquote, char literal, comment start, or a plausible XML literal start).
   *
   * Tracks `{`/`}` nesting in `scalaState`; a `}` that brings nesting to 0 while a mode
   * is stacked below pops back to the enclosing mode (e.g. returning from a `${...}`
   * interpolation or an XML-embedded Scala block).
   */
  @tailrec
  private def getOrdinaryScala(): Unit =
    (ch: @switch) match {
      case EOF | '"' | '`' =>
      case '\\'' if scanForCharLit().isDefined =>
      case '/' =>
        (ch(1): @switch) match {
          // "//" or "/*" starts a comment token; stop here without consuming.
          case '/' | '*' =>
          case _ =>
            accept()
            getOrdinaryScala()
        }
      case '<' => ch(-1) match {
        // Same XML-start heuristic as getScalaToken: stop so XML mode can take over.
        case EOF | ' ' | '\\t' | '\\n' | '{' | '(' | '>' if (isNameStart(ch(1)) || ch(1) == '!' || ch(1) == '?') =>
        case _ =>
          accept()
          getOrdinaryScala()
      }
      case '{' =>
        scalaState.nesting += 1
        accept()
        getOrdinaryScala()
      case '}' =>
        scalaState.nesting -= 1
        accept()
        if (scalaState.nesting == 0 && modeStack.size > 1)
          modeStack = modeStack.tail
        else
          getOrdinaryScala()
      case _ =>
        accept()
        getOrdinaryScala()
    }
  /**
   * Consumes a (possibly nested) multi-line comment; `nesting` is the current depth
   * of unmatched `/*` openers. Inside Scaladoc, a `{{{` switches into the code-block
   * scanning mode instead of being consumed here.
   */
  @tailrec
  private def getMultiLineComment(nesting: Int): Unit = {
    (ch: @switch) match {
      case '*' if (ch(1) == '/') =>
        accept(2)
        if (nesting > 1)
          getMultiLineComment(nesting - 1)
      case '/' if (ch(1) == '*') =>
        accept(2)
        getMultiLineComment(nesting + 1)
      case '{' if ch(1) == '{' && ch(2) == '{' && contentTypeOpt.exists(_ == JAVA_DOC) =>
        nestIntoScaladocCodeBlockMode(nesting)
      case EOF =>
      case _ =>
        accept()
        getMultiLineComment(nesting)
    }
  }
  /**
   * Scans inside a Scaladoc `{{{ ... }}}` code block. `nesting` is the comment depth
   * carried over from the enclosing Scaladoc; a closing `*/` at depth 1 restores the
   * JAVA_DOC content type, and `}}}` returns to ordinary Scaladoc scanning.
   */
  @tailrec
  private def getCodeBlockComment(nesting: Int): Unit =
    (ch: @switch) match {
      case '*' if ch(1) == '/' =>
        accept(2)
        if (nesting > 1)
          getCodeBlockComment(nesting - 1)
        else
          setContentType(JAVA_DOC)
      case '/' if ch(1) == '*' =>
        accept(2)
        getCodeBlockComment(nesting + 1)
      case '}' if ch(1) == '}' && ch(2) == '}' =>
        nestIntoScaladocMode(nesting)
        accept(3)
      case EOF =>
      case _ =>
        accept()
        getCodeBlockComment(nesting)
    }
  /**
   * Consumes a `//` comment through its terminating newline (or a bare CR, or EOF).
   */
  @tailrec
  private def getSingleLineComment(): Unit =
    (ch: @switch) match {
      case EOF =>
      case '\\n' =>
        accept()
      case '\\r' if ch(1) != '\\n' =>
        accept()
      case _ =>
        accept()
        getSingleLineComment()
    }
  /**
   * Produces one token while in string-interpolation mode: either the identifier
   * following a `$` (when `embeddedIdentifierNext` was set by the string scanner),
   * or the continuation of the surrounding single-/multi-line string literal.
   */
  private def getStringInterpolationToken(multiline: Boolean, embeddedIdentifierNext: Boolean): Unit = {
    if (embeddedIdentifierNext) {
      setContentType(DEFAULT_CONTENT_TYPE)
      stringInterpolationState.embeddedIdentifierNext = false
      // Consume at least one character, then the rest of the identifier.
      do
        accept()
      while (ch != EOF && Character.isUnicodeIdentifierPart(ch))
    } else {
      if (multiline) {
        setContentType(SCALA_MULTI_LINE_STRING)
        getMultiLineStringLit(quotesRequired = 3, isInterpolation = true)
      } else {
        setContentType(JAVA_STRING)
        getStringLit(isInterpolation = true)
      }
    }
  }
  /** A scanning mode; the scanner keeps a stack of these to handle nested contexts
    * (Scala inside XML inside Scala, interpolations, Scaladoc code blocks, ...). */
  private sealed trait ScannerMode
  // XML mode: tag nesting depth, plus whether we are mid-tag (Some(isEndTag)) after
  // an embedded-Scala interruption.
  private case class XmlState(var nesting: Int, var inTag: Option[Boolean]) extends ScannerMode
  // Scala mode: tracks brace nesting so '}' can pop back to the enclosing mode.
  private case class ScalaState(var nesting: Int) extends ScannerMode
  private case class StringInterpolationState(multiline: Boolean, var embeddedIdentifierNext: Boolean) extends ScannerMode
  private case class ScaladocCodeBlockState(val nesting: Int) extends ScannerMode
  private case class ScaladocState(val nesting: Int) extends ScannerMode
  // The mode stack starts in plain Scala at the top level.
  private var modeStack: List[ScannerMode] =
    List(new ScalaState(nesting = 0))
  // Accessors for the current mode; callers must only use the one matching the
  // actual top-of-stack state.
  private def xmlState = modeStack.head.asInstanceOf[XmlState]
  private def scalaState = modeStack.head.asInstanceOf[ScalaState]
  private def stringInterpolationState = modeStack.head.asInstanceOf[StringInterpolationState]
  private def nestIntoScalaMode(): Unit = {
    modeStack ::= ScalaState(nesting = 0)
  }
  private def nestIntoXmlMode(): Unit = {
    modeStack ::= XmlState(nesting = 0, inTag = None)
  }
  private def nestIntoStringInterpolationMode(multiline: Boolean): Unit = {
    modeStack ::= StringInterpolationState(multiline, embeddedIdentifierNext = false)
  }
  private def nestIntoScaladocCodeBlockMode(nesting: Int): Unit = {
    modeStack ::= ScaladocCodeBlockState(nesting)
  }
  private def nestIntoScaladocMode(nesting: Int): Unit = {
    modeStack ::= ScaladocState(nesting)
  }
  /**
   * Scans one token while in XML mode.
   *
   * If a tag scan was previously interrupted by embedded Scala (`inTag` is set), the
   * tag is resumed first. Otherwise dispatches on the current character: comments,
   * CDATA, processing instructions, start/end tags (adjusting `xmlState.nesting` and
   * leaving XML mode when it returns to 0), `{` switching into embedded Scala
   * (`{{` being the escape for a literal brace), or plain character data.
   */
  private def getXmlToken(): Unit =
    if (xmlState.inTag.isDefined) {
      // Resume a tag interrupted by an embedded Scala expression.
      val isEndTag = xmlState.inTag.get
      xmlState.inTag = None
      setContentType(XML_TAG)
      val (nestingAlteration, embeddedScalaInterrupt) = getXmlTag(isEndTag)
      if (embeddedScalaInterrupt) {
        xmlState.inTag = Some(isEndTag)
        nestIntoScalaMode()
      } else {
        xmlState.nesting += nestingAlteration
        if (xmlState.nesting == 0)
          modeStack = modeStack.tail
      }
    } else
      (ch: @switch) match {
        case '<' =>
          if (ch(1) == '!') {
            if (ch(2) == '-' && ch(3) == '-') {
              accept(4)
              setContentType(XML_COMMENT)
              getXmlComment()
              if (xmlState.nesting == 0)
                modeStack = modeStack.tail
            } else if (ch(2) == '[' && ch(3) == 'C' && ch(4) == 'D' && ch(5) == 'A' && ch(6) == 'T' && ch(7) == 'A' && ch(8) == '[') {
              accept(9)
              setContentType(XML_CDATA)
              getXmlCDATA()
              if (xmlState.nesting == 0)
                modeStack = modeStack.tail
            } else {
              // "<!" not followed by a comment or CDATA: treat as character data.
              accept(2)
              setContentType(XML_PCDATA)
              getXmlCharData()
            }
          } else if (ch(1) == '?') {
            accept(2)
            setContentType(XML_PI)
            getXmlProcessingInstruction()
            if (xmlState.nesting == 0)
              modeStack = modeStack.tail
          // } else if (... TODO: <xml:unparsed>) {}
          } else {
            setContentType(XML_TAG)
            val isEndTag = ch(1) == '/'
            accept()
            val (nestingAlteration, embeddedScalaInterrupt) = getXmlTag(isEndTag)
            if (embeddedScalaInterrupt) {
              xmlState.inTag = Some(isEndTag)
              nestIntoScalaMode()
            } else {
              xmlState.nesting += nestingAlteration
              if (xmlState.nesting == 0)
                modeStack = modeStack.tail
            }
          }
        case '{' if ch(1) != '{' =>
          // Single '{' starts an embedded Scala expression inside the XML literal.
          nestIntoScalaMode()
          getScalaToken()
        case '{' if ch(1) == '{' =>
          // "{{" is the escape for a literal '{' in character data.
          setContentType(XML_PCDATA)
          accept(2)
          getXmlCharData()
        case _ =>
          setContentType(XML_PCDATA)
          getXmlCharData()
      }
  /**
   * Consumes XML character data until a `<` (next tag), EOF, or an unescaped `{`
   * (which hands off to embedded Scala). `{{` is the literal-brace escape.
   */
  @tailrec
  private def getXmlCharData(): Unit =
    (ch: @switch) match {
      case EOF | '<' =>
      case '{' if ch(1) == '{' =>
        accept(2)
        getXmlCharData()
      case '{' if ch(1) != '{' =>
        nestIntoScalaMode()
      case _ =>
        accept()
        getXmlCharData()
    }
  /**
   * Read an Xml tag, or part of one up to a Scala escape.
   *
   * @param isEndTag whether the tag being scanned is a closing tag (`</...>`).
   * @return nesting alteration (0, 1 or -1) showing the change to the depth of XML tag nesting,
   * and whether the tag scanning was interrupted by embedded Scala.
   */
  @tailrec
  private def getXmlTag(isEndTag: Boolean): (Int, Boolean) =
    (ch: @switch) match {
      case EOF => (0, false)
      case '"' =>
        accept()
        getXmlAttributeValue('"')
        getXmlTag(isEndTag)
      case '\\'' =>
        accept()
        getXmlAttributeValue('\\'')
        getXmlTag(isEndTag)
      case '{' if ch(1) == '{' =>
        accept(2)
        (0, false)
      case '{' if ch(1) != '{' =>
        // Embedded Scala attribute value: report interruption to the caller.
        (0, true)
      case '/' if ch(1) == '>' && !isEndTag => // an empty tag
        accept(2)
        (0, false)
      case '>' =>
        if (isEndTag) {
          accept()
          (-1, false)
        } else {
          accept()
          (1, false)
        }
      case _ =>
        accept()
        getXmlTag(isEndTag)
    }
  /**
   * Consumes an XML attribute value up to and including the matching `quote`
   * character (either `"` or `'`), stopping early at EOF.
   */
  @tailrec
  private def getXmlAttributeValue(quote: Char): Unit =
    ch match {
      case EOF =>
      case `quote` =>
        accept()
      case _ =>
        accept()
        getXmlAttributeValue(quote)
    }
  /** Consumes an XML processing instruction through its closing `?>` (or EOF). */
  @tailrec
  private def getXmlProcessingInstruction(): Unit =
    (ch: @switch) match {
      case EOF =>
      case '?' if ch(1) == '>' =>
        accept(2)
      case _ =>
        accept()
        getXmlProcessingInstruction()
    }
  /** Consumes a CDATA section through its closing `]]>` (or EOF). */
  @tailrec
  private def getXmlCDATA(): Unit =
    (ch: @switch) match {
      case EOF =>
      case ']' if ch(1) == ']' && ch(2) == '>' =>
        accept(3)
      case _ =>
        accept()
        getXmlCDATA()
    }
  /** Consumes an XML comment through its closing `-->` (or EOF). */
  @tailrec
  private def getXmlComment(): Unit =
    (ch: @switch) match {
      case EOF =>
      case '-' if ch(1) == '-' && ch(2) == '>' =>
        accept(3)
      case _ =>
        accept()
        getXmlComment()
    }
}
| scala-ide/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/lexical/ScalaPartitionTokeniser.scala | Scala | bsd-3-clause | 15,988 |
package sangria.validation.rules
import sangria.ast
import sangria.ast.AstVisitorCommand._
import sangria.validation._
import scala.language.postfixOps
/**
* Lone anonymous operation
*
* A GraphQL document is only valid if when it contains an anonymous operation
* (the query short-hand) that it contains only that one operation definition.
*/
/** Checks that when an anonymous (short-hand) operation is present, it is the
  * document's only operation definition; otherwise reports a violation. */
class LoneAnonymousOperation extends ValidationRule {
  override def visitor(ctx: ValidationContext) = new AstValidatingVisitor {
    // Number of operation definitions in the document; recorded on document entry.
    var operationCount = 0
    override val onEnter: ValidationVisit = {
      case doc: ast.Document =>
        operationCount = doc.definitions.count(_.isInstanceOf[ast.OperationDefinition])
        Right(Continue)
      case op: ast.OperationDefinition if op.name.isEmpty && operationCount > 1 =>
        // Anonymous operation alongside other operations: invalid per the spec rule.
        Left(Vector(AnonOperationNotAloneViolation(ctx.sourceMapper, op.position.toList)))
      case _: ast.OperationDefinition =>
        Right(Continue)
    }
  }
} | narahari92/sangria | src/main/scala/sangria/validation/rules/LoneAnonymousOperation.scala | Scala | apache-2.0 | 946 |
package controllers
import java.io.File
import play.api.mvc._
object ServeAssets extends Controller {

  /**
   * Serves a previously uploaded file from the temporary directory.
   *
   * http://stackoverflow.com/questions/11451246/how-to-serve-uploaded-files-in-play2-using-scala
   *
   * SECURITY NOTE(review): `filename` comes straight from the request; a value
   * containing "../" can escape /tmp (path traversal). Validate/sanitize before
   * exposing this in production.
   */
  def temporary(filename: String) = Action { request =>
    // Fixed: the interpolation was garbled ("$(unknown)") and never referenced the
    // `filename` parameter, so the served path was always the same bogus file.
    Ok.sendFile(new File(s"/tmp/$filename"))
  }
}
| inakianduaga/scala-image-processor | app/controllers/ServeAssets.scala | Scala | mit | 327 |
package org.http4s.server
import java.security.cert.X509Certificate
/**
 * Details of an established TLS session.
 *
 * @param sslSessionId    identifier of the underlying SSL session
 * @param cipherSuite     negotiated cipher suite name
 * @param keySize         key size of the negotiated cipher
 * @param X509Certificate the peer certificate chain
 */
final case class SecureSession(
    sslSessionId: String,
    cipherSuite: String,
    keySize: Int,
    X509Certificate: List[X509Certificate])

object SecureSession {

  /** Convenience constructor taking the certificate chain as an array
    * (e.g. as returned by the JDK SSL APIs) and converting it to a list. */
  def apply(
      sslSessionId: String,
      cipherSuite: String,
      keySize: Int,
      X509Certificate: Array[X509Certificate]): SecureSession =
    new SecureSession(sslSessionId, cipherSuite, keySize, X509Certificate.toList)
}
| aeons/http4s | server/src/main/scala/org/http4s/server/SecureSession.scala | Scala | apache-2.0 | 471 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.metadata
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.plan.nodes.calcite.{Expand, Rank, WindowAggregate}
import org.apache.flink.table.planner.plan.nodes.physical.batch._
import org.apache.flink.table.planner.plan.utils.{FlinkRelMdUtil, RankUtil}
import org.apache.flink.table.planner.{JArrayList, JDouble}
import org.apache.calcite.plan.volcano.RelSubset
import org.apache.calcite.rel.core._
import org.apache.calcite.rel.metadata._
import org.apache.calcite.rel.{RelNode, SingleRel}
import org.apache.calcite.rex.{RexInputRef, RexLiteral}
import org.apache.calcite.sql.SqlKind
import org.apache.calcite.util.{BuiltInMethod, ImmutableBitSet, NumberUtil, Util}
import scala.collection.JavaConversions._
/**
* [[FlinkRelMdPopulationSize]] supplies a implementation of
* [[RelMetadataQuery#getPopulationSize]] for the standard logical algebra.
*/
class FlinkRelMdPopulationSize private extends MetadataHandler[BuiltInMetadata.PopulationSize] {

  override def getDef: MetadataDef[BuiltInMetadata.PopulationSize] =
    BuiltInMetadata.PopulationSize.DEF

  // If the key columns are provably unique, the population equals the row count;
  // otherwise fall back to the distinct row count estimate.
  def getPopulationSize(
      rel: TableScan,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    val unique = RelMdUtil.areColumnsDefinitelyUnique(mq, rel, groupKey)
    if (unique) {
      mq.getRowCount(rel)
    } else {
      mq.getDistinctRowCount(rel, groupKey, null)
    }
  }

  def getPopulationSize(
      rel: Values,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    // assume half the rows are duplicates
    // PopulationSize should not be less than 1.0
    val rowCount = rel.estimateRowCount(mq)
    Math.max(rowCount / 2.0, 1.0)
  }

  // Splits the group key into direct input-column references and computed
  // projection expressions; multiplies the input population by the cardinality
  // of each computed expression, then clamps via numDistinctVals.
  def getPopulationSize(
      rel: Project,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    val baseCols = ImmutableBitSet.builder
    val projCols = ImmutableBitSet.builder
    val projExprs = rel.getProjects
    RelMdUtil.splitCols(projExprs, groupKey, baseCols, projCols)

    var population = mq.getPopulationSize(rel.getInput, baseCols.build)
    if (population == null) {
      return null
    }

    // No further computation required if the projection expressions are
    // all column references
    if (projCols.cardinality == 0) {
      return population
    }

    for (bit <- projCols.build) {
      val subRowCount = RelMdUtil.cardOfProjExpr(mq, rel, projExprs.get(bit))
      if (subRowCount == null) {
        return null
      }
      // subRowCount may be less than 1.0
      population *= Math.max(1.0, subRowCount)
    }

    // REVIEW zfong 6/22/06 - Broadbase did not have the call to
    // numDistinctVals. This is needed; otherwise, population can be
    // larger than the number of rows in the RelNode.
    val rowCount = mq.getRowCount(rel)
    RelMdUtil.numDistinctVals(population, rowCount)
  }

  // Filtering cannot increase the population; delegate to the input.
  def getPopulationSize(
      rel: Filter,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = mq.getPopulationSize(rel.getInput, groupKey)

  // Same approach as Project, but projection expressions come from the Calc's
  // program and local refs must be expanded first.
  def getPopulationSize(
      rel: Calc,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    val program = rel.getProgram
    val projects = program.getProjectList.map(program.expandLocalRef)

    val baseCols = ImmutableBitSet.builder
    val projCols = ImmutableBitSet.builder
    RelMdUtil.splitCols(projects, groupKey, baseCols, projCols)

    var population = mq.getPopulationSize(rel.getInput, baseCols.build)
    if (population == null) {
      return null
    }

    // No further computation required if the projection expressions are
    // all column references
    if (projCols.cardinality == 0) {
      return population
    }

    for (bit <- projCols.build) {
      val subRowCount = FlinkRelMdUtil.cardOfCalcExpr(mq, rel, projects.get(bit))
      if (subRowCount == null) {
        return null
      }
      // subRowCount may be less than 1.0
      population *= Math.max(1.0, subRowCount)
    }

    // REVIEW zfong 6/22/06 - Broadbase did not have the call to
    // numDistinctVals. This is needed; otherwise, population can be
    // larger than the number of rows in the RelNode.
    val rowCount = mq.getRowCount(rel)
    RelMdUtil.numDistinctVals(population, rowCount)
  }

  def getPopulationSize(
      rel: Expand,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    if (groupKey.toList.contains(rel.expandIdIndex)) {
      // populationSize of expand = populationSize of project1 + populationSize of project2 + ...
      // + populationSize of projectN-1
      val groupKeyIgnoreExpandId = groupKey.filter(_ != rel.expandIdIndex)
      var populationSize = 0D
      rel.projects foreach { project =>
        // Map the group key through this projection: literal NULLs contribute
        // nothing, input refs map to their source column.
        val groupKeyOfCurrentProject = new JArrayList[Int]()
        groupKeyIgnoreExpandId.foreach { key =>
          project.get(key) match {
            case literal: RexLiteral if literal.isNull => // do nothing
            case inputRef: RexInputRef => groupKeyOfCurrentProject.add(inputRef.getIndex)
            case e => throw new TableException(s"Unknown expression ${e.toString}!")
          }
        }

        val populationSizeOfCurrentProject =
          mq.getPopulationSize(rel.getInput, ImmutableBitSet.of(groupKeyOfCurrentProject: _*))
        if (populationSizeOfCurrentProject == null) {
          return null
        }
        populationSize += populationSizeOfCurrentProject
      }
      populationSize
    } else {
      mq.getPopulationSize(rel.getInput, groupKey)
    }
  }

  def getPopulationSize(
      rel: Exchange,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = mq.getPopulationSize(rel.getInput, groupKey)

  // When the group key includes the rank-number output column, its NDV (derived
  // from the rank range) multiplies the population of the remaining key columns,
  // capped at the Rank's row count.
  def getPopulationSize(
      rel: Rank,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    val rankFunColumnIndex = RankUtil.getRankNumberColumnIndex(rel).getOrElse(-1)
    if (rankFunColumnIndex < 0 || !groupKey.toArray.contains(rankFunColumnIndex)) {
      mq.getPopulationSize(rel.getInput, groupKey)
    } else {
      val rankFunNdv: JDouble = if (rankFunColumnIndex > 0 &&
        groupKey.toArray.contains(rankFunColumnIndex)) {
        FlinkRelMdUtil.getRankRangeNdv(rel.rankRange)
      } else {
        1D
      }
      val newGroupKey = groupKey.clear(rankFunColumnIndex)
      val inputPopulationSize: JDouble = if (newGroupKey.isEmpty) {
        1D
      } else {
        val size = mq.getPopulationSize(rel.getInput, newGroupKey)
        if (size == null) {
          return null
        }
        size
      }
      val populationSize = inputPopulationSize * rankFunNdv
      val rowCount = mq.getRowCount(rel)
      NumberUtil.min(populationSize, rowCount)
    }
  }

  def getPopulationSize(
      rel: Sort,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = mq.getPopulationSize(rel.getInput, groupKey)

  def getPopulationSize(
      rel: Aggregate,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    getPopulationSizeOfAggregate(rel, mq, groupKey)
  }

  def getPopulationSize(
      rel: BatchExecGroupAggregateBase,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    // for global agg which has inner local agg, it passes the parameters to input directly
    if (rel.isFinal && rel.isMerge) {
      return mq.getPopulationSize(rel.getInput, groupKey)
    }
    getPopulationSizeOfAggregate(rel, mq, groupKey)
  }

  // Combines the population of key columns that are plain grouping columns with
  // estimates for key columns produced by aggregate calls, capped at the input
  // row count.
  private def getPopulationSizeOfAggregate(
      agg: SingleRel,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    val (childKey, aggCalls) = FlinkRelMdUtil.splitGroupKeysOnAggregate(agg, groupKey)
    val popSizeOfColsInGroupKeys = mq.getPopulationSize(agg.getInput, childKey)
    if (popSizeOfColsInGroupKeys == null) {
      return null
    }
    val factorOfKeyInAggCall = 0.1
    val popSizeOfColsInAggCalls = aggCalls.foldLeft(1D) {
      (popSize, aggCall) =>
        val popSizeOfAggCall = aggCall.getAggregation.getKind match {
          case SqlKind.COUNT =>
            val inputRowCnt = mq.getRowCount(agg.getInput)
            // Assume result of count(c) of each group bucket is different, start with 0, end with
            // N -1 (N is max ndv of count).
            // 0 + 1 + ... + (N - 1) <= rowCount => N ~= Sqrt(2 * rowCnt)
            if (inputRowCnt != null) {
              Math.sqrt(2D * inputRowCnt)
            } else {
              return null
            }
          case _ =>
            val argList = aggCall.getArgList
            if (argList.isEmpty) {
              return null
            }
            val approximatePopSize = mq.getPopulationSize(
              agg.getInput,
              ImmutableBitSet.of(argList))
            if (approximatePopSize != null) {
              approximatePopSize * factorOfKeyInAggCall
            } else {
              return null
            }
        }
        popSize * Math.max(popSizeOfAggCall, 1D)
    }
    val inputRowCnt = mq.getRowCount(agg.getInput)
    NumberUtil.min(popSizeOfColsInGroupKeys * popSizeOfColsInAggCalls, inputRowCnt)
  }

  def getPopulationSize(
      rel: WindowAggregate,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    val fieldCnt = rel.getRowType.getFieldCount
    val namedPropertiesCnt = rel.getNamedProperties.size
    val namedWindowStartIndex = fieldCnt - namedPropertiesCnt
    val groupKeyFromNamedWindow = groupKey.toList.exists(_ >= namedWindowStartIndex)
    if (groupKeyFromNamedWindow) {
      // cannot estimate PopulationSize result when some group keys are from named windows
      null
    } else {
      // regular aggregate
      getPopulationSize(rel.asInstanceOf[Aggregate], mq, groupKey)
    }
  }

  def getPopulationSize(
      rel: BatchExecWindowAggregateBase,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    if (rel.isFinal) {
      val namedWindowStartIndex = rel.getRowType.getFieldCount - rel.getNamedProperties.size
      val groupKeyFromNamedWindow = groupKey.toList.exists(_ >= namedWindowStartIndex)
      if (groupKeyFromNamedWindow) {
        return null
      }
      if (rel.isMerge) {
        // set the bits as they correspond to local window aggregate
        val localWinAggGroupKey = FlinkRelMdUtil.setChildKeysOfWinAgg(groupKey, rel)
        return mq.getPopulationSize(rel.getInput, localWinAggGroupKey)
      }
    } else {
      // local window aggregate
      val assignTsFieldIndex = rel.getGrouping.length
      if (groupKey.toList.contains(assignTsFieldIndex)) {
        // groupKey contains `assignTs` fields
        return null
      }
    }
    getPopulationSizeOfAggregate(rel, mq, groupKey)
  }

  def getPopulationSize(
      window: Window,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = getPopulationSizeOfOverAgg(window, mq, groupKey)

  def getPopulationSize(
      rel: BatchExecOverAggregate,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = getPopulationSizeOfOverAgg(rel, mq, groupKey)

  private def getPopulationSizeOfOverAgg(
      overAgg: SingleRel,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    val input = overAgg.getInput
    val fieldsCountOfInput = input.getRowType.getFieldCount
    // Columns at or beyond the input width are over-aggregate outputs.
    val groupKeyContainsAggCall = groupKey.toList.exists(_ >= fieldsCountOfInput)
    // cannot estimate population size of aggCall result of OverAgg
    if (groupKeyContainsAggCall) {
      null
    } else {
      mq.getPopulationSize(input, groupKey)
    }
  }

  def getPopulationSize(
      rel: Join,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    rel.getJoinType match {
      // SEMI/ANTI joins only emit left-side columns.
      case JoinRelType.SEMI | JoinRelType.ANTI =>
        mq.getPopulationSize(rel.getLeft, groupKey)
      case _ =>
        RelMdUtil.getJoinPopulationSize(mq, rel, groupKey)
    }
  }

  // Union population is the sum over all inputs; unknown if any input is unknown.
  def getPopulationSize(
      rel: Union,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    var population = 0.0
    for (input <- rel.getInputs) {
      val subPop = mq.getPopulationSize(input, groupKey)
      if (subPop == null) {
        return null
      }
      population += subPop
    }
    population
  }

  // Delegate to the subset's best (or original) plan.
  def getPopulationSize(
      subset: RelSubset,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    val rel = Util.first(subset.getBest, subset.getOriginal)
    mq.getPopulationSize(rel, groupKey)
  }

  /**
    * Catch-all implementation for
    * [[BuiltInMetadata.PopulationSize#getPopulationSize(ImmutableBitSet)]],
    * invoked using reflection.
    *
    * @see org.apache.calcite.rel.metadata.RelMetadataQuery#getPopulationSize(RelNode,
    *      ImmutableBitSet)
    */
  def getPopulationSize(
      rel: RelNode,
      mq: RelMetadataQuery,
      groupKey: ImmutableBitSet): JDouble = {
    // if the keys are unique, return the row count; otherwise, we have
    // no further information on which to return any legitimate value

    // REVIEW zfong 4/11/06 - Broadbase code returns the product of each
    // unique key, which would result in the population being larger
    // than the total rows in the relnode
    val unique = RelMdUtil.areColumnsDefinitelyUnique(mq, rel, groupKey)
    if (unique) {
      mq.getRowCount(rel)
    } else {
      null
    }
  }

}
object FlinkRelMdPopulationSize {

  // Singleton handler registered with Calcite's reflective metadata provider.
  private val INSTANCE = new FlinkRelMdPopulationSize

  val SOURCE: RelMetadataProvider = ReflectiveRelMetadataProvider.reflectiveSource(
    BuiltInMethod.POPULATION_SIZE.method, INSTANCE)

}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdPopulationSize.scala | Scala | apache-2.0 | 14,373 |
// - Project: scalajs-svgjs (https://github.com/jokade/scalajs-svgjs)
// Description: Bindings for svg.js Element
//
// Copyright (c) 2015 Johannes Kastner <jokade@karchedon.de>
// Distributed under the MIT License (see included file LICENSE)
package biz.enef.svgjs
import scala.scalajs.js
/**
 * Scala.js facade for the svg.js `Element` API.
 *
 * All members are implemented by the underlying JavaScript library (`js.native`);
 * this trait only declares their signatures for type-safe access from Scala.
 */
trait Element extends js.Object {

  /**
   * Returns the x-coordinate of the upper left corner.
   */
  def x() : Float = js.native

  /**
   * Moves the element along the x-axis by its upper left corner.
   */
  def x(x: Float) : Float = js.native

  /**
   * Returns the y-coordinate of the upper left corner.
   */
  def y() : Float = js.native

  /**
   * Moves the element along the y-axis by its upper left corner.
   */
  def y(y: Float) : Float = js.native

  /**
   * Returns the x-coordinate of the center.
   */
  def cx() : Float = js.native

  /**
   * Moves the element along the x-axis by its center.
   */
  def cx(x: Float) : Float = js.native

  /**
   * Returns the y-coordinate of the center.
   */
  def cy() : Float = js.native

  /**
   * Moves the element along the y-axis by its center.
   */
  def cy(y: Float) : Float = js.native

  /**
   * Move upper left corner of element to given x and y values
   */
  def move(x: Float, y: Float) : Unit = js.native

  /**
   * Move element by its center to the given x and y values
   */
  def center(x: Float, y: Float) : Unit = js.native

  /**
   * Returns the width of this element
   */
  def width() : Float = js.native

  /**
   * Set the width of this element.
   */
  def width(w: Float) : Float = js.native

  /**
   * Returns the height of this element
   */
  def height() : Float = js.native

  /**
   * Set the height of this element.
   */
  def height(h: Float) : Float = js.native

  /**
   * Set element size to given height and width
   */
  def size(width: Float, height: Float) : Unit = js.native

  /**
   * Creates a new unlinked copy of this element.
   */
  override def clone() : Element = js.native

  /**
   * Removes this element from the drawing
   */
  def remove() : Element = js.native

  /**
   * Replaces this element with the specified element.
   */
  def replace(elem: Element) : Element = js.native

  /**
   * Add element to given container and return self
   */
  def addTo(parent: Container) : Element = js.native

  /**
   * Add element to given container and return container
   */
  def putIn(parent: Container) : Container = js.native

  /**
   * Returns the parent document
   */
  def doc() : Doc = js.native

  /**
   * Get the value of the specified attribute.
   */
  def attr[T](name: String) : T = js.native

  /**
   * Set a single attribute
   */
  def attr(name: String, value: js.Any) : Unit = js.native

  /**
   * Set multiple attributes at once
   */
  def attr(attrs: js.Object) : Unit = js.native

  /**
   * Manage transformations
   */
  def transform(transform: js.Object) : Unit = js.native

  /**
   * Returns the value of the specified style.
   */
  def style(s: String) : String = js.native

  /**
   * Set a single style.
   */
  def style(s: String, value: String) : Unit = js.native

  /**
   * Set multiple styles.
   */
  def style(styles: js.Object) : Unit = js.native

  /**
   * Returns the ID of this element
   */
  def id() : String = js.native

  /**
   * Set the ID of this element
   */
  def id(id: String) : String = js.native

  /**
   * Get the bounding box for this element
   */
  def bbox() : BBox = js.native

  /**
   * Get rect box for this element
   */
  def rbox() : RBox = js.native

  /**
   * Checks whether the specified point is inside the bounding box of this element
   */
  def inside(x: Float, y: Float) : Boolean = js.native

  /**
   * Show element
   */
  def show() : Unit = js.native

  /**
   * Hide element
   */
  def hide() : Unit = js.native
}
| jokade/scalajs-svgjs | src/main/scala/biz/enef/svgjs/Element.scala | Scala | mit | 3,839 |
package keystoneml.loaders
import java.io.FileInputStream
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import keystoneml.utils.{ImageMetadata, LabeledImage, RowColumnMajorByteArrayVectorizedImage}
/**
* Loads images from the CIFAR-10 Dataset.
*/
object CifarLoader {
  // We hardcode this because these are properties of the CIFAR-10 dataset.
  val nrow = 32
  val ncol = 32
  val nchan = 3
  val labelSize = 1

  /**
   * Wraps a raw CIFAR-10 record's pixel bytes (label byte already stripped)
   * in a row/column-major vectorized image.
   */
  def cifar10ToBufferedImage(cifar: Array[Byte]): RowColumnMajorByteArrayVectorizedImage = {
    val byteLen = nrow*ncol*nchan
    // Allocate some space for the rows.
    require(cifar.length == byteLen, "CIFAR-10 Images MUST be 32x32x3.")
    RowColumnMajorByteArrayVectorizedImage(cifar, ImageMetadata(nrow, ncol, nchan))
  }

  /**
   * Reads all labeled images from a CIFAR-10 binary file (one label byte followed
   * by 32*32*3 pixel bytes per record).
   *
   * Fixes over the previous version:
   *  - the input stream is now closed even on error (it was leaked);
   *  - each record is read fully, honoring `read`'s partial-read contract (the old
   *    `read(...) > 0` loop could process a half-filled buffer of stale bytes);
   *  - records accumulate in a buffer instead of O(n^2) `Array :+` appends.
   *
   * @throws java.io.EOFException if the file ends in the middle of a record
   */
  def loadLabeledImages(path: String): Seq[LabeledImage] = {
    val recordLen = labelSize + nrow*ncol*nchan
    val record = new Array[Byte](recordLen)
    val out = scala.collection.mutable.ArrayBuffer.empty[LabeledImage]
    val inFile = new FileInputStream(path)
    try {
      while (readRecord(inFile, record)) {
        // record.head is the label byte; the rest is pixel data.
        val img = cifar10ToBufferedImage(record.tail)
        out += LabeledImage(img, record.head.toShort)
      }
    } finally {
      inFile.close()
    }
    out
  }

  /**
   * Fills `buf` completely from `in`, looping over partial reads.
   * Returns false on a clean EOF before any byte of the record was read;
   * throws EOFException if the stream ends mid-record (truncated file).
   */
  private def readRecord(in: FileInputStream, buf: Array[Byte]): Boolean = {
    var off = 0
    var eof = false
    while (off < buf.length && !eof) {
      val n = in.read(buf, off, buf.length - off)
      if (n < 0) eof = true else off += n
    }
    if (eof && off > 0)
      throw new java.io.EOFException(
        s"Truncated CIFAR-10 record: got $off of ${buf.length} bytes")
    !eof
  }

  /** Loads the labeled images at `path` and parallelizes them into an RDD. */
  def apply(sc: SparkContext, path: String): RDD[LabeledImage] = {
    val images = CifarLoader.loadLabeledImages(path)
    sc.parallelize(images)
  }
}
| amplab/keystone | src/main/scala/keystoneml/loaders/CifarLoader.scala | Scala | apache-2.0 | 1,406 |
package com.clarifi.reporting.ermine.syntax
import com.clarifi.reporting.Supply
import com.clarifi.reporting.ermine.parsing.Localized
import com.clarifi.reporting.ermine._
import com.clarifi.reporting.ermine.Diagnostic._
import com.clarifi.reporting.ermine.Kind.{ subKind }
import com.clarifi.reporting.ermine.Type.{ typeVars, allTypeVars, sub }
import com.clarifi.reporting.ermine.Term.{ termVars }
import scala.collection.immutable.List
import scalaz.Monad
import scalaz.Scalaz._
// misc.
// A reference to a JVM class used by foreign declarations, with source position.
case class ForeignClass(loc: Pos, cls: Class[_]) extends Located
// A reference to a member (field/method name) of a foreign class, with source position.
case class ForeignMember(loc: Pos, name: String) extends Located

// body statements
/** A statement in a module body; reports which term and type variables it defines. */
sealed trait Statement extends Located {
  def definedTerms: Set[TermVar] = Set()
  def definedTypes: Set[TypeVar] = Set()
}

/** An entry in an import/export list; `isType` distinguishes type from term names. */
sealed trait Explicit {
  def global: Global
  def isType: Boolean
}
// Import/export a name as-is.
case class Single(global: Global, isType: Boolean) extends Explicit
// Import/export a global name under a different local name.
case class Renaming(global: Global, l: Local, isType: Boolean) extends Explicit
object Explicit {
  // Finds the local alias a global was renamed to, if any.
  def lookup(g: Global, l: List[Explicit]): Option[Local] = l collect { case Renaming(n,l,_) if n == g => l} headOption
}

// header statements

// group 1)
/** An `import`/`export` header statement, optionally aliased (`as`), with an
  * explicit name list that either restricts (`using`) or hides entries. */
case class ImportExportStatement(
  loc: Pos,
  export: Boolean,
  module: String,
  as: Option[String],
  explicits: List[Explicit] = List(),
  using: Boolean = false
) {
  // Whether the (type or term) global `g` is visible through this statement:
  // with `using`, only listed names pass; otherwise listed Singles are hidden.
  def exported(ty: Boolean, g: Global): Boolean =
    if(using)
      explicits.exists(e => e.global == g && e.isType == ty)
    else
      explicits.forall {
        case Single(global, isType) => g != global || isType != ty
        case _ => true
      }
}
// group 2)
// group 2)
/** A `field` declaration introducing the listed type variables at type `ty`. */
case class FieldStatement(loc: Pos, vs: List[TypeVar], ty: Type) extends Statement {
  override def definedTypes = vs.toSet
}

/** A statement grouping other statements; defined terms/types are the union of
  * those of its members. */
sealed trait Block extends Statement {
  def statements: List[Statement]
  override def definedTerms = statements.foldLeft(Set():Set[TermVar])(_ ++ _.definedTerms)
  override def definedTypes = statements.foldLeft(Set():Set[TypeVar])(_ ++ _.definedTypes)
}

case class PrivateBlock(loc: Pos, statements: List[Statement]) extends Block
case class DatabaseBlock(loc: Pos, dbName: String, statements: List[Statement]) extends Block
case class ForeignBlock(loc: Pos, statements: List[Statement]) extends Block

sealed trait ForeignStatement extends Statement

/** A statement defining the type variable `v`, parameterized by kind and type
  * arguments; `rho` builds its kind, `closeWith` closes over free type variables. */
sealed trait TypeDef extends Statement {
  def v: TypeVar
  def kindArgs: List[KindVar]
  def typeArgs: List[TypeVar]
  def privateTerms: Set[TermVar] = Set()
  // The kind of `v`: typeArgs' kinds curried onto the result kind `k`.
  def rho(k: Kind) = typeArgs.foldRight(k)((x,y) => ArrowK(loc.inferred, x.extract, y))
  def asRho(k: Kind): TypeDef
  def closeWith(s: List[TypeVar])(implicit su: Supply): TypeDef
  override def definedTypes = Set(v)
  def close(implicit su: Supply) = closeWith(List())
}

/** A `class` block: superclass context `ctx`, private member set, and the
  * signature/binding statements of its body. */
case class ClassBlock(
  loc: Pos,
  v: TypeVar,
  kindArgs: List[KindVar],
  typeArgs: List[TypeVar],
  ctx: List[Type],
  override val privateTerms: Set[TermVar],
  statements: List[BindingStatement]
) extends Block with TypeDef {
  def asRho(k: Kind) = ClassBlock(loc, v as rho(k), kindArgs, typeArgs, ctx, privateTerms, statements)
  override def definedTypes = Set(v)
  // Closes the context and member signatures over the class head variables plus `s`.
  def closeWith(s: List[TypeVar])(implicit su: Supply) = {
    val sp = v :: typeArgs ++ s
    ClassBlock(loc, v, kindArgs, typeArgs, ctx.map(_.closeWith(sp)), privateTerms,
      statements.map { case SigStatement(l,vs,t) => SigStatement(l, vs, t.closeWith(sp))
                       case t => t
                     }
    )
  }
}
// group 3)
// type foo args = body
case class TypeStatement(loc: Pos, v: TypeVar, kindArgs: List[KindVar], typeArgs: List[TypeVar], body: Type) extends TypeDef {
  def asRho(k: Kind) = TypeStatement(loc, v as rho(k), kindArgs, typeArgs, body)
  // The alias body is closed over `v` itself, the type parameters, and `s`.
  def closeWith(s: List[TypeVar])(implicit su: Supply) = TypeStatement(loc, v, kindArgs, typeArgs, body.closeWith(v :: typeArgs ++ s))
}
// data v args = constructors
// Each constructor is (locally-bound type vars, constructor term, argument types).
case class DataStatement(loc: Pos, v: TypeVar, kindArgs: List[KindVar], typeArgs: List[TypeVar], constructors: List[(List[TypeVar], TermVar, List[Type])]) extends TypeDef {
  // A data declaration defines each of its constructors as a term.
  override def definedTerms = constructors.map(_._2).toSet
  def asRho(k: Kind) = DataStatement(loc, v as rho(k), kindArgs, typeArgs, constructors)
  // Constructor argument types are additionally closed over the constructor's
  // own locally-bound type variables `es`.
  def closeWith(s: List[TypeVar])(implicit su: Supply) =
    DataStatement(loc, v, kindArgs, typeArgs, constructors.map { case (es, u, l) => (es, u, l.map(_.closeWith(v :: typeArgs ++ s ++ es))) })
}
object TypeDef {
  // perform strongly connected component analysis to infer better kinds
  // Groups mutually-referring type definitions together; the Tarjan output is
  // reversed before being returned.
  def typeDefComponents(xs: List[TypeDef]): List[List[TypeDef]] = {
    // Index each definition by the id of the type variable it defines.
    val vm = xs.map(b => b.v.id -> b).toMap
    val sccs = SCC.tarjan(vm.keySet.toList) { s => {
      // Type variables a definition's body refers to, minus its own parameters
      // (and, for data constructors, their locally-bound variables).
      val vars = vm(s) match {
        case DataStatement(_, _, _, typeArgs, cons) =>
          cons.foldLeft(Vars() : TypeVars)((acc, c) => acc ++ (allTypeVars(c._3) -- c._1)) -- typeArgs
        case TypeStatement(_, _, _, typeArgs, body) => allTypeVars(body) -- typeArgs
        case ClassBlock(_, _, _, typeArgs, ctx, _, stmts) => (allTypeVars(ctx) ++ allTypeVars(stmts)) -- typeArgs
      }
      // Keep only edges to types defined in this same binding group.
      vars.toList.collect { case v if vm.contains(v.id) => v.id }
    }}
    sccs.reverse.map { xs => xs.toList.map(vm(_)) }
  }
}
// foreign data "class" Foo a b c
case class ForeignDataStatement(loc: Pos, v: TypeVar, args: List[TypeVar], cls: ForeignClass) extends ForeignStatement {
  override def definedTypes = Set(v)
}
// group 4)
// Base of the foreign statements that bind a term variable.
sealed abstract class ForeignTermDef extends ForeignStatement {
  def v: TermVar
}
// foreign method "member" foo : Type
case class ForeignMethodStatement(loc: Pos, v: TermVar, ty: Type, member: ForeignMember) extends ForeignTermDef {
  override def definedTerms = Set(v)
}
// foreign function "class" "member" foo : Type
case class ForeignFunctionStatement(loc: Pos, v: TermVar, ty: Type, cls: ForeignClass, member: ForeignMember) extends ForeignTermDef {
  override def definedTerms = Set(v)
}
// foreign value "class" "member" Foo : Type
case class ForeignValueStatement(loc: Pos, v: TermVar, ty: Type, cls: ForeignClass, member: ForeignMember) extends ForeignTermDef {
  override def definedTerms = Set(v)
}
// foreign constructor foo : a -> b -> Foo a b c
case class ForeignConstructorStatement(loc: Pos, v: TermVar, ty: Type) extends ForeignTermDef {
  override def definedTerms = Set(v)
}
// foreign subtype declaration: binds `v` at type `ty`.
case class ForeignSubtypeStatement(loc: Pos, v: TermVar, ty: Type) extends ForeignTermDef {
  override def definedTerms = Set(v)
}
// table fully.qualified.name.foo : [Bar, Baz, Quux .. a]
// `vs` pairs each table's qualified name with the term variable it binds.
case class TableStatement(loc: Pos, dbName: String, vs: List[(List[Name], TermVar)], ty: Type) extends Statement {
  override def definedTerms = vs.map(_._2).toSet
}
// group 5)
// these can occur inside of a let binding
sealed abstract class BindingStatement extends Statement
// A type signature shared by one or more term variables.
case class SigStatement(loc: Pos, vs: List[TermVar], ty: Type) extends BindingStatement {
  override def definedTerms = vs.toSet
}
// A single clause of a term definition: `v pats = body`.
case class TermStatement(loc: Pos, v: TermVar, pats: List[Pattern], body: Term) extends BindingStatement {
  // A one-clause implicit binding for this definition.
  def binding = ImplicitBinding(loc, v, List(Alt(loc, pats, body)))
  override def definedTerms = Set(v)
}
// TODO: allow these to appear in a let binding group as a BindingStatement. using them in a where is problematic since we never reassociate.
// Fixity declaration for the given names; typeLevel selects type-level vs term-level names.
case class FixityStatement(loc: Pos, vs: List[Name], typeLevel: Boolean) extends Statement
object Statement {
  /** Witnesses that every [[Statement]] supports type-variable collection and
   * kind/type substitution.
   *
   * `vars` collects free type variables (a definition's own binders are
   * subtracted); `allVars` collects all type-variable occurrences; `sub`
   * applies kind and type substitutions structurally.
   */
  implicit def statementHasTypeVars[A <: Statement]: HasTypeVars[A] = new HasTypeVars[A] {
    // Free type variables of a statement.
    def vars(stm: A) = stm match {
      case SigStatement(_, vs, t) => typeVars(vs) ++ typeVars(t)
      case TableStatement(_, _, vs, t) => typeVars(vs.map(_._2)) ++ typeVars(t)
      case FieldStatement(_, _, ty) => typeVars(ty)
      case TermStatement(_, v, pats, body) => typeVars(v) ++ typeVars(pats) ++ typeVars(body)
      case PrivateBlock(_, ss) => typeVars(ss)
      case ForeignBlock(_, ss) => typeVars(ss) // :List[Statement])
      case DatabaseBlock(_, _, ss) => typeVars(ss)
      case ForeignMethodStatement(_, v, t, _) => typeVars(v) ++ typeVars(t)
      case ForeignFunctionStatement(_, v, t, _, _) => typeVars(v) ++ typeVars(t)
      case ForeignValueStatement(_, v, t, _, _) => typeVars(v) ++ typeVars(t)
      case ForeignConstructorStatement(_, v, t) => typeVars(v) ++ typeVars(t)
      case ForeignSubtypeStatement(_, v, t) => typeVars(v) ++ typeVars(t)
      case ClassBlock(_, v, _, typeArgs, ctx, _, body) => (typeVars(ctx) ++ typeVars(body)) -- (v :: typeArgs) //?
      case TypeStatement(_, _, _, typeArgs, body) => typeVars(body) -- typeArgs
      case DataStatement(_, v, _, typeArgs, cons) =>
        cons.foldLeft(Vars() : TypeVars)((acc, tup) =>
          acc ++ (typeVars(tup._3) -- tup._1)
        ) -- (v :: typeArgs)
      case r : ForeignDataStatement => Vars()
      case f : FixityStatement => Vars()
    }
    // All type-variable occurrences of a statement.
    // Fix: the ClassBlock and DataStatement cases previously called `typeVars`
    // (free-variable collection) here, unlike every other case of allVars;
    // they now use `allTypeVars`, consistent with the TypeStatement case.
    def allVars(stm: A) = stm match {
      case SigStatement(_, vs, t) => allTypeVars(vs) ++ allTypeVars(t)
      case TableStatement(_, _, vs, t) => allTypeVars(vs.map(_._2)) ++ allTypeVars(t)
      case FieldStatement(_, _, ty) => allTypeVars(ty)
      case TermStatement(_, v, pats, body) => allTypeVars(v) ++ allTypeVars(pats) ++ allTypeVars(body)
      case PrivateBlock(_, ss) => allTypeVars(ss)
      case ForeignBlock(_, ss) => allTypeVars(ss) // :List[Statement])
      case DatabaseBlock(_, _, ss) => allTypeVars(ss)
      case ForeignMethodStatement(_, v, t, _) => allTypeVars(v) ++ allTypeVars(t)
      case ForeignFunctionStatement(_, v, t, _, _) => allTypeVars(v) ++ allTypeVars(t)
      case ForeignValueStatement(_, v, t, _, _) => allTypeVars(v) ++ allTypeVars(t)
      case ForeignConstructorStatement(_, v, t) => allTypeVars(v) ++ allTypeVars(t)
      case ForeignSubtypeStatement(_, v, t) => allTypeVars(v) ++ allTypeVars(t)
      case ClassBlock(_, v, _, typeArgs, ctx, _, body) => (allTypeVars(ctx) ++ allTypeVars(body)) -- (v :: typeArgs)
      case TypeStatement(_, _, _, typeArgs, body) => allTypeVars(body) -- typeArgs
      case DataStatement(_, v, _, typeArgs, cons) =>
        cons.foldLeft(Vars() : TypeVars)((acc, tup) =>
          acc ++ (allTypeVars(tup._3) -- tup._1)
        ) -- (v :: typeArgs)
      case r : ForeignDataStatement => Vars()
      case f : FixityStatement => Vars()
    }
    // Applies kind substitution `ks` and type substitution `ts` to a statement,
    // rebuilding it with the same shape.
    def sub(ks: PartialFunction[KindVar,Kind], ts: PartialFunction[TypeVar, Type], stm: A) = stm match {
      case SigStatement(l, vs, t) =>
        SigStatement(l, Type.sub(ks, ts, vs), Type.sub(ks, ts, t)).asInstanceOf[A]
      case TableStatement(l, db, vs, t) =>
        TableStatement(l, db, vs.map(_._1).zip(Type.sub(ks, ts, vs.map(_._2))), Type.sub(ks, ts, t)).asInstanceOf[A]
      case FieldStatement(l, vs, ty) =>
        FieldStatement(l, vs, ty.subst(ks, ts)).asInstanceOf[A]
      case TermStatement(l, v, pats, body) =>
        TermStatement(l, Type.sub(ks, ts, v), Type.sub(ks, ts, pats), Type.sub(ks, ts, body)).asInstanceOf[A]
      case PrivateBlock(l, ss) =>
        PrivateBlock(l, Type.sub(ks, ts, ss)).asInstanceOf[A]
      case ForeignBlock(l, ss) =>
        ForeignBlock(l, Type.sub(ks, ts, ss)).asInstanceOf[A]
      case DatabaseBlock(a, b, ss) =>
        DatabaseBlock(a, b, Type.sub(ks, ts, ss)).asInstanceOf[A]
      case ForeignMethodStatement(l, v, t, m) =>
        ForeignMethodStatement(l, Type.sub(ks, ts, v), Type.sub(ks, ts, t), m).asInstanceOf[A]
      case ForeignFunctionStatement(l, v, t, c, m) =>
        ForeignFunctionStatement(l, Type.sub(ks, ts, v), Type.sub(ks, ts, t), c, m).asInstanceOf[A]
      case ForeignValueStatement(l, v, t, c, m) =>
        ForeignValueStatement(l, Type.sub(ks, ts, v), Type.sub(ks, ts, t), c, m).asInstanceOf[A]
      case ForeignConstructorStatement(l, v, t) =>
        ForeignConstructorStatement(l, Type.sub(ks, ts, v), Type.sub(ks, ts, t)).asInstanceOf[A]
      case ForeignSubtypeStatement(l, v, t) =>
        ForeignSubtypeStatement(l, Type.sub(ks, ts, v), Type.sub(ks, ts, t)).asInstanceOf[A]
      case ClassBlock(l, v, kindArgs, typeArgs, ctx, privates, body) =>
        ClassBlock(l, v, kindArgs, subKind(ks, typeArgs), Type.sub(ks, ts, ctx), privates, Type.sub(ks, ts, body)).asInstanceOf[A]
      case TypeStatement(l, v, kindArgs, typeArgs, body) =>
        TypeStatement(l, v, kindArgs, subKind(ks, typeArgs), Type.sub(ks, ts, body)).asInstanceOf[A]
      case DataStatement(l, v, kindArgs, typeArgs, cons) =>
        DataStatement(l, v, kindArgs, subKind(ks, typeArgs),
          cons.map{ case (es, v, l) => (es map (subKind(ks, _)), v, Type.sub(ks, ts, l)) }).asInstanceOf[A]
      case r : ForeignDataStatement => r.asInstanceOf[A]
      case f : FixityStatement => f.asInstanceOf[A]
    }
  }
  /** Collects the leading run of clauses for `v` at the front of `ss` into a
   * single implicit binding, returning it together with the statements that
   * were not consumed. `alts` accumulates clauses in reverse.
   */
  def implicitBindingSpan(
    l: Loc,
    v: TermVar,
    alts: List[Alt],
    ss: List[BindingStatement]
  ): (ImplicitBinding, List[BindingStatement]) = ss match {
    case TermStatement(lp, vp, pats, body) :: ss if v == vp =>
      implicitBindingSpan(l,v, Alt(lp, pats, body) :: alts, ss)
    case _ => (ImplicitBinding(l, v, alts.reverse), ss)
  }
  /** Partitions binding statements into implicit bindings (adjacent clauses of
   * the same variable merged) and type signatures.
   */
  def gatherBindings(ss: List[BindingStatement], is: List[ImplicitBinding] = List(), sigs: List[SigStatement] = List()): (List[ImplicitBinding], List[SigStatement]) = ss match {
    case TermStatement(l, v, pats, body) :: ss =>
      val (i, xs) = implicitBindingSpan(l, v, List(Alt(l, pats, body)), ss)
      gatherBindings(xs, i :: is, sigs)
    case (s : SigStatement) :: ss => gatherBindings(ss, is, s :: sigs)
    case List() => (is, sigs)
  }
  /** Matches signatures against their definitions: every signed variable must
   * have a definition (otherwise a diagnostic is raised), and signed variables
   * must be distinct. Returns the remaining implicit bindings alongside the
   * explicit (signed) ones.
   */
  def checkBindings[M[+_]:Monad:Diagnostic](
    l: Pos,
    is: List[ImplicitBinding],
    ss: List[SigStatement]): M[Localized[(List[ImplicitBinding], List[ExplicitBinding])]] = {
    val im = is.map(i => i.v -> i).toMap // Map[TermVar,ImplicitBinding]
    val sl = for { s <- ss ; v <- s.vs } yield v -> s // List[(TermVar, SigStatement)]
    // we need to check that the variables we've sigged are distinct.
    for {
      es <- sl.traverse[M,ExplicitBinding] { case (v, s) =>
        for {
          i <- im.get(v) match {
            case Some(i) => i.pure[M]
            case None => raise[M](s.loc, "missing definition")
          }
        } yield ExplicitBinding(i.loc, i.v, Annot.plain(i.loc, s.ty), i.alts)
      }
      /*
      // Stolen from KindParsers and TypeParsers@125
      n <- // need to figure this out
      id <- // and this
      val l = termNames.member()
      val v = V(l, id, Some(n), Bound, ()) // todo: check this.
      val r = for {old <- gets(l.get(_))
                   _ <- modify(l.set(_, Some(v)))
              } yield modify(l.set(_, old))
      u <- r
      */
      _ <- Localized((), es.map(_.v.name.get.local)).distinct(l) // check that the explicits are distinct
    } yield Localized( ((im -- es.map(_.v)).values.toList,es)
                     , is.map(_.v.name.get.local)
                     )
  }
}
| ermine-language/ermine-legacy | src/main/scala/com/clarifi/reporting/ermine/syntax/Statement.scala | Scala | bsd-2-clause | 15,223 |
package org.broadinstitute.dsde.firecloud.service
import akka.http.scaladsl.model.HttpMethods._
/** Checks that allHttpMethodsExcept filters out exactly the excluded methods. */
final class ServiceSpecSpec extends ServiceSpec {
  "allHttpMethodsExcept() works" in {
    // Expected complements, spelled out explicitly for readability.
    val everythingButGet = Seq(CONNECT, DELETE, HEAD, PATCH, POST, PUT, TRACE)
    val everythingButDeleteAndPost = Seq(CONNECT, GET, HEAD, PATCH, PUT, TRACE)
    allHttpMethodsExcept(GET) should be(everythingButGet)
    allHttpMethodsExcept(DELETE, POST) should be(everythingButDeleteAndPost)
  }
}
| broadinstitute/firecloud-orchestration | src/test/scala/org/broadinstitute/dsde/firecloud/service/ServiceSpecSpec.scala | Scala | bsd-3-clause | 378 |
package scala.tools.nsc
package settings
import org.junit.Assert._
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
import scala.tools.testkit.AssertUtil.assertThrows
/** Tests for ScalaVersion parsing (apply) and rendering (unparse). */
@RunWith(classOf[JUnit4])
class ScalaVersionTest {
  // scala/bug#8711
  @Test def versionUnparse(): Unit = {
    val v = "2.11.3"
    assertEquals(v, ScalaVersion(v).unparse)
    // Suffixes are normalized to upper case on the way out.
    assertEquals("2.11.3-RC4", ScalaVersion("2.11.3-rc4").unparse)
  }
  // scala/bug#9167
  @Test def `version parses with rigor`(): Unit = {
    import settings.{ SpecificScalaVersion => V }
    import ScalaVersion._
    // no-brainers
    assertEquals(V(2,11,7,Final), ScalaVersion("2.11.7"))
    assertEquals(V(2,11,7,Final), ScalaVersion("2.11.7-FINAL"))
    assertEquals(V(2,11,7,Milestone(3)), ScalaVersion("2.11.7-M3"))
    assertEquals(V(2,11,7,RC(3)), ScalaVersion("2.11.7-RC3"))
    assertEquals(V(2,11,7,Development("devbuild")), ScalaVersion("2.11.7-devbuild"))
    // partial-brainers: lower-case milestone/RC markers are accepted,
    // anything else after the dash becomes a Development suffix.
    assertEquals(V(2,11,7,Milestone(3)), ScalaVersion("2.11.7-m3"))
    assertEquals(V(2,11,7,RC(3)), ScalaVersion("2.11.7-rc3"))
    assertEquals(V(2,11,7,Development("maybegood")), ScalaVersion("2.11.7-maybegood"))
    assertEquals(V(2,11,7,Development("RCCola")), ScalaVersion("2.11.7-RCCola"))
    assertEquals(V(2,11,7,Development("RC1.5")), ScalaVersion("2.11.7-RC1.5"))
    assertEquals(V(2,11,7,Development("")), ScalaVersion("2.11.7-"))
    assertEquals(V(2,11,7,Development("0.5")), ScalaVersion("2.11.7-0.5"))
    assertEquals(V(2,11,7,Development("devbuild\\nt9167")), ScalaVersion("2.11.7-devbuild\\nt9167"))
    // lower-case "final" is NOT recognized as the Final build marker.
    assertEquals(V(2,11,7,Development("final")), ScalaVersion("2.11.7-final"))
    // oh really: sentinels and malformed inputs
    assertEquals(NoScalaVersion, ScalaVersion("none"))
    assertEquals(AnyScalaVersion, ScalaVersion("any"))
    assertThrows[NumberFormatException] { ScalaVersion("2.11.7.2") }
    assertThrows[NumberFormatException] { ScalaVersion("2.11.7.beta") }
    assertThrows[NumberFormatException] { ScalaVersion("2.x.7") }
    assertThrows[NumberFormatException] { ScalaVersion("2.-11.7") }
    assertThrows[NumberFormatException] { ScalaVersion("2. ") }
    assertThrows[NumberFormatException] { ScalaVersion("2.1 .7") }
    assertThrows[NumberFormatException] { ScalaVersion("2.") }
    assertThrows[NumberFormatException] { ScalaVersion("2..") }
    assertThrows[NumberFormatException] { ScalaVersion("2...") }
    assertThrows[NumberFormatException] { ScalaVersion("2-") }
    assertThrows[NumberFormatException] { ScalaVersion("2-.") } // scalacheck territory
    assertThrows[NumberFormatException] { ScalaVersion("any.7") }
    // The error message itself is part of the contract.
    assertThrows[NumberFormatException] ( ScalaVersion("2.11-ok"), _ ==
        "Bad version (2.11-ok) not major[.minor[.revision[-suffix]]]" )
  }
  // scala/bug#9377
  @Test def `missing version is as good as none`(): Unit = {
    assertEquals(NoScalaVersion, ScalaVersion(""))
  }
}
| martijnhoekstra/scala | test/junit/scala/tools/nsc/settings/ScalaVersionTest.scala | Scala | apache-2.0 | 2,915 |
package io.abacus.tallyho.hyperloglog
import io.abacus.tallyho.pipeline.SimplePipeline
import com.clearspring.analytics.stream.cardinality.HyperLogLog
import com.twitter.algebird.HyperLogLog._
import com.twitter.algebird.{HLL, HyperLogLogMonoid}
/** Minimal interface over a HyperLogLog cardinality estimator. */
trait HyperLogLogInterface {
  // Feed one element into the sketch.
  def process(elem:String):Unit
  // Current estimate of the number of distinct elements seen so far.
  def estimate:Long
}
// creates HLL with accuracy accuracy = 1.04/sqrt(2^log2m)
// m is the number of counters
/** Algebird-backed implementation: items are hashed into per-item HLLs and
 * merged into a running sum via the monoid.
 */
class AlgebirdHLL() extends HyperLogLogInterface {
  val hll = new HyperLogLogMonoid(12)
  var sumHll = hll.zero
  def process(elem:String) = {
    // NOTE(review): getBytes uses the platform default charset — presumably
    // UTF-8 is intended; confirm for reproducible estimates across machines.
    val item = hll(elem.getBytes)
    sumHll = hll.plus(sumHll, item)
  }
  def estimate = {
    val approxSize = hll.sizeOf(sumHll)
    approxSize.estimate
  }
}
/** stream-lib-backed implementation, constructed with the same parameter (12). */
class StreamLibHLL() extends HyperLogLogInterface {
  val hll = new HyperLogLog(12)
  def estimate = hll.cardinality()
  def process(elem:String) = hll.offer(elem)
}
/** Pipeline stage that feeds each element through the estimator and then
 * passes the element on unchanged; the pipeline result is the final estimate.
 */
class CardinalityEstimationPipeline[T<:HyperLogLogInterface](hll:T) extends SimplePipeline[String,String,Long] {
  override def result = hll.estimate
  override def step(elem: String): String = {
    hll.process(elem)
    elem
  }
}
| non/tallyho | src/main/scala/io/abacus/tallyho/hyperloglog/HyperLogLog.scala | Scala | apache-2.0 | 1,155 |
package eclim.test.search
import eclim.test.{TestJava, TestScala}
// Fixture class in eclim's search test sources: each method references a
// project-local type (Scala and Java respectively) for cross-language lookup.
// NOTE(review): test tooling may depend on exact source offsets in this file —
// confirm before reformatting.
class TestComplete {
  def testScala(){
    val test = new TestScala
    test.scalaMethod1
  }
  def testJava(){
    val test = new TestJava
    test.javaMethod1
  }
}
| euclio/eclim | org.eclim.sdt/test/eclim_unit_test_scala/src/eclim/test/search/TestSearch.scala | Scala | gpl-3.0 | 238 |
package io.ssc.trackthetrackers.analysis.preprocessing
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.api.scala._
// A row as read from the labeled third-party TSV: domain and registration
// columns, per-embedding-type occurrence counts, and manual labeling columns.
case class RawLabeledThirdParty(domain: String, registrationOrg: String, registrationCountry: String, occurrences: Int,
                                occurrencesDynamic: Int, occurrencesStatic: Int,
                                occurrencesJavascript: Int, occurrencesIframe: Int, occurrencesImage: Int,
                                occurrencesLink: Int, category: String, comment: String, company: String, found: String,
                                certain: String, labelingComment: String, requiresTranslation: String) {
  // Projects this raw row to the trimmed representation, turning "#" markers into None.
  def asLabeledThirdParty() = {
    LabeledThirdParty(domain, occurrences, occurrencesDynamic, occurrencesStatic, occurrencesJavascript,
      occurrencesIframe, occurrencesImage, occurrencesLink, optionize(category),
      occurrencesLink = occurrencesLink, registrationOrg = optionize(registrationOrg), company = optionize(company))
  }
  // "#" is the file's marker for a missing value.
  def optionize(value: String): Option[String] = {
    value match {
      case "#" => None
      case _ => Some(value)
    }
  }
}
// Cleaned representation: label columns are None when unlabeled.
case class LabeledThirdParty(domain: String, occurrences: Int, occurrencesDynamic: Int, occurrencesStatic: Int,
                             occurrencesJavascript: Int, occurrencesIframe: Int, occurrencesImage: Int,
                             occurrencesLink: Int, category: Option[String], registrationOrg: Option[String],
                             company: Option[String])
object LabeledThirdParties {
  // Reads the TSV, converts rows, optionally keeps only the given categories,
  // and returns the parties sorted by descending occurrence count.
  def retrieve(file: String, categories: Set[String] = Set())(implicit env: ExecutionEnvironment) = {
    val labeledThirdParties = env.readCsvFile[RawLabeledThirdParty](file, fieldDelimiter = "\t")
      .map { _.asLabeledThirdParty() }
      .filter { labeledThirdParty =>
        // An empty category set means "no category filtering".
        if (categories.isEmpty) { true } else {
          labeledThirdParty.category.exists { categories.contains(_) }
        }
      }
      .collect()
    labeledThirdParties.sortBy { _.occurrences }
      .reverse
  }
}
// Ad-hoc entry point: prints tracker domains that carry a registration org
// but have no company label yet.
object PlayWithLabeledThirdParties extends App {
  implicit val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment
  val parties = LabeledThirdParties.retrieve("/home/ssc/ownCloud/trackthetrackers/labeling/labeled-thirdparties.csv",
    Set("Advertising", "Analytics", "Beacon", "Widget"))
  parties.filter { party => party.company.isEmpty && party.registrationOrg.isDefined }
    .foreach { party => println(party.domain + " " + party.registrationOrg.get) }
}
| sscdotopen/trackthetrackers | analysis/src/main/scala/io/ssc/trackthetrackers/analysis/preprocessing/LabeledThirdParties.scala | Scala | gpl-3.0 | 2,776 |
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc
import java.security.cert.X509Certificate
import org.specs2.mutable.Specification
import play.api.http.HeaderNames._
import play.api.http.HttpConfiguration
import play.api.i18n.Lang
import play.api.libs.typedmap.{ TypedKey, TypedMap }
import play.api.mvc.request.{ DefaultRequestFactory, RemoteConnection, RequestTarget }
/** Specs for RequestHeader: Java conversion, typed attributes, host parsing,
 * Accept-Language parsing, and the deprecated copy method.
 */
class RequestHeaderSpec extends Specification {
  "request header" should {
    "convert to java" in {
      "keep all the headers" in {
        val rh = dummyRequestHeader("GET", "/", Headers(HOST -> "playframework.com"))
        rh.asJava.getHeaders.contains(HOST) must beTrue
      }
      "keep the headers accessible case insensitively" in {
        val rh = dummyRequestHeader("GET", "/", Headers(HOST -> "playframework.com"))
        rh.asJava.getHeaders.contains("host") must beTrue
      }
    }
    "have typed attributes" in {
      "can set and get a single attribute" in {
        val x = TypedKey[Int]("x")
        dummyRequestHeader().withAttrs(TypedMap(x -> 3)).attrs(x) must_== 3
      }
      "can set two attributes and get one back" in {
        val x = TypedKey[Int]("x")
        val y = TypedKey[String]("y")
        dummyRequestHeader().withAttrs(TypedMap(x -> 3, y -> "hello")).attrs(y) must_== "hello"
      }
      "getting a set attribute should be Some" in {
        val x = TypedKey[Int]("x")
        dummyRequestHeader().withAttrs(TypedMap(x -> 5)).attrs.get(x) must beSome(5)
      }
      "getting a nonexistent attribute should be None" in {
        val x = TypedKey[Int]("x")
        dummyRequestHeader().attrs.get(x) must beNone
      }
      "can add single attribute" in {
        val x = TypedKey[Int]("x")
        dummyRequestHeader().addAttr(x, 3).attrs(x) must_== 3
      }
      "keep current attributes when adding a new one" in {
        val x = TypedKey[Int]
        val y = TypedKey[String]
        dummyRequestHeader().withAttrs(TypedMap(y -> "hello")).addAttr(x, 3).attrs(y) must_== "hello"
      }
      "overrides current attribute value" in {
        val x = TypedKey[Int]
        val y = TypedKey[String]
        val requestHeader = dummyRequestHeader().withAttrs(TypedMap(y -> "hello"))
          .addAttr(x, 3)
          .addAttr(y, "white")
        requestHeader.attrs(y) must_== "white"
        requestHeader.attrs(x) must_== 3
      }
    }
    // Host resolution: an absolute request URI wins over the Host header.
    "handle host" in {
      "relative uri with host header" in {
        val rh = dummyRequestHeader("GET", "/", Headers(HOST -> "playframework.com"))
        rh.host must_== "playframework.com"
      }
      "absolute uri" in {
        val rh = dummyRequestHeader("GET", "https://example.com/test", Headers(HOST -> "playframework.com"))
        rh.host must_== "example.com"
      }
      "absolute uri with port" in {
        val rh = dummyRequestHeader("GET", "https://example.com:8080/test", Headers(HOST -> "playframework.com"))
        rh.host must_== "example.com:8080"
      }
      "absolute uri with port and invalid characters" in {
        val rh = dummyRequestHeader("GET", "https://example.com:8080/classified-search/classifieds?version=GTI|V8", Headers(HOST -> "playframework.com"))
        rh.host must_== "example.com:8080"
      }
      "relative uri with invalid characters" in {
        val rh = dummyRequestHeader("GET", "/classified-search/classifieds?version=GTI|V8", Headers(HOST -> "playframework.com"))
        rh.host must_== "playframework.com"
      }
    }
    // Accept-Language parsing: languages come back ordered by q-value.
    "parse accept languages" in {
      "return an empty sequence when no accept languages specified" in {
        dummyRequestHeader().acceptLanguages must beEmpty
      }
      "parse a single accept language" in {
        accept("en") must contain(exactly(Lang("en")))
      }
      "parse a single accept language and country" in {
        accept("en-US") must contain(exactly(Lang("en-US")))
      }
      "parse multiple accept languages" in {
        accept("en-US, es") must contain(exactly(Lang("en-US"), Lang("es")).inOrder)
      }
      "sort accept languages by quality" in {
        accept("en-US;q=0.8, es;q=0.7") must contain(exactly(Lang("en-US"), Lang("es")).inOrder)
        accept("en-US;q=0.7, es;q=0.8") must contain(exactly(Lang("es"), Lang("en-US")).inOrder)
      }
      "default accept language quality to 1" in {
        accept("en-US, es;q=0.7") must contain(exactly(Lang("en-US"), Lang("es")).inOrder)
        accept("en-US;q=0.7, es") must contain(exactly(Lang("es"), Lang("en-US")).inOrder)
      }
    }
    "deprecated copy method" in {
      // Applies `changeReq` to `origReq` and asserts every field of the result;
      // the named defaults let each test override just the field it changed.
      def checkRequestValues(
        origReq: RequestHeader,
        changeReq: RequestHeader => RequestHeader)(
        id: Long = origReq.id,
        uri: String = origReq.uri,
        path: String = origReq.path,
        method: String = origReq.method,
        version: String = origReq.version,
        queryString: Map[String, Seq[String]] = origReq.queryString,
        headers: Headers = origReq.headers,
        remoteAddress: String = origReq.remoteAddress,
        secure: Boolean = origReq.secure,
        clientCertificateChain: Option[Seq[X509Certificate]] = origReq.clientCertificateChain) = {
        val newReq: RequestHeader = changeReq(origReq)
        newReq.id must_== id
        newReq.uri must_== uri
        newReq.path must_== path
        newReq.method must_== method
        newReq.version must_== version
        newReq.queryString must_== queryString
        newReq.headers must_== headers
        newReq.remoteAddress must_== remoteAddress
        newReq.secure must_== secure
        newReq.clientCertificateChain must_== clientCertificateChain
      }
      "must change request id" in {
        checkRequestValues(dummyRequestHeader(), _.copy(id = 999L))(id = 999L)
      }
      "must change request uri" in {
        checkRequestValues(dummyRequestHeader(), _.copy(uri = "/x/y/z"))(uri = "/x/y/z")
      }
      "must change request path" in {
        checkRequestValues(dummyRequestHeader(), _.copy(path = "/x/y/z"))(path = "/x/y/z")
      }
      "must change request method" in {
        checkRequestValues(dummyRequestHeader(), _.copy(method = "HELLO"))(method = "HELLO")
      }
      "must change request version" in {
        checkRequestValues(dummyRequestHeader(), _.copy(version = "HTTP/9.9"))(version = "HTTP/9.9")
      }
      "must change request queryString" in {
        checkRequestValues(dummyRequestHeader(), _.copy(queryString = Map("x" -> Seq("y", "z"))))(queryString = Map("x" -> Seq("y", "z")))
      }
      "must change request headers" in {
        checkRequestValues(dummyRequestHeader(), _.copy(headers = new Headers(List(("x", "y")))))(headers = new Headers(List(("x", "y"))))
      }
      "must change request remoteAddress" in {
        checkRequestValues(dummyRequestHeader(), _.copy(remoteAddress = "x"))(remoteAddress = "x")
      }
      "must change request secure" in {
        checkRequestValues(dummyRequestHeader(), _.copy(secure = true))(secure = true)
      }
      "must change request client certificate chain" in {
        // Too lazy to make a real object, so take advantage of Java's weak runtime checks
        val ccc = Some("x").asInstanceOf[Option[Seq[X509Certificate]]]
        checkRequestValues(dummyRequestHeader(), _.copy(clientCertificateChain = ccc))(clientCertificateChain = ccc)
      }
    }
  }
  // Builds a request with the given Accept-Language header and parses it.
  private def accept(value: String) = dummyRequestHeader(
    headers = Headers("Accept-Language" -> value)
  ).acceptLanguages
  // Minimal RequestHeader built through the default request factory.
  private def dummyRequestHeader(
    requestMethod: String = "GET",
    requestUri: String = "/",
    headers: Headers = Headers()): RequestHeader = {
    new DefaultRequestFactory(HttpConfiguration()).createRequestHeader(
      connection = RemoteConnection("", false, None),
      method = requestMethod,
      target = RequestTarget(requestUri, "", Map.empty),
      version = "",
      headers = headers,
      attrs = TypedMap.empty
    )
  }
}
| Shenker93/playframework | framework/src/play/src/test/scala/play/api/mvc/RequestHeaderSpec.scala | Scala | apache-2.0 | 7,972 |
/*
* Copyright 2013-2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.index
import java.util
import java.util.Map.Entry
import com.typesafe.scalalogging.slf4j.Logging
import org.apache.accumulo.core.data
import org.apache.accumulo.core.data.{Key, Value}
import org.geotools.data.Query
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.core.data.AccumuloConnectorCreator
import org.locationtech.geomesa.core.data.tables.RecordTable
import org.locationtech.geomesa.core.filter._
import org.locationtech.geomesa.core.index.FilterHelper.filterListAsAnd
import org.locationtech.geomesa.core.iterators.IteratorTrigger
import org.locationtech.geomesa.core.util.{SelfClosingBatchScanner, SelfClosingIterator}
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter.identity.{FeatureId, Identifier}
import org.opengis.filter.{Filter, Id}
import scala.collection.JavaConverters._
object RecordIdxStrategy {

  /** Selects the record-index strategy when (and only when) the filter is an ID filter. */
  def getRecordIdxStrategy(filter: Filter, sft: SimpleFeatureType): Option[Strategy] =
    if (!filterIsId(filter)) None else Some(new RecordIdxStrategy)

  /** Intersects a conjunction of ID filters into a single Id filter.
   *
   * Multiple ID sets ANDed together call for their set intersection; the
   * result is None when there are no filters or the intersection is empty.
   */
  def intersectIDFilters(filters: Seq[Filter]): Option[Id] =
    if (filters.isEmpty) {
      None
    } else if (filters.size == 1) {
      Some(filters.head.asInstanceOf[Id])
    } else {
      // ID sets of each filter, as immutable Scala sets
      val idSets = filters.map { f => f.asInstanceOf[Id].getIDs.asScala.toSet }
      val shared = idSets.reduceLeft { (a, b) => a intersect b }
      if (shared.isEmpty) {
        None
      } else {
        val featureIds: util.Set[FeatureId] = shared.map { obj => ff.featureId(obj.toString) }.asJava
        Some(ff.id(featureIds))
      }
    }
}
class RecordIdxStrategy extends Strategy with Logging {

  // Runs an ID query against the record table through a self-closing batch scanner.
  def execute(acc: AccumuloConnectorCreator,
              iqp: QueryPlanner,
              featureType: SimpleFeatureType,
              query: Query,
              output: ExplainerOutputType): SelfClosingIterator[Entry[Key, Value]] = {
    val recordScanner = acc.createRecordScanner(featureType)
    val qp = buildIDQueryPlan(query, iqp, featureType, output)
    configureBatchScanner(recordScanner, qp)
    SelfClosingBatchScanner(recordScanner)
  }

  // Builds the query plan: extracts the ID filters, intersects them, and turns
  // each remaining feature ID into an exact row-key range on the record table.
  def buildIDQueryPlan(query: Query,
                       iqp: QueryPlanner,
                       featureType: SimpleFeatureType,
                       output: ExplainerOutputType) = {
    val schema = iqp.schema
    val featureEncoding = iqp.featureEncoding
    output(s"Searching the record table with filter ${query.getFilter}")
    // Split the filter into ID filters and everything else.
    val (idFilters, oFilters) = partitionID(query.getFilter)
    // recombine non-ID filters
    val combinedOFilter = filterListAsAnd(oFilters)
    // Multiple sets of IDs in a ID Filter are ORs. ANDs of these call for the intersection to be taken.
    // intersect together all groups of ID Filters, producing Some[Id] if the intersection returns something
    val combinedIDFilter: Option[Id] = RecordIdxStrategy.intersectIDFilters(idFilters)
    val identifiers: Option[Set[Identifier]] = combinedIDFilter.map ( _.getIdentifiers.asScala.toSet )
    val prefix = getTableSharingPrefix(featureType)
    // One exact range per feature ID.
    val rangesAsOption: Option[Set[data.Range]] = identifiers.map {
      aSet => aSet.map {
        id => org.apache.accumulo.core.data.Range.exact(RecordTable.getRowKey(prefix, id.toString))
      }
    }
    // check that the Set of Ranges exists and is not empty
    val ranges = rangesAsOption match {
      case Some(filterSet) if filterSet.nonEmpty => filterSet
      case _ =>
        // TODO: for below instead pass empty query plan (https://geomesa.atlassian.net/browse/GEOMESA-347)
        // need to log a warning message as the exception will be caught by hasNext in FeatureReaderIterator
        logger.error(s"Filter ${query.getFilter} results in no valid range for record table")
        throw new RuntimeException(s"Filter ${query.getFilter} results in no valid range for record table")
    }
    // Safe: reaching this point means rangesAsOption was Some, so
    // combinedIDFilter (from which it was mapped) is Some as well.
    output(s"Extracted ID filter: ${combinedIDFilter.get}")
    output(s"Extracted Other filters: $oFilters")
    output(s"Setting ${ranges.size} ranges.")
    val qp = QueryPlan(Seq(), ranges.toSeq, Seq())
    // this should be done with care, ECQL -> Filter -> CQL is NOT a unitary transform
    val ecql = combinedOFilter.map { ECQL.toCQL }
    val iteratorConfig = IteratorTrigger.chooseIterator(ecql, query, featureType)
    val sffiIterCfg = getSFFIIterCfg(iteratorConfig, featureType, ecql, schema, featureEncoding, query)
    // TODO GEOMESA-322 use other strategies with density iterator
    //val topIterCfg = getTopIterCfg(query, geometryToCover, schema, featureEncoder, featureType)
    qp.copy(iterators = qp.iterators ++ List(sffiIterCfg).flatten)
  }
}
| jwkessi/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/index/RecordIdxStrategy.scala | Scala | apache-2.0 | 5,673 |
package com.rasterfoundry.backsplash.server
import cats.MonoidK
import cats.data.OptionT
import cats.effect.Sync
import cats.implicits._
import org.http4s._
import scalacache._
import scalacache.caffeine._
import scalacache.modes.sync._
/** Shut down this instance of the tile server after n requests */
// NOTE(review): despite the name, this middleware only counts requests under
// the cache key "requestsServed" and always falls through to the wrapped
// routes (F.empty never produces a response, so `<+>` continues to `routes`).
// Presumably something external reads the counter and performs the shutdown —
// TODO confirm.
object QuotaMiddleware {
  def apply[F[_]: Sync](routes: HttpRoutes[F], cache: CaffeineCache[Int])(
      implicit F: MonoidK[OptionT[F, ?]]
  ) = HttpRoutes { req: Request[F] =>
    {
      // Read-modify-write is two separate cache operations (get, then put), so
      // concurrent requests can lose increments; the count is best-effort.
      val served = cache.get("requestsServed") getOrElse { 0 }
      val incremented = served + 1
      cache.put("requestsServed")(incremented, None)
      F.empty[Response[F]]
    } <+> routes.run(req)
  }
}
| azavea/raster-foundry | app-backend/backsplash-server/src/main/scala/com/rasterfoundry/backsplash/middleware/QuotaMiddleware.scala | Scala | apache-2.0 | 701 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600j.v2
import uk.gov.hmrc.ct.box.CtBoxIdentifier
// CT600J v2: identifier for the "Tax Avoidance 10 Reference Number" box.
abstract class J10 extends CtBoxIdentifier(name = "Tax Avoidance 10 Reference Number")
| scottcutts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600j/v2/J10.scala | Scala | apache-2.0 | 770 |
package de.htwg.zeta.persistence.actorCache
import java.util.UUID
import javax.inject.Inject
import javax.inject.Singleton
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.Failure
import scala.util.Success
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.pattern.ask
import akka.routing.ConsistentHashingPool
import akka.routing.ConsistentHashingRouter.ConsistentHashMapping
import akka.util.Timeout
import de.htwg.zeta.persistence.actorCache.LoginInfoCacheActor.Create
import de.htwg.zeta.persistence.actorCache.LoginInfoCacheActor.Delete
import de.htwg.zeta.persistence.actorCache.LoginInfoCacheActor.Read
import de.htwg.zeta.persistence.actorCache.LoginInfoCacheActor.Update
import de.htwg.zeta.persistence.authInfo.ZetaLoginInfo
import de.htwg.zeta.persistence.general.LoginInfoRepository
/**
* Actor Cache Implementation of LoginInfoPersistence.
*/
/** Actor Cache Implementation of LoginInfoPersistence.
 *
 * Operations are routed to a fixed pool of cache actors. Consistent hashing on
 * the LoginInfo keeps all messages for the same key on the same actor, so each
 * cache entry has a single owner.
 */
@Singleton
class ActorCacheLoginInfoRepository @Inject()(
    underlying: LoginInfoRepository,
    system: ActorSystem,
    numberActorsPerEntityType: Int,
    cacheDuration: FiniteDuration,
    implicit val timeout: Timeout
) extends LoginInfoRepository {

  // Hash every message by its LoginInfo so the same key always lands on the same actor.
  private def hashMapping: ConsistentHashMapping = {
    case Create(loginInfo, _) => loginInfo.hashCode
    case Read(loginInfo) => loginInfo.hashCode
    case Update(loginInfo, _) => loginInfo.hashCode
    case Delete(loginInfo) => loginInfo.hashCode
  }

  private val router: ActorRef = system.actorOf(
    ConsistentHashingPool(
      nrOfInstances = numberActorsPerEntityType,
      hashMapping = hashMapping
    ).props(
      LoginInfoCacheActor.props(underlying, cacheDuration)
    ),
    "LoginInfo"
  )

  /** Asks the router and flattens the Try-wrapped Unit reply into the Future.
   *
   * The cache actors answer with scala.util.Success(())/Failure(e); the three
   * Unit-returning operations below all share this handling.
   */
  private def askUnit(message: Any): Future[Unit] = {
    (router ? message).flatMap {
      case Success(()) => Future.successful(())
      case Failure(e) => Future.failed(e)
    }
  }

  /** Create a LoginInfo.
   *
   * @param loginInfo The LoginInfo.
   * @param id        The id of the user.
   * @return Unit-Future, when successful.
   */
  override def create(loginInfo: ZetaLoginInfo, id: UUID): Future[Unit] = {
    askUnit(Create(loginInfo, id))
  }

  /** Get a user that matches the specified LoginInfo.
   *
   * @param loginInfo The LoginInfo.
   * @return The id of the User.
   */
  override def read(loginInfo: ZetaLoginInfo): Future[UUID] = {
    (router ? Read(loginInfo)).flatMap {
      case Success(userId: UUID) => Future.successful(userId)
      case Failure(e) => Future.failed(e)
    }
  }

  /** Update a LoginInfo.
   *
   * @param old     The LoginInfo to update.
   * @param updated The updated LoginInfo.
   * @return Unit-Future
   */
  override def update(old: ZetaLoginInfo, updated: ZetaLoginInfo): Future[Unit] = {
    askUnit(Update(old, updated))
  }

  /** Delete a LoginInfo.
   *
   * @param loginInfo LoginInfo
   * @return Unit-Future
   */
  override def delete(loginInfo: ZetaLoginInfo): Future[Unit] = {
    askUnit(Delete(loginInfo))
  }

  /** Read all LoginInfo's — delegates straight to the underlying persistence (not cached).
   *
   * @return Future containing all LoginInfo's
   */
  override def readAllKeys(): Future[Set[ZetaLoginInfo]] = {
    underlying.readAllKeys()
  }
}
| Zeta-Project/zeta | api/persistence/src/main/scala/de/htwg/zeta/persistence/actorCache/ActorCacheLoginInfoRepository.scala | Scala | bsd-2-clause | 3,366 |
package coursier.bootstrap.launcher
import utest._
import coursier.bootstrap.launcher.credentials.Credentials
import coursier.bootstrap.launcher.credentials.DirectCredentials
import coursier.paths.CachePath
import scala.jdk.CollectionConverters._
import java.nio.file.Files
import java.io.File
import java.nio.file.Path
import java.net.URL
import java.nio.file.Paths
import java.net.URI
import java.util.Collections
object DownloadTests extends TestSuite {
  // Repository coordinates come from environment variables (CI) or system
  // properties; fail fast with a clear message when they are missing.
  private val testRepository = Option(System.getenv("TEST_REPOSITORY"))
    .orElse(sys.props.get("test.repository"))
    .getOrElse(sys.error("TEST_REPOSITORY not set"))
  private val testRepositoryUser = Option(System.getenv("TEST_REPOSITORY_USER"))
    .orElse(sys.props.get("test.repository.user"))
    .getOrElse(sys.error("TEST_REPOSITORY_USER not set"))
  private val testRepositoryPassword = Option(System.getenv("TEST_REPOSITORY_PASSWORD"))
    .orElse(sys.props.get("test.repository.password"))
    .getOrElse(sys.error("TEST_REPOSITORY_PASSWORD not set"))
  private val testRepositoryHost = new URI(testRepository).getHost
  /** Depth-first delete of a file or directory tree. */
  private def deleteRecursive(f: File): Unit = {
    if (f.isDirectory)
      f.listFiles().foreach(deleteRecursive)
    if (f.exists())
      f.delete()
  }
  /** Runs `f` with a fresh temporary cache directory, deleting it afterwards
   * (and via a shutdown hook if the JVM dies mid-test).
   */
  private def withTmpDir[T](f: Path => T): T = {
    val dir = Files.createTempDirectory("coursier-test")
    val shutdownHook: Thread =
      new Thread {
        override def run() =
          deleteRecursive(dir.toFile)
      }
    Runtime.getRuntime.addShutdownHook(shutdownHook)
    try f(dir)
    finally {
      deleteRecursive(dir.toFile)
      Runtime.getRuntime.removeShutdownHook(shutdownHook)
    }
  }
  /** Asserts that every remote URL was resolved to an existing local file. */
  private def assertAllDownloaded(remoteUrls: Seq[URL], localUrls: Seq[URL]): Unit = {
    assert(remoteUrls.size == localUrls.size)
    assert(localUrls.forall(url => Paths.get(url.toURI).toFile.exists()))
  }
  val tests = Tests {
    test("public URL") {
      withTmpDir { dir =>
        val remoteUrls = Seq(
          new URL(
            "https://repo1.maven.org/maven2/com/chuusai/shapeless_2.12/2.3.3/shapeless_2.12-2.3.3.jar"
          )
        )
        val download = new Download(1, dir.toFile, Collections.emptyList())
        val localUrls = download.getLocalURLs(remoteUrls.asJava).asScala
        assertAllDownloaded(remoteUrls, localUrls.toSeq)
      }
    }
    test("private URL with credentials in the URL") {
      withTmpDir { dir =>
        val remoteUrls = Seq(
          new URL(
            s"http://$testRepositoryUser:$testRepositoryPassword@" +
              s"$testRepository/com/abc/test/0.1/test-0.1.pom".stripPrefix("http://")
          )
        )
        val download = new Download(1, dir.toFile, Collections.emptyList())
        val localUrls = download.getLocalURLs(remoteUrls.asJava).asScala
        assertAllDownloaded(remoteUrls, localUrls.toSeq)
      }
    }
    // Fixed typo in the test name: "priavte" -> "private".
    test("private URL with configured credentials") {
      withTmpDir { dir =>
        val remoteUrls = Seq(
          new URL(s"$testRepository/com/abc/test/0.1/test-0.1.pom")
        )
        val download = new Download(
          1,
          dir.toFile,
          Collections.singletonList(
            new DirectCredentials(testRepositoryHost, testRepositoryUser, testRepositoryPassword)
              .withMatchHost(true)
              .withHttpsOnly(false)
          )
        )
        val localUrls = download.getLocalURLs(remoteUrls.asJava).asScala
        assertAllDownloaded(remoteUrls, localUrls.toSeq)
      }
    }
  }
}
| alexarchambault/coursier | modules/bootstrap-launcher/src/it/scala/coursier/bootstrap/launcher/DownloadTests.scala | Scala | apache-2.0 | 3,715 |
package com.olegych.scastie.api
/** Constants shared between scastie modules. */
object Shared {
  /** URL path prefix under which Scala.js artifacts are served. */
  val scalaJsHttpPathPrefix: String = "scalajs"
}
| OlegYch/scastie | api/src/main/scala/com.olegych.scastie.api/Shared.scala | Scala | apache-2.0 | 91 |
package ahlers.phantom.embedded
import ahlers.phantom.embedded.MockArtifactStores.newArtifactStore
import de.flapdoodle.embed.process.config.io.ProcessOutput
import de.flapdoodle.embed.process.io.{IStreamProcessor, Processors}
import org.scalatest._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.tagobjects.{Disk, Network}
import org.scalatest.time.{Millis, Seconds, Span}
import scala.concurrent.{Future, Promise}
/**
* @author [[mailto:michael@ahlers.consulting Michael Ahlers]]
*/
class PhantomSpec
  extends FlatSpec
    with Matchers
    with ScalaFutures {
  // Artifact download and process start-up can be slow; be generous.
  override implicit def patienceConfig: PatienceConfig = PatienceConfig(timeout = Span(5, Seconds), interval = Span(500, Millis))
  // For every supported PhantomJS version: start it, write a console.log
  // command, and expect the logged message to appear in the process output.
  PhantomVersion.values foreach { version =>
    it must s"start $version, and receive commands" taggedAs(Network, Disk) in {
      val message = "Hello, World!"
      val outputProcessor = new FutureStreamProcessor(message)
      // Capture stdout through the promise-backed processor; silence the rest.
      val processOutput =
        new ProcessOutput(
          outputProcessor,
          Processors.silent,
          Processors.silent
        )
      val runtimeConfig =
        new PhantomRuntimeConfigBuilder()
          .defaults()
          .artifactStore(newArtifactStore)
          .processOutput(processOutput)
          .build()
      val starter =
        PhantomStarter
          .getInstance(runtimeConfig)
      val config =
        new PhantomProcessConfigBuilder()
          .defaults()
          .version(version)
          .build()
      val executable = starter.prepare(config)
      val process = executable.start()
      val console = process.getConsole
      console.write(s"console.log('$message')\\n;")
      console.flush()
      outputProcessor.getOutput().futureValue should include(message)
      process.stop()
    }
  }
  /** Completes a future with the first output block containing `message`. */
  class FutureStreamProcessor(message: String)
    extends IStreamProcessor {
    private val output: Promise[String] = Promise()
    override def process(block: String): Unit =
      // trySuccess (not success): `message` may occur in more than one output
      // block, and completing an already-completed Promise with `success`
      // would throw IllegalStateException from the stream-reader thread.
      if (block.contains(message)) { output.trySuccess(block); () }
    override def onProcessed(): Unit = ()
    def getOutput(): Future[String] =
      output.future
  }
}
| michaelahlers/embedded-phantom | src/it/scala/ahlers/phantom/embedded/PhantomSpec.scala | Scala | mit | 2,167 |
package com.twitter.finagle.netty4
import com.twitter.finagle.util.{LoadService, ServiceLoadedTimer}
import org.scalatest.funsuite.AnyFunSuite
class Netty4HashedWheelTimerTest extends AnyFunSuite {
  // The service loader must expose exactly one timer, backed by Netty 4.
  test("We can get an instance of Netty4HashedWheelTimer via the LoadService") {
    val loaded = LoadService[ServiceLoadedTimer]()
    if (loaded.lengthCompare(1) == 0)
      assert(loaded.head.isInstanceOf[Netty4HashedWheelTimer])
    else
      fail(s"Expected a Netty4HashedWheelTimer, found $loaded")
  }
}
| twitter/finagle | finagle-netty4/src/test/scala/com/twitter/finagle/netty4/Netty4HashedWheelTimerTest.scala | Scala | apache-2.0 | 492 |
package models
import scala.slick.lifted.ProvenShape.proveShapeOf
import org.joda.time.DateTime
import models.Mappers.dateTimeSlickMapper
import play.api.Application
import play.api.Play.current
import play.api.db.slick.Config.driver.simple._
import play.api.db.slick.DB
import securesocial.core._
import securesocial.core.providers.Token
/** Implements the UserServicePlugin required to use the SecureSocial plugin on
* top of Slick. See http://securesocial.ws/guide/user-service.html */
class LoginUserService(application: Application) extends UserServicePlugin(application)
  with LoginUsers.Queries with LoginTokens.Queries {
  /** Called from `LoginUsers.Queries.save` before the identity row is written;
    * mirrors the login user into the application's `Person` table.
    */
  def saveHook(user: User)(implicit s: Session): Unit = {
    import models._
    import Role._
    // Person is created with an empty middle field and the login email.
    val person = Person(user.firstname, user.lastname, "", user.email)
    val connection = Connection(s)
    connection insert person
    // NOTE(review): this inserts the Author role when `hasRole` returns true —
    // confirm the condition is not meant to be negated (i.e. grant Author only
    // when the person does NOT already have a role).
    if(Query(connection.database()) hasRole person.id)
      connection insert PersonRole(person.id, Role.Author)
    ()
  }
}
/** Row model for the LOGINUSERS table; mirrors a SecureSocial identity.
  * `uid` is the provider-scoped user id and `pid` the provider id; together
  * they form the SecureSocial identity key (see `id`).
  */
case class User(
  uid: String,
  pid: String,
  email: String,
  firstname: String,
  lastname: String,
  authmethod: String,
  hasher: Option[String],
  password: Option[String],
  salt: Option[String]
) {
  // (uid, pid) uniquely identifies the identity.
  def id: IdentityId = IdentityId(uid, pid)
  // Rebuilds the SecureSocial view of this row. fullName is derived by
  // concatenating the stored first/last names; OAuth details and avatar are
  // not persisted here, so they are always None.
  def toIdentity: SocialUser = SocialUser(
    identityId=IdentityId(uid, pid),
    firstName=firstname,
    lastName=lastname,
    fullName=s"$firstname $lastname",
    email=Some(email),
    avatarUrl=None,
    authMethod=AuthenticationMethod(authmethod),
    // PasswordInfo is only reconstructed when a password is stored.
    oAuth1Info=None,
    oAuth2Info=None,
    passwordInfo= password.map(p => PasswordInfo(hasher.getOrElse(""), p, salt))
  )
}
object User {
  // IMPORTANT: At this point (i.email.get) we assume that the provider gives
  // us an email, which is not the case for some of them (e.g. Twitter).
  // NOTE(review): `i.email.get` throws NoSuchElementException when the
  // provider supplies no email — this will fail for such providers.
  def fromIdentity(i: Identity) = User(
    uid=i.identityId.userId,
    pid=i.identityId.providerId,
    email=i.email.get,
    firstname=i.firstName,
    lastname=i.lastName,
    authmethod=i.authMethod.method,
    hasher=i.passwordInfo.map(_.hasher),
    password=i.passwordInfo.map(_.password),
    // PasswordInfo.salt is itself an Option, so map(...).getOrElse(None)
    // flattens Option[Option[String]] down to Option[String].
    salt=i.passwordInfo.map(_.salt).getOrElse(None)
  )
}
// Slick table mapping for LOGINUSERS. The primary key is the composite
// (uid, pid) pair, matching the SecureSocial IdentityId.
class LoginUserTable(tag: Tag) extends Table[User](tag, "LOGINUSERS") {
  def uid = column[String]("UID", O.DBType("TEXT"))
  def pid = column[String]("PID", O.DBType("TEXT"))
  def email = column[String]("EMAIL", O.DBType("TEXT"))
  def firstname = column[String]("FIRSTNAME", O.DBType("TEXT"))
  def lastname = column[String]("LASTNAME", O.DBType("TEXT"))
  def authmethod = column[String]("AUTHMETHOD", O.DBType("TEXT"))
  def hasher = column[Option[String]]("HASHER", O.DBType("TEXT"))
  def password = column[Option[String]]("PASSWORD", O.DBType("TEXT"))
  def salt = column[Option[String]]("SALT", O.DBType("TEXT"))
  def pk = primaryKey("LOGINUSERS_PK", (uid, pid))
  def * = (uid, pid, email, firstname, lastname, authmethod, hasher, password, salt) <> ((User.apply _).tupled, User.unapply)
}
object LoginUsers extends TableQuery(new LoginUserTable(_)) {
  // Query: the user row identified by (userId, providerId).
  def UserByidentityId(identityId: IdentityId)(implicit s: Session) =
    this.filter(user => (user.uid is identityId.userId) && (user.pid is identityId.providerId))
  // Query: the user row identified by (email, providerId).
  def userByEmailAndProvider(email: String, pid: String)(implicit s: Session) =
    this.filter(user => (user.email is email) && (user.pid is pid))
  // First user row (if any) with the given email, across all providers.
  def withEmail(email: String)(implicit s: Session) =
    this.filter(_.email is email).list.headOption
  /** SecureSocial UserService operations, mixed into LoginUserService. */
  trait Queries {
    // Application-specific side effects performed before the identity is saved.
    def saveHook(user: User)(implicit s: Session): Unit
    // Upsert keyed on (uid, pid): insert when absent, update when present.
    // saveHook runs inside the same transaction, before the write.
    def save(identity: Identity): Identity = {
      DB withTransaction { implicit s: Session =>
        val user = User.fromIdentity(identity)
        saveHook(user)
        find(user.id) match {
          case None =>
            LoginUsers.insert(user)
          case Some(u) =>
            UserByidentityId(u.identityId).update(user)
        }
        user.toIdentity
      }
    }
    // Look up an identity by its (userId, providerId) key.
    def find(identityId: IdentityId): Option[Identity] = {
      DB withSession { implicit s: Session =>
        UserByidentityId(identityId).firstOption.map(_.toIdentity)
      }
    }
    // Look up an identity by email within a single provider.
    def findByEmailAndProvider(email: String, pid: String): Option[Identity] = {
      DB withSession { implicit s: Session =>
        userByEmailAndProvider(email, pid).firstOption.map(_.toIdentity)
      }
    }
  }
}
/** Row model for LOGINTOKENS; SecureSocial's Token plus an `isInvitation`
  * flag used by this application.
  */
case class MyToken(
  uuid: String,
  email: String,
  creationTime: DateTime,
  expirationTime: DateTime,
  isSignUp: Boolean,
  isInvitation: Boolean
) {
  // Conversion to SecureSocial's Token; isInvitation is not carried over
  // (the Token constructor used here takes only the five fields below).
  def toT = Token(uuid, email, creationTime, expirationTime, isSignUp)
}
object MyToken {
  // Wraps a SecureSocial Token; tokens created this way are never invitations
  // (isInvitation is hard-coded to false).
  def fromT(t: Token) = MyToken(t.uuid, t.email, t.creationTime, t.expirationTime, t.isSignUp, false)
}
// Slick table mapping for LOGINTOKENS; uuid is the primary key.
class LoginTokenTable(tag: Tag) extends Table[MyToken](tag, "LOGINTOKENS") {
  def uuid = column[String]("UUID", O.DBType("text"), O.PrimaryKey)
  def email = column[String]("EMAIL", O.DBType("text"))
  def creationTime = column[DateTime]("CREATIONTIME")
  def expirationTime = column[DateTime]("EXPIRATIONTIME")
  def isSignUp = column[Boolean]("ISSIGNUP")
  def isInvitation = column[Boolean]("ISINVITATION")
  def * = (uuid, email, creationTime, expirationTime, isSignUp, isInvitation) <> ((MyToken.apply _).tupled, MyToken.unapply)
}
object LoginTokens extends TableQuery(new LoginTokenTable(_)) {
  // All tokens flagged as invitations.
  def allInvitations(implicit s: Session) =
    this.filter(_.isInvitation).list
  // Insert a token row.
  def ins(myToken: MyToken)(implicit s: Session) =
    LoginTokens.insert(myToken)
  // Delete the token with the given uuid.
  def del(uuid: String)(implicit s: Session) =
    this.filter(_.uuid is uuid).delete
  // Fetch the token with the given uuid, if present.
  def withIdType(uuid: String)(implicit s: Session): Option[MyToken] =
    this.filter(_.uuid is uuid).firstOption
  /** SecureSocial token-store operations, mixed into LoginUserService. */
  trait Queries {
    def deleteToken(uuid: String): Unit = {
      DB withSession { implicit s: Session =>
        del(uuid)
      }; ()
    }
    def findToken(uuid: String): Option[Token] = {
      DB withSession { implicit s: Session =>
        withIdType(uuid).map(_.toT)
      }
    }
    // Upsert keyed on uuid.
    def save(token: Token): Unit = {
      DB withTransaction { implicit s: Session =>
        // NOTE(review): findToken opens its own session via `DB withSession`
        // even though we are already inside withTransaction — confirm the
        // nested session is intended/safe with this Slick version.
        findToken(token.uuid) match {
          case None => LoginTokens.insert(MyToken.fromT(token))
          case Some(t) => LoginTokens.filter(_.uuid is t.uuid).update(MyToken.fromT(token))
        }
      }; ()
    }
    // Purge all tokens whose expirationTime is at or before now.
    def deleteExpiredTokens(): Unit = {
      DB withSession { implicit s: Session =>
        LoginTokens.filter(_.expirationTime <= DateTime.now).delete
      }; ()
    }
  }
}
| SlickChair/SlickChair | app/models/LoginUserService.scala | Scala | mit | 6,452 |
package com.sksamuel.elastic4s
import com.sksamuel.elastic4s.ElasticDsl._
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.SpanSugar._
import org.scalatest.{ FlatSpec, Matchers, OneInstancePerTest }
class ExplainTest
    extends FlatSpec
    with ElasticSugar
    with Matchers
    with OneInstancePerTest
    with ScalaFutures {
  // Patience for whenReady on future responses.
  override implicit def patienceConfig: ExplainTest.this.type#PatienceConfig = PatienceConfig(timeout = 10 seconds, interval = 1 seconds)
  // Fixture, executed in the class body (re-run per test via
  // OneInstancePerTest): index one document with id 8 and wait until it is
  // visible to searches.
  client.execute {
    index into "queens/england" fields ("name" -> "qe2") id 8
  }.await
  refresh("queens")
  blockUntilCount(1, "queens")
  "an explain request" should "explain a matching document" in {
    // Checked both synchronously (.await) and asynchronously (whenReady).
    val response = client.execute {
      explain id 8 in "queens/england" query termQuery("name", "qe2")
    }.await
    response.isMatch shouldBe true
    val futureResponse = client.execute {
      explain id 8 in "queens/england" query termQuery("name", "qe2")
    }
    whenReady(futureResponse) { response =>
      response.isMatch shouldBe true
    }
  }
  it should "explain a not matching document" in {
    // id 24 was never indexed, so the explain must report no match.
    val response = client.execute {
      explain id 24 in "queens/england" query termQuery("name", "qe2")
    }.await
    response.isMatch shouldBe false
    val futureResponse = client.execute {
      explain id 24 in "queens/england" query termQuery("name", "qe2")
    }
    whenReady(futureResponse) { response =>
      response.isMatch shouldBe false
    }
  }
}
| alexander-svendsen/elastic4s | elastic4s-core/src/test/scala/com/sksamuel/elastic4s/ExplainTest.scala | Scala | apache-2.0 | 1,493 |
package org.apache.predictionio.examples.experimental.trimapp
import org.apache.predictionio.controller.PDataSource
import org.apache.predictionio.controller.EmptyEvaluationInfo
import org.apache.predictionio.controller.EmptyActualResult
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.Event
import org.apache.predictionio.data.storage.Storage
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import com.github.nscala_time.time.Imports._
import grizzled.slf4j.Logger
/** Parameters for the event-copy ("trim") job.
 *
 * @param srcAppId app whose events are read
 * @param dstAppId app the events are written to (must be empty; see
 *                 DataSource.readTraining)
 * @param startTime optional lower time bound passed to the event store query
 * @param untilTime optional upper time bound passed to the event store query
 */
case class DataSourceParams(
  srcAppId: Int,
  dstAppId: Int,
  startTime: Option[DateTime],
  untilTime: Option[DateTime]
) extends Params
class DataSource(val dsp: DataSourceParams)
  extends PDataSource[TrainingData,
      EmptyEvaluationInfo, Query, EmptyActualResult] {
  @transient lazy val logger = Logger[this.type]
  /** Copies events from the source app to the destination app and returns an
   * empty TrainingData — this data source exists for its side effect only.
   * Throws when the destination app already contains any event.
   */
  override
  def readTraining(sc: SparkContext): TrainingData = {
    val eventsDb = Storage.getPEvents()
    logger.info(s"TrimApp: $dsp")
    logger.info(s"Read events from appId ${dsp.srcAppId}")
    // Events are filtered by the optional time window from the params.
    val srcEvents: RDD[Event] = eventsDb.find(
      appId = dsp.srcAppId,
      startTime = dsp.startTime,
      untilTime = dsp.untilTime
    )(sc)
    // take(1) probes for any existing event without materializing the RDD.
    val dstEvents: Array[Event] = eventsDb.find(appId = dsp.dstAppId)(sc).take(1)
    if (dstEvents.size > 0) {
      throw new Exception(s"DstApp ${dsp.dstAppId} is not empty. Quitting.")
    }
    logger.info(s"Write events to appId ${dsp.dstAppId}")
    eventsDb.write(srcEvents, dsp.dstAppId)(sc)
    logger.info(s"Finish writing events to appId ${dsp.dstAppId}")
    new TrainingData()
  }
}
/** Placeholder training data: the trim-app pipeline produces no model input,
 * since its DataSource runs purely for the event-copy side effect.
 */
class TrainingData() extends Serializable {
  // Render as an empty string; there is no payload to describe.
  override def toString: String = ""
}
| alex9311/PredictionIO | examples/experimental/scala-parallel-trim-app/src/main/scala/DataSource.scala | Scala | apache-2.0 | 1,737 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import java.net.URI
import java.util.Locale
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{QueryPlanningTracker, TableIdentifier}
import org.apache.spark.sql.catalyst.catalog.{CatalogDatabase, CatalogStorageFormat, CatalogTable, CatalogTableType, InMemoryCatalog, SessionCatalog, TemporaryViewRelation}
import org.apache.spark.sql.catalyst.catalog.CatalogTable.VIEW_STORING_ANALYZED_PLAN
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf}
import org.apache.spark.sql.types.StructType
trait AnalysisTest extends PlanTest {
  // Hook for suites to inject additional resolution rules into the analyzer
  // returned by `getAnalyzer`.
  protected def extendedAnalysisRules: Seq[Rule[LogicalPlan]] = Nil
  // Registers `plan` as a local temporary view named `name` in `catalog`.
  protected def createTempView(
      catalog: SessionCatalog,
      name: String,
      plan: LogicalPlan,
      overrideIfExists: Boolean): Unit = {
    val identifier = TableIdentifier(name)
    val metadata = createTempViewMetadata(identifier, plan.schema)
    val viewDefinition = TemporaryViewRelation(metadata, Some(plan))
    catalog.createTempView(name, viewDefinition, overrideIfExists)
  }
  // Registers `plan` as a global temporary view, qualified with the
  // configured global temp database.
  protected def createGlobalTempView(
      catalog: SessionCatalog,
      name: String,
      plan: LogicalPlan,
      overrideIfExists: Boolean): Unit = {
    val globalDb = Some(SQLConf.get.getConf(StaticSQLConf.GLOBAL_TEMP_DATABASE))
    val identifier = TableIdentifier(name, globalDb)
    val metadata = createTempViewMetadata(identifier, plan.schema)
    val viewDefinition = TemporaryViewRelation(metadata, Some(plan))
    catalog.createGlobalTempView(name, viewDefinition, overrideIfExists)
  }
  // Catalog metadata for a temp view that stores an analyzed plan, marked
  // via the VIEW_STORING_ANALYZED_PLAN property.
  private def createTempViewMetadata(
      identifier: TableIdentifier,
      schema: StructType): CatalogTable = {
    CatalogTable(
      identifier = identifier,
      tableType = CatalogTableType.VIEW,
      storage = CatalogStorageFormat.empty,
      schema = schema,
      properties = Map((VIEW_STORING_ANALYZED_PLAN, "true")))
  }
  // Builds a fresh analyzer over an in-memory catalog pre-populated with the
  // TestRelations views: TaBlE..TaBlE3 as local temp views, TaBlE4/TaBlE5 as
  // global temp views. `extendedAnalysisRules` are mixed in.
  protected def getAnalyzer: Analyzer = {
    val catalog = new SessionCatalog(
      new InMemoryCatalog, FunctionRegistry.builtin, TableFunctionRegistry.builtin)
    catalog.createDatabase(
      CatalogDatabase("default", "", new URI("loc"), Map.empty),
      ignoreIfExists = false)
    createTempView(catalog, "TaBlE", TestRelations.testRelation, overrideIfExists = true)
    createTempView(catalog, "TaBlE2", TestRelations.testRelation2, overrideIfExists = true)
    createTempView(catalog, "TaBlE3", TestRelations.testRelation3, overrideIfExists = true)
    createGlobalTempView(catalog, "TaBlE4", TestRelations.testRelation4, overrideIfExists = true)
    createGlobalTempView(catalog, "TaBlE5", TestRelations.testRelation5, overrideIfExists = true)
    new Analyzer(catalog) {
      override val extendedResolutionRules = extendedAnalysisRules
    }
  }
  // Analyzes `inputPlan` and compares the result (with subquery aliases
  // removed) against `expectedPlan`.
  protected def checkAnalysis(
      inputPlan: LogicalPlan,
      expectedPlan: LogicalPlan,
      caseSensitive: Boolean = true): Unit = {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
      val analyzer = getAnalyzer
      val actualPlan = analyzer.executeAndCheck(inputPlan, new QueryPlanningTracker)
      comparePlans(EliminateSubqueryAliases(actualPlan), expectedPlan)
    }
  }
  // Same as checkAnalysis, but additionally strips View wrappers that store
  // analyzed plans before comparing.
  protected def checkAnalysisWithoutViewWrapper(
      inputPlan: LogicalPlan,
      expectedPlan: LogicalPlan,
      caseSensitive: Boolean = true): Unit = {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
      val actualPlan = getAnalyzer.executeAndCheck(inputPlan, new QueryPlanningTracker)
      val transformed = EliminateSubqueryAliases(actualPlan) transformUp {
        case v: View if v.isTempViewStoringAnalyzedPlan => v.child
      }
      comparePlans(transformed, expectedPlan)
    }
  }
  protected override def comparePlans(
      plan1: LogicalPlan,
      plan2: LogicalPlan,
      checkAnalysis: Boolean = false): Unit = {
    // Analysis tests may have not been fully resolved, so skip checkAnalysis.
    super.comparePlans(plan1, plan2, checkAnalysis)
  }
  // Asserts that `inputPlan` analyzes without error, failing with both the
  // input plan and the partially analyzed plan on AnalysisException.
  protected def assertAnalysisSuccess(
      inputPlan: LogicalPlan,
      caseSensitive: Boolean = true): Unit = {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
      val analyzer = getAnalyzer
      val analysisAttempt = analyzer.execute(inputPlan)
      try analyzer.checkAnalysis(analysisAttempt) catch {
        case a: AnalysisException =>
          fail(
            s"""
               |Failed to Analyze Plan
               |$inputPlan
               |
               |Partial Analysis
               |$analysisAttempt
             """.stripMargin, a)
      }
    }
  }
  // Asserts that analysis fails and the exception message contains every
  // expected substring (compared case-insensitively).
  protected def assertAnalysisError(
      inputPlan: LogicalPlan,
      expectedErrors: Seq[String],
      caseSensitive: Boolean = true): Unit = {
    withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
      val analyzer = getAnalyzer
      val e = intercept[AnalysisException] {
        analyzer.checkAnalysis(analyzer.execute(inputPlan))
      }
      if (!expectedErrors.map(_.toLowerCase(Locale.ROOT)).forall(
          e.getMessage.toLowerCase(Locale.ROOT).contains)) {
        fail(
          s"""Exception message should contain the following substrings:
             |
             | ${expectedErrors.mkString("\\n ")}
             |
             |Actual exception message:
             |
             | ${e.getMessage}
           """.stripMargin)
      }
    }
  }
  // Asserts that `parser` rejects `sqlCommand` with a ParseException whose
  // message contains every given substring.
  protected def interceptParseException(
      parser: String => Any)(sqlCommand: String, messages: String*): Unit = {
    val e = intercept[ParseException](parser(sqlCommand))
    messages.foreach { message =>
      assert(e.message.contains(message))
    }
  }
}
| maropu/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisTest.scala | Scala | apache-2.0 | 6,691 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector.catalog
import java.time.{Instant, ZoneId}
import java.time.temporal.ChronoUnit
import java.util
import java.util.OptionalLong
import scala.collection.mutable
import org.scalatest.Assertions._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, JoinedRow}
import org.apache.spark.sql.catalyst.util.{CharVarcharUtils, DateTimeUtils}
import org.apache.spark.sql.connector.distributions.{Distribution, Distributions}
import org.apache.spark.sql.connector.expressions._
import org.apache.spark.sql.connector.metric.{CustomMetric, CustomTaskMetric}
import org.apache.spark.sql.connector.read._
import org.apache.spark.sql.connector.write._
import org.apache.spark.sql.connector.write.streaming.{StreamingDataWriterFactory, StreamingWrite}
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.unsafe.types.UTF8String
/**
* A simple in-memory table. Rows are stored as a buffered group produced by each output task.
*/
class InMemoryTable(
val name: String,
val schema: StructType,
override val partitioning: Array[Transform],
override val properties: util.Map[String, String],
val distribution: Distribution = Distributions.unspecified(),
val ordering: Array[SortOrder] = Array.empty,
val numPartitions: Option[Int] = None)
extends Table with SupportsRead with SupportsWrite with SupportsDelete
with SupportsMetadataColumns {
private object PartitionKeyColumn extends MetadataColumn {
override def name: String = "_partition"
override def dataType: DataType = StringType
override def comment: String = "Partition key used to store the row"
}
private object IndexColumn extends MetadataColumn {
override def name: String = "index"
override def dataType: DataType = IntegerType
override def comment: String = "Metadata column used to conflict with a data column"
}
// purposely exposes a metadata column that conflicts with a data column in some tests
override val metadataColumns: Array[MetadataColumn] = Array(IndexColumn, PartitionKeyColumn)
private val metadataColumnNames = metadataColumns.map(_.name).toSet -- schema.map(_.name)
private val allowUnsupportedTransforms =
properties.getOrDefault("allow-unsupported-transforms", "false").toBoolean
partitioning.foreach {
case _: IdentityTransform =>
case _: YearsTransform =>
case _: MonthsTransform =>
case _: DaysTransform =>
case _: HoursTransform =>
case _: BucketTransform =>
case _: SortedBucketTransform =>
case t if !allowUnsupportedTransforms =>
throw new IllegalArgumentException(s"Transform $t is not a supported transform")
}
// The key `Seq[Any]` is the partition values.
val dataMap: mutable.Map[Seq[Any], BufferedRows] = mutable.Map.empty
def data: Array[BufferedRows] = dataMap.values.toArray
def rows: Seq[InternalRow] = dataMap.values.flatMap(_.rows).toSeq
private val partCols: Array[Array[String]] = partitioning.flatMap(_.references).map { ref =>
schema.findNestedField(ref.fieldNames(), includeCollections = false) match {
case Some(_) => ref.fieldNames()
case None => throw new IllegalArgumentException(s"${ref.describe()} does not exist.")
}
}
private val UTC = ZoneId.of("UTC")
private val EPOCH_LOCAL_DATE = Instant.EPOCH.atZone(UTC).toLocalDate
private def getKey(row: InternalRow): Seq[Any] = {
@scala.annotation.tailrec
def extractor(
fieldNames: Array[String],
schema: StructType,
row: InternalRow): (Any, DataType) = {
val index = schema.fieldIndex(fieldNames(0))
val value = row.toSeq(schema).apply(index)
if (fieldNames.length > 1) {
(value, schema(index).dataType) match {
case (row: InternalRow, nestedSchema: StructType) =>
extractor(fieldNames.drop(1), nestedSchema, row)
case (_, dataType) =>
throw new IllegalArgumentException(s"Unsupported type, ${dataType.simpleString}")
}
} else {
(value, schema(index).dataType)
}
}
val cleanedSchema = CharVarcharUtils.replaceCharVarcharWithStringInSchema(schema)
partitioning.map {
case IdentityTransform(ref) =>
extractor(ref.fieldNames, cleanedSchema, row)._1
case YearsTransform(ref) =>
extractor(ref.fieldNames, cleanedSchema, row) match {
case (days: Int, DateType) =>
ChronoUnit.YEARS.between(EPOCH_LOCAL_DATE, DateTimeUtils.daysToLocalDate(days))
case (micros: Long, TimestampType) =>
val localDate = DateTimeUtils.microsToInstant(micros).atZone(UTC).toLocalDate
ChronoUnit.YEARS.between(EPOCH_LOCAL_DATE, localDate)
case (v, t) =>
throw new IllegalArgumentException(s"Match: unsupported argument(s) type - ($v, $t)")
}
case MonthsTransform(ref) =>
extractor(ref.fieldNames, cleanedSchema, row) match {
case (days: Int, DateType) =>
ChronoUnit.MONTHS.between(EPOCH_LOCAL_DATE, DateTimeUtils.daysToLocalDate(days))
case (micros: Long, TimestampType) =>
val localDate = DateTimeUtils.microsToInstant(micros).atZone(UTC).toLocalDate
ChronoUnit.MONTHS.between(EPOCH_LOCAL_DATE, localDate)
case (v, t) =>
throw new IllegalArgumentException(s"Match: unsupported argument(s) type - ($v, $t)")
}
case DaysTransform(ref) =>
extractor(ref.fieldNames, cleanedSchema, row) match {
case (days, DateType) =>
days
case (micros: Long, TimestampType) =>
ChronoUnit.DAYS.between(Instant.EPOCH, DateTimeUtils.microsToInstant(micros))
case (v, t) =>
throw new IllegalArgumentException(s"Match: unsupported argument(s) type - ($v, $t)")
}
case HoursTransform(ref) =>
extractor(ref.fieldNames, cleanedSchema, row) match {
case (micros: Long, TimestampType) =>
ChronoUnit.HOURS.between(Instant.EPOCH, DateTimeUtils.microsToInstant(micros))
case (v, t) =>
throw new IllegalArgumentException(s"Match: unsupported argument(s) type - ($v, $t)")
}
case BucketTransform(numBuckets, cols, _) =>
val valueTypePairs = cols.map(col => extractor(col.fieldNames, cleanedSchema, row))
var valueHashCode = 0
valueTypePairs.foreach( pair =>
if ( pair._1 != null) valueHashCode += pair._1.hashCode()
)
var dataTypeHashCode = 0
valueTypePairs.foreach(dataTypeHashCode += _._2.hashCode())
((valueHashCode + 31 * dataTypeHashCode) & Integer.MAX_VALUE) % numBuckets
}
}
protected def addPartitionKey(key: Seq[Any]): Unit = {}
protected def renamePartitionKey(
partitionSchema: StructType,
from: Seq[Any],
to: Seq[Any]): Boolean = {
val rows = dataMap.remove(from).getOrElse(new BufferedRows(from))
val newRows = new BufferedRows(to)
rows.rows.foreach { r =>
val newRow = new GenericInternalRow(r.numFields)
for (i <- 0 until r.numFields) newRow.update(i, r.get(i, schema(i).dataType))
for (i <- 0 until partitionSchema.length) {
val j = schema.fieldIndex(partitionSchema(i).name)
newRow.update(j, to(i))
}
newRows.withRow(newRow)
}
dataMap.put(to, newRows).foreach { _ =>
throw new IllegalStateException(
s"The ${to.mkString("[", ", ", "]")} partition exists already")
}
true
}
protected def removePartitionKey(key: Seq[Any]): Unit = dataMap.synchronized {
dataMap.remove(key)
}
protected def createPartitionKey(key: Seq[Any]): Unit = dataMap.synchronized {
if (!dataMap.contains(key)) {
val emptyRows = new BufferedRows(key)
val rows = if (key.length == schema.length) {
emptyRows.withRow(InternalRow.fromSeq(key))
} else emptyRows
dataMap.put(key, rows)
}
}
protected def clearPartition(key: Seq[Any]): Unit = dataMap.synchronized {
assert(dataMap.contains(key))
dataMap(key).clear()
}
def withData(data: Array[BufferedRows]): InMemoryTable = dataMap.synchronized {
data.foreach(_.rows.foreach { row =>
val key = getKey(row)
dataMap += dataMap.get(key)
.map(key -> _.withRow(row))
.getOrElse(key -> new BufferedRows(key).withRow(row))
addPartitionKey(key)
})
this
}
override def capabilities: util.Set[TableCapability] = util.EnumSet.of(
TableCapability.BATCH_READ,
TableCapability.BATCH_WRITE,
TableCapability.STREAMING_WRITE,
TableCapability.OVERWRITE_BY_FILTER,
TableCapability.OVERWRITE_DYNAMIC,
TableCapability.TRUNCATE)
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new InMemoryScanBuilder(schema)
}
/** Scan builder over the buffered partitions; honors required-column pruning. */
class InMemoryScanBuilder(tableSchema: StructType) extends ScanBuilder
    with SupportsPushDownRequiredColumns {
  // Narrowed by pruneColumns; starts out as the full table schema.
  private var prunedSchema: StructType = tableSchema

  override def build: Scan =
    new InMemoryBatchScan(data.map(_.asInstanceOf[InputPartition]), prunedSchema, tableSchema)

  override def pruneColumns(requiredSchema: StructType): Unit = {
    // Keep only requested fields that are real table columns or metadata columns.
    val knownNames = (metadataColumnNames ++ tableSchema.map(_.name)).toSet
    prunedSchema = StructType(requiredSchema.filter(f => knownNames.contains(f.name)))
  }
}
/** Statistics reported by [[InMemoryBatchScan]]: estimated byte size and row count. */
case class InMemoryStats(sizeInBytes: OptionalLong, numRows: OptionalLong) extends Statistics
/**
 * Batch scan over the buffered partitions. Supports runtime filtering on a
 * single partition key and reports row-count/size statistics.
 */
case class InMemoryBatchScan(
    var data: Seq[InputPartition],
    readSchema: StructType,
    tableSchema: StructType)
  extends Scan with Batch with SupportsRuntimeFiltering with SupportsReportStatistics {
  override def toBatch: Batch = this
  override def estimateStatistics(): Statistics = {
    if (data.isEmpty) {
      return InMemoryStats(OptionalLong.of(0L), OptionalLong.of(0L))
    }
    val inputPartitions = data.map(_.asInstanceOf[BufferedRows])
    val numRows = inputPartitions.map(_.rows.size).sum
    // we assume an average object header is 12 bytes
    val objectHeaderSizeInBytes = 12L
    // NOTE(review): sizes rows with the outer table `schema`, not the pruned
    // readSchema — confirm the over-estimate is intentional for these tests.
    val rowSizeInBytes = objectHeaderSizeInBytes + schema.defaultSize
    val sizeInBytes = numRows * rowSizeInBytes
    InMemoryStats(OptionalLong.of(sizeInBytes), OptionalLong.of(numRows))
  }
  override def planInputPartitions(): Array[InputPartition] = data.toArray
  override def createReaderFactory(): PartitionReaderFactory = {
    // Split the requested columns into metadata columns and real table columns.
    val metadataColumns = readSchema.map(_.name).filter(metadataColumnNames.contains)
    val nonMetadataColumns = readSchema.filterNot(f => metadataColumns.contains(f.name))
    new BufferedRowsReaderFactory(metadataColumns, nonMetadataColumns, tableSchema)
  }
  override def filterAttributes(): Array[NamedReference] = {
    // Only partitioning references that appear in the scan output are filterable.
    val scanFields = readSchema.fields.map(_.name).toSet
    partitioning.flatMap(_.references)
      .filter(ref => scanFields.contains(ref.fieldNames.mkString(".")))
  }
  override def filter(filters: Array[Filter]): Unit = {
    // Runtime filtering is only implemented for a single partition transform;
    // an IN filter on that key drops the non-matching buffered partitions.
    if (partitioning.length == 1) {
      filters.foreach {
        case In(attrName, values) if attrName == partitioning.head.name =>
          val matchingKeys = values.map(_.toString).toSet
          data = data.filter(partition => {
            val key = partition.asInstanceOf[BufferedRows].keyString
            matchingKeys.contains(key)
          })
        case _ => // skip
      }
    }
  }
}
/**
 * Creates a write builder supporting append, truncate-and-append, filter-based
 * overwrite and dynamic partition overwrite, for both batch and streaming.
 * May throw up front when the simulate-failed-write test option is set.
 */
override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = {
  InMemoryTable.maybeSimulateFailedTableWrite(new CaseInsensitiveStringMap(properties))
  InMemoryTable.maybeSimulateFailedTableWrite(info.options)
  new WriteBuilder with SupportsTruncate with SupportsOverwrite with SupportsDynamicOverwrite {
    // Both strategies default to plain append; each Supports* callback may
    // replace them at most once (hence the asserts against Append).
    private var writer: BatchWrite = Append
    private var streamingWriter: StreamingWrite = StreamingAppend
    override def truncate(): WriteBuilder = {
      assert(writer == Append)
      writer = TruncateAndAppend
      streamingWriter = StreamingTruncateAndAppend
      this
    }
    override def overwrite(filters: Array[Filter]): WriteBuilder = {
      assert(writer == Append)
      writer = new Overwrite(filters)
      // Fix: interpolate the filter list instead of the Array reference, which
      // rendered as the useless JVM form "[Lorg.apache.spark.sql.sources.Filter;@...".
      streamingWriter = new StreamingNotSupportedOperation(
        s"overwrite (${filters.mkString(", ")})")
      this
    }
    override def overwriteDynamicPartitions(): WriteBuilder = {
      assert(writer == Append)
      writer = DynamicOverwrite
      streamingWriter = new StreamingNotSupportedOperation("overwriteDynamicPartitions")
      this
    }
    override def build(): Write = new Write with RequiresDistributionAndOrdering {
      override def requiredDistribution: Distribution = distribution
      override def requiredOrdering: Array[SortOrder] = ordering
      override def requiredNumPartitions(): Int = {
        // 0 means "no requirement" when the table sets no explicit partition count.
        numPartitions.getOrElse(0)
      }
      override def toBatch: BatchWrite = writer
      override def toStreaming: StreamingWrite = streamingWriter match {
        case exc: StreamingNotSupportedOperation => exc.throwsException()
        case s => s
      }
      override def supportedCustomMetrics(): Array[CustomMetric] = {
        Array(new InMemorySimpleCustomMetric)
      }
    }
  }
}
/** Base batch write: tasks buffer rows via [[BufferedRowsWriterFactory]]; abort is a no-op. */
private abstract class TestBatchWrite extends BatchWrite {
  override def createBatchWriterFactory(info: PhysicalWriteInfo): DataWriterFactory = {
    BufferedRowsWriterFactory
  }
  override def abort(messages: Array[WriterCommitMessage]): Unit = {}
}
/** Appends the committed task buffers to the existing table data. */
private object Append extends TestBatchWrite {
  override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
    withData(messages.map(_.asInstanceOf[BufferedRows]))
  }
}
/** Dynamic partition overwrite: replaces only the partitions present in the new data. */
private object DynamicOverwrite extends TestBatchWrite {
  override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
    val newData = messages.map(_.asInstanceOf[BufferedRows])
    // Drop every partition touched by the incoming rows before re-adding them.
    dataMap --= newData.flatMap(_.rows.map(getKey))
    withData(newData)
  }
}
/** Filter-based overwrite: deletes partitions matching `filters`, then appends. */
private class Overwrite(filters: Array[Filter]) extends TestBatchWrite {
  import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.MultipartIdentifierHelper
  override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
    val deleteKeys = InMemoryTable.filtersToKeys(
      dataMap.keys, partCols.map(_.toSeq.quoted), filters)
    dataMap --= deleteKeys
    withData(messages.map(_.asInstanceOf[BufferedRows]))
  }
}
/** Drops every existing row, then appends the committed task buffers. */
private object TruncateAndAppend extends TestBatchWrite {
  override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
    val committed = messages.map(_.asInstanceOf[BufferedRows])
    dataMap.clear()
    withData(committed)
  }
}
/** Base streaming write: shares the buffered writer factory; abort is a no-op. */
private abstract class TestStreamingWrite extends StreamingWrite {
  def createStreamingWriterFactory(info: PhysicalWriteInfo): StreamingDataWriterFactory = {
    BufferedRowsWriterFactory
  }
  def abort(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {}
}
/**
 * Placeholder streaming write for operations this sink cannot run as a
 * streaming query; every entry point raises IllegalStateException.
 */
private class StreamingNotSupportedOperation(operation: String) extends TestStreamingWrite {
  override def createStreamingWriterFactory(info: PhysicalWriteInfo): StreamingDataWriterFactory =
    throwsException()
  override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit =
    throwsException()
  override def abort(epochId: Long, messages: Array[WriterCommitMessage]): Unit =
    throwsException()
  // Typed as T so the throw can sit in any of the return positions above.
  def throwsException[T](): T = throw new IllegalStateException("The operation " +
    s"${operation} isn't supported for streaming query.")
}
/** Streaming append: each epoch's committed buffers are merged into the table. */
private object StreamingAppend extends TestStreamingWrite {
  override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {
    dataMap.synchronized {
      withData(messages.map(_.asInstanceOf[BufferedRows]))
    }
  }
}
/** Streaming variant of truncate-and-append: wipe the table, then add the epoch's rows. */
private object StreamingTruncateAndAppend extends TestStreamingWrite {
  override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {
    val committed = messages.map(_.asInstanceOf[BufferedRows])
    dataMap.synchronized {
      dataMap.clear()
      withData(committed)
    }
  }
}
/** Delete is only supported for the filter types handled by filtersToKeys. */
override def canDeleteWhere(filters: Array[Filter]): Boolean = {
  InMemoryTable.supportsFilters(filters)
}
/** Removes every partition whose key values satisfy all of `filters`. */
override def deleteWhere(filters: Array[Filter]): Unit = dataMap.synchronized {
  import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.MultipartIdentifierHelper
  dataMap --= InMemoryTable.filtersToKeys(dataMap.keys, partCols.map(_.toSeq.quoted), filters)
}
}
/** Companion helpers shared by the in-memory table implementations. */
object InMemoryTable {
  // When this table option is "true", any write attempt fails immediately.
  val SIMULATE_FAILED_WRITE_OPTION = "spark.sql.test.simulateFailedWrite"
  /**
   * Returns the partition keys (from `keys`) whose values satisfy every filter.
   * Filters are first split on AND; only equality and null-test filters are
   * supported — anything else raises IllegalArgumentException.
   */
  def filtersToKeys(
      keys: Iterable[Seq[Any]],
      partitionNames: Seq[String],
      filters: Array[Filter]): Iterable[Seq[Any]] = {
    keys.filter { partValues =>
      filters.flatMap(splitAnd).forall {
        case EqualTo(attr, value) =>
          value == extractValue(attr, partitionNames, partValues)
        case EqualNullSafe(attr, value) =>
          val attrVal = extractValue(attr, partitionNames, partValues)
          // NOTE(review): `===` on these Any values must resolve via an implicit
          // not visible in this chunk — confirm it means plain equality here.
          if (attrVal == null && value === null) {
            true
          } else if (attrVal == null || value === null) {
            false
          } else {
            value == attrVal
          }
        case IsNull(attr) =>
          null == extractValue(attr, partitionNames, partValues)
        case IsNotNull(attr) =>
          null != extractValue(attr, partitionNames, partValues)
        case AlwaysTrue() => true
        case f =>
          throw new IllegalArgumentException(s"Unsupported filter type: $f")
      }
    }
  }
  /** True when every AND-split conjunct is a filter type filtersToKeys can evaluate. */
  def supportsFilters(filters: Array[Filter]): Boolean = {
    filters.flatMap(splitAnd).forall {
      case _: EqualTo => true
      case _: EqualNullSafe => true
      case _: IsNull => true
      case _: IsNotNull => true
      case _: AlwaysTrue => true
      case _ => false
    }
  }
  // Looks up the partition value for `attr`; unknown attributes are an error.
  private def extractValue(
      attr: String,
      partFieldNames: Seq[String],
      partValues: Seq[Any]): Any = {
    partFieldNames.zipWithIndex.find(_._1 == attr) match {
      case Some((_, partIndex)) =>
        partValues(partIndex)
      case _ =>
        throw new IllegalArgumentException(s"Unknown filter attribute: $attr")
    }
  }
  // Flattens nested And filters into a list of conjuncts.
  private def splitAnd(filter: Filter): Seq[Filter] = {
    filter match {
      case And(left, right) => splitAnd(left) ++ splitAnd(right)
      case _ => filter :: Nil
    }
  }
  /** Fails the write when the simulate-failure test option is enabled. */
  def maybeSimulateFailedTableWrite(tableOptions: CaseInsensitiveStringMap): Unit = {
    if (tableOptions.getBoolean(SIMULATE_FAILED_WRITE_OPTION, false)) {
      throw new IllegalStateException("Manual write to table failure.")
    }
  }
}
/**
 * A mutable buffer of rows for one partition key. Doubles as the task commit
 * message and as the scan's input partition.
 */
class BufferedRows(val key: Seq[Any] = Seq.empty) extends WriterCommitMessage
    with InputPartition with HasPartitionKey with Serializable {
  val rows = new mutable.ArrayBuffer[InternalRow]()

  /** Appends `row` and returns this buffer for chaining. */
  def withRow(row: InternalRow): BufferedRows = {
    rows += row
    this
  }

  /** Human-readable key, e.g. "a/1" — used for runtime-filter matching. */
  def keyString(): String = key.mkString("/")

  override def partitionKey(): InternalRow = InternalRow.fromSeq(key)

  def clear(): Unit = rows.clear()
}
/** Creates a [[BufferedRowsReader]] for each buffered partition of a scan. */
private class BufferedRowsReaderFactory(
    metadataColumnNames: Seq[String],
    nonMetaDataColumns: Seq[StructField],
    tableSchema: StructType) extends PartitionReaderFactory {
  override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
    val buffered = partition.asInstanceOf[BufferedRows]
    new BufferedRowsReader(buffered, metadataColumnNames, nonMetaDataColumns, tableSchema)
  }
}
/**
 * Iterates the rows of one buffered partition, projecting the requested
 * non-metadata columns and appending any requested metadata columns
 * ("index" = row position within the partition, "_partition" = key string).
 */
private class BufferedRowsReader(
    partition: BufferedRows,
    metadataColumnNames: Seq[String],
    nonMetadataColumns: Seq[StructField],
    tableSchema: StructType) extends PartitionReader[InternalRow] {
  // Joins the projected data row with the requested metadata columns.
  private def addMetadata(row: InternalRow): InternalRow = {
    val metadataRow = new GenericInternalRow(metadataColumnNames.map {
      case "index" => index
      case "_partition" => UTF8String.fromString(partition.keyString)
    }.toArray)
    new JoinedRow(row, metadataRow)
  }
  // Cursor into partition.rows; starts at -1 so the first next() lands on row 0.
  private var index: Int = -1
  override def next(): Boolean = {
    index += 1
    index < partition.rows.length
  }
  override def get(): InternalRow = {
    val originalRow = partition.rows(index)
    val values = new Array[Any](nonMetadataColumns.length)
    nonMetadataColumns.zipWithIndex.foreach { case (col, idx) =>
      values(idx) = extractFieldValue(col, tableSchema, originalRow)
    }
    addMetadata(new GenericInternalRow(values))
  }
  override def close(): Unit = {}
  // Recursively pulls `field` out of `row`, rebuilding nested structs so the
  // output matches the requested (possibly pruned) struct shape.
  private def extractFieldValue(
      field: StructField,
      schema: StructType,
      row: InternalRow): Any = {
    val index = schema.fieldIndex(field.name)
    field.dataType match {
      case StructType(fields) =>
        if (row.isNullAt(index)) {
          return null
        }
        val childRow = row.toSeq(schema)(index).asInstanceOf[InternalRow]
        val childSchema = schema(index).dataType.asInstanceOf[StructType]
        val resultValue = new Array[Any](fields.length)
        fields.zipWithIndex.foreach { case (childField, idx) =>
          val childValue = extractFieldValue(childField, childSchema, childRow)
          resultValue(idx) = childValue
        }
        new GenericInternalRow(resultValue)
      case dt =>
        row.get(index, dt)
    }
  }
}
/** Writer factory shared by batch and streaming; every task gets a fresh BufferWriter. */
private object BufferedRowsWriterFactory extends DataWriterFactory with StreamingDataWriterFactory {
  override def createWriter(partitionId: Int, taskId: Long): DataWriter[InternalRow] =
    new BufferWriter

  override def createWriter(
      partitionId: Int,
      taskId: Long,
      epochId: Long): DataWriter[InternalRow] =
    new BufferWriter
}
/** Task-side writer: copies each incoming row into a buffer handed back at commit. */
private class BufferWriter extends DataWriter[InternalRow] {
  private val buffer = new BufferedRows

  // Rows are reused by the caller, so each one must be copied before buffering.
  override def write(row: InternalRow): Unit = buffer.rows += row.copy()
  override def commit(): WriterCommitMessage = buffer
  override def abort(): Unit = {}
  override def close(): Unit = {}

  override def currentMetricsValues(): Array[CustomTaskMetric] = {
    Array(new CustomTaskMetric {
      override def name(): String = "in_memory_buffer_rows"
      override def value(): Long = buffer.rows.size
    })
  }
}
/** Driver-side metric that sums per-task buffered row counts into a display string. */
class InMemorySimpleCustomMetric extends CustomMetric {
  override def name(): String = "in_memory_buffer_rows"
  override def description(): String = "number of rows in buffer"
  override def aggregateTaskMetrics(taskMetrics: Array[Long]): String =
    s"in-memory rows: ${taskMetrics.sum}"
}
| WeichenXu123/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryTable.scala | Scala | apache-2.0 | 23,551 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.avro
import java.io._
import java.net.URL
import java.nio.file.{Files, Paths, StandardCopyOption}
import java.sql.{Date, Timestamp}
import java.util.{Locale, UUID}
import scala.collection.JavaConverters._
import org.apache.avro.{AvroTypeException, Schema, SchemaBuilder}
import org.apache.avro.Schema.{Field, Type}
import org.apache.avro.Schema.Type._
import org.apache.avro.file.{DataFileReader, DataFileWriter}
import org.apache.avro.generic.{GenericData, GenericDatumReader, GenericDatumWriter, GenericRecord}
import org.apache.avro.generic.GenericData.{EnumSymbol, Fixed}
import org.apache.commons.io.FileUtils
import org.apache.spark.{SPARK_VERSION_SHORT, SparkConf, SparkException, SparkUpgradeException}
import org.apache.spark.TestUtils.assertExceptionMsg
import org.apache.spark.sql._
import org.apache.spark.sql.TestingUDT.IntervalData
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.plans.logical.Filter
import org.apache.spark.sql.catalyst.util.DateTimeTestUtils.{withDefaultTimeZone, LA, UTC}
import org.apache.spark.sql.execution.{FormattedMode, SparkPlan}
import org.apache.spark.sql.execution.datasources.{CommonFileDataSourceSuite, DataSource, FilePartition}
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy
import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy._
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.sql.v2.avro.AvroScan
import org.apache.spark.util.Utils
abstract class AvroSuite
extends QueryTest
with SharedSparkSession
with CommonFileDataSourceSuite
with NestedDataSourceSuiteBase {
import testImplicits._
// This suite exercises the built-in "avro" data source end to end.
override protected def dataSourceFormat = "avro"
override val nestedDataSources = Seq("avro")
// Bundled sample files used as fixtures by many tests below.
val episodesAvro = testFile("episodes.avro")
val testAvro = testFile("test.avro")
override protected def beforeAll(): Unit = {
  super.beforeAll()
  // Tiny max-partition size so multi-partition read paths are exercised.
  spark.conf.set(SQLConf.FILES_MAX_PARTITION_BYTES.key, 1024)
}
/**
 * Verifies a save/reload round-trip: the rows read back from `newFile` must
 * equal the rows read from `originalFile`.
 */
def checkReloadMatchesSaved(originalFile: String, newFile: String): Unit = {
  // Fix: previously this always loaded `testAvro`, silently ignoring the
  // `originalFile` argument, so comparisons against any other source were wrong.
  val originalEntries = spark.read.format("avro").load(originalFile).collect()
  val newEntries = spark.read.format("avro").load(newFile)
  checkAnswer(newEntries, originalEntries)
}
/** Asserts two Avro schema JSON strings parse to equal [[Schema]] values. */
def checkAvroSchemaEquals(avroSchema: String, expectedAvroSchema: String): Unit = {
  // Separate parsers: a shared Parser caches named types and would reject the
  // second schema as a redefinition.
  val actual = new Schema.Parser().parse(avroSchema)
  val expected = new Schema.Parser().parse(expectedAvroSchema)
  assert(actual == expected)
}
/**
 * Returns the writer schema (as single-line JSON) stored in an Avro file.
 * When `filePath` is a directory, the first regular file whose name ends in
 * "avro" is inspected.
 */
def getAvroSchemaStringFromFiles(filePath: String): String = {
  val target = new File(filePath)
  val avroFile =
    if (target.isFile) {
      target
    } else {
      target.listFiles().filter(f => f.isFile && f.getName.endsWith("avro")).head
    }
  new DataFileReader(avroFile, new GenericDatumReader[Any]()).getSchema.toString(false)
}
/** Resolves a classpath resource to its URL string form. */
private def getResourceAvroFilePath(name: String): String = {
  val loader = Thread.currentThread().getContextClassLoader
  loader.getResource(name).toString
}
// The databricks alias must map to the built-in Avro source unless the legacy
// replacement flag is disabled, in which case provider lookup fails.
test("resolve avro data source") {
  val databricksAvro = "com.databricks.spark.avro"
  // By default the backward compatibility for com.databricks.spark.avro is enabled.
  Seq("org.apache.spark.sql.avro.AvroFileFormat", databricksAvro).foreach { provider =>
    assert(DataSource.lookupDataSource(provider, spark.sessionState.conf) ===
      classOf[org.apache.spark.sql.avro.AvroFileFormat])
  }
  withSQLConf(SQLConf.LEGACY_REPLACE_DATABRICKS_SPARK_AVRO_ENABLED.key -> "false") {
    val message = intercept[AnalysisException] {
      DataSource.lookupDataSource(databricksAvro, spark.sessionState.conf)
    }.getMessage
    assert(message.contains(s"Failed to find data source: $databricksAvro"))
  }
}
// episodes.avro holds 8 rows, so loading the same path twice yields 16.
test("reading from multiple paths") {
  val df = spark.read.format("avro").load(episodesAvro, episodesAvro)
  assert(df.count == 16)
}
// Round-trips the data partitioned by each column in turn; the partition
// column's values must survive the write/read cycle.
test("reading and writing partitioned data") {
  val df = spark.read.format("avro").load(episodesAvro)
  val fields = List("title", "air_date", "doctor")
  for (field <- fields) {
    withTempPath { dir =>
      val outputDir = s"$dir/${UUID.randomUUID}"
      df.write.partitionBy(field).format("avro").save(outputDir)
      val input = spark.read.format("avro").load(outputDir)
      // makes sure that no fields got dropped.
      // We convert Rows to Seqs in order to work around SPARK-10325
      assert(input.select(field).collect().map(_.toSeq).toSet ===
        df.select(field).collect().map(_.toSeq).toSet)
    }
  }
}
// A query that materializes no columns (count(*)) still sees all 8 rows.
test("request no fields") {
  val df = spark.read.format("avro").load(episodesAvro)
  df.createOrReplaceTempView("avro_table")
  assert(spark.sql("select count(*) from avro_table").collect().head === Row(8))
}
// Avro-read data can be re-written as Parquet without row loss.
test("convert formats") {
  withTempPath { dir =>
    val df = spark.read.format("avro").load(episodesAvro)
    df.write.parquet(dir.getCanonicalPath)
    assert(spark.read.parquet(dir.getCanonicalPath).count() === df.count)
  }
}
// Writing a projection whose column order differs from the file's schema.
test("rearrange internal schema") {
  withTempPath { dir =>
    val df = spark.read.format("avro").load(episodesAvro)
    df.select("doctor", "title").write.format("avro").save(dir.getCanonicalPath)
  }
}
// An [int, long] union widens to a single nullable LongType column.
test("union(int, long) is read as long") {
  withTempPath { dir =>
    val avroSchema: Schema = {
      val union =
        Schema.createUnion(List(Schema.create(Type.INT), Schema.create(Type.LONG)).asJava)
      val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
      val schema = Schema.createRecord("name", "docs", "namespace", false)
      schema.setFields(fields)
      schema
    }
    val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
    val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
    dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
    val rec1 = new GenericData.Record(avroSchema)
    rec1.put("field1", 1.toLong)
    dataFileWriter.append(rec1)
    val rec2 = new GenericData.Record(avroSchema)
    rec2.put("field1", 2)
    dataFileWriter.append(rec2)
    dataFileWriter.flush()
    dataFileWriter.close()
    val df = spark.read.format("avro").load(s"$dir.avro")
    assert(df.schema.fields === Seq(StructField("field1", LongType, nullable = true)))
    assert(df.collect().toSet == Set(Row(1L), Row(2L)))
  }
}
// A [float, double] union widens to a single nullable DoubleType column.
test("union(float, double) is read as double") {
  withTempPath { dir =>
    val avroSchema: Schema = {
      val union =
        Schema.createUnion(List(Schema.create(Type.FLOAT), Schema.create(Type.DOUBLE)).asJava)
      val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
      val schema = Schema.createRecord("name", "docs", "namespace", false)
      schema.setFields(fields)
      schema
    }
    val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
    val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
    dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
    val rec1 = new GenericData.Record(avroSchema)
    rec1.put("field1", 1.toFloat)
    dataFileWriter.append(rec1)
    val rec2 = new GenericData.Record(avroSchema)
    rec2.put("field1", 2.toDouble)
    dataFileWriter.append(rec2)
    dataFileWriter.flush()
    dataFileWriter.close()
    val df = spark.read.format("avro").load(s"$dir.avro")
    assert(df.schema.fields === Seq(StructField("field1", DoubleType, nullable = true)))
    assert(df.collect().toSet == Set(Row(1.toDouble), Row(2.toDouble)))
  }
}
// Adding a null branch keeps the widened double type and allows null values.
test("union(float, double, null) is read as nullable double") {
  withTempPath { dir =>
    val avroSchema: Schema = {
      val union = Schema.createUnion(
        List(Schema.create(Type.FLOAT),
          Schema.create(Type.DOUBLE),
          Schema.create(Type.NULL)
        ).asJava
      )
      val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
      val schema = Schema.createRecord("name", "docs", "namespace", false)
      schema.setFields(fields)
      schema
    }
    val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
    val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
    dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
    val rec1 = new GenericData.Record(avroSchema)
    rec1.put("field1", 1.toFloat)
    dataFileWriter.append(rec1)
    val rec2 = new GenericData.Record(avroSchema)
    rec2.put("field1", null)
    dataFileWriter.append(rec2)
    dataFileWriter.flush()
    dataFileWriter.close()
    val df = spark.read.format("avro").load(s"$dir.avro")
    assert(df.schema.fields === Seq(StructField("field1", DoubleType, nullable = true)))
    assert(df.collect().toSet == Set(Row(1.toDouble), Row(null)))
  }
}
// A one-branch union collapses to the branch's plain type.
test("Union of a single type") {
  withTempPath { dir =>
    val UnionOfOne = Schema.createUnion(List(Schema.create(Type.INT)).asJava)
    val fields = Seq(new Field("field1", UnionOfOne, "doc", null.asInstanceOf[AnyVal])).asJava
    val schema = Schema.createRecord("name", "docs", "namespace", false)
    schema.setFields(fields)
    val datumWriter = new GenericDatumWriter[GenericRecord](schema)
    val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
    dataFileWriter.create(schema, new File(s"$dir.avro"))
    val avroRec = new GenericData.Record(schema)
    avroRec.put("field1", 8)
    dataFileWriter.append(avroRec)
    dataFileWriter.flush()
    dataFileWriter.close()
    val df = spark.read.format("avro").load(s"$dir.avro")
    assert(df.first() == Row(8))
  }
}
// A multi-branch union (plus null) maps to a struct with one memberN column per
// non-null branch; only the matching member is populated per row.
test("SPARK-27858 Union type: More than one non-null type") {
  withTempDir { dir =>
    val complexNullUnionType = Schema.createUnion(
      List(Schema.create(Type.INT), Schema.create(Type.NULL), Schema.create(Type.STRING)).asJava)
    val fields = Seq(
      new Field("field1", complexNullUnionType, "doc", null.asInstanceOf[AnyVal])).asJava
    val schema = Schema.createRecord("name", "docs", "namespace", false)
    schema.setFields(fields)
    val datumWriter = new GenericDatumWriter[GenericRecord](schema)
    val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
    dataFileWriter.create(schema, new File(s"$dir.avro"))
    val avroRec = new GenericData.Record(schema)
    avroRec.put("field1", 42)
    dataFileWriter.append(avroRec)
    val avroRec2 = new GenericData.Record(schema)
    avroRec2.put("field1", "Alice")
    dataFileWriter.append(avroRec2)
    dataFileWriter.flush()
    dataFileWriter.close()
    val df = spark.read.format("avro").load(s"$dir.avro")
    assert(df.schema === StructType.fromDDL("field1 struct<member0: int, member1: string>"))
    assert(df.collect().toSet == Set(Row(Row(42, null)), Row(Row(null, "Alice"))))
  }
}
// Unions mixing primitive, fixed and enum branches: each branch is addressable
// as field.memberN according to its position in the union.
test("Complex Union Type") {
  withTempPath { dir =>
    val fixedSchema = Schema.createFixed("fixed_name", "doc", "namespace", 4)
    val enumSchema = Schema.createEnum("enum_name", "doc", "namespace", List("e1", "e2").asJava)
    val complexUnionType = Schema.createUnion(
      List(Schema.create(Type.INT), Schema.create(Type.STRING), fixedSchema, enumSchema).asJava)
    val fields = Seq(
      new Field("field1", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
      new Field("field2", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
      new Field("field3", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
      new Field("field4", complexUnionType, "doc", null.asInstanceOf[AnyVal])
    ).asJava
    val schema = Schema.createRecord("name", "docs", "namespace", false)
    schema.setFields(fields)
    val datumWriter = new GenericDatumWriter[GenericRecord](schema)
    val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
    dataFileWriter.create(schema, new File(s"$dir.avro"))
    val avroRec = new GenericData.Record(schema)
    val field1 = 1234
    val field2 = "Hope that was not load bearing"
    val field3 = Array[Byte](1, 2, 3, 4)
    val field4 = "e2"
    avroRec.put("field1", field1)
    avroRec.put("field2", field2)
    avroRec.put("field3", new Fixed(fixedSchema, field3))
    avroRec.put("field4", new EnumSymbol(enumSchema, field4))
    dataFileWriter.append(avroRec)
    dataFileWriter.flush()
    dataFileWriter.close()
    val df = spark.sqlContext.read.format("avro").load(s"$dir.avro")
    assertResult(field1)(df.selectExpr("field1.member0").first().get(0))
    assertResult(field2)(df.selectExpr("field2.member1").first().get(0))
    assertResult(field3)(df.selectExpr("field3.member2").first().get(0))
    assertResult(field4)(df.selectExpr("field4.member3").first().get(0))
  }
}
// Rows that are almost entirely null must survive a write/read round-trip.
test("Lots of nulls") {
  withTempPath { dir =>
    val schema = StructType(Seq(
      StructField("binary", BinaryType, true),
      StructField("timestamp", TimestampType, true),
      StructField("array", ArrayType(ShortType), true),
      StructField("map", MapType(StringType, StringType), true),
      StructField("struct", StructType(Seq(StructField("int", IntegerType, true))))))
    val rdd = spark.sparkContext.parallelize(Seq[Row](
      Row(null, new Timestamp(1), Array[Short](1, 2, 3), null, null),
      Row(null, null, null, null, null),
      Row(null, null, null, null, null),
      Row(null, null, null, null, null)))
    val df = spark.createDataFrame(rdd, schema)
    df.write.format("avro").save(dir.toString)
    assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
  }
}
// Narrow numeric and boolean field types round-trip through Avro.
test("Struct field type") {
  withTempPath { dir =>
    val schema = StructType(Seq(
      StructField("float", FloatType, true),
      StructField("short", ShortType, true),
      StructField("byte", ByteType, true),
      StructField("boolean", BooleanType, true)
    ))
    val rdd = spark.sparkContext.parallelize(Seq(
      Row(1f, 1.toShort, 1.toByte, true),
      Row(2f, 2.toShort, 2.toByte, true),
      Row(3f, 3.toShort, 3.toByte, true)
    ))
    val df = spark.createDataFrame(rdd, schema)
    df.write.format("avro").save(dir.toString)
    assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
  }
}
/** Writes a file named corrupt.avro under `dir` whose content is not valid Avro. */
private def createDummyCorruptFile(dir: File): Unit = {
  Utils.tryWithResource {
    FileUtils.forceMkdir(dir)
    new BufferedWriter(new FileWriter(new File(dir, "corrupt.avro")))
  } { out =>
    out.write("corrupt")
  }
}
// With IGNORE_CORRUPT_FILES on, a corrupt file alone still fails schema
// inference (no readable Avro), but a valid file alongside it is read fine.
test("Ignore corrupt Avro file if flag IGNORE_CORRUPT_FILES enabled") {
  withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
    withTempPath { dir =>
      createDummyCorruptFile(dir)
      val message = intercept[FileNotFoundException] {
        spark.read.format("avro").load(dir.getAbsolutePath).schema
      }.getMessage
      assert(message.contains("No Avro files found."))
      Files.copy(
        Paths.get(new URL(episodesAvro).toURI),
        Paths.get(dir.getCanonicalPath, "episodes.avro"))
      val result = spark.read.format("avro").load(episodesAvro).collect()
      checkAnswer(spark.read.format("avro").load(dir.getAbsolutePath), result)
    }
  }
}
// With the flag off, a corrupt file makes the read fail outright.
test("Throws IOException on reading corrupt Avro file if flag IGNORE_CORRUPT_FILES disabled") {
  withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
    withTempPath { dir =>
      createDummyCorruptFile(dir)
      val message = intercept[org.apache.spark.SparkException] {
        spark.read.format("avro").load(dir.getAbsolutePath)
      }.getMessage
      assert(message.contains("Could not read file"))
    }
  }
}
// Dates round-trip in UTC; stored values are truncated to day precision on read
// (note the expected millis differ from the written ones).
test("Date field type") {
  withTempPath { dir =>
    val schema = StructType(Seq(
      StructField("float", FloatType, true),
      StructField("date", DateType, true)
    ))
    withDefaultTimeZone(UTC) {
      val rdd = spark.sparkContext.parallelize(Seq(
        Row(1f, null),
        Row(2f, new Date(1451948400000L)),
        Row(3f, new Date(1460066400500L))
      ))
      val df = spark.createDataFrame(rdd, schema)
      df.write.format("avro").save(dir.toString)
      assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
      checkAnswer(
        spark.read.format("avro").load(dir.toString).select("date"),
        Seq(Row(null), Row(new Date(1451865600000L)), Row(new Date(1459987200000L))))
    }
  }
}
// Arrays of every element type (including nested arrays and structs) round-trip.
test("Array data types") {
  withTempPath { dir =>
    val testSchema = StructType(Seq(
      StructField("byte_array", ArrayType(ByteType), true),
      StructField("short_array", ArrayType(ShortType), true),
      StructField("float_array", ArrayType(FloatType), true),
      StructField("bool_array", ArrayType(BooleanType), true),
      StructField("long_array", ArrayType(LongType), true),
      StructField("double_array", ArrayType(DoubleType), true),
      StructField("decimal_array", ArrayType(DecimalType(10, 0)), true),
      StructField("bin_array", ArrayType(BinaryType), true),
      StructField("timestamp_array", ArrayType(TimestampType), true),
      StructField("array_array", ArrayType(ArrayType(StringType), true), true),
      StructField("struct_array", ArrayType(
        StructType(Seq(StructField("name", StringType, true)))))))
    val arrayOfByte = new Array[Byte](4)
    for (i <- arrayOfByte.indices) {
      arrayOfByte(i) = i.toByte
    }
    val rdd = spark.sparkContext.parallelize(Seq(
      Row(arrayOfByte, Array[Short](1, 2, 3, 4), Array[Float](1f, 2f, 3f, 4f),
        Array[Boolean](true, false, true, false), Array[Long](1L, 2L), Array[Double](1.0, 2.0),
        Array[BigDecimal](BigDecimal.valueOf(3)), Array[Array[Byte]](arrayOfByte, arrayOfByte),
        Array[Timestamp](new Timestamp(0)),
        Array[Array[String]](Array[String]("CSH, tearing down the walls that divide us", "-jd")),
        Array[Row](Row("Bobby G. can't swim")))))
    val df = spark.createDataFrame(rdd, testSchema)
    df.write.format("avro").save(dir.toString)
    assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
  }
}
// Writes the same data with every supported codec and sanity-checks the
// relative on-disk sizes (codec choice driven by SQL configs, not options).
test("write with compression - sql configs") {
  withTempPath { dir =>
    val uncompressDir = s"$dir/uncompress"
    val bzip2Dir = s"$dir/bzip2"
    val xzDir = s"$dir/xz"
    val deflateDir = s"$dir/deflate"
    val snappyDir = s"$dir/snappy"
    val zstandardDir = s"$dir/zstandard"
    val df = spark.read.format("avro").load(testAvro)
    spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "uncompressed")
    df.write.format("avro").save(uncompressDir)
    spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "bzip2")
    df.write.format("avro").save(bzip2Dir)
    spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "xz")
    df.write.format("avro").save(xzDir)
    spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "deflate")
    spark.conf.set(SQLConf.AVRO_DEFLATE_LEVEL.key, "9")
    df.write.format("avro").save(deflateDir)
    spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "snappy")
    df.write.format("avro").save(snappyDir)
    spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "zstandard")
    df.write.format("avro").save(zstandardDir)
    val uncompressSize = FileUtils.sizeOfDirectory(new File(uncompressDir))
    val bzip2Size = FileUtils.sizeOfDirectory(new File(bzip2Dir))
    val xzSize = FileUtils.sizeOfDirectory(new File(xzDir))
    val deflateSize = FileUtils.sizeOfDirectory(new File(deflateDir))
    val snappySize = FileUtils.sizeOfDirectory(new File(snappyDir))
    val zstandardSize = FileUtils.sizeOfDirectory(new File(zstandardDir))
    assert(uncompressSize > deflateSize)
    assert(snappySize > deflateSize)
    assert(snappySize > bzip2Size)
    assert(bzip2Size > xzSize)
    assert(uncompressSize > zstandardSize)
  }
}
test("dsl test") {
  val results = spark.read.format("avro").load(episodesAvro).select("title").collect()
  assert(results.length === 8)
}
// The legacy com.databricks.spark.avro provider name keeps working.
test("old avro data source name works") {
  val results =
    spark.read.format("com.databricks.spark.avro")
      .load(episodesAvro).select("title").collect()
  assert(results.length === 8)
}
test("support of various data types") {
  // This test uses data from test.avro. You can see the data and the schema of this file in
  // test.json and test.avsc
  val all = spark.read.format("avro").load(testAvro).collect()
  assert(all.length == 3)
  val str = spark.read.format("avro").load(testAvro).select("string").collect()
  assert(str.map(_(0)).toSet.contains("Terran is IMBA!"))
  val simple_map = spark.read.format("avro").load(testAvro).select("simple_map").collect()
  assert(simple_map(0)(0).getClass.toString.contains("Map"))
  assert(simple_map.map(_(0).asInstanceOf[Map[String, Some[Int]]].size).toSet == Set(2, 0))
  val union0 = spark.read.format("avro").load(testAvro).select("union_string_null").collect()
  assert(union0.map(_(0)).toSet == Set("abc", "123", null))
  val union1 = spark.read.format("avro").load(testAvro).select("union_int_long_null").collect()
  assert(union1.map(_(0)).toSet == Set(66, 1, null))
  val union2 = spark.read.format("avro").load(testAvro).select("union_float_double").collect()
  assert(
    union2
      .map(x => java.lang.Double.valueOf(x(0).toString))
      .exists(p => Math.abs(p - Math.PI) < 0.001))
  val fixed = spark.read.format("avro").load(testAvro).select("fixed3").collect()
  assert(fixed.map(_(0).asInstanceOf[Array[Byte]]).exists(p => p(1) == 3))
  val enum = spark.read.format("avro").load(testAvro).select("enum").collect()
  assert(enum.map(_(0)).toSet == Set("SPADES", "CLUBS", "DIAMONDS"))
  val record = spark.read.format("avro").load(testAvro).select("record").collect()
  assert(record(0)(0).getClass.toString.contains("Row"))
  assert(record.map(_(0).asInstanceOf[Row](0)).contains("TEST_STR123"))
  val array_of_boolean =
    spark.read.format("avro").load(testAvro).select("array_of_boolean").collect()
  assert(array_of_boolean.map(_(0).asInstanceOf[scala.collection.Seq[Boolean]].size).toSet ==
    Set(3, 1, 0))
  val bytes = spark.read.format("avro").load(testAvro).select("bytes").collect()
  assert(bytes.map(_(0).asInstanceOf[Array[Byte]].length).toSet == Set(3, 1, 0))
}
// Registers episodes.avro as a temporary SQL view and queries it.
test("sql test") {
  spark.sql(
    // Collapse the multi-line DDL to a single line before parsing.
    s"""
      |CREATE TEMPORARY VIEW avroTable
      |USING avro
      |OPTIONS (path "${episodesAvro}")
    """.stripMargin.replaceAll("\\n", " "))
  assert(spark.sql("SELECT * FROM avroTable").collect().length === 8)
}
test("conversion to avro and back") {
  // test.avro covers a variety of (partly nullable) types; a write/read
  // round trip must preserve every value.
  withTempPath { dir =>
    val roundTripDir = s"$dir/avro"
    val original = spark.read.format("avro").load(testAvro)
    original.write.format("avro").save(roundTripDir)
    checkReloadMatchesSaved(testAvro, roundTripDir)
  }
}
// Round trip with user-supplied record name/namespace options, then checks the
// raw output files actually embed that naming information.
test("conversion to avro and back with namespace") {
  // Note that test.avro includes a variety of types, some of which are nullable. We expect to
  // get the same values back.
  withTempPath { tempDir =>
    val name = "AvroTest"
    val namespace = "org.apache.spark.avro"
    // recordName/recordNamespace control the writer's Avro record naming.
    val parameters = Map("recordName" -> name, "recordNamespace" -> namespace)
    val avroDir = tempDir + "/namedAvro"
    spark.read.format("avro").load(testAvro)
      .write.options(parameters).format("avro").save(avroDir)
    checkReloadMatchesSaved(testAvro, avroDir)
    // Look at raw file and make sure has namespace info
    // (Avro files embed the JSON schema as plain text in the header).
    val rawSaved = spark.sparkContext.textFile(avroDir)
    val schema = rawSaved.collect().mkString("")
    assert(schema.contains(name))
    assert(schema.contains(namespace))
  }
}
test("SPARK-34229: Avro should read decimal values with the file schema") {
  withTempPath { path =>
    sql("SELECT 3.14 a").write.format("avro").save(path.toString)
    // Reading with a wider-scale schema must rescale the stored value (3.140).
    val loaded = spark.read.schema("a DECIMAL(4, 3)").format("avro").load(path.toString)
    val values = loaded.collect().map(_ (0))
    assert(values.contains(new java.math.BigDecimal("3.140")))
  }
}
// Writes timestamp, decimal, binary and nullable int columns to Avro and
// verifies each value survives the round trip.
test("converting some specific sparkSQL types to avro") {
  withTempPath { tempDir =>
    val testSchema = StructType(Seq(
      StructField("Name", StringType, false),
      StructField("Length", IntegerType, true),
      StructField("Time", TimestampType, false),
      StructField("Decimal", DecimalType(10, 2), true),
      StructField("Binary", BinaryType, false)))
    // Deterministic 4-byte payload: [0, 1, 2, 3].
    val arrayOfByte = new Array[Byte](4)
    for (i <- arrayOfByte.indices) {
      arrayOfByte(i) = i.toByte
    }
    val cityRDD = spark.sparkContext.parallelize(Seq(
      Row("San Francisco", 12, new Timestamp(666), null, arrayOfByte),
      Row("Palo Alto", null, new Timestamp(777), null, arrayOfByte),
      Row("Munich", 8, new Timestamp(42), Decimal(3.14), arrayOfByte)))
    val cityDataFrame = spark.createDataFrame(cityRDD, testSchema)
    val avroDir = tempDir + "/avro"
    cityDataFrame.write.format("avro").save(avroDir)
    assert(spark.read.format("avro").load(avroDir).collect().length == 3)
    // Timestamps come back equal to what was written.
    val times = spark.read.format("avro").load(avroDir).select("Time").collect()
    assert(times.map(_(0)).toSet ==
      Set(new Timestamp(666), new Timestamp(777), new Timestamp(42)))
    // Decimal values read back as java.math.BigDecimal.
    val decimals = spark.read.format("avro").load(avroDir).select("Decimal").collect()
    assert(decimals.map(_(0)).contains(new java.math.BigDecimal("3.14")))
    // There should be a null entry (the nullable Length of "Palo Alto").
    val length = spark.read.format("avro").load(avroDir).select("Length").collect()
    assert(length.map(_(0)).contains(null))
    // Binary column: compare byte-by-byte against the original payload.
    val binary = spark.read.format("avro").load(avroDir).select("Binary").collect()
    for (i <- arrayOfByte.indices) {
      assert(binary(1)(0).asInstanceOf[Array[Byte]](i) == arrayOfByte(i))
    }
  }
}
// Avro stores date/timestamp as longs; reading them back with an explicit
// Date/Timestamp schema must reproduce the original values.
test("correctly read long as date/timestamp type") {
  withTempPath { tempDir =>
    val currentTime = new Timestamp(System.currentTimeMillis())
    val currentDate = new Date(System.currentTimeMillis())
    val schema = StructType(Seq(
      StructField("_1", DateType, false), StructField("_2", TimestampType, false)))
    val writeDs = Seq((currentDate, currentTime)).toDS
    val avroDir = tempDir + "/avro"
    writeDs.write.format("avro").save(avroDir)
    assert(spark.read.format("avro").load(avroDir).collect().length == 1)
    val readDs = spark.read.schema(schema).format("avro").load(avroDir).as[(Date, Timestamp)]
    assert(readDs.collect().sameElements(writeDs.collect()))
  }
}
test("support of globbed paths") {
  // Globs one and two directory levels above the resource dir must both
  // resolve to episodes.avro (8 records).
  val resourceDir = testFile(".")
  val oneLevel = spark.read.format("avro").load(resourceDir + "../*/episodes.avro").collect()
  assert(oneLevel.length == 8)
  val twoLevels = spark.read.format("avro").load(resourceDir + "../../*/*/episodes.avro").collect()
  assert(twoLevels.length == 8)
}
// Null date/timestamp values must round trip as nulls, not be coerced to the
// 0 epoch (1970-01-01).
test("does not coerce null date/timestamp value to 0 epoch.") {
  withTempPath { tempDir =>
    val nullTime: Timestamp = null
    val nullDate: Date = null
    val schema = StructType(Seq(
      StructField("_1", DateType, nullable = true),
      StructField("_2", TimestampType, nullable = true))
    )
    val writeDs = Seq((nullDate, nullTime)).toDS
    val avroDir = tempDir + "/avro"
    writeDs.write.format("avro").save(avroDir)
    val readValues =
      spark.read.schema(schema).format("avro").load(avroDir).as[(Date, Timestamp)].collect
    assert(readValues.size == 1)
    // Double parentheses force the tuple literal rather than a 2-arg call.
    assert(readValues.head == ((nullDate, nullTime)))
  }
}
// A user-provided "avroSchema" option projects the read down to exactly the
// fields the schema lists (here, only "string").
test("support user provided avro schema") {
  val avroSchema =
    """
      |{
      |  "type" : "record",
      |  "name" : "test_schema",
      |  "fields" : [{
      |    "name" : "string",
      |    "type" : "string",
      |    "doc"  : "Meaningless string of characters"
      |  }]
      |}
    """.stripMargin
  val result = spark
    .read
    .option("avroSchema", avroSchema)
    .format("avro")
    .load(testAvro)
    .collect()
  val expected = spark.read.format("avro").load(testAvro).select("string").collect()
  assert(result.sameElements(expected))
}
test("SPARK-34416: support user provided avro schema url") {
  // Load the reader schema from a file URL instead of an inline string.
  val avroSchemaUrl = testFile("test_sub.avsc")
  val loaded = spark.read
    .option("avroSchemaUrl", avroSchemaUrl)
    .format("avro")
    .load(testAvro)
  // test_sub.avsc projects the file down to just the "string" column.
  val expected = spark.read.format("avro").load(testAvro).select("string").collect()
  assert(loaded.collect().sameElements(expected))
}
// When both avroSchema and avroSchemaUrl are supplied, the inline avroSchema
// wins: the expected result matches the inline schema's union_int_long_null
// field, not the url schema's "string" field.
test("SPARK-34416: support user provided both avro schema and avro schema url") {
  val avroSchemaUrl = testFile("test_sub.avsc")
  val avroSchema =
    """
      |{
      |  "type" : "record",
      |  "name" : "test_schema",
      |  "fields" : [{
      |    "name" : "union_int_long_null",
      |    "type" : ["int", "long", "null"]
      |  }]
      |}
    """.stripMargin
  val result = spark.read
    .option("avroSchema", avroSchema)
    .option("avroSchemaUrl", avroSchemaUrl)
    .format("avro")
    .load(testAvro)
    .collect()
  val expected = spark.read.format("avro").load(testAvro).select("union_int_long_null").collect()
  assert(result.sameElements(expected))
}
test("SPARK-34416: support user provided wrong avro schema url") {
  // A non-existent schema URL must surface as FileNotFoundException.
  val e = intercept[FileNotFoundException] {
    val reader = spark.read
      .option("avroSchemaUrl", "not_exists.avsc")
      .format("avro")
    reader.load(testAvro).collect()
  }
  assertExceptionMsg[FileNotFoundException](e, "File not_exists.avsc does not exist")
}
// Fields present in the reader schema but absent from the file are filled
// from the schema's "default" value ("foo" here).
test("support user provided avro schema with defaults for missing fields") {
  val avroSchema =
    """
      |{
      |  "type" : "record",
      |  "name" : "test_schema",
      |  "fields" : [{
      |    "name"    : "missingField",
      |    "type"    : "string",
      |    "default" : "foo"
      |  }]
      |}
    """.stripMargin
  val result = spark
    .read
    .option("avroSchema", avroSchema)
    .format("avro").load(testAvro).select("missingField").first
  assert(result === Row("foo"))
}
// Writing a string column through a user-provided nullable-enum Avro schema:
// valid symbols and nulls round trip; a value outside the enum fails.
test("support user provided avro schema for writing nullable enum type") {
  withTempPath { tempDir =>
    val avroSchema =
      """
        |{
        |  "type" : "record",
        |  "name" : "test_schema",
        |  "fields" : [{
        |    "name": "Suit",
        |    "type": [{ "type": "enum",
        |              "name": "SuitEnumType",
        |              "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
        |            }, "null"]
        |  }]
        |}
      """.stripMargin
    val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
      Row("SPADES"), Row(null), Row("HEARTS"), Row("DIAMONDS"),
      Row(null), Row("CLUBS"), Row("HEARTS"), Row("SPADES"))),
      StructType(Seq(StructField("Suit", StringType, true))))
    val tempSaveDir = s"$tempDir/save/"
    df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
    checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
    // The written file's embedded schema must match the user-provided one.
    checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
    // Writing df containing data not in the enum will throw an exception
    val e = intercept[SparkException] {
      spark.createDataFrame(spark.sparkContext.parallelize(Seq(
        Row("SPADES"), Row("NOT-IN-ENUM"), Row("HEARTS"), Row("DIAMONDS"))),
        StructType(Seq(StructField("Suit", StringType, true))))
        .write.format("avro").option("avroSchema", avroSchema)
        .save(s"$tempDir/${UUID.randomUUID()}")
    }
    assertExceptionMsg[IncompatibleSchemaException](e,
      """"NOT-IN-ENUM" cannot be written since it's not defined in enum""")
  }
}
// Non-nullable enum schema: valid symbols round trip, but both nulls and
// out-of-enum values must be rejected at write time.
test("support user provided avro schema for writing non-nullable enum type") {
  withTempPath { tempDir =>
    val avroSchema =
      """
        |{
        |  "type" : "record",
        |  "name" : "test_schema",
        |  "fields" : [{
        |    "name": "Suit",
        |    "type": { "type": "enum",
        |              "name": "SuitEnumType",
        |              "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
        |            }
        |  }]
        |}
      """.stripMargin
    val dfWithNull = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
      Row("SPADES"), Row(null), Row("HEARTS"), Row("DIAMONDS"),
      Row(null), Row("CLUBS"), Row("HEARTS"), Row("SPADES"))),
      StructType(Seq(StructField("Suit", StringType, true))))
    // Drop the nulls and re-type the column as non-nullable for the happy path.
    val df = spark.createDataFrame(dfWithNull.na.drop().rdd,
      StructType(Seq(StructField("Suit", StringType, false))))
    val tempSaveDir = s"$tempDir/save/"
    df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
    checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
    checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
    // Writing df containing nulls without using avro union type will
    // throw an exception as avro uses union type to handle null.
    val e1 = intercept[SparkException] {
      dfWithNull.write.format("avro")
        .option("avroSchema", avroSchema).save(s"$tempDir/${UUID.randomUUID()}")
    }
    assertExceptionMsg[AvroTypeException](e1, "Not an enum: null")
    // Writing df containing data not in the enum will throw an exception
    val e2 = intercept[SparkException] {
      spark.createDataFrame(spark.sparkContext.parallelize(Seq(
        Row("SPADES"), Row("NOT-IN-ENUM"), Row("HEARTS"), Row("DIAMONDS"))),
        StructType(Seq(StructField("Suit", StringType, false))))
        .write.format("avro").option("avroSchema", avroSchema)
        .save(s"$tempDir/${UUID.randomUUID()}")
    }
    assertExceptionMsg[IncompatibleSchemaException](e2,
      """"NOT-IN-ENUM" cannot be written since it's not defined in enum""")
  }
}
// Nullable FIXED(2) schema: 2-byte arrays and nulls round trip; arrays of any
// other length (3 bytes, 1 byte) must be rejected at write time.
test("support user provided avro schema for writing nullable fixed type") {
  withTempPath { tempDir =>
    val avroSchema =
      """
        |{
        |  "type" : "record",
        |  "name" : "test_schema",
        |  "fields" : [{
        |    "name": "fixed2",
        |    "type": [{ "type": "fixed",
        |               "size": 2,
        |               "name": "fixed2"
        |             }, "null"]
        |  }]
        |}
      """.stripMargin
    val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
      Row(Array(192, 168).map(_.toByte)), Row(null))),
      StructType(Seq(StructField("fixed2", BinaryType, true))))
    val tempSaveDir = s"$tempDir/save/"
    df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
    checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
    checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
    // Writing df containing binary data that doesn't fit FIXED size will throw an exception
    val e1 = intercept[SparkException] {
      spark.createDataFrame(spark.sparkContext.parallelize(Seq(
        Row(Array(192, 168, 1).map(_.toByte)))),
        StructType(Seq(StructField("fixed2", BinaryType, true))))
        .write.format("avro").option("avroSchema", avroSchema)
        .save(s"$tempDir/${UUID.randomUUID()}")
    }
    assertExceptionMsg[IncompatibleSchemaException](e1,
      "3 bytes of binary data cannot be written into FIXED type with size of 2 bytes")
    // Writing df containing binary data that doesn't fit FIXED size will throw an exception
    val e2 = intercept[SparkException] {
      spark.createDataFrame(spark.sparkContext.parallelize(Seq(
        Row(Array(192).map(_.toByte)))),
        StructType(Seq(StructField("fixed2", BinaryType, true))))
        .write.format("avro").option("avroSchema", avroSchema)
        .save(s"$tempDir/${UUID.randomUUID()}")
    }
    assertExceptionMsg[IncompatibleSchemaException](e2,
      "1 byte of binary data cannot be written into FIXED type with size of 2 bytes")
  }
}
// Non-nullable FIXED(2) schema: exactly-2-byte arrays round trip; any other
// length must be rejected at write time (mirrors the nullable variant above).
test("support user provided avro schema for writing non-nullable fixed type") {
  withTempPath { tempDir =>
    val avroSchema =
      """
        |{
        |  "type" : "record",
        |  "name" : "test_schema",
        |  "fields" : [{
        |    "name": "fixed2",
        |    "type": { "type": "fixed",
        |               "size": 2,
        |               "name": "fixed2"
        |             }
        |  }]
        |}
      """.stripMargin
    val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
      Row(Array(192, 168).map(_.toByte)), Row(Array(1, 1).map(_.toByte)))),
      StructType(Seq(StructField("fixed2", BinaryType, false))))
    val tempSaveDir = s"$tempDir/save/"
    df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
    checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
    checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
    // Writing df containing binary data that doesn't fit FIXED size will throw an exception
    val e1 = intercept[SparkException] {
      spark.createDataFrame(spark.sparkContext.parallelize(Seq(
        Row(Array(192, 168, 1).map(_.toByte)))),
        StructType(Seq(StructField("fixed2", BinaryType, false))))
        .write.format("avro").option("avroSchema", avroSchema)
        .save(s"$tempDir/${UUID.randomUUID()}")
    }
    assertExceptionMsg[IncompatibleSchemaException](e1,
      "3 bytes of binary data cannot be written into FIXED type with size of 2 bytes")
    // Writing df containing binary data that doesn't fit FIXED size will throw an exception
    val e2 = intercept[SparkException] {
      spark.createDataFrame(spark.sparkContext.parallelize(Seq(
        Row(Array(192).map(_.toByte)))),
        StructType(Seq(StructField("fixed2", BinaryType, false))))
        .write.format("avro").option("avroSchema", avroSchema)
        .save(s"$tempDir/${UUID.randomUUID()}")
    }
    assertExceptionMsg[IncompatibleSchemaException](e2,
      "1 byte of binary data cannot be written into FIXED type with size of 2 bytes")
  }
}
// Avro fields are matched by name, not position: writing with a reversed
// field order and reading back with the original schema must still line up.
test("support user provided avro schema for writing / reading fields with different ordering") {
  withTempPath { tempDir =>
    val avroSchema =
      """
        |{
        |  "type" : "record",
        |  "name" : "test_schema",
        |  "fields" : [
        |    {"name": "Age", "type": "int"},
        |    {"name": "Name", "type": "string"}
        |  ]
        |}
      """.stripMargin
    val avroSchemaReversed =
      """
        |{
        |  "type" : "record",
        |  "name" : "test_schema",
        |  "fields" : [
        |    {"name": "Name", "type": "string"},
        |    {"name": "Age", "type": "int"}
        |  ]
        |}
      """.stripMargin
    val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(Row(2, "Aurora"))),
      StructType(Seq(
        StructField("Age", IntegerType, false),
        StructField("Name", StringType, false))))
    val tempSaveDir = s"$tempDir/save/"
    // Writing avro file with reversed field ordering
    df.write.format("avro").option("avroSchema", avroSchemaReversed).save(tempSaveDir)
    // Reading reversed avro file
    checkAnswer(df.select("Name", "Age"), spark.read.format("avro").load(tempSaveDir))
    checkAvroSchemaEquals(avroSchemaReversed, getAvroSchemaStringFromFiles(tempSaveDir))
    // Reading reversed avro file with provided original schema
    val avroDf = spark.read.format("avro").option("avroSchema", avroSchema).load(tempSaveDir)
    checkAnswer(df, avroDf)
    // The user-provided schema also dictates the output column order.
    assert(avroDf.schema.fieldNames.sameElements(Array("Age", "Name")))
  }
}
// A non-nullable Avro schema over a nullable Catalyst schema is fine as long
// as no null value is actually written; writing a null must fail.
test("support user provided non-nullable avro schema " +
  "for nullable catalyst schema without any null record") {
  withTempPath { tempDir =>
    val catalystSchema =
      StructType(Seq(
        StructField("Age", IntegerType, true),
        StructField("Name", StringType, true)))
    val avroSchema =
      """
        |{
        |  "type" : "record",
        |  "name" : "test_schema",
        |  "fields" : [
        |    {"name": "Age", "type": "int"},
        |    {"name": "Name", "type": "string"}
        |  ]
        |}
      """.stripMargin
    val df = spark.createDataFrame(
      spark.sparkContext.parallelize(Seq(Row(2, "Aurora"))), catalystSchema)
    val tempSaveDir = s"$tempDir/save/"
    df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
    checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
    val message = intercept[Exception] {
      spark.createDataFrame(spark.sparkContext.parallelize(Seq(Row(2, null))), catalystSchema)
        .write.format("avro").option("avroSchema", avroSchema)
        .save(s"$tempDir/${UUID.randomUUID()}")
    }.getCause.getMessage
    // NOTE(review): the matched text includes a "Caused by:" prefix, implying
    // getCause.getMessage embeds the nested cause's text — confirm this holds
    // across Avro versions, as the message format comes from the Avro writer.
    assert(message.contains("Caused by: java.lang.NullPointerException: " +
      "null of string in string in field Name of test_schema in test_schema"))
  }
}
// The converse case: a nullable Avro schema over a non-nullable Catalyst
// schema is always safe — the written file keeps the union-with-null schema.
test("support user provided nullable avro schema " +
  "for non-nullable catalyst schema without any null record") {
  val catalystSchema =
    StructType(Seq(
      StructField("Age", IntegerType, nullable = false),
      StructField("Name", StringType, nullable = false)))
  val avroSchema = """
    |{
    |  "type" : "record",
    |  "name" : "test_schema",
    |  "fields" : [
    |    {"name": "Age", "type": ["null", "int"]},
    |    {"name": "Name", "type": ["null", "string"]}
    |  ]
    |}
  """.stripMargin
  val df = spark.createDataFrame(
    spark.sparkContext.parallelize(Seq(Row(2, "Aurora"))), catalystSchema)
  withTempPath { tempDir =>
    df.write.format("avro").option("avroSchema", avroSchema).save(tempDir.getPath)
    checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempDir.getPath))
  }
}
// With positionalFieldMatching, a user schema with renamed fields is matched
// to the file's fields by position instead of by name.
test("SPARK-34365: support reading renamed schema using positionalFieldMatching") {
  val renamedSchema = new StructType()
    .add("foo", StringType)
    .add("foo_map", MapType(StringType, IntegerType))
  val dfLoaded = spark
    .read
    .option("positionalFieldMatching", true.toString)
    .schema(renamedSchema)
    .format("avro")
    .load(testAvro)
  assert(dfLoaded.schema === renamedSchema)
  // Values must equal the name-matched columns "string" and "simple_map".
  val expectedDf = spark.read.format("avro").load(testAvro).select("string", "simple_map")
  assert(dfLoaded.select($"foo".as("string"), $"foo_map".as("simple_map")).collect().toSet ===
    expectedDf.collect().toSet)
}
// Writing with positionalFieldMatching: the DataFrame's columns are mapped to
// the user Avro schema's fields by position, so the file gets the new names.
test("SPARK-34365: support writing with renamed schema using positionalFieldMatching") {
  withTempDir { tempDir =>
    val avroSchema = SchemaBuilder.record("renamed").fields()
      .requiredString("foo")
      .name("foo_map").`type`(Schema.createMap(Schema.create(Schema.Type.INT))).noDefault()
      .endRecord()
    val expectedDf = spark.read.format("avro").load(testAvro).select("string", "simple_map")
    val savePath = s"$tempDir/save"
    expectedDf.write
      .option("avroSchema", avroSchema.toString)
      .option("positionalFieldMatching", true.toString)
      .format("avro")
      .save(savePath)
    val reloadedDf = spark.read.format("avro").load(savePath)
    // Reloaded schema carries the renamed fields; values are unchanged.
    assert(reloadedDf.schema ===
      new StructType().add("foo", StringType).add("foo_map", MapType(StringType, IntegerType)))
    assert(reloadedDf.select($"foo".as("string"), $"foo_map".as("simple_map")).collect().toSet ===
      expectedDf.collect().toSet)
  }
}
// Unions other than [null, T] (three-way unions, multi-type unions) are not
// supported as write schemas and must fail with a clear message.
test("unsupported nullable avro type") {
  val catalystSchema =
    StructType(Seq(
      StructField("Age", IntegerType, nullable = false),
      StructField("Name", StringType, nullable = false)))
  for (unsupportedAvroType <- Seq("""["null", "int", "long"]""", """["int", "long"]""")) {
    val avroSchema = s"""
      |{
      |  "type" : "record",
      |  "name" : "test_schema",
      |  "fields" : [
      |    {"name": "Age", "type": $unsupportedAvroType},
      |    {"name": "Name", "type": ["null", "string"]}
      |  ]
      |}
    """.stripMargin
    val df = spark.createDataFrame(
      spark.sparkContext.parallelize(Seq(Row(2, "Aurora"))), catalystSchema)
    withTempPath { tempDir =>
      val message = intercept[SparkException] {
        df.write.format("avro").option("avroSchema", avroSchema).save(tempDir.getPath)
      }.getCause.getMessage
      assert(message.contains("Only UNION of a null type and a non-null type is supported"))
    }
  }
}
test("error handling for unsupported Interval data types") {
  withTempDir { dir =>
    val tempDir = new File(dir, "files").getCanonicalPath
    // A literal interval column is rejected before any file is written.
    val literalMsg = intercept[AnalysisException] {
      sql("select interval 1 days").write.format("avro").mode("overwrite").save(tempDir)
    }.getMessage
    assert(literalMsg.contains("Cannot save interval data type into external storage.") ||
      literalMsg.contains("AVRO data source does not support interval data type."))
    // A UDT backed by interval data is rejected as well.
    spark.udf.register("testType", () => new IntervalData())
    val udtMsg = intercept[AnalysisException] {
      sql("select testType()").write.format("avro").mode("overwrite").save(tempDir)
    }.getMessage
    assert(udtMsg.toLowerCase(Locale.ROOT)
      .contains("avro data source does not support interval data type."))
  }
}
test("support Null data types") {
  withTempDir { dir =>
    val tempDir = new File(dir, "files").getCanonicalPath
    // A NullType column must survive an Avro write/read round trip.
    val nullDf = sql("select null")
    nullDf.write.format("avro").mode("overwrite").save(tempDir)
    checkAnswer(spark.read.format("avro").load(tempDir), nullDf)
  }
}
// Exhaustively pairs every Catalyst type with every mismatched Avro type —
// directly, inside arrays, maps and records, nullable and not — and asserts
// AvroSerializer construction fails with an IncompatibleSchemaException.
test("throw exception if unable to write with user provided Avro schema") {
  // Each pair (catalystType, avroType) is a *compatible* mapping; mixing the
  // catalyst side of one pair with the avro side of a different pair (i != j
  // by avro type) must be rejected.
  val input: Seq[(DataType, Schema.Type)] = Seq(
    (NullType, NULL),
    (BooleanType, BOOLEAN),
    (ByteType, INT),
    (ShortType, INT),
    (IntegerType, INT),
    (LongType, LONG),
    (FloatType, FLOAT),
    (DoubleType, DOUBLE),
    (BinaryType, BYTES),
    (DateType, INT),
    (TimestampType, LONG),
    (DecimalType(4, 2), BYTES)
  )
  // Constructing the serializer itself must throw — no data is needed.
  def assertException(f: () => AvroSerializer): Unit = {
    val message = intercept[org.apache.spark.sql.avro.IncompatibleSchemaException] {
      f()
    }.getMessage
    assert(message.contains("Cannot convert SQL type"))
  }
  // Wrap a schema in [schema, null] when testing the nullable variant
  // (NULL itself cannot be unioned with NULL).
  def resolveNullable(schema: Schema, nullable: Boolean): Schema = {
    if (nullable && schema.getType != NULL) {
      Schema.createUnion(schema, Schema.create(NULL))
    } else {
      schema
    }
  }
  for {
    i <- input
    j <- input
    nullable <- Seq(true, false)
  } if (i._2 != j._2) {
    val avroType = resolveNullable(Schema.create(j._2), nullable)
    val avroArrayType = resolveNullable(Schema.createArray(avroType), nullable)
    val avroMapType = resolveNullable(Schema.createMap(avroType), nullable)
    val name = "foo"
    val avroField = new Field(name, avroType, "", null.asInstanceOf[AnyVal])
    val recordSchema = Schema.createRecord("name", "doc", "space", true, Seq(avroField).asJava)
    val avroRecordType = resolveNullable(recordSchema, nullable)
    val catalystType = i._1
    val catalystArrayType = ArrayType(catalystType, nullable)
    val catalystMapType = MapType(StringType, catalystType, nullable)
    val catalystStructType = StructType(Seq(StructField(name, catalystType, nullable)))
    // Cross every container shape on both sides.
    for {
      avro <- Seq(avroType, avroArrayType, avroMapType, avroRecordType)
      catalyst <- Seq(catalystType, catalystArrayType, catalystMapType, catalystStructType)
    } {
      assertException(() => new AvroSerializer(catalyst, avro, nullable))
    }
  }
}
// Reading from missing, empty, or unmatched-glob locations must fail with the
// appropriate exception type rather than return an empty result.
test("reading from invalid path throws exception") {
  // Directory given has no avro files
  intercept[AnalysisException] {
    withTempPath(dir => spark.read.format("avro").load(dir.getCanonicalPath))
  }
  intercept[AnalysisException] {
    spark.read.format("avro").load("very/invalid/path/123.avro")
  }
  // In case of globbed path that can't be matched to anything, another exception is thrown (and
  // exception message is helpful)
  intercept[AnalysisException] {
    spark.read.format("avro").load("*/*/*/*/*/*/*/something.avro")
  }
  // A directory containing only non-.avro files behaves like an empty one
  // when extension filtering is on (via the legacy property or the option).
  intercept[FileNotFoundException] {
    withTempPath { dir =>
      FileUtils.touch(new File(dir, "test"))
      withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
        spark.read.format("avro").load(dir.toString)
      }
    }
  }
  intercept[FileNotFoundException] {
    withTempPath { dir =>
      FileUtils.touch(new File(dir, "test"))
      spark
        .read
        .option("ignoreExtension", false)
        .format("avro")
        .load(dir.toString)
    }
  }
}
// INSERT OVERWRITE into an empty Avro-backed view copies all 8 episodes.
test("SQL test insert overwrite") {
  withTempPath { tempDir =>
    val tempEmptyDir = s"$tempDir/sqlOverwrite"
    // Create a temp directory for table that will be overwritten
    new File(tempEmptyDir).mkdirs()
    spark.sql(
      s"""
        |CREATE TEMPORARY VIEW episodes
        |USING avro
        |OPTIONS (path "${episodesAvro}")
      """.stripMargin.replaceAll("\\n", " "))
    spark.sql(
      s"""
        |CREATE TEMPORARY VIEW episodesEmpty
        |(name string, air_date string, doctor int)
        |USING avro
        |OPTIONS (path "$tempEmptyDir")
      """.stripMargin.replaceAll("\\n", " "))
    assert(spark.sql("SELECT * FROM episodes").collect().length === 8)
    assert(spark.sql("SELECT * FROM episodesEmpty").collect().isEmpty)
    spark.sql(
      s"""
        |INSERT OVERWRITE TABLE episodesEmpty
        |SELECT * FROM episodes
      """.stripMargin.replaceAll("\\n", " "))
    assert(spark.sql("SELECT * FROM episodesEmpty").collect().length == 8)
  }
}
test("test save and load") {
  // Round trip: load episodes.avro, save it elsewhere, reload and recount.
  withTempPath { tempDir =>
    val episodes = spark.read.format("avro").load(episodesAvro)
    assert(episodes.count == 8)
    val saveDir = s"$tempDir/save/"
    episodes.write.format("avro").save(saveDir)
    val reloaded = spark.read.format("avro").load(saveDir)
    assert(reloaded.count == 8)
  }
}
// A stray non-.avro file in the directory must be skipped when the
// ignore-files-without-extension property is enabled.
test("test load with non-Avro file") {
  // Test if load works as expected
  withTempPath { tempDir =>
    val df = spark.read.format("avro").load(episodesAvro)
    assert(df.count == 8)
    val tempSaveDir = s"$tempDir/save/"
    df.write.format("avro").save(tempSaveDir)
    // Drop an empty non-avro file next to the real output.
    Files.createFile(new File(tempSaveDir, "non-avro").toPath)
    withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
      val newDf = spark.read.format("avro").load(tempSaveDir)
      assert(newDf.count() == 8)
    }
  }
}
// Case-insensitive mode matches "STRING" to the file's "string" field;
// case-sensitive mode finds no match and yields null columns.
test("SPARK-34133: Reading user provided schema respects case sensitivity for field matching") {
  val wrongCaseSchema = new StructType()
    .add("STRING", StringType, nullable = false)
    .add("UNION_STRING_NULL", StringType, nullable = true)
  val withSchema = spark.read
    .schema(wrongCaseSchema)
    .format("avro").load(testAvro).collect()
  val withOutSchema = spark.read.format("avro").load(testAvro)
    .select("STRING", "UNION_STRING_NULL")
    .collect()
  assert(withSchema.sameElements(withOutSchema))
  withSQLConf((SQLConf.CASE_SENSITIVE.key, "true")) {
    // No field matches under exact-case comparison → all values null.
    val out = spark.read.format("avro").schema(wrongCaseSchema).load(testAvro).collect()
    assert(out.forall(_.isNullAt(0)))
    assert(out.forall(_.isNullAt(1)))
  }
}
// Writing through a user schema whose field names differ only by case from
// the DataFrame's: allowed case-insensitively, rejected case-sensitively.
test("SPARK-34133: Writing user provided schema respects case sensitivity for field matching") {
  withTempDir { tempDir =>
    val avroSchema =
      """
        |{
        |  "type" : "record",
        |  "name" : "test_schema",
        |  "fields" : [
        |    {"name": "foo", "type": "int"},
        |    {"name": "BAR", "type": "int"}
        |  ]
        |}
      """.stripMargin
    // DataFrame columns "FOO"/"bar" differ only by case from "foo"/"BAR".
    val df = Seq((1, 3), (2, 4)).toDF("FOO", "bar")
    val savePath = s"$tempDir/save"
    df.write.option("avroSchema", avroSchema).format("avro").save(savePath)
    val loaded = spark.read.format("avro").load(savePath)
    // The Avro schema's casing wins in the written file.
    assert(loaded.schema === new StructType().add("foo", IntegerType).add("BAR", IntegerType))
    assert(loaded.collect().map(_.getInt(0)).toSet === Set(1, 2))
    assert(loaded.collect().map(_.getInt(1)).toSet === Set(3, 4))
    withSQLConf((SQLConf.CASE_SENSITIVE.key, "true")) {
      val e = intercept[SparkException] {
        df.write.option("avroSchema", avroSchema).format("avro").save(s"$tempDir/save2")
      }
      assertExceptionMsg[IncompatibleSchemaException](e,
        "Cannot find field 'FOO' (at position 0) in Avro schema at top-level record")
    }
  }
}
// An Avro schema with fields distinguishable only by case ("foo"/"FOO") is
// ambiguous under case-insensitive matching — both write and read must fail —
// but usable when CASE_SENSITIVE is on.
test("SPARK-34133: Writing user provided schema with multiple matching Avro fields fails") {
  withTempDir { tempDir =>
    val avroSchema =
      """
        |{
        |  "type" : "record",
        |  "name" : "test_schema",
        |  "fields" : [
        |    {"name": "foo", "type": "int"},
        |    {"name": "FOO", "type": "string"}
        |  ]
        |}
      """.stripMargin
    val errorMsg = "Searching for 'foo' in Avro schema at top-level record gave 2 matches. " +
      "Candidates: [foo, FOO]"
    assertExceptionMsg(intercept[SparkException] {
      val fooBarDf = Seq((1, "3"), (2, "4")).toDF("foo", "bar")
      fooBarDf.write.option("avroSchema", avroSchema).format("avro").save(s"$tempDir/save-fail")
    }, errorMsg)
    val savePath = s"$tempDir/save"
    withSQLConf((SQLConf.CASE_SENSITIVE.key, "true")) {
      // Exact-case matching disambiguates the two fields.
      val fooFooDf = Seq((1, "3"), (2, "4")).toDF("foo", "FOO")
      fooFooDf.write.option("avroSchema", avroSchema).format("avro").save(savePath)
      val loadedDf = spark.read.format("avro").schema(fooFooDf.schema).load(savePath)
      assert(loadedDf.collect().toSet === fooFooDf.collect().toSet)
    }
    // Reading back case-insensitively hits the same ambiguity.
    assertExceptionMsg(intercept[SparkException] {
      val fooSchema = new StructType().add("foo", IntegerType)
      spark.read.format("avro").schema(fooSchema).load(savePath).collect()
    }, errorMsg)
  }
}
// Supplying a user schema covering a subset of the file's columns must yield
// the same rows as a full read followed by a select of those columns.
test("read avro with user defined schema: read partial columns") {
  val partialColumns = StructType(Seq(
    StructField("string", StringType, false),
    StructField("simple_map", MapType(StringType, IntegerType), false),
    StructField("complex_map", MapType(StringType, MapType(StringType, StringType)), false),
    StructField("union_string_null", StringType, true),
    StructField("union_int_long_null", LongType, true),
    StructField("fixed3", BinaryType, true),
    StructField("fixed2", BinaryType, true),
    StructField("enum", StringType, false),
    StructField("record", StructType(Seq(StructField("value_field", StringType, false))), false),
    StructField("array_of_boolean", ArrayType(BooleanType), false),
    StructField("bytes", BinaryType, true)))
  val withSchema = spark.read.schema(partialColumns).format("avro").load(testAvro).collect()
  val withOutSchema = spark
    .read
    .format("avro")
    .load(testAvro)
    .select("string", "simple_map", "complex_map", "union_string_null", "union_int_long_null",
      "fixed3", "fixed2", "enum", "record", "array_of_boolean", "bytes")
    .collect()
  assert(withSchema.sameElements(withOutSchema))
}
test("read avro with user defined schema: read non-exist columns") {
  // Columns requested in the user schema but absent from the file (including
  // nested record fields) must come back as nulls rather than fail.
  val nestedType = StructType(Seq(
    StructField("non_exist_field", StringType, false),
    StructField("non_exist_field2", StringType, false)))
  val schema = StructType(Seq(
    StructField("non_exist_string", StringType, true),
    StructField("record", nestedType, false)))
  val withEmptyColumn = spark.read.schema(schema).format("avro").load(testAvro).collect()
  assert(withEmptyColumn.forall(_ == Row(null: String, Row(null: String, null: String))))
}
test("read avro file partitioned") {
  withTempPath { dir =>
    // 3073 distinct records — enough data to be split across >2 partitions.
    val recordCount = 1024 * 3 + 1
    val df = (0 until recordCount).toDS.map(i => s"record${i}").toDF("records")
    val outputDir = s"$dir/${UUID.randomUUID}"
    df.write.format("avro").save(outputDir)
    val input = spark.read.format("avro").load(outputDir)
    assert(input.collect.toSet.size === recordCount)
    assert(input.rdd.partitions.size > 2)
  }
}
// Three-level nesting where every level names its payload field "data";
// used to exercise namespace disambiguation of identically-named records.
case class NestedBottom(id: Int, data: String)
case class NestedMiddle(id: Int, data: NestedBottom)
case class NestedTop(id: Int, data: NestedMiddle)
// The embedded file schema must namespace each identically-named nested
// record by its position in the tree (topLevelRecord, topLevelRecord.data).
test("Validate namespace in avro file that has nested records with the same name") {
  withTempPath { dir =>
    val writeDf = spark.createDataFrame(List(NestedTop(1, NestedMiddle(2, NestedBottom(3, "1")))))
    writeDf.write.format("avro").save(dir.toString)
    val schema = getAvroSchemaStringFromFiles(dir.toString)
    assert(schema.contains("\\"namespace\\":\\"topLevelRecord\\""))
    assert(schema.contains("\\"namespace\\":\\"topLevelRecord.data\\""))
  }
}
test("saving avro that has nested records with the same name") {
  withTempPath { tempDir =>
    // Nested records all name their field "data"; the writer must
    // disambiguate them so the file can be read back unchanged.
    val written = spark.createDataFrame(List(NestedTop(1, NestedMiddle(2, NestedBottom(3, "1")))))
    val outputFolder = s"$tempDir/duplicate_names/"
    written.write.format("avro").save(outputFolder)
    val reread = spark.read.format("avro").load(outputFolder)
    assert(reread.collect().sameElements(written.collect()))
  }
}
// SchemaConverters.toAvroType should qualify nested record names with a
// namespace built from the record path, rooted at the given nameSpace.
test("check namespace - toAvroType") {
  val sparkSchema = StructType(Seq(
    StructField("name", StringType, nullable = false),
    StructField("address", StructType(Seq(
      StructField("city", StringType, nullable = false),
      StructField("state", StringType, nullable = false))),
      nullable = false)))
  val employeeType = SchemaConverters.toAvroType(sparkSchema,
    recordName = "employee",
    nameSpace = "foo.bar")
  assert(employeeType.getFullName == "foo.bar.employee")
  assert(employeeType.getName == "employee")
  assert(employeeType.getNamespace == "foo.bar")
  val addressType = employeeType.getField("address").schema()
  // The nested record is namespaced under its parent record's full name.
  assert(addressType.getFullName == "foo.bar.employee.address")
  assert(addressType.getName == "address")
  assert(addressType.getNamespace == "foo.bar.employee")
}

// With no nameSpace argument the top-level record has a null namespace and
// nested records are namespaced by the record path alone.
test("check empty namespace - toAvroType") {
  val sparkSchema = StructType(Seq(
    StructField("name", StringType, nullable = false),
    StructField("address", StructType(Seq(
      StructField("city", StringType, nullable = false),
      StructField("state", StringType, nullable = false))),
      nullable = false)))
  val employeeType = SchemaConverters.toAvroType(sparkSchema,
    recordName = "employee")
  assert(employeeType.getFullName == "employee")
  assert(employeeType.getName == "employee")
  assert(employeeType.getNamespace == null)
  val addressType = employeeType.getField("address").schema()
  assert(addressType.getFullName == "employee.address")
  assert(addressType.getName == "address")
  assert(addressType.getNamespace == "employee")
}
// Same-named nested records reachable through an array element type.
case class NestedMiddleArray(id: Int, data: Array[NestedBottom])
case class NestedTopArray(id: Int, data: NestedMiddleArray)

test("saving avro that has nested records with the same name inside an array") {
  withTempPath { tempDir =>
    // Save avro file on output folder path
    val writeDf = spark.createDataFrame(
      List(NestedTopArray(1, NestedMiddleArray(2, Array(
        NestedBottom(3, "1"), NestedBottom(4, "2")
      ))))
    )
    val outputFolder = s"$tempDir/duplicate_names_array/"
    writeDf.write.format("avro").save(outputFolder)
    // Read avro file saved on the last step
    val readDf = spark.read.format("avro").load(outputFolder)
    // Check that the written DataFrame equals the read-back DataFrame
    assert(readDf.collect().sameElements(writeDf.collect()))
  }
}

// Same-named nested records reachable through a map value type.
case class NestedMiddleMap(id: Int, data: Map[String, NestedBottom])
case class NestedTopMap(id: Int, data: NestedMiddleMap)

test("saving avro that has nested records with the same name inside a map") {
  withTempPath { tempDir =>
    // Save avro file on output folder path
    val writeDf = spark.createDataFrame(
      List(NestedTopMap(1, NestedMiddleMap(2, Map(
        "1" -> NestedBottom(3, "1"), "2" -> NestedBottom(4, "2")
      ))))
    )
    val outputFolder = s"$tempDir/duplicate_names_map/"
    writeDf.write.format("avro").save(outputFolder)
    // Read avro file saved on the last step
    val readDf = spark.read.format("avro").load(outputFolder)
    // Check that the written DataFrame equals the read-back DataFrame
    assert(readDf.collect().sameElements(writeDf.collect()))
  }
}
// Files without the .avro extension must still be read by default (no implicit
// extension-based filtering), both with an inferred and a user schema.
test("SPARK-24805: do not ignore files without .avro extension by default") {
  withTempDir { dir =>
    Files.copy(
      Paths.get(new URL(episodesAvro).toURI),
      Paths.get(dir.getCanonicalPath, "episodes"))
    val fileWithoutExtension = s"${dir.getCanonicalPath}/episodes"
    val df1 = spark.read.format("avro").load(fileWithoutExtension)
    assert(df1.count == 8)
    val schema = new StructType()
      .add("title", StringType)
      .add("air_date", StringType)
      .add("doctor", IntegerType)
    val df2 = spark.read.schema(schema).format("avro").load(fileWithoutExtension)
    assert(df2.count == 8)
  }
}

// With ignoreExtension=false, an extra extension-less file next to the data
// files is not read and the row count stays at 8.
test("SPARK-24836: checking the ignoreExtension option") {
  withTempPath { tempDir =>
    val df = spark.read.format("avro").load(episodesAvro)
    assert(df.count == 8)
    val tempSaveDir = s"$tempDir/save/"
    df.write.format("avro").save(tempSaveDir)
    // Drop an empty file without the .avro extension into the output directory.
    Files.createFile(new File(tempSaveDir, "non-avro").toPath)
    val newDf = spark
      .read
      .option("ignoreExtension", false)
      .format("avro")
      .load(tempSaveDir)
    assert(newDf.count == 8)
  }
}
// The per-read `ignoreExtension` option must take precedence over the
// Hadoop-level "ignore files without extension" property.
test("SPARK-24836: ignoreExtension must override hadoop's config") {
  withTempDir { dir =>
    // Copy the episodes file without an .avro extension so it would be skipped
    // if extension filtering were in effect.
    Files.copy(
      Paths.get(new URL(episodesAvro).toURI),
      Paths.get(dir.getCanonicalPath, "episodes"))
    // The Hadoop property requests extension filtering; the read-level option
    // overrides it, so all 8 records must still be read.
    // (Removed an unused local that called spark.sessionState.newHadoopConf();
    // its result was never referenced.)
    withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
      val newDf = spark
        .read
        .option("ignoreExtension", "true")
        .format("avro")
        .load(s"${dir.getCanonicalPath}/episodes")
      assert(newDf.count() == 8)
    }
  }
}
// Each supported value of the `compression` write option must be recorded in
// the produced file's `avro.codec` metadata.
test("SPARK-24881: write with compression - avro options") {
  // Reads the `avro.codec` metadata of the first .avro file under `dir`.
  // Avro stores the string "null" for no compression; map it to "uncompressed".
  def getCodec(dir: String): Option[String] = {
    val files = new File(dir)
      .listFiles()
      .filter(_.isFile)
      .filter(_.getName.endsWith("avro"))
    files.map { file =>
      val reader = new DataFileReader(file, new GenericDatumReader[Any]())
      val r = reader.getMetaString("avro.codec")
      r
    }.map(v => if (v == "null") "uncompressed" else v).headOption
  }
  // Writes `df` with the given codec into its own subdirectory and verifies
  // the codec that ended up in the file metadata.
  def checkCodec(df: DataFrame, dir: String, codec: String): Unit = {
    val subdir = s"$dir/$codec"
    df.write.option("compression", codec).format("avro").save(subdir)
    assert(getCodec(subdir) == Some(codec))
  }
  withTempPath { dir =>
    val path = dir.toString
    val df = spark.read.format("avro").load(testAvro)
    checkCodec(df, path, "uncompressed")
    checkCodec(df, path, "deflate")
    checkCodec(df, path, "snappy")
    checkCodec(df, path, "bzip2")
    checkCodec(df, path, "xz")
  }
}
/** Asserts that converting the given Avro schema to a SQL type is rejected as recursive. */
private def checkSchemaWithRecursiveLoop(avroSchema: String): Unit = {
  val thrown = intercept[IncompatibleSchemaException] {
    SchemaConverters.toSqlType(new Schema.Parser().parse(avroSchema))
  }
  assert(thrown.getMessage.contains("Found recursive reference in Avro schema"))
}
// Recursive Avro schemas cannot be mapped to Spark SQL types. Exercise all four
// shapes of self-reference: nullable union, nested record, array items, map values.
test("Detect recursive loop") {
  // Self-reference through a nullable union field.
  checkSchemaWithRecursiveLoop("""
    |{
    | "type": "record",
    | "name": "LongList",
    | "fields" : [
    | {"name": "value", "type": "long"}, // each element has a long
    | {"name": "next", "type": ["null", "LongList"]} // optional next element
    | ]
    |}
  """.stripMargin)

  // Self-reference through a field of a nested record.
  checkSchemaWithRecursiveLoop("""
    |{
    | "type": "record",
    | "name": "LongList",
    | "fields": [
    | {
    | "name": "value",
    | "type": {
    | "type": "record",
    | "name": "foo",
    | "fields": [
    | {
    | "name": "parent",
    | "type": "LongList"
    | }
    | ]
    | }
    | }
    | ]
    |}
  """.stripMargin)

  // Self-reference through an array's element type.
  checkSchemaWithRecursiveLoop("""
    |{
    | "type": "record",
    | "name": "LongList",
    | "fields" : [
    | {"name": "value", "type": "long"},
    | {"name": "array", "type": {"type": "array", "items": "LongList"}}
    | ]
    |}
  """.stripMargin)

  // Self-reference through a map's value type.
  checkSchemaWithRecursiveLoop("""
    |{
    | "type": "record",
    | "name": "LongList",
    | "fields" : [
    | {"name": "value", "type": "long"},
    | {"name": "map", "type": {"type": "map", "values": "LongList"}}
    | ]
    |}
  """.stripMargin)
}
// Using the deprecated `ignoreExtension` read option should log exactly one
// deprecation warning.
test("log a warning of ignoreExtension deprecation") {
  val logAppender = new LogAppender("deprecated Avro option 'ignoreExtension'")
  withTempPath { dir =>
    Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1), ("d", 2, 1))
      .toDF("value", "p1", "p2")
      .repartition(2)
      .write
      .format("avro")
      .save(dir.getCanonicalPath)
    // Capture log events only around the read that passes the deprecated option.
    withLogAppender(logAppender) {
      spark
        .read
        .format("avro")
        .option(AvroOptions.ignoreExtensionKey, false)
        .load(dir.getCanonicalPath)
        .count()
    }
    val deprecatedEvents = logAppender.loggingEvents
      .filter(_.getRenderedMessage.contains(
        s"Option ${AvroOptions.ignoreExtensionKey} is deprecated"))
    assert(deprecatedEvents.size === 1)
  }
}
// It generates input files for the test below:
// "SPARK-31183: compatibility with Spark 2.4 in reading dates/timestamps"
// Kept as `ignore` so it only runs when re-generating the checked-in resources.
ignore("SPARK-31855: generate test files for checking compatibility with Spark 2.4") {
  val resourceDir = "external/avro/src/test/resources"
  val version = "2_4_6"
  // Writes `in` cast to type `t` as a single Avro file and copies it into the
  // test resource directory under `dstFile`.
  def save(
      in: Seq[String],
      t: String,
      dstFile: String,
      options: Map[String, String] = Map.empty): Unit = {
    withTempDir { dir =>
      in.toDF("dt")
        .select($"dt".cast(t))
        .repartition(1)
        .write
        .mode("overwrite")
        .options(options)
        .format("avro")
        .save(dir.getCanonicalPath)
      Files.copy(
        dir.listFiles().filter(_.getName.endsWith(".avro")).head.toPath,
        Paths.get(resourceDir, dstFile),
        StandardCopyOption.REPLACE_EXISTING)
    }
  }
  // Pin the JVM default and session time zones so the generated files are
  // reproducible.
  withDefaultTimeZone(LA) {
    withSQLConf(
      SQLConf.SESSION_LOCAL_TIMEZONE.key -> LA.getId) {
      save(
        Seq("1001-01-01"),
        "date",
        s"before_1582_date_v$version.avro")
      save(
        Seq("1001-01-01 01:02:03.123"),
        "timestamp",
        s"before_1582_timestamp_millis_v$version.avro",
        // scalastyle:off line.size.limit
        Map("avroSchema" ->
          s"""
            | {
            | "namespace": "logical",
            | "type": "record",
            | "name": "test",
            | "fields": [
            | {"name": "dt", "type": ["null", {"type": "long","logicalType": "timestamp-millis"}], "default": null}
            | ]
            | }
            |""".stripMargin))
      // scalastyle:on line.size.limit
      save(
        Seq("1001-01-01 01:02:03.123456"),
        "timestamp",
        s"before_1582_timestamp_micros_v$version.avro")
    }
  }
}
// Runs `f` once per rebase mode: first driven by the session conf (with no
// read options), then driven by the per-read rebase option while the conf is
// pinned to EXCEPTION — proving the option overrides the conf.
private def runInMode(
    modes: Seq[LegacyBehaviorPolicy.Value])(f: Map[String, String] => Unit): Unit = {
  modes.foreach { mode =>
    withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_READ.key -> mode.toString) {
      f(Map.empty)
    }
  }
  withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_READ.key -> EXCEPTION.toString) {
    modes.foreach { mode =>
      f(Map(AvroOptions.DATETIME_REBASE_MODE -> mode.toString))
    }
  }
}
test("SPARK-31183: compatibility with Spark 2.4 in reading dates/timestamps") {
  // test reading the existing 2.4 files and new 3.0 files (with rebase on/off) together.
  // `checkDefaultLegacyRead` encodes the expected default behavior for the 2.4
  // file: fail for files lacking a Spark version marker, succeed otherwise.
  def checkReadMixedFiles(
      fileName: String,
      dt: String,
      dataStr: String,
      checkDefaultLegacyRead: String => Unit): Unit = {
    withTempPaths(2) { paths =>
      paths.foreach(_.delete())
      val path2_4 = getResourceAvroFilePath(fileName)
      val path3_0 = paths(0).getCanonicalPath
      val path3_0_rebase = paths(1).getCanonicalPath
      if (dt == "date") {
        val df = Seq(dataStr).toDF("str").select($"str".cast("date").as("dt"))
        checkDefaultLegacyRead(path2_4)
        // Write the same value once without rebase (CORRECTED) and once with
        // the legacy rebase applied on write.
        withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> CORRECTED.toString) {
          df.write.format("avro").mode("overwrite").save(path3_0)
        }
        withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
          df.write.format("avro").save(path3_0_rebase)
        }
        // For Avro files written by Spark 3.0, we know the writer info and don't need the config
        // to guide the rebase behavior.
        runInMode(Seq(LEGACY)) { options =>
          checkAnswer(
            spark.read.options(options).format("avro").load(path2_4, path3_0, path3_0_rebase),
            1.to(3).map(_ => Row(java.sql.Date.valueOf(dataStr))))
        }
      } else {
        val df = Seq(dataStr).toDF("str").select($"str".cast("timestamp").as("dt"))
        val avroSchema =
          s"""
            |{
            | "type" : "record",
            | "name" : "test_schema",
            | "fields" : [
            | {"name": "dt", "type": {"type": "long", "logicalType": "$dt"}}
            | ]
            |}""".stripMargin
        // By default we should fail to write ancient datetime values.
        val e = intercept[SparkException] {
          df.write.format("avro").option("avroSchema", avroSchema).save(path3_0)
        }
        assert(e.getCause.getCause.getCause.isInstanceOf[SparkUpgradeException])
        checkDefaultLegacyRead(path2_4)
        withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> CORRECTED.toString) {
          df.write.format("avro").option("avroSchema", avroSchema).mode("overwrite").save(path3_0)
        }
        withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
          df.write.format("avro").option("avroSchema", avroSchema).save(path3_0_rebase)
        }
        // For Avro files written by Spark 3.0, we know the writer info and don't need the config
        // to guide the rebase behavior.
        runInMode(Seq(LEGACY)) { options =>
          checkAnswer(
            spark.read.options(options).format("avro").load(path2_4, path3_0, path3_0_rebase),
            1.to(3).map(_ => Row(java.sql.Timestamp.valueOf(dataStr))))
        }
      }
    }
  }
  def failInRead(path: String): Unit = {
    val e = intercept[SparkException](spark.read.format("avro").load(path).collect())
    assert(e.getCause.isInstanceOf[SparkUpgradeException])
  }
  def successInRead(path: String): Unit = spark.read.format("avro").load(path).collect()
  Seq(
    // By default we should fail to read ancient datetime values when parquet files don't
    // contain Spark version.
    "2_4_5" -> failInRead _,
    "2_4_6" -> successInRead _
  ).foreach { case (version, checkDefaultRead) =>
    checkReadMixedFiles(
      s"before_1582_date_v$version.avro",
      "date",
      "1001-01-01",
      checkDefaultRead)
    checkReadMixedFiles(
      s"before_1582_timestamp_micros_v$version.avro",
      "timestamp-micros",
      "1001-01-01 01:02:03.123456",
      checkDefaultRead)
    checkReadMixedFiles(
      s"before_1582_timestamp_millis_v$version.avro",
      "timestamp-millis",
      "1001-01-01 01:02:03.123",
      checkDefaultRead)
  }
}
// Legacy-mode writes rebase ancient timestamps from the Proleptic Gregorian to
// the hybrid Julian calendar; reads must transparently rebase them back.
test("SPARK-31183: rebasing microseconds timestamps in write") {
  val tsStr = "1001-01-01 01:02:03.123456"
  // The raw (non-rebased) value differs by the 6-day Julian/Gregorian gap.
  val nonRebased = "1001-01-07 01:09:05.123456"
  withTempPath { dir =>
    val path = dir.getAbsolutePath
    withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
      Seq(tsStr).toDF("tsS")
        .select($"tsS".cast("timestamp").as("ts"))
        .write.format("avro")
        .save(path)
    }
    // The file metadata indicates if it needs rebase or not, so we can always get the correct
    // result regardless of the "rebase mode" config.
    runInMode(Seq(LEGACY, CORRECTED, EXCEPTION)) { options =>
      checkAnswer(
        spark.read.options(options).format("avro").load(path),
        Row(Timestamp.valueOf(tsStr)))
    }
    // Force to not rebase to prove the written datetime values are rebased and we will get
    // wrong result if we don't rebase while reading.
    withSQLConf("spark.test.forceNoRebase" -> "true") {
      checkAnswer(spark.read.format("avro").load(path), Row(Timestamp.valueOf(nonRebased)))
    }
  }
}

test("SPARK-31183: rebasing milliseconds timestamps in write") {
  val tsStr = "1001-01-01 01:02:03.123456"
  // Millisecond precision truncates the written value.
  val rebased = "1001-01-01 01:02:03.123"
  val nonRebased = "1001-01-07 01:09:05.123"
  // Try both an explicit timestamp-millis logical type and a plain long.
  Seq(
    """{"type": "long","logicalType": "timestamp-millis"}""",
    """"long"""").foreach { tsType =>
    val timestampSchema = s"""
      |{
      | "namespace": "logical",
      | "type": "record",
      | "name": "test",
      | "fields": [
      | {"name": "ts", "type": $tsType}
      | ]
      |}""".stripMargin
    withTempPath { dir =>
      val path = dir.getAbsolutePath
      withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
        Seq(tsStr).toDF("tsS")
          .select($"tsS".cast("timestamp").as("ts"))
          .write
          .option("avroSchema", timestampSchema)
          .format("avro")
          .save(path)
      }
      // The file metadata indicates if it needs rebase or not, so we can always get the correct
      // result regardless of the "rebase mode" config.
      runInMode(Seq(LEGACY, CORRECTED, EXCEPTION)) { options =>
        checkAnswer(
          spark.read.options(options).schema("ts timestamp").format("avro").load(path),
          Row(Timestamp.valueOf(rebased)))
      }
      // Force to not rebase to prove the written datetime values are rebased and we will get
      // wrong result if we don't rebase while reading.
      withSQLConf("spark.test.forceNoRebase" -> "true") {
        checkAnswer(
          spark.read.schema("ts timestamp").format("avro").load(path),
          Row(Timestamp.valueOf(nonRebased)))
      }
    }
  }
}

test("SPARK-31183: rebasing dates in write") {
  withTempPath { dir =>
    val path = dir.getAbsolutePath
    withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
      Seq("1001-01-01").toDF("dateS")
        .select($"dateS".cast("date").as("date"))
        .write.format("avro")
        .save(path)
    }
    // The file metadata indicates if it needs rebase or not, so we can always get the correct
    // result regardless of the "rebase mode" config.
    runInMode(Seq(LEGACY, CORRECTED, EXCEPTION)) { options =>
      checkAnswer(
        spark.read.options(options).format("avro").load(path),
        Row(Date.valueOf("1001-01-01")))
    }
    // Force to not rebase to prove the written datetime values are rebased and we will get
    // wrong result if we don't rebase while reading.
    withSQLConf("spark.test.forceNoRebase" -> "true") {
      checkAnswer(spark.read.format("avro").load(path), Row(Date.valueOf("1001-01-07")))
    }
  }
}
// Opens the single data file under `path` and asserts the value of the given
// Avro file-metadata `key` (pass null to assert the key is absent).
private def checkMetaData(path: java.io.File, key: String, expectedValue: String): Unit = {
  val avroFiles = path.listFiles()
    .filter(f => f.isFile && !f.getName.startsWith(".") && !f.getName.startsWith("_"))
  assert(avroFiles.length === 1)
  val reader = DataFileReader.openReader(avroFiles(0), new GenericDatumReader[GenericRecord]())
  val value = reader.asInstanceOf[DataFileReader[_]].getMetaString(key)
  assert(value === expectedValue)
}

test("SPARK-31327: Write Spark version into Avro file metadata") {
  withTempPath { path =>
    spark.range(1).repartition(1).write.format("avro").save(path.getCanonicalPath)
    checkMetaData(path, SPARK_VERSION_METADATA_KEY, SPARK_VERSION_SHORT)
  }
}

// The legacy-datetime marker key must be present (empty value) only for files
// written in LEGACY rebase mode.
test("SPARK-33163: write the metadata key 'org.apache.spark.legacyDateTime'") {
  def saveTs(dir: java.io.File): Unit = {
    Seq(Timestamp.valueOf("2020-10-15 01:02:03")).toDF()
      .repartition(1)
      .write
      .format("avro")
      .save(dir.getAbsolutePath)
  }
  withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
    withTempPath { dir =>
      saveTs(dir)
      checkMetaData(dir, SPARK_LEGACY_DATETIME, "")
    }
  }
  Seq(CORRECTED, EXCEPTION).foreach { mode =>
    withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> mode.toString) {
      withTempPath { dir =>
        saveTs(dir)
        checkMetaData(dir, SPARK_LEGACY_DATETIME, null)
      }
    }
  }
}
// Regression test: reading back an ordered column must yield each row exactly once.
test("SPARK-33314: RowReader doesn't over-consume when hasNextRow called twice") {
  withTempPath { dir =>
    val path = dir.getCanonicalPath
    // Write three single-column rows, then read them back ordered by the column.
    Seq(1, 2, 3).toDF("index").write.format("avro").save(path)
    val readBack = spark.read.format("avro").load(path).orderBy("index")
    checkAnswer(readBack, Seq(Row(1), Row(2), Row(3)))
  }
}
// In EXCEPTION mode, both writing ancient datetimes and reading 2.4-era files
// without version metadata must fail with an upgrade-guidance error.
test("SPARK-35427: datetime rebasing in the EXCEPTION mode") {
  withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> EXCEPTION.toString) {
    Seq("timestamp-millis", "timestamp-micros").foreach { dt =>
      withTempPath { dir =>
        val df = Seq("1001-01-01 01:02:03.123456")
          .toDF("str")
          .select($"str".cast("timestamp").as("dt"))
        val avroSchema =
          s"""
            |{
            | "type" : "record",
            | "name" : "test_schema",
            | "fields" : [
            | {"name": "dt", "type": {"type": "long", "logicalType": "$dt"}}
            | ]
            |}""".stripMargin
        // Writing an ancient timestamp must fail for both logical types.
        val e = intercept[SparkException] {
          df.write.format("avro").option("avroSchema", avroSchema).save(dir.getCanonicalPath)
        }
        val errMsg = e.getCause.getCause.getCause.asInstanceOf[SparkUpgradeException].getMessage
        assert(errMsg.contains("You may get a different result due to the upgrading"))
      }
    }
    // Writing an ancient date must fail too.
    withTempPath { dir =>
      val df = Seq(java.sql.Date.valueOf("1001-01-01")).toDF("dt")
      val e = intercept[SparkException] {
        df.write.format("avro").save(dir.getCanonicalPath)
      }
      val errMsg = e.getCause.getCause.getCause.asInstanceOf[SparkUpgradeException].getMessage
      assert(errMsg.contains("You may get a different result due to the upgrading"))
    }
  }
  withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_READ.key -> EXCEPTION.toString) {
    // 2.4.5 files carry no Spark version metadata, so reads must fail.
    Seq(
      "before_1582_date_v2_4_5.avro",
      "before_1582_timestamp_micros_v2_4_5.avro",
      "before_1582_timestamp_millis_v2_4_5.avro"
    ).foreach { fileName =>
      val e = intercept[SparkException] {
        spark.read.format("avro").load(getResourceAvroFilePath(fileName)).collect()
      }
      val errMsg = e.getCause.asInstanceOf[SparkUpgradeException].getMessage
      assert(errMsg.contains("You may get a different result due to the upgrading"))
    }
  }
}
}
/** Runs the whole Avro suite against the v1 (FileFormat-based) data source. */
class AvroV1Suite extends AvroSuite {
  override protected def sparkConf: SparkConf =
    super
      .sparkConf
      .set(SQLConf.USE_V1_SOURCE_LIST, "avro")
}
/**
 * Runs the whole Avro suite against the v2 (DataSourceV2-based) source, plus
 * v2-specific tests for partition pruning, filter pushdown, and plan explain.
 */
class AvroV2Suite extends AvroSuite with ExplainSuiteHelper {
  import testImplicits._

  override protected def sparkConf: SparkConf =
    super
      .sparkConf
      .set(SQLConf.USE_V1_SOURCE_LIST, "")

  test("Avro source v2: support partition pruning") {
    withTempPath { dir =>
      Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1))
        .toDF("value", "p1", "p2")
        .write
        .format("avro")
        .partitionBy("p1", "p2")
        .save(dir.getCanonicalPath)
      val df = spark
        .read
        .format("avro")
        .load(dir.getCanonicalPath)
        .where("p1 = 1 and p2 = 2 and value != \\"a\\"")
      val filterCondition = df.queryExecution.optimizedPlan.collectFirst {
        case f: Filter => f.condition
      }
      assert(filterCondition.isDefined)
      // The partitions filters should be pushed down and no need to be reevaluated.
      assert(filterCondition.get.collectFirst {
        case a: AttributeReference if a.name == "p1" || a.name == "p2" => a
      }.isEmpty)
      val fileScan = df.queryExecution.executedPlan collectFirst {
        case BatchScanExec(_, f: AvroScan) => f
      }
      assert(fileScan.nonEmpty)
      assert(fileScan.get.partitionFilters.nonEmpty)
      assert(fileScan.get.dataFilters.nonEmpty)
      // Only the matching partition directory should be scanned.
      assert(fileScan.get.planInputPartitions().forall { partition =>
        partition.asInstanceOf[FilePartition].files.forall { file =>
          file.filePath.contains("p1=1") && file.filePath.contains("p2=2")
        }
      })
      checkAnswer(df, Row("b", 1, 2))
    }
  }

  test("Avro source v2: support passing data filters to FileScan without partitionFilters") {
    withTempPath { dir =>
      Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1))
        .toDF("value", "p1", "p2")
        .write
        .format("avro")
        .save(dir.getCanonicalPath)
      val df = spark
        .read
        .format("avro")
        .load(dir.getCanonicalPath)
        .where("value = 'a'")
      val filterCondition = df.queryExecution.optimizedPlan.collectFirst {
        case f: Filter => f.condition
      }
      assert(filterCondition.isDefined)
      val fileScan = df.queryExecution.executedPlan collectFirst {
        case BatchScanExec(_, f: AvroScan) => f
      }
      assert(fileScan.nonEmpty)
      // Unpartitioned data: only data filters reach the scan.
      assert(fileScan.get.partitionFilters.isEmpty)
      assert(fileScan.get.dataFilters.nonEmpty)
      checkAnswer(df, Row("a", 1, 2))
    }
  }

  // Extracts the (single) BatchScanExec node from a physical plan.
  private def getBatchScanExec(plan: SparkPlan): BatchScanExec = {
    plan.find(_.isInstanceOf[BatchScanExec]).get.asInstanceOf[BatchScanExec]
  }

  test("Avro source v2: same result with different orders of data filters and partition filters") {
    withTempPath { path =>
      val tmpDir = path.getCanonicalPath
      spark
        .range(10)
        .selectExpr("id as a", "id + 1 as b", "id + 2 as c", "id + 3 as d")
        .write
        .partitionBy("a", "b")
        .format("avro")
        .save(tmpDir)
      val df = spark.read.format("avro").load(tmpDir)
      // partition filters: a > 1 AND b < 9
      // data filters: c > 1 AND d < 9
      val plan1 = df.where("a > 1 AND b < 9 AND c > 1 AND d < 9").queryExecution.sparkPlan
      val plan2 = df.where("b < 9 AND a > 1 AND d < 9 AND c > 1").queryExecution.sparkPlan
      assert(plan1.sameResult(plan2))
      val scan1 = getBatchScanExec(plan1)
      val scan2 = getBatchScanExec(plan2)
      assert(scan1.sameResult(scan2))
    }
  }

  test("explain formatted on an avro data source v2") {
    withTempDir { dir =>
      val basePath = dir.getCanonicalPath + "/avro"
      // Regex over the normalized explain output; expression ids are normalized to #x.
      val expected_plan_fragment =
        s"""
          |\\\\(1\\\\) BatchScan
          |Output \\\\[2\\\\]: \\\\[value#xL, id#x\\\\]
          |DataFilters: \\\\[isnotnull\\\\(value#xL\\\\), \\\\(value#xL > 2\\\\)\\\\]
          |Format: avro
          |Location: InMemoryFileIndex\\\\([0-9]+ paths\\\\)\\\\[.*\\\\]
          |PartitionFilters: \\\\[isnotnull\\\\(id#x\\\\), \\\\(id#x > 1\\\\)\\\\]
          |PushedFilters: \\\\[IsNotNull\\\\(value\\\\), GreaterThan\\\\(value,2\\\\)\\\\]
          |ReadSchema: struct\\\\<value:bigint\\\\>
          |""".stripMargin.trim
      spark.range(10)
        .select(col("id"), col("id").as("value"))
        .write.option("header", true)
        .partitionBy("id")
        .format("avro")
        .save(basePath)
      val df = spark
        .read
        .format("avro")
        .load(basePath).where($"id" > 1 && $"value" > 2)
      val normalizedOutput = getNormalizedExplain(df, FormattedMode)
      assert(expected_plan_fragment.r.findAllMatchIn(normalizedOutput).length == 1,
        normalizedOutput)
    }
  }

  test("SPARK-32346: filters pushdown to Avro datasource v2") {
    // Exercise both settings of the pushdown flag.
    Seq(true, false).foreach { filtersPushdown =>
      withSQLConf(SQLConf.AVRO_FILTER_PUSHDOWN_ENABLED.key -> filtersPushdown.toString) {
        withTempPath { dir =>
          Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1))
            .toDF("value", "p1", "p2")
            .write
            .format("avro")
            .save(dir.getCanonicalPath)
          val df = spark
            .read
            .format("avro")
            .load(dir.getCanonicalPath)
            .where("value = 'a'")
          val fileScan = df.queryExecution.executedPlan collectFirst {
            case BatchScanExec(_, f: AvroScan) => f
          }
          assert(fileScan.nonEmpty)
          if (filtersPushdown) {
            assert(fileScan.get.pushedFilters.nonEmpty)
          } else {
            assert(fileScan.get.pushedFilters.isEmpty)
          }
          // The answer is correct either way; pushdown only changes the scan.
          checkAnswer(df, Row("a", 1, 2))
        }
      }
    }
  }
}
| wangmiao1981/spark | external/avro/src/test/scala/org/apache/spark/sql/avro/AvroSuite.scala | Scala | apache-2.0 | 88,070 |
/*
* Copyright 2010 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.grabbyhands
/** specs2 specification for [[Config]]: servers, queues, and derived defaults. */
object ConfigSpec extends SpecBase(2) {

  "config" should {
    "set servers" in {
      val config = new Config()
      config.addServers(Array("host1:1", "host2:2"))
      config.servers.size must be_==(2)
      config.servers(0).name must be_==("host1:1")
      config.servers(1).name must be_==("host2:2")
    }

    "set other values" in {
      val config = new Config()
      config.addServer("host1:1")
      config.maxMessageBytes = 100
      config.maxMessageBytes must be_==(100)
    }

    "overrides work" in {
      val config = new Config()
      config.addServer("host:1")
      // Config-level defaults ...
      config.sendNumConnections must be_==(1)
      config.recvNumConnections must be_==(1)
      config.sendQueueDepth must be_==(1)
      config.recvQueueDepth must be_==(1)
      config.sendNumConnections = 2
      config.recvNumConnections = 3
      config.sendQueueDepth = 4
      config.recvQueueDepth = 5
      // ... are inherited by a queue created afterwards ...
      val cq1 = config.addQueue("q")
      cq1.name must be_==("q")
      cq1.sendNumConnections must be_==(2)
      cq1.recvNumConnections must be_==(3)
      cq1.sendQueueDepth must be_==(4)
      cq1.recvQueueDepth must be_==(5)
      // ... and can then be overridden per queue.
      cq1.sendNumConnections = 6
      cq1.recvNumConnections = 7
      cq1.sendQueueDepth = 8
      cq1.recvQueueDepth = 9
      config.queues.size must be_==(1)
      config.queues.get("q") must beSome[ConfigQueue]
      val cq2 = config.queues("q")
      cq2.name must be_==("q")
      cq2.sendNumConnections must be_==(6)
      cq2.recvNumConnections must be_==(7)
      cq2.sendQueueDepth must be_==(8)
      cq2.recvQueueDepth must be_==(9)
    }

    "automatically increase queue depth" in {
      val config = new Config()
      config.addServer("host:1")
      config.sendNumConnections must be_==(1)
      config.recvNumConnections must be_==(1)
      config.sendQueueDepth must be_==(1)
      config.recvQueueDepth must be_==(1)
      // Raising the connection count scales the queue depth (values asserted
      // below document the observed scaling behavior).
      config.sendNumConnections = 2
      config.sendQueueDepth must be_==(2)
      config.sendQueueDepth = 5
      config.sendQueueDepth must be_==(5)
      config.sendNumConnections = 2
      config.sendQueueDepth must be_==(10)
      config.recvNumConnections = 3
      config.recvQueueDepth must be_==(3)
      config.recvQueueDepth = 7
      config.recvQueueDepth must be_==(7)
      config.recvNumConnections = 4
      config.recvQueueDepth must be_==(28)
    }

    "support multiple queues" in {
      val config = new Config()
      config.addServer("host:1")
      val queues = config.addQueues(List("q1", "q2"))
      queues.size must be_==(2)
      config.queues.size must be_==(2)
      queues.get("q1") must beSome[ConfigQueue]
      queues.get("q2") must beSome[ConfigQueue]
      queues.get("q3") must beNone
      config.addQueue("q3")
      config.queues.size must be_==(3)
      config.queues.get("q1") must beSome[ConfigQueue]
      config.queues.get("q2") must beSome[ConfigQueue]
      config.queues.get("q3") must beSome[ConfigQueue]
      config.queues.get("q4") must beNone
    }

    "silently handle queue collisions" in {
      val config = new Config()
      config.addServer("host:1")
      // Adding the same queue name repeatedly must not create duplicates or throw.
      val queues = config.addQueues(List("q1", "q1"))
      queues.size must be_==(1)
      config.queues.get("q1") must beSome[ConfigQueue]
      config.addQueue("q1")
      queues.size must be_==(1)
      config.queues.get("q1") must beSome[ConfigQueue]
      config.addQueues(List("q1"))
      queues.size must be_==(1)
      config.queues.get("q1") must beSome[ConfigQueue]
    }

    "set transactional" in {
      val config = new Config()
      config.addServer("host:1")
      val queues = config.addQueues(List("q1"))
      config.recvTransactional = true
      queues.size must be_==(1)
      config.recvTransactional must be_==(true)
    }
  }
}
| twitter/grabby-hands | src/test/scala/com/twitter/grabbyhands/ConfigSpec.scala | Scala | apache-2.0 | 4,392 |
/*******************************************************************************
* Copyright (c) 2014 Guillaume DUBUISSON DUPLESSIS <guillaume.dubuisson_duplessis@insa-rouen.fr>.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Public License v3.0
* which accompanies this distribution, and is available at
* http://www.gnu.org/licenses/gpl.html
*
* Contributors:
* Guillaume DUBUISSON DUPLESSIS <guillaume.dubuisson_duplessis@insa-rouen.fr> - initial API and implementation
******************************************************************************/
package arithmetic
import org.scalatest.FunSuite
import arithmetic.Util._
/** Tests for the prime-number stream provided by [[arithmetic.Util]]. */
class UtilTest extends FunSuite {
  test("Taking 25 prime numbers should return the 25 first prime numbers") {
    val expected =
      List(2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97)
    val actual = primes.take(25).toList
    assert(actual == expected)
  }
}
| GuillaumeDD/scala99problems | src/test/scala/arithmetic/UtilTest.scala | Scala | gpl-3.0 | 973 |
package scalax.collection.constrained
package constraints
import scala.annotation.unchecked.{uncheckedVariance => uV}
import scala.language.postfixOps
import scala.collection.Set
import scalax.collection.GraphPredef._
import scalax.collection.{Graph => SimpleGraph}
import scalax.collection.GraphTraversal.AnyConnected
import PreCheckFollowUp._
/** Ensures that the underlying `Graph` is connected if it is undirected
* or weakly connected if it is directed.
*/
/** Ensures that the underlying `Graph` is connected if it is undirected
 * or weakly connected if it is directed.
 */
class Connected[N, E <: EdgeLike[N], G <: Graph[N, E]](override val self: G) extends Constraint[N, E, G](self) {

  /** Skips this pre-check to rely on the post-check `postAdd` except for trivial cases. */
  override def preCreate(nodes: Iterable[N], edges: Iterable[E]) =
    PreCheckResult(
      if (
        edges.isEmpty && nodes.size <= 1 ||
        nodes.isEmpty && edges.size <= 1
      ) Complete
      else PostCheck
    )

  /** `Complete` if `node` is contained even though no addition will be performed;
   * otherwise `Abort` because `node` would become isolated.
   */
  override def preAdd(node: N): PreCheckResult = PreCheckResult.complete(self contains node)

  /** `Complete` if `edge` itself or at least one end of `edge` is already contained;
   * otherwise `Abort`.
   */
  override def preAdd(edge: E): PreCheckResult = PreCheckResult.complete(
    (self contains edge.asInstanceOf[OuterEdge[N, E]]) ||
    (edge exists (self contains _))
  )

  /** `Complete` if `elems` build a connected graph and at least one node of `elems`
   * is already contained; otherwise `Abort`.
   */
  override def preAdd(elems: InParam[N, E]*): PreCheckResult = PreCheckResult.complete {
    val p = Param.Partitions(elems)
    val graphAdd = SimpleGraph.from(p.toOuterNodes, p.toOuterEdges)
    graphAdd.isConnected &&
    (self.isEmpty ||
      (graphAdd.nodes exists (self find _ isDefined)))
  }

  /** Check the whole `newGraph`. */
  override def postAdd(
      newGraph: Graph[N, E],
      passedNodes: Iterable[N],
      passedEdges: Iterable[E],
      preCheck: PreCheckResult
  ) = newGraph.isConnected

  /** Check the whole `newGraph`, reporting a failure when it is disconnected. */
  override def postAdd(
      newGraph: G @uV,
      passedNodes: Iterable[N],
      passedEdges: Iterable[E],
      preCheck: PreCheckResult
  ): Either[PostCheckFailure, G] =
    if (newGraph.isConnected) Right(newGraph)
    else Left(PostCheckFailure(s"Unexpected isolated node found when adding $passedNodes, $passedEdges."))

  /** Checks within any `preSubtract` whether the neighborhood of the elements
   * to be subtracted remains connected after the subtraction thus preventing
   * a full traversal of the graph.
   *
   * @param include nodes in the neighborhood of the nodes/edges to be subtracted.
   * @param excludeNodes nodes to be subtracted.
   * @param excludeEdges edges to be subtracted.
   * @return `true`if all nodes in `include` are connected.
   */
  protected def isConnected(
      include: Set[self.NodeT],
      excludeNodes: Set[self.NodeT],
      excludeEdges: Set[self.EdgeT]
  ): Boolean =
    include.headOption forall { head =>
      // Count the nodes reachable (in any direction) from an arbitrary element
      // of `include` within the restricted subgraph; connectivity holds iff
      // every node of `include` is reached.
      val cnt = head
        .withDirection(AnyConnected)
        .withSubgraph(
          nodes = n => (include contains n) && !(excludeNodes contains n),
          edges = e => !(excludeEdges contains e)
        )
        .size
      cnt == include.size
    }

  /** The removed node's neighbors must stay connected without it and its edges. */
  override def preSubtract(node: self.NodeT, forced: Boolean): PreCheckResult =
    PreCheckResult.complete(isConnected(node.neighbors, Set(node), node.edges.toSet))

  /** The removed edge's ends must stay connected without it; a "ripple" removal
   * (`simple == false`) also drops nodes that belong to this edge only.
   */
  override def preSubtract(edge: self.EdgeT, simple: Boolean): PreCheckResult =
    PreCheckResult.complete(
      if (simple) isConnected(edge.ends.toSet, Set.empty, Set(edge))
      else isConnected(edge.ends.toSet -- edge.privateNodes, edge.privateNodes, Set(edge))
    )

  /** Bulk variant: the joint neighborhood of all removed nodes and edges must
   * stay connected after the subtraction.
   */
  override def preSubtract(nodes: => Set[self.NodeT], edges: => Set[self.EdgeT], simple: Boolean): PreCheckResult =
    PreCheckResult.complete {
      def neighbors(nodes: Set[self.NodeT]) =
        (for (n <- nodes) yield n.neighbors).flatten
      val nodesToInspect = nodes ++ (for {
        e <- edges
        n <- e
      } yield n)
      if (simple)
        isConnected(neighbors(nodesToInspect) -- nodes, nodes, edges ++ (for (n <- nodes) yield n.edges).flatten)
      else
        isConnected(
          neighbors(nodesToInspect) -- nodes,
          nodes ++ (for (e <- edges) yield e.privateNodes).flatten,
          edges ++ (for (n <- nodes) yield n.edges).flatten
        )
    }
}
/** Companion creating `Connected` constraint instances for a given graph. */
object Connected extends ConstraintCompanion[Connected] {
  def apply[N, E <: EdgeLike[N], G <: Graph[N, E]](self: G) = new Connected[N, E, G](self)
}
| scala-graph/scala-graph | constrained/src/main/scala/scalax/collection/constrained/constraints/Connected.scala | Scala | apache-2.0 | 4,631 |
package com.rasterfoundry.datamodel
import geotrellis.proj4.CRS
import geotrellis.raster.{CellType, GridExtent}
import geotrellis.vector.{MultiPolygon, Projected}
import io.circe._
import io.circe.generic.JsonCodec
import java.sql.Timestamp
import java.util.UUID
/** Optional per-scene attributes that scenes can be filtered on: cloud cover,
  * acquisition date, and sun azimuth/elevation. All fields default to `None`.
  */
@JsonCodec
final case class SceneFilterFields(
    cloudCover: Option[Float] = None,
    acquisitionDate: Option[java.sql.Timestamp] = None,
    sunAzimuth: Option[Float] = None,
    sunElevation: Option[Float] = None
)
object SceneFilterFields {
  // Tupled constructor, convenient for building instances from flat rows.
  def tupled = (SceneFilterFields.apply _).tupled

  // Tuple shape matching the case-class fields in declaration order.
  type TupleType =
    (Option[Float], Option[java.sql.Timestamp], Option[Float], Option[Float])
}
/** Processing status of a scene: thumbnail generation, boundary detection,
  * and ingest progress.
  */
@JsonCodec
final case class SceneStatusFields(
    thumbnailStatus: JobStatus,
    boundaryStatus: JobStatus,
    ingestStatus: IngestStatus
)
object SceneStatusFields {
  // Tupled constructor, convenient for building instances from flat rows.
  def tupled = (SceneStatusFields.apply _).tupled

  // Tuple shape matching the case-class fields in declaration order.
  type TupleType = (JobStatus, JobStatus, IngestStatus)
}
/** Raster-level metadata for a scene: where the data lives (`dataPath`), its
  * CRS, band count, cell type, grid extent, available resolutions, and nodata
  * value. All fields are optional and default to `None`.
  */
@JsonCodec
final case class SceneMetadataFields(
    dataPath: Option[String] = None,
    crs: Option[CRS] = None,
    bandCount: Option[Int] = None,
    cellType: Option[CellType] = None,
    gridExtent: Option[GridExtent[Long]] = None,
    resolutions: Option[List[GridExtent[Long]]] = None,
    noDataValue: Option[Double] = None
)
/** A scene (a single unit of imagery) together with its audit fields
  * (created/modified timestamps, creator, owner), access control
  * (`visibility`), footprints, ingest status, and raster metadata.
  */
@JsonCodec
final case class Scene(
    id: UUID,
    createdAt: java.sql.Timestamp,
    createdBy: String,
    modifiedAt: java.sql.Timestamp,
    owner: String,
    visibility: Visibility,
    tags: List[String],
    datasource: UUID,
    sceneMetadata: Json,
    name: String,
    tileFootprint: Option[Projected[MultiPolygon]] = None,
    dataFootprint: Option[Projected[MultiPolygon]] = None,
    metadataFiles: List[String],
    ingestLocation: Option[String] = None,
    filterFields: SceneFilterFields = new SceneFilterFields(),
    statusFields: SceneStatusFields,
    sceneType: Option[SceneType] = None,
    metadataFields: SceneMetadataFields = new SceneMetadataFields()
) {

  /** Identity conversion, letting callers treat scene views uniformly. */
  def toScene: Scene = this

  /** Assemble a [[Scene.WithRelated]] from this scene plus its images,
    * thumbnails, and full datasource.
    *
    * Note: the `datasource` parameter shadows this scene's `datasource: UUID`
    * field; the result embeds the thin form of the passed datasource.
    */
  def withRelatedFromComponents(
      images: List[Image.WithRelated],
      thumbnails: List[Thumbnail],
      datasource: Datasource
  ): Scene.WithRelated =
    Scene.WithRelated(
      this.id,
      this.createdAt,
      this.createdBy,
      this.modifiedAt,
      this.owner,
      this.visibility,
      this.tags,
      datasource.toThin,
      this.sceneMetadata,
      this.name,
      this.tileFootprint,
      this.dataFootprint,
      this.metadataFiles,
      images,
      thumbnails,
      this.ingestLocation,
      this.filterFields,
      this.statusFields,
      this.sceneType,
      this.metadataFields
    )

  /** Assemble a [[Scene.Browse]] view from this scene plus thumbnails, the
    * full datasource (embedded thin), and project/layer membership flags.
    */
  def browseFromComponents(
      thumbnails: List[Thumbnail],
      datasource: Datasource,
      inProject: Option[Boolean],
      inLayer: Option[Boolean]
  ): Scene.Browse = Scene.Browse(
    this.id,
    this.createdAt,
    this.createdBy,
    this.modifiedAt,
    this.owner,
    this.visibility,
    this.tags,
    datasource.toThin,
    this.sceneMetadata,
    this.name,
    this.tileFootprint,
    this.dataFootprint,
    this.metadataFiles,
    thumbnails,
    this.ingestLocation,
    this.filterFields,
    this.statusFields,
    this.sceneType,
    inProject,
    inLayer,
    this.metadataFields
  )

  /** Assemble a [[Scene.ProjectScene]] view from this scene plus thumbnails,
    * the full datasource (embedded thin), and the scene's ordering within the
    * project.
    */
  def projectSceneFromComponents(
      thumbnails: List[Thumbnail],
      datasource: Datasource,
      sceneOrder: Option[Int]
  ): Scene.ProjectScene = Scene.ProjectScene(
    this.id,
    this.createdAt,
    this.createdBy,
    this.modifiedAt,
    this.owner,
    this.visibility,
    this.tags,
    datasource.toThin,
    this.sceneMetadata,
    this.name,
    this.tileFootprint,
    this.dataFootprint,
    this.metadataFiles,
    thumbnails, // already a List[Thumbnail]; redundant `.toList` removed
    this.ingestLocation,
    this.filterFields,
    this.statusFields,
    this.sceneType,
    sceneOrder,
    this.metadataFields
  )

  /** Split the ingest-location URI into a (bucket, key) pair, when present. */
  def bucketAndKey: Option[(String, String)] = ingestLocation map {
    uriToBucketAndKey _
  }
}
/** Companion holding the request/response projections of [[Scene]]. */
object Scene {

  // Cache key under which a scene is stored, derived from its UUID.
  def cacheKey(id: UUID) = s"Scene:$id"

  /** Case class extracted from a POST request. Carries everything needed to
    * create a scene, including banded images and identified thumbnails.
    */
  @JsonCodec
  final case class Create(
      id: Option[UUID],
      visibility: Visibility,
      tags: List[String],
      datasource: UUID,
      sceneMetadata: Json,
      name: String,
      owner: Option[String],
      tileFootprint: Option[Projected[MultiPolygon]],
      dataFootprint: Option[Projected[MultiPolygon]],
      metadataFiles: List[String],
      images: List[Image.Banded],
      thumbnails: List[Thumbnail.Identified],
      ingestLocation: Option[String],
      filterFields: SceneFilterFields = new SceneFilterFields(),
      statusFields: SceneStatusFields,
      sceneType: Option[SceneType] = None,
      metadataFields: Option[SceneMetadataFields] = None
  ) extends OwnerCheck {

    /** Materialize a full [[Scene]] from this request.
      *
      * Generates an id when none was supplied, stamps both `createdAt` and
      * `modifiedAt` with the current time, records the requesting user as
      * creator, and resolves the effective owner via `checkOwner`.
      */
    def toScene(user: User): Scene = {
      val now = new Timestamp(new java.util.Date().getTime)
      val ownerId = checkOwner(user, this.owner)
      Scene(
        id.getOrElse(UUID.randomUUID),
        now, // createdAt
        user.id, // createdBy
        now, // modifiedAt
        ownerId, // owner
        visibility,
        tags,
        datasource,
        sceneMetadata,
        name,
        tileFootprint,
        dataFootprint,
        metadataFiles,
        ingestLocation,
        filterFields,
        statusFields,
        sceneType,
        metadataFields.getOrElse(new SceneMetadataFields)
      )
    }
  }

  /** Scene view including its related images, thumbnails, and a thin
    * datasource, as returned for detail responses.
    */
  @JsonCodec
  final case class WithRelated(
      id: UUID,
      createdAt: Timestamp,
      createdBy: String,
      modifiedAt: Timestamp,
      owner: String,
      visibility: Visibility,
      tags: List[String],
      datasource: Datasource.Thin,
      sceneMetadata: Json,
      name: String,
      tileFootprint: Option[Projected[MultiPolygon]],
      dataFootprint: Option[Projected[MultiPolygon]],
      metadataFiles: List[String],
      images: List[Image.WithRelated],
      thumbnails: List[Thumbnail],
      ingestLocation: Option[String],
      filterFields: SceneFilterFields = new SceneFilterFields(),
      statusFields: SceneStatusFields,
      sceneType: Option[SceneType] = None,
      metadataFields: SceneMetadataFields = new SceneMetadataFields()
  ) {

    /** Collapse this view back to a bare [[Scene]], keeping only the
      * datasource id and dropping images/thumbnails.
      */
    def toScene: Scene =
      Scene(
        id,
        createdAt,
        createdBy,
        modifiedAt,
        owner,
        visibility,
        tags,
        datasource.id,
        sceneMetadata,
        name,
        tileFootprint,
        dataFootprint,
        metadataFiles,
        ingestLocation,
        filterFields,
        statusFields,
        sceneType,
        metadataFields
      )
  }

  /** Scene view for browse/listing responses: thumbnails plus optional flags
    * telling whether the scene is in a given project and/or layer.
    */
  @JsonCodec
  final case class Browse(
      id: UUID,
      createdAt: Timestamp,
      createdBy: String,
      modifiedAt: Timestamp,
      owner: String,
      visibility: Visibility,
      tags: List[String],
      datasource: Datasource.Thin,
      sceneMetadata: Json,
      name: String,
      tileFootprint: Option[Projected[MultiPolygon]],
      dataFootprint: Option[Projected[MultiPolygon]],
      metadataFiles: List[String],
      thumbnails: List[Thumbnail],
      ingestLocation: Option[String],
      filterFields: SceneFilterFields = new SceneFilterFields(),
      statusFields: SceneStatusFields,
      sceneType: Option[SceneType] = None,
      inProject: Option[Boolean] = None,
      inLayer: Option[Boolean] = None,
      metadataFields: SceneMetadataFields = new SceneMetadataFields()
  ) {

    /** Collapse this view back to a bare [[Scene]], keeping only the
      * datasource id and dropping thumbnails and membership flags.
      */
    def toScene: Scene =
      Scene(
        id,
        createdAt,
        createdBy,
        modifiedAt,
        owner,
        visibility,
        tags,
        datasource.id,
        sceneMetadata,
        name,
        tileFootprint,
        dataFootprint,
        metadataFiles,
        ingestLocation,
        filterFields,
        statusFields,
        sceneType,
        metadataFields
      )
  }

  /** Scene view as it appears inside a project: thumbnails plus the scene's
    * ordering within that project (`sceneOrder`).
    */
  @JsonCodec
  final case class ProjectScene(
      id: UUID,
      createdAt: Timestamp,
      createdBy: String,
      modifiedAt: Timestamp,
      owner: String,
      visibility: Visibility,
      tags: List[String],
      datasource: Datasource.Thin,
      sceneMetadata: Json,
      name: String,
      tileFootprint: Option[Projected[MultiPolygon]],
      dataFootprint: Option[Projected[MultiPolygon]],
      metadataFiles: List[String],
      thumbnails: List[Thumbnail],
      ingestLocation: Option[String],
      filterFields: SceneFilterFields = new SceneFilterFields(),
      statusFields: SceneStatusFields,
      sceneType: Option[SceneType] = None,
      sceneOrder: Option[Int],
      metadataFields: SceneMetadataFields = new SceneMetadataFields()
  )
}
| raster-foundry/raster-foundry | app-backend/datamodel/src/main/scala/Scene.scala | Scala | apache-2.0 | 8,579 |
package reswing
import scala.swing.{Color, Component, Dimension, Font, Graphics2D}
import scala.swing.event._
/** Reactive wrapper around a `scala.swing.Component`: component properties are
  * exposed as `ReSwingValue`s and Swing events as `ReSwingEvent`s.
  *
  * All constructor arguments default to `()`, i.e. an unset reactive value.
  * The size-related values are forwarded to [[ReUIElement]].
  */
abstract class ReComponent(
    val background: ReSwingValue[Color] = (),
    val foreground: ReSwingValue[Color] = (),
    val font: ReSwingValue[Font] = (),
    val enabled: ReSwingValue[Boolean] = (),
    minimumSize: ReSwingValue[Dimension] = (),
    maximumSize: ReSwingValue[Dimension] = (),
    preferredSize: ReSwingValue[Dimension] = ()
) extends ReUIElement(minimumSize, maximumSize, preferredSize) {

  // The wrapped Swing peer; lazy and overridable so subclasses can supply
  // a more specific component.
  override protected lazy val peer = new Component with ComponentMixin

  // Reactive view of the peer's focus state, refreshed on focus gained/lost.
  val hasFocus = ReSwingValue.using({ () => peer.hasFocus }, classOf[FocusGained], classOf[FocusLost])

  // Wire each reactive property to its peer getter/setter and the Swing
  // property-change name.
  background.using({ () => peer.background }, peer.background_= _, "background")
  foreground.using({ () => peer.foreground }, peer.foreground_= _, "foreground")
  font.using({ () => peer.font }, peer.font_= _, "font")
  enabled.using({ () => peer.enabled }, peer.enabled_= _, "enabled")

  // Mouse events, grouped as in scala.swing: clicks, moves, and wheel.
  object mouse {
    object clicks {
      val clicked = ReSwingEvent.using(peer.mouse.clicks, classOf[MouseClicked])
      val pressed = ReSwingEvent.using(peer.mouse.clicks, classOf[MousePressed])
      val released = ReSwingEvent.using(peer.mouse.clicks, classOf[MouseReleased])
    }

    object moves {
      val dragged = ReSwingEvent.using(peer.mouse.moves, classOf[MouseDragged])
      val entered = ReSwingEvent.using(peer.mouse.moves, classOf[MouseEntered])
      val exited = ReSwingEvent.using(peer.mouse.moves, classOf[MouseExited])
      val moved = ReSwingEvent.using(peer.mouse.moves, classOf[MouseMoved])
    }

    object wheel {
      val moved = ReSwingEvent.using(peer.mouse.wheel, classOf[MouseWheelMoved])
    }
  }

  // Keyboard events.
  object keys {
    val pressed = ReSwingEvent.using(peer.keys, classOf[KeyPressed])
    val released = ReSwingEvent.using(peer.keys, classOf[KeyReleased])
    val typed = ReSwingEvent.using(peer.keys, classOf[KeyTyped])
  }

  // Mixin routing the peer's paint callbacks through this wrapper; the
  // `__super__*` methods expose the protected super implementations so the
  // outer class's default implementations can delegate back to Swing.
  protected trait ComponentMixin extends Component {
    override def paintComponent(g: Graphics2D) = ReComponent.this.paintComponent(g)
    def __super__paintComponent(g: Graphics2D) = super.paintComponent(g)

    override def paintBorder(g: Graphics2D) = ReComponent.this.paintBorder(g)
    def __super__paintBorder(g: Graphics2D) = super.paintBorder(g)

    override def paintChildren(g: Graphics2D) = ReComponent.this.paintChildren(g)
    def __super__paintChildren(g: Graphics2D) = super.paintChildren(g)

    // On every repaint, push the current geometry into the reactive values
    // before delegating to the wrapper's paint.
    override def paint(g: Graphics2D) = {
      ReComponent.this.location() = location
      ReComponent.this.bounds() = bounds
      ReComponent.this.size() = size
      ReComponent.this.paint(g)
    }
    def __super__paint(g: Graphics2D) = super.paint(g)
  }

  // Default implementations simply delegate to the Swing peer's behavior;
  // subclasses may override to customize painting.
  protected def paintComponent(g: Graphics2D) = peer.__super__paintComponent(g)
  protected def paintBorder(g: Graphics2D) = peer.__super__paintBorder(g)
  protected def paintChildren(g: Graphics2D) = peer.__super__paintChildren(g)
  def paint(g: Graphics2D) = peer.__super__paint(g)
}
object ReComponent {
  // Implicitly unwrap a ReComponent to its underlying scala.swing peer.
  implicit def toComponent(component: ReComponent): Component = component.peer
}
| guidosalva/REScala | Code/Extensions/RESwing/src/main/scala/reswing/ReComponent.scala | Scala | apache-2.0 | 3,155 |
package spray.testkit
import org.specs2.execute.{ Failure, FailureException }
import org.specs2.specification.core.{ Fragments, SpecificationStructure }
import org.specs2.specification.create.DefaultFragmentFactory
/** Glue between spray-testkit's `TestFrameworkInterface` and specs2: test
  * failures raise specs2 `FailureException`s, and `cleanUp()` is appended as
  * a final step after all fragments.
  */
trait Specs2Interface extends TestFrameworkInterface with SpecificationStructure {

  /** Fail the current example with `msg`, trimming this library's own frames
    * from the stack trace so the reported location points at the calling
    * test code (one frame before the first `org.specs2` frame onward).
    */
  def failTest(msg: String) = {
    val trace = new Exception().getStackTrace.toList
    val fixedTrace = trace.drop(trace.indexWhere(_.getClassName.startsWith("org.specs2")) - 1)
    throw new FailureException(Failure(msg, stackTrace = fixedTrace))
  }

  // `fs` must be by-name (`=> Fragments`) to match specs2's
  // SpecificationStructure.map; the original line contained a mis-encoded
  // character ("β") in place of the arrow, which cannot compile.
  override def map(fs: => Fragments) = super.map(fs).append(DefaultFragmentFactory.step(cleanUp()))
}
/** Re-declares the `ReferenceDsl` link/see fragment factories as plain
  * delegating overrides — presumably to strip a modifier (e.g. `implicit`)
  * from the inherited definitions and thereby suppress automatic HTML link
  * creation; TODO confirm against
  * `org.specs2.specification.dsl.ReferenceDsl`.
  */
trait NoAutoHtmlLinkFragments extends org.specs2.specification.dsl.ReferenceDsl {
  override def linkFragment(alias: String) = super.linkFragment(alias)
  override def seeFragment(alias: String) = super.seeFragment(alias)
} | victorfranz/img-upload | src/test/scala/spray/testkit/Specs2Interface.scala | Scala | mit | 883 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack.Continue
import monix.execution.exceptions.DummyException
import monix.reactive.{Observable, Observer}
import monix.eval.Task
import scala.concurrent.Future
import scala.concurrent.duration._
/** Operator test suite for `Observable.delayExecutionWithF`: subscription to
  * the source is delayed until a trigger `Task`/`Future` completes.
  */
object DelayExecutionWithSuite extends BaseOperatorSuite {

  // Sample under test: range(0, sourceCount) whose subscription is gated by
  // a Task that completes after 1 second.
  def createObservable(sourceCount: Int) = Some {
    val trigger = Task.now(1).delayExecution(1.second)
    val o = Observable.range(0, sourceCount).delayExecutionWithF(trigger)
    Sample(o, count(sourceCount), sum(sourceCount), waitFirst, waitNext)
  }

  // Expected number of emitted elements.
  def count(sourceCount: Int) = sourceCount
  // Expected sum of the emitted range 0 until sourceCount.
  def sum(sourceCount: Int) = sourceCount * (sourceCount - 1) / 2
  // The first element arrives only after the 1-second trigger fires;
  // subsequent elements follow without additional delay.
  def waitFirst = 1.second
  def waitNext = Duration.Zero

  // Error-propagation samples are not applicable to this operator.
  def observableInError(sourceCount: Int, ex: Throwable) = None
  def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = None

  // Nothing should be emitted before the delay elapses; after advancing the
  // scheduler by 1 second the single element arrives and the stream completes.
  test("it delays") { implicit s =>
    val obs = Observable.now(1).delayExecution(1.second)
    var wasCompleted = false
    var received = 0

    obs.unsafeSubscribeFn(new Observer[Int] {
      def onError(ex: Throwable) = ()
      def onComplete() = wasCompleted = true

      def onNext(elem: Int) = {
        received += elem
        Continue
      }
    })

    s.tick()
    assertEquals(received, 0)
    s.tick(1.second)
    assertEquals(received, 1)
    assert(wasCompleted)
  }

  // A trigger Future that throws must surface through onError once the
  // scheduler runs the pending task.
  test("delayExecution.onFuture triggering an error") { implicit s =>
    val obs = Observable.now(1).delayExecutionWithF(Future { throw DummyException("dummy") })
    var errorThrown: Throwable = null

    obs.unsafeSubscribeFn(new Observer[Int] {
      def onError(ex: Throwable) =
        errorThrown = ex
      def onComplete() = ()
      def onNext(elem: Int) = Continue
    })

    assertEquals(errorThrown, null)
    s.tick()
    assertEquals(errorThrown, DummyException("dummy"))
  }

  // Sample used by the base suite to verify cancellation while the trigger
  // is still pending.
  def cancelableObservables() = {
    val obs = Observable.now(1L).delayExecutionWithF(Task.now(1).delayExecution(1.second))
    Seq(Sample(obs, 0, 0, 0.seconds, 0.seconds))
  }
}
| Wogan/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DelayExecutionWithSuite.scala | Scala | apache-2.0 | 2,723 |
/* ___ _ ___ _ _ *\
** / __| |/ (_) | | The SKilL Generator **
** \__ \ ' <| | | |__ (c) 2013-16 University of Stuttgart **
** |___/_|\_\_|_|____| see LICENSE **
\* */
package de.ust.skill.generator.c.io
import scala.collection.JavaConversions._
import java.io.PrintWriter
import de.ust.skill.generator.c.GeneralOutputMaker
import de.ust.skill.ir.UserType
/**
* @author Fabian Harth, Timm Felden
* @todo rename skill state to skill file
* @todo ensure 80 characters margin
*/
trait ReaderSourceMaker extends GeneralOutputMaker {
abstract override def make {
super.make
val out = files.open(s"io/${prefix}reader.c")
val prefixCapital = packagePrefix.toUpperCase
out.write(s"""
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <inttypes.h>
#include <glib.h>
#include "../io/${prefix}reader.h"
#include "../io/${prefix}binary_reader.h"
#include "../model/${prefix}string_access.h"
#include "../model/${prefix}storage_pool.h"
#include "../model/${prefix}skill_state.h"
#include "../model/${prefix}field_information.h"
#include "../model/${prefix}type_information.h"
#include "../model/${prefix}types.h"
// This reads all strings of the string block starting at the position of the buffer
// and advances the buffer to the end of the string block.
static void read_string_block ( ${prefix}string_access strings, char **buffer ) {
int64_t number_of_strings = ${prefix}read_v64 ( buffer );
// Store the offsets for all strings contained in the block
int32_t offsets[number_of_strings];
int64_t i;
for ( i = 0; i < number_of_strings; i++ ) {
offsets[i] = ${prefix}read_i32 ( buffer );
}
int64_t previous_offset = 0;
for ( i = 0; i < number_of_strings; i++ ) {
char *new_string = ${prefix}read_string ( buffer, offsets[i] - previous_offset );
${prefix}string_access_add_string ( strings, new_string );
previous_offset = offsets[i];
}
}
// Holds information for one type inside of one type block
typedef struct read_information {
char *type_name;
char *super_type_name;
GList *field_info; // GList of field_read_information. This also defines the order, in which the fields have to be read.
int64_t number_of_instances;
int64_t lbpsi; // Set to 0, if this has no super-type
GList *subtype_order; // GList of read_information. Unknown subtypes will cause an error, as their size is not known,
// thus they cannot be initialized
} read_information;
typedef struct field_read_information {
bool is_constant; // True, if this field is a constant value. In this case, we must not read field data for it.
char *field_name;
int64_t offset;
int64_t pool_index; // If this field holds a reference to a user defined type, this stores the index of its storage pool.
// This is stored so that it can be checked later, whether the referenced type is correct, which can
// not be done immediately, as the referenced type may not be known yet.
} field_read_information;
// This function checks, that the type information for a field in the binary file matches the expected type info for that field.
static void validate_field_type ( ${prefix}type_information target_type_info, ${prefix}type_information read_type_info, char *field_name ) {
if ( target_type_info->type != read_type_info->type ) {
printf ( "Error: expected type %s of the type named %s, but found type %s",
${prefix}type_enum_to_string ( target_type_info->type ), target_type_info->name, ${prefix}type_enum_to_string ( read_type_info->type ) );
exit ( EXIT_FAILURE );
}
if ( ${prefix}type_enum_is_constant ( target_type_info->type ) ) {
// Check the constant value...
if ( target_type_info->constant_value != read_type_info->constant_value ) {
printf ( "Error: expected constant value %" PRId64 " of the type named %s, but found %" PRId64 ".\\n",
target_type_info->constant_value, target_type_info->name, read_type_info->constant_value );
exit ( EXIT_FAILURE );
}
}
if ( ${prefix}type_enum_is_container_type ( target_type_info->type ) ) {
if ( target_type_info->type == ${prefix}MAP ) {
// First, check that the number of base types are equal
if ( g_list_length ( target_type_info->base_types ) != g_list_length ( read_type_info->base_types ) ) {
int64_t target_base_types_length = g_list_length ( target_type_info->base_types );
int64_t read_base_types_length = g_list_length ( read_type_info->base_types );
printf ( "Error: the map named %s of type %s should have %" PRId64 " base types, but found %" PRId64 " base types.\\n",
field_name, target_type_info->name, target_base_types_length, read_base_types_length );
exit ( EXIT_FAILURE );
}
// compare the type of each base type of the map
int64_t i;
${prefix}type_enum first;
${prefix}type_enum second;
for ( i = 0; i < g_list_length ( target_type_info->base_types ); i++ ) {
first = *( (${prefix}type_enum*) g_list_nth_data ( target_type_info->base_types, i ) );
second = *( (${prefix}type_enum*) g_list_nth_data ( read_type_info->base_types, i ) );
if ( first != second ) {
printf ( "Error: The base types of the map do not match. Field name %s, type name %s.\\n",
field_name, target_type_info->name );
exit ( EXIT_FAILURE );
}
}
} else {
if ( target_type_info->element_type->type != read_type_info->element_type->type ) {
printf ( "Expected %s as base type, but found %s: field name %s, type name %s.\\n",
${prefix}type_enum_to_string ( target_type_info->element_type->type ), ${prefix}type_enum_to_string ( read_type_info->element_type->type ),
field_name, target_type_info->name );
exit ( EXIT_FAILURE );
}
if ( target_type_info->type == ${prefix}CONSTANT_LENGTH_ARRAY ) {
if ( target_type_info->array_length != read_type_info->array_length ) {
printf ( "Error: Expected array length %" PRId64 ", but found length %" PRId64 ". Field name %s of type %s.\\n",
target_type_info->array_length, read_type_info->array_length, field_name, target_type_info->name );
exit ( EXIT_FAILURE );
}
}
}
}
}
static field_read_information *read_field_info ( ${prefix}skill_state state, ${prefix}string_access strings, char *type_name, char **buffer ) {
field_read_information *result = malloc ( sizeof ( field_read_information ) );
result->field_name = 0;
result->offset = 0;
result->pool_index = -1;
result->is_constant = false;
// The type information comes before the field name in the binary file.
// Therefore it is stored in a local variable so that it can be compared to the known information later
// if the type is known.
${prefix}type_information local_type_info = ${prefix}type_information_new ();
int64_t field_restrictions = ${prefix}read_v64 ( buffer );
if ( field_restrictions != 0 ) {
// TODO
printf ( "Error. Field restrictions not yet implemented.\\n" );
exit ( EXIT_FAILURE );
}
int8_t type_index = ${prefix}read_i8 ( buffer );
${prefix}type_enum type = ${prefix}type_enum_from_int ( type_index );
local_type_info->type = type;
if ( ${prefix}type_enum_is_constant ( type ) ) {
result->is_constant = true;
if ( type == ${prefix}CONSTANT_I8 ) {
local_type_info->constant_value = ${prefix}read_i8 ( buffer );
} else if ( type == ${prefix}CONSTANT_I16 ) {
local_type_info->constant_value = ${prefix}read_i16 ( buffer );
} else if ( type == ${prefix}CONSTANT_I32 ) {
local_type_info->constant_value = ${prefix}read_i32 ( buffer );
} else if ( type == ${prefix}CONSTANT_I64 ) {
local_type_info->constant_value = ${prefix}read_i64 ( buffer );
} else if ( type == ${prefix}CONSTANT_V64 ) {
local_type_info->constant_value = ${prefix}read_v64 ( buffer );
}
}
if ( type == ${prefix}USER_TYPE ) {
int64_t storage_pool_index = type_index - 32;
result->pool_index = storage_pool_index;
}
if ( ${prefix}type_enum_is_container_type ( type ) ) {
local_type_info->element_type = ${prefix}type_information_new ();
if ( type == ${prefix}CONSTANT_LENGTH_ARRAY ) {
int64_t array_length = ${prefix}read_v64 ( buffer );
local_type_info->array_length = array_length;
int64_t element_type_index = ${prefix}read_i8 ( buffer );
if ( ${prefix}type_enum_from_int ( element_type_index ) == ${prefix}USER_TYPE ) {
local_type_info->element_type->type = ${prefix}USER_TYPE;
} else {
local_type_info->element_type->type = ${prefix}type_enum_from_int ( element_type_index );
}
} else if ( type == ${prefix}MAP ) {
int64_t number_of_types = ${prefix}read_v64 ( buffer );
int64_t i;
${prefix}type_information current_type;
for ( i = 0; i < number_of_types; i++ ) {
current_type = ${prefix}type_information_new ();
current_type->type = ${prefix}type_enum_from_int ( ${prefix}read_i8 ( buffer ) );
local_type_info->base_types = g_list_append ( local_type_info->base_types, current_type );
}
} else {
int64_t element_type_index = ${prefix}read_i8 ( buffer );
if ( ${prefix}type_enum_from_int ( element_type_index ) == ${prefix}USER_TYPE ) {
local_type_info->element_type->type = ${prefix}USER_TYPE;
} else {
local_type_info->element_type->type = ${prefix}type_enum_from_int ( element_type_index );
}
}
}
int64_t field_name_index = ${prefix}read_v64 ( buffer );
char *field_name = ${prefix}string_access_get_string_by_id ( strings, field_name_index );
if ( !field_name ) {
// TODO
printf ( "Error: didn't find string with index %" PRId64 ".\\n", field_name_index );
exit ( EXIT_FAILURE );
}
result->field_name = field_name;
${prefix}storage_pool pool = (${prefix}storage_pool) g_hash_table_lookup ( state->pools, type_name );
if ( pool ) {
// This is a known type
${prefix}field_information field_info = (${prefix}field_information) g_hash_table_lookup ( pool->declaration->fields, field_name );
if ( field_info ) {
// This is a known field, so check, that the previously read type-information matches the known information of that field
validate_field_type ( field_info->type_info, local_type_info, field_info->name );
}
}
if ( !${prefix}type_enum_is_constant ( type ) ) {
int64_t offset = ${prefix}read_v64 ( buffer );
result->offset = offset;
}
${prefix}type_information_destroy ( local_type_info );
return result;
}
// Reads type information of one instantiated type and returns the read_information describing that type
static read_information *read_single_type_info ( ${prefix}skill_state state, ${prefix}string_access strings, int64_t *pool_id, GHashTable *seen_types, char **buffer ) {
bool known_type = false; // will be set to true, if this binding knows the type.
bool seen_type = false; // will be set to true, if there already was a storage pool with that type in the binary file.
read_information *result;
result = malloc ( sizeof ( read_information ) );
result->type_name = 0;
result->super_type_name = 0;
result->field_info = 0;
result->number_of_instances = 0;
result->subtype_order = 0;
result->lbpsi = 0;
// Read the name
int64_t type_name_index = ${prefix}read_v64 ( buffer );
char *type_name = ${prefix}string_access_get_string_by_id ( strings, type_name_index );
if ( type_name == 0 ) {
printf ( "Error: string with id '%" PRId64 "' not found.", type_name_index );
exit ( EXIT_FAILURE );
}
result->type_name = type_name;
if ( g_hash_table_contains ( seen_types, type_name ) ) {
seen_type = true;
}
${prefix}storage_pool pool = (${prefix}storage_pool) g_hash_table_lookup ( state->pools, type_name );
if ( pool ) {
known_type = true;
pool->declared_in_file = true;
if ( !seen_type ) {
pool->id = *pool_id;
}
} else {
known_type = false;
}
if ( !seen_type ) {
(*pool_id)++;
}
// If this type already appeared in a previous type block, the super type field is not present.
char *super_type_name = 0;
if ( seen_type ) {
super_type_name = ( (read_information*) g_hash_table_lookup ( seen_types, type_name ) )->super_type_name;
} else {
int64_t super_type_string_index = ${prefix}read_v64 ( buffer );
super_type_name = ${prefix}string_access_get_string_by_id ( strings, super_type_string_index );
}
if ( known_type && !seen_type ) {
${prefix}type_declaration declaration = pool->declaration;
// Check that the right super type is referenced
if ( super_type_name == 0 ) {
if ( result->super_type_name != 0 ) {
printf ( "Error: type %s defines no super type, but should be: %s.\\n", type_name, declaration->super_type->name );
exit ( EXIT_FAILURE );
}
} else {
if ( !declaration->super_type ) {
// TODO
printf ( "Binary file defines super type %s of type %s, but should be none.\\n", super_type_name, type_name );
exit ( EXIT_FAILURE );
}
if ( !( strcmp ( super_type_name, declaration->super_type->name ) == 0 ) ) {
// TODO
printf ( "Expected super-type '%s', but was '%s'.\\n", declaration->super_type->name, super_type_name );
exit ( EXIT_FAILURE );
}
}
}
if ( known_type && super_type_name ) {
// Type information about the super type should already be present.
${prefix}storage_pool super_pool = pool->super_pool;
read_information *super_read_info = (read_information*) g_hash_table_lookup ( seen_types, super_pool->declaration->name );
if ( super_read_info == 0 ) {
printf ( "Error. super type '%s' has to be declared before the supbtype '%s'.\\n", super_pool->declaration->name, pool->declaration->name );
exit ( EXIT_FAILURE );
}
super_read_info->subtype_order = g_list_append ( super_read_info->subtype_order, result );
}
if ( super_type_name ) {
result->super_type_name = super_type_name;
// The lbpsi field is only present, if there is a super type.
int64_t lbpsi = ${prefix}read_v64 ( buffer );
result->lbpsi = lbpsi;
result->super_type_name = super_type_name;
}
int64_t number_of_instances = ${prefix}read_v64 ( buffer );
result->number_of_instances = number_of_instances;
// If this type already appeared in a previous type block, the restrictions are not read again.
if ( !seen_type ) {
int64_t restrictions = ${prefix}read_v64 ( buffer );
if ( restrictions != 0 ) {
// TODO
printf ( "Restrictions not yet implemented.\\n" );
exit ( EXIT_FAILURE );
}
}
int64_t number_of_fields = ${prefix}read_v64 ( buffer );
// Check, how many fields of this type have already been declared in previous type blocks.
// Those fields don't declare their type, name, and restrictions again.
int64_t number_of_known_fields = 0;
read_information *previous_read_info = (read_information*) g_hash_table_lookup ( seen_types, type_name );
if ( previous_read_info ) {
number_of_known_fields = g_list_length ( previous_read_info->field_info );
}
int64_t i = 0;
for ( i = 0; i < number_of_known_fields; i++ ) {
field_read_information *pool_field_info = (field_read_information*) g_list_nth_data ( previous_read_info->field_info, i );
field_read_information *new_field_info = malloc ( sizeof ( field_read_information ) );
new_field_info->field_name = pool_field_info->field_name;
new_field_info->pool_index = pool_field_info->pool_index;
if ( number_of_instances > 0 ) {
// The offset for previously declared fields is only present, if new instances are added
new_field_info->offset = ${prefix}read_v64 ( buffer );
} else {
new_field_info->offset = 0;
}
result->field_info = g_list_append ( result->field_info, new_field_info );
}
if ( number_of_instances == 0 ) {
// If there are no new instances, we haven't actually read fields, only taken field information from previous blocks.
i = 0;
}
while ( i < number_of_fields ) {
// This is a field, which is declared in this block and has not beed declared before.
result->field_info = g_list_append ( result->field_info, read_field_info ( state, strings, type_name, buffer ) );
i++;
}
g_hash_table_insert ( seen_types, type_name, result );
return result;
}
// This reads the type information, but not the actual instances.
// It returns a list of read_information in the order in which they have to be read
static GList *read_type_information ( ${prefix}skill_state state, ${prefix}string_access strings, int64_t *pool_id, GHashTable *seen_types, char **buffer ) { // returns a GList of read_information
GList *result = 0;
int64_t number_of_instantiated_types = ${prefix}read_v64 ( buffer );
int64_t i;
for ( i = 0; i < number_of_instantiated_types; i++ ) {
result = g_list_append ( result, read_single_type_info ( state, strings, pool_id, seen_types, buffer ) );
}
return result;
}
// The SKilL specification requires fields to be in the same order as they appeared in previous type blocks.
// Thus, check for each previously declared field, that it appears in the current fields at the same position.
static void validate_field_order ( ${prefix}skill_state state, read_information *read_information ) {
${prefix}storage_pool pool = (${prefix}storage_pool) g_hash_table_lookup ( state->pools, read_information->type_name );
if ( !pool ) {
// This is an unknown type. Its fields are ignored anyway.
return;
}
GList *previous_fields = pool->fields;
GList *new_field_iter = read_information->field_info;
${prefix}field_information previous_field;
char *current_field_name;
while ( previous_fields ) {
previous_field = (${prefix}field_information) previous_fields->data;
if ( !new_field_iter ) {
// There are more previously defined fields than the current type block defines.
// TODO
printf ( "Field '%s' not found. It was declared in a previous block.\\n", previous_field->name );
exit ( EXIT_FAILURE );
}
current_field_name = ( (field_read_information*) new_field_iter->data )->field_name;
if ( !( strcmp ( previous_field->name, current_field_name ) == 0 ) ) {
printf ( "Error: expected field named '%s', which was defined in a previous block but found field named '%s'.\\n",
previous_field->name, current_field_name );
exit ( EXIT_FAILURE );
}
previous_fields = previous_fields->next;
new_field_iter = new_field_iter->next;
}
}
// Recursively allocates the new instances for read_info's type and all of its subtypes
// directly inside the base pool's `instances` array (starting at *index), then mirrors
// references to those instances into this sub-pool's own instance array.
// `*index` is advanced past every instance created by this call (including subtypes).
static void create_sub_pool_instances ( ${prefix}skill_state state, GArray *instances, read_information *read_info, GHashTable *seen_types, int64_t *index ) {
    int64_t old_index = *index; // This will be required later.
    ${prefix}storage_pool pool = g_hash_table_lookup ( state->pools, read_info->type_name );
    int64_t number_of_new_instances = read_info->number_of_instances;
    int64_t number_of_old_instances = pool->instances->len;
    // Count how many of the new instances actually belong to subtypes; the
    // remainder are instances of this exact type.
    int64_t number_of_sub_instances = 0;
    GList *iterator;
    read_information *subtype_read_info;
    for ( iterator = read_info->subtype_order; iterator; iterator = iterator->next ) {
        subtype_read_info = (read_information*) iterator->data;
        number_of_sub_instances += subtype_read_info->number_of_instances;
    }
    // Sanity check: the declared instance count must fit into the base pool's array.
    if ( *index + number_of_new_instances > instances->len ) {
        // BUG FIX: error message previously read "nuber" instead of "number".
        printf ( "Error: number of instances of subtype is not correct: type %s.\\n", pool->declaration->name );
        exit ( EXIT_FAILURE );
    }
    int64_t i;
    for ( i = 0; i < number_of_new_instances - number_of_sub_instances; i++ ) {
        // Those are new instances of this exact type
        g_array_index ( instances, ${prefix}skill_type, *index ) = calloc ( 1, pool->declaration->size );
        g_array_index ( instances, ${prefix}skill_type, *index )->skill_id = 1;
        g_array_index ( instances, ${prefix}skill_type, *index )->declaration = pool->declaration;
        g_array_index ( instances, ${prefix}skill_type, *index )->state = state;
        (*index)++;
    }
    // Now allocate memory for sub-types (this advances *index further).
    for ( iterator = read_info->subtype_order; iterator; iterator = iterator->next ) {
        create_sub_pool_instances ( state, instances, (read_information*) iterator->data, seen_types, index );
    }
    // At this point, all instances of this type (including subtypes) already have memory allocated
    // and are referenced in the base-pool's instance array. Thus now, we can set the references of this pool's instance array.
    g_array_set_size ( pool->instances, number_of_new_instances + number_of_old_instances );
    for ( i = 0; i < number_of_new_instances; i++ ) {
        g_array_index ( pool->instances, ${prefix}skill_type, number_of_old_instances + i ) = g_array_index ( instances, ${prefix}skill_type, old_index + i );
    }
}
// This allocates memory for new instances.
// This has to be done for all types before reading any field data so that references to user defined types
// Can already be set to the correct target.
// Called only for base types; sub-pools are handled recursively via create_sub_pool_instances.
static void create_new_instances ( ${prefix}skill_state state, read_information *read_info, GHashTable *seen_types ) {
    ${prefix}storage_pool pool = (${prefix}storage_pool) g_hash_table_lookup ( state->pools, read_info->type_name );
    if ( !pool ) {
        // This is an unknown type. No instances of that type will be created.
        return;
    }
    // Some of the new instances may be instances of subtypes. Therefore collect the number of new instances of sub-pools
    int64_t number_of_sub_instances = 0;
    GList *iterator;
    read_information *subtype_read_info;
    for ( iterator = read_info->subtype_order; iterator; iterator = iterator->next ) {
        subtype_read_info = (read_information*) iterator->data;
        number_of_sub_instances += subtype_read_info->number_of_instances;
    }
    int64_t number_of_old_instances = pool->instances->len;
    int64_t number_of_new_instances = read_info->number_of_instances;
    // create new instances
    int64_t index;
    // Grow the base pool's array up front; the loops below fill the new slots.
    g_array_set_size ( pool->instances, number_of_old_instances + number_of_new_instances );
    // Allocating memory for new instances needs to use the size of the actual type (may be a subtype).
    // So first, only the new instances of this exact type are allocated.
    for ( index = number_of_old_instances; index < number_of_old_instances + number_of_new_instances - number_of_sub_instances; index++ ) {
        g_array_index ( pool->instances, ${prefix}skill_type, index ) = calloc ( 1, pool->declaration->size );
        g_array_index ( pool->instances, ${prefix}skill_type, index )->skill_id = 1;
        g_array_index ( pool->instances, ${prefix}skill_type, index )->declaration = pool->declaration;
        g_array_index ( pool->instances, ${prefix}skill_type, index )->state = state;
    }
    // index is now the position of the first instance of a sub-type.
    // Now allocate memory for instances of sub-types.
    // The order is given by read_info->subtype_order
    for ( iterator = read_info->subtype_order; iterator; iterator = iterator->next ) {
        create_sub_pool_instances ( state, pool->instances, (read_information*) iterator->data, seen_types, &index );
    }
}
// Reads all instances of the type specified by the given read_information
// Field data for unknown types/fields is skipped by advancing *buffer using the
// per-field end offsets; *last_offset tracks the end offset of the last field
// that was fully consumed (read or skipped).
// NOTE: the parameter deliberately shadows the `read_information` typedef name — confirm this is intended.
static void read_field_data ( ${prefix}skill_state state, ${prefix}string_access strings, read_information *read_information, int64_t *last_offset, char **buffer ) {
    ${prefix}field_information field_info;
    ${prefix}storage_pool pool = (${prefix}storage_pool) g_hash_table_lookup ( state->pools, read_information->type_name );
    if ( !pool ) {
        // This is an unknown type, its field data will simply be skipped.
        GList *iterator;
        for ( iterator = read_information->field_info; iterator; iterator = iterator->next ) {
            field_read_information *field_read_info = (field_read_information*) ( iterator->data );
            // Constant fields occupy no space in the data section, so only
            // non-constant fields contribute to the skip distance.
            if ( !field_read_info->is_constant ) {
                (*buffer) += field_read_info->offset - *last_offset;
                *last_offset = field_read_info->offset;
            }
        }
        return;
    }
    // the fields have already been validated, so at this point, it is guaranteed, that the fields already declared
    // in previous blocks, appear in this block in the same order and before new fields.
    // Set the new_fields pointer to the first field, that hasn't been declared previously.
    GList *new_fields = g_list_nth ( read_information->field_info, g_list_length ( pool->fields ) );
    int64_t i;
    int64_t number_of_new_instances = read_information->number_of_instances;
    // To read field data, those steps need to be done:
    // 1. If this block adds new instances, read data for those instances of fields,
    // that have already been declared in the previous block
    // 2. read new fields for all instances of that type. At this point, no new instances have to be created.
    // Store the last read offset, so that field data can be skipped
    // 1. read previously declared fields
    field_read_information *pool_field_info;
    for ( i = 0; i < g_list_length ( pool->fields ); i++ ) {
        pool_field_info = (field_read_information*) g_list_nth_data ( read_information->field_info, i );
        if ( !pool_field_info->is_constant ) {
            field_info = (${prefix}field_information) g_hash_table_lookup ( pool->declaration->fields, pool_field_info->field_name );
            if ( !field_info ) { // This is an unknown field, thus the field data can be skipped
                (*buffer) += pool_field_info->offset - *last_offset;
                *last_offset = pool_field_info->offset;
            } else {
                // Previously declared fields only carry data for the instances
                // added by this block (older instances were read earlier).
                // This is the index of the first instance for which field data has to be set in the instance array of the storage pool
                int64_t start = pool->instances->len - number_of_new_instances;
                // This is the last index + 1 (so that it can be used in a for-loop)
                int64_t end = pool->instances->len;
                int64_t j;
                for ( j = start; j < end; j++ ) {
                    field_info->read ( state, strings, g_array_index ( pool->instances, ${prefix}skill_type, j ), buffer );
                }
                *last_offset = pool_field_info->offset;
            }
        }
    }
    // 2. read new fields
    GList *field_info_iter;
    field_read_information *field_read_info;
    for ( field_info_iter = new_fields; field_info_iter; field_info_iter = field_info_iter->next ) {
        field_read_info = (field_read_information*) field_info_iter->data;
        if ( !field_read_info->is_constant ) {
            field_info = (${prefix}field_information) g_hash_table_lookup ( pool->declaration->fields, field_read_info->field_name );
            if ( !field_info ) { // This is an unknown field, thus the field data can be skipped
                (*buffer) += field_read_info->offset - *last_offset;
                *last_offset = field_read_info->offset;
            } else {
                // This is field data of new fields, thus they have to be read for all existing instances of that type.
                for ( i = 0; i < pool->instances->len; i++ ) {
                    field_info->read ( state, strings, g_array_index ( pool->instances, ${prefix}skill_type, i ), buffer );
                }
                *last_offset = field_read_info->offset;
            }
        }
    }
    // Now update the field-list of the storage_pool so that it contains the new fields as well.
    for ( field_info_iter = new_fields; field_info_iter; field_info_iter = field_info_iter->next ) {
        field_info = (${prefix}field_information) g_hash_table_lookup ( pool->declaration->fields, ( (field_read_information*) field_info_iter->data )->field_name );
        // for an unknown type, the field_info will be null. In that case, just ignore it.
        if ( field_info ) {
            pool->fields = g_list_append ( pool->fields, field_info );
        }
    }
}
// This reads type information and all instances contained in one type block.
// Processing happens in three phases over the same read_information list.
static void read_type_block ( ${prefix}skill_state state, ${prefix}string_access strings, char **buffer, int64_t *pool_id, GHashTable *seen_types ) {
    // Phase 0: parse the type declarations of this block.
    GList *read_information_list = read_type_information ( state, strings, pool_id, seen_types, buffer );
    GList *iterator;
    // Phase 1: ensure previously declared fields reappear in the same order.
    for ( iterator = read_information_list; iterator; iterator = iterator->next ) {
        validate_field_order ( state, (read_information*) iterator->data );
    }
    // Phase 2: allocate instances before reading field data, so references can be resolved.
    // Creating new instances needs to respect instances of sub-pools, thus it is only called on storage pools of base types.
    for ( iterator = read_information_list; iterator; iterator = iterator->next ) {
        if ( ( (read_information*) iterator->data )->super_type_name == 0 ) {
            create_new_instances ( state, (read_information*) iterator->data, seen_types );
        }
    }
    // Phase 3: read (or skip) the actual field data.
    // Keep the last offset value in a variable, so that we can determine how many bytes to skip for unknown fields
    int64_t last_offset = 0;
    for ( iterator = read_information_list; iterator; iterator = iterator->next ) {
        read_field_data ( state, strings, (read_information*) iterator->data, &last_offset, buffer );
    }
}
// Reads the binary file at the given location and serializes all instances into this skill state.
// The whole file is loaded into memory, then consumed as a sequence of
// (string block, type block) pairs until all bytes are read.
void ${prefix}read_file ( ${prefix}skill_state state, char *filename ) {
    FILE *file;
    char *file_contents;
    char *read_head;
    int64_t file_length;
    file = fopen ( filename, "rb");
    if ( !file )
    {
        fprintf ( stderr, "Unable to open file %s\\n", filename );
        exit ( EXIT_FAILURE );
        // TODO
        return;
    }
    // Get file length
    fseek ( file, 0, SEEK_END );
    file_length = ftell ( file );
    fseek ( file, 0, SEEK_SET );
    // NOTE(review): one extra byte is allocated but never written (no NUL
    // terminator is appended) — confirm whether the +1 is still needed.
    file_contents = (char *) malloc ( file_length + 1 );
    if ( !file_contents )
    {
        fprintf ( stderr, "Memory error!\\n" );
        fclose ( file );
        exit ( EXIT_FAILURE );
        // TODO
        return;
    }
    // Read file contents
    // NOTE(review): the fread return value is unchecked; a short read would go unnoticed.
    fread ( file_contents, file_length, 1, file ) ;
    fclose ( file );
    read_head = file_contents;
    int64_t number_of_read_bytes = 0;
    int64_t pool_id = 0;
    // We need to store some information for the types that have already been read.
    // This maps type-name -> read_information and will contain entries for all seen types during the deserialization.
    GHashTable *seen_types = g_hash_table_new ( g_str_hash, g_str_equal );
    ${prefix}string_access strings = ${prefix}string_access_new ();
    // The file body alternates string blocks and type blocks; track consumed
    // bytes via read_head movement.
    while ( number_of_read_bytes < file_length ) {
        char *previous_position = read_head;
        read_string_block ( strings, &read_head );
        read_type_block ( state, strings, &read_head, &pool_id, seen_types );
        number_of_read_bytes += read_head - previous_position;
    }
    g_hash_table_destroy ( seen_types );
    // NOTE(review): the caller's filename pointer is stored without copying —
    // it must stay valid for the lifetime of the state.
    state->filename = filename;
    state->strings = strings;
    free ( file_contents );
}
""")
out.close()
}
}
| skill-lang/skill | src/main/scala/de/ust/skill/generator/c/io/ReaderSourceMaker.scala | Scala | bsd-3-clause | 32,944 |
package smarthouse.restapi.utils
import com.typesafe.config.ConfigFactory
/** Mixes in application settings loaded from the Typesafe configuration (`application.conf`). */
trait Config {
  // The full configuration tree, loaded once per concrete instance.
  private val root = ConfigFactory.load()

  // Sub-trees for the two configured components.
  private val http = root.getConfig("http")
  private val database = root.getConfig("database")

  /** Network interface the HTTP server binds to. */
  val httpHost: String = http.getString("interface")
  /** TCP port the HTTP server listens on. */
  val httpPort: Int = http.getInt("port")

  /** JDBC connection URL for the backing database. */
  val jdbcUrl: String = database.getString("url")
  /** Database user name. */
  val dbUser: String = database.getString("user")
  /** Database password. */
  val dbPassword: String = database.getString("password")
}
| andrewobukhov/smart-house | src/main/scala/smarthouse/restapi/utils/Config.scala | Scala | mit | 497 |
/* FakeSQSAsync.scala
*
* Copyright (c) 2013 bizo.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package squishy.fake
import com.amazonaws.AmazonWebServiceRequest
import com.amazonaws.handlers.AsyncHandler
import com.amazonaws.services.sqs.AmazonSQSAsync
import com.amazonaws.services.sqs.model._
import java.util.concurrent.{
Callable,
Future,
Executors,
TimeUnit
}
/**
 * A simple implementation of the Amazon SQS asynchronous interface for testing purposes.
 *
 * Every `*Async` method schedules the matching synchronous operation inherited
 * from [[FakeSQS]] on a cached thread pool and returns the resulting future.
 * Overloads taking an [[AsyncHandler]] additionally invoke the handler's
 * success/error callbacks from the worker thread.
 */
class FakeSQSAsync extends FakeSQS with AmazonSQSAsync {

  /** The executor used to schedule requests. */
  val executor = Executors.newCachedThreadPool()

  // --- Queue lifecycle operations -------------------------------------------

  override def createQueueAsync(request: CreateQueueRequest) =
    submit(createQueue, request)

  override def createQueueAsync(
    request: CreateQueueRequest, handler: AsyncHandler[CreateQueueRequest, CreateQueueResult]) =
    submit(createQueue, request, handler)

  override def listQueuesAsync(request: ListQueuesRequest) =
    submit(listQueues, request)

  override def listQueuesAsync(request: ListQueuesRequest, handler: AsyncHandler[ListQueuesRequest, ListQueuesResult]) =
    submit(listQueues, request, handler)

  override def deleteQueueAsync(request: DeleteQueueRequest) =
    submitVoid(deleteQueue, request)

  override def deleteQueueAsync(request: DeleteQueueRequest, handler: AsyncHandler[DeleteQueueRequest, Void]) =
    submitVoid(deleteQueue, request, handler)

  override def getQueueUrlAsync(request: GetQueueUrlRequest) =
    submit(getQueueUrl, request)

  override def getQueueUrlAsync(
    request: GetQueueUrlRequest, handler: AsyncHandler[GetQueueUrlRequest, GetQueueUrlResult]) =
    submit(getQueueUrl, request, handler)

  // --- Queue attribute operations -------------------------------------------

  override def getQueueAttributesAsync(request: GetQueueAttributesRequest) =
    submit(getQueueAttributes, request)

  override def getQueueAttributesAsync(
    request: GetQueueAttributesRequest, handler: AsyncHandler[GetQueueAttributesRequest, GetQueueAttributesResult]) =
    submit(getQueueAttributes, request, handler)

  override def setQueueAttributesAsync(request: SetQueueAttributesRequest) =
    submitVoid(setQueueAttributes, request)

  override def setQueueAttributesAsync(
    request: SetQueueAttributesRequest, handler: AsyncHandler[SetQueueAttributesRequest, Void]) =
    submitVoid(setQueueAttributes, request, handler)

  // --- Message operations ----------------------------------------------------

  override def sendMessageAsync(request: SendMessageRequest) =
    submit(sendMessage, request)

  override def sendMessageAsync(
    request: SendMessageRequest, handler: AsyncHandler[SendMessageRequest, SendMessageResult]) =
    submit(sendMessage, request, handler)

  override def sendMessageBatchAsync(request: SendMessageBatchRequest) =
    submit(sendMessageBatch, request)

  override def sendMessageBatchAsync(
    request: SendMessageBatchRequest, handler: AsyncHandler[SendMessageBatchRequest, SendMessageBatchResult]) =
    submit(sendMessageBatch, request, handler)

  override def receiveMessageAsync(request: ReceiveMessageRequest) =
    submit(receiveMessage, request)

  override def receiveMessageAsync(
    request: ReceiveMessageRequest, handler: AsyncHandler[ReceiveMessageRequest, ReceiveMessageResult]) =
    submit(receiveMessage, request, handler)

  override def changeMessageVisibilityAsync(request: ChangeMessageVisibilityRequest) =
    submitVoid(changeMessageVisibility, request)

  override def changeMessageVisibilityAsync(
    request: ChangeMessageVisibilityRequest, handler: AsyncHandler[ChangeMessageVisibilityRequest, Void]) =
    submitVoid(changeMessageVisibility, request, handler)

  override def changeMessageVisibilityBatchAsync(request: ChangeMessageVisibilityBatchRequest) =
    submit(changeMessageVisibilityBatch, request)

  override def changeMessageVisibilityBatchAsync(
    request: ChangeMessageVisibilityBatchRequest,
    handler: AsyncHandler[ChangeMessageVisibilityBatchRequest, ChangeMessageVisibilityBatchResult]) =
    submit(changeMessageVisibilityBatch, request, handler)

  override def deleteMessageAsync(request: DeleteMessageRequest) =
    submitVoid(deleteMessage, request)

  override def deleteMessageAsync(request: DeleteMessageRequest, handler: AsyncHandler[DeleteMessageRequest, Void]) =
    submitVoid(deleteMessage, request, handler)

  override def deleteMessageBatchAsync(request: DeleteMessageBatchRequest) =
    submit(deleteMessageBatch, request)

  override def deleteMessageBatchAsync(
    request: DeleteMessageBatchRequest, handler: AsyncHandler[DeleteMessageBatchRequest, DeleteMessageBatchResult]) =
    submit(deleteMessageBatch, request, handler)

  // --- Permission operations -------------------------------------------------
  // NOTE(review): unlike the other Void-returning operations above, these four
  // use `submit` rather than `submitVoid`; presumably addPermission and
  // removePermission in FakeSQS already yield a Void-compatible value — confirm.

  override def addPermissionAsync(request: AddPermissionRequest) =
    submit(addPermission, request)

  override def addPermissionAsync(request: AddPermissionRequest, handler: AsyncHandler[AddPermissionRequest, Void]) =
    submit(addPermission, request, handler)

  override def removePermissionAsync(request: RemovePermissionRequest) =
    submit(removePermission, request)

  override def removePermissionAsync(
    request: RemovePermissionRequest, handler: AsyncHandler[RemovePermissionRequest, Void]) =
    submit(removePermission, request, handler)

  /** Shuts down the executor, giving in-flight tasks one second to complete. */
  override protected def dispose() {
    super.dispose()
    executor.shutdown()
    try
      executor.awaitTermination(1L, TimeUnit.SECONDS)
    catch {
      // Interruption is swallowed deliberately: we fall through to the forced
      // shutdown below either way.
      case e: InterruptedException =>
    }
    if (!executor.isTerminated)
      executor.shutdownNow()
  }

  /** Submits a task to be executed in the future. */
  private def submit[I <: AmazonWebServiceRequest, O](f: I => O, i: I): Future[O] =
    executor.submit(new Callable[O] {
      override def call() = f(i)
    })

  /** Submits a task to be executed in the future. */
  private def submitVoid[I <: AmazonWebServiceRequest](f: I => Unit, i: I): Future[Void] =
    submit[I, Void](f andThen (_ => null), i)

  /** Submits a task to be executed in the future. */
  private def submit[I <: AmazonWebServiceRequest, O](f: I => O, i: I, h: AsyncHandler[I, O]): Future[O] =
    executor.submit(new Callable[O] {
      override def call() = {
        try {
          val o = f(i)
          // Notify the handler on the worker thread before completing the future.
          h.onSuccess(i, o)
          o
        } catch {
          case e: Exception =>
            h.onError(e)
            throw e
        }
      }
    })

  /** Submits a task to be executed in the future. */
  private def submitVoid[I <: AmazonWebServiceRequest](f: I => Unit, i: I, h: AsyncHandler[I, Void]): Future[Void] =
    submit[I, Void](f andThen (_ => null), i, h)
} | lpryor/squishy | src/main/scala/squishy/fake/FakeSQSAsync.scala | Scala | apache-2.0 | 6,965 |
/*
Copyright 2014 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.macros.impl
import scala.reflect.macros.Context
import scala.util.{ Failure, Success }
import com.twitter.scalding._
import com.twitter.bijection.macros.{ IsCaseClass, MacroGenerated }
import com.twitter.bijection.macros.impl.IsCaseClassImpl
/**
 * Helper class for generating setters from case class to
 * other types. E.g. cascading Tuple, jdbc PreparedStatement
 */
object CaseClassBasedSetterImpl {

  /**
   * Builds, at compile time, the tree that copies a value of case-class type `T`
   * into `container`, delegating per-field writes to `fsetter`.
   *
   * @param container term name of the target object the generated code writes into
   * @param allowUnknownTypes when true, fields of unsupported type fall back to
   *                          `fsetter.default`; when false they abort compilation
   * @param fsetter strategy that knows how to set a single primitive field
   * @return the number of columns consumed and the setter tree; the tree assumes
   *         a value named `t` of type `T` is in scope
   */
  def apply[T](c: Context)(container: c.TermName, allowUnknownTypes: Boolean,
    fsetter: CaseClassFieldSetter)(implicit T: c.WeakTypeTag[T]): (Int, c.Tree) = {
    import c.universe._

    // A SetterBuilder knows how many columns a (possibly nested) value occupies
    // and how to emit the code that writes it starting at a given column offset.
    sealed trait SetterBuilder {
      def columns: Int
      /**
       * This Tree assumes that "val $value = ..." has been set
       */
      def setTree(value: Tree, offset: Int): Tree
    }

    // A single leaf value the field setter supports directly (one column).
    case class PrimitiveSetter(tpe: Type) extends SetterBuilder {
      def columns = 1
      def setTree(value: Tree, offset: Int) = fsetter.from(c)(tpe, offset, container, value) match {
        case Success(tree) => tree
        case Failure(e) => c.abort(c.enclosingPosition,
          s"Case class ${T} is supported. Error on $tpe, ${e.getMessage}")
      }
    }

    // Fallback for unknown types when allowUnknownTypes is enabled (one column).
    case object DefaultSetter extends SetterBuilder {
      def columns = 1
      def setTree(value: Tree, offset: Int) = fsetter.default(c)(offset, container, value)
    }

    // Option[_] wrapper: writes the inner value when defined, otherwise emits
    // the setter's "absent" representation. Occupies the inner setter's columns.
    case class OptionSetter(inner: SetterBuilder) extends SetterBuilder {
      def columns = inner.columns
      def setTree(value: Tree, offset: Int) = {
        val someVal = newTermName(c.fresh(s"someVal"))
        val someValTree = q"$someVal"
        q"""if($value.isDefined) {
          val $someVal = $value.get
          ${inner.setTree(someValTree, offset)}
        } else {
          ${fsetter.absent(c)(offset, container)}
        }"""
      }
    }

    // Nested case class: flattens each member into consecutive column ranges.
    case class CaseClassSetter(members: Vector[(Tree => Tree, SetterBuilder)]) extends SetterBuilder {
      val columns = members.map(_._2.columns).sum
      def setTree(value: Tree, offset: Int) = {
        // scanLeft threads the running column offset through the members while
        // collecting one setter statement per member.
        val setters = members.scanLeft((offset, Option.empty[Tree])) {
          case ((off, _), (access, sb)) =>
            val cca = newTermName(c.fresh(s"access"))
            val ccaT = q"$cca"
            (off + sb.columns, Some(q"val $cca = ${access(value)}; ${sb.setTree(ccaT, off)}"))
        }
          .collect { case (_, Some(tree)) => tree }
        q"""..$setters"""
      }
    }

    // Repeatedly applies Type#normalize until it reaches a fixed point.
    @annotation.tailrec
    def normalized(tpe: Type): Type = {
      val norm = tpe.normalize
      if (!(norm =:= tpe))
        normalized(norm)
      else
        tpe
    }

    // Recursively chooses the SetterBuilder for a field type: primitive if the
    // field setter accepts it, then Option, then nested case class, then the
    // default (if allowed); anything else aborts the macro expansion.
    def matchField(outerType: Type): SetterBuilder = {
      // we do this just to see if the setter matches.
      val dummyIdx = 0
      val dummyTree = q"t"
      outerType match {
        case tpe if fsetter.from(c)(tpe, dummyIdx, container, dummyTree).isSuccess =>
          PrimitiveSetter(tpe)
        case tpe if tpe.erasure =:= typeOf[Option[Any]] =>
          val innerType = tpe.asInstanceOf[TypeRefApi].args.head
          OptionSetter(matchField(innerType))
        case tpe if (tpe.typeSymbol.isClass && tpe.typeSymbol.asClass.isCaseClass) =>
          CaseClassSetter(expandMethod(normalized(tpe)).map {
            case (fn, tpe) =>
              (fn, matchField(tpe))
          })
        case tpe if allowUnknownTypes =>
          DefaultSetter
        case _ =>
          c.abort(c.enclosingPosition,
            s"Case class ${T.tpe} is not supported at type: $outerType")
      }
    }

    // Lists, for each case accessor of outerTpe, an accessor-tree builder and
    // the (normalized) field type as seen from the outer type.
    def expandMethod(outerTpe: Type): Vector[(Tree => Tree, Type)] =
      outerTpe
        .declarations
        .collect { case m: MethodSymbol if m.isCaseAccessor => m }
        .map { accessorMethod =>
          val fieldType = normalized(accessorMethod.returnType.asSeenFrom(outerTpe, outerTpe.typeSymbol.asClass))
          ({ pTree: Tree => q"""$pTree.$accessorMethod""" }, fieldType)
        }
        .toVector

    // in TupleSetterImpl, the outer-most input val is called t, so we pass that in here:
    val sb = matchField(normalized(T.tpe))
    if (sb.columns == 0) c.abort(c.enclosingPosition, "Didn't consume any elements in the tuple, possibly empty case class?")
    (sb.columns, sb.setTree(q"t", 0))
  }
}
| sriramkrishnan/scalding | scalding-core/src/main/scala/com/twitter/scalding/macros/impl/CaseClassBasedSetterImpl.scala | Scala | apache-2.0 | 4,797 |
/*
* Artificial Intelligence for Humans
* Volume 2: Nature Inspired Algorithms
* Java Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
*
* Copyright 2014 by Jeff Heaton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
package com.heatonresearch.aifh.examples.capstone.model.milestone3
import au.com.bytecode.opencsv.CSVWriter
import com.heatonresearch.aifh.examples.capstone.model.TitanicConfig
import com.heatonresearch.aifh.examples.capstone.model.milestone1.NormalizeTitanic
import com.heatonresearch.aifh.examples.capstone.model.milestone1.TitanicStats
import com.heatonresearch.aifh.examples.capstone.model.milestone2.CrossValidate
import com.heatonresearch.aifh.examples.capstone.model.milestone2.CrossValidateFold
import com.heatonresearch.aifh.examples.capstone.model.milestone2.FitTitanic
import com.heatonresearch.aifh.learning.RBFNetwork
import java.io._
import java.text.SimpleDateFormat
import java.util.Date
/**
 * The final titanic milestone. We use the test data from Kaggle and prepare a submission.
 */
object SubmitTitanic {

  /**
   * Entry point: resolves the data directory, fits the model via cross-validation
   * and writes a Kaggle submission.
   *
   * @param args The path to the data file.
   */
  def main(args: Array[String]) {
    // The data directory comes either from the single CLI argument or, when
    // absent, from the FILENAME system property (set by the gradle task).
    val resolved: Option[String] =
      if (args.length == 1) Some(args(0))
      else Option(System.getProperty("FILENAME"))

    resolved match {
      case None =>
        // No directory supplied anywhere: print usage and stop.
        println("Please call this program with a single parameter that specifies your data directory.\\n" + "If you are calling with gradle, consider:\\n" + "gradle runCapstoneTitanic3 -Pdata_path=[path to your data directory]\\n")
        System.exit(0)
      case Some(filename) =>
        val dataPath = new File(filename)

        // Fit the model over the training data and keep the best network found.
        val fit = new FitTitanic
        fit.process(dataPath)
        val bestNetwork = fit.getBestNetwork

        // Produce the submission files from the best network.
        val submit = new SubmitTitanic
        submit.submit(dataPath, bestNetwork, fit.getCrossvalidation)
    }
  }
}
class SubmitTitanic {
  /**
   * Prepare a Kaggle submission for Titanic.
   *
   * Writes two files into the data directory: a text file describing the
   * cross-validation scores and the network's long-term memory, and a CSV with
   * one (PassengerId, Survived) row per test passenger.
   *
   * @param dataPath The data path.
   * @param bestNetwork The best network.
   * @param cross The cross validated data.
   */
  def submit(dataPath: File, bestNetwork: RBFNetwork, cross: CrossValidate) {
    try {
      // Timestamp used to make the output file names unique per run.
      val now: String = new SimpleDateFormat("yyyyMMddhhmm").format(new Date)
      val trainingPath = new File(dataPath, TitanicConfig.TrainingFilename)
      val testPath = new File(dataPath, TitanicConfig.TestFilename)
      // Cross-validation score scaled to four decimal digits for the file name.
      val score: Int = (cross.getScore * 10000).asInstanceOf[Int]
      val submitPath = new File(dataPath, "submit-" + now + "_" + score + ".csv")
      val submitInfoPath = new File(dataPath, "submit-" + now + ".txt")
      // 1) Write the human-readable info file with per-fold scores.
      // NOTE(review): pw is not closed in a finally block, so the writer leaks
      // if a write fails — presumably acceptable for this example code; confirm.
      val pw: PrintWriter = new PrintWriter(new FileWriter(submitInfoPath))
      pw.println("Crossvalidation stats:")
      for(i <- 0 until cross.size) {
        val fold: CrossValidateFold = cross.folds.get(i)
        pw.println("Fold #" + (i + 1) + " : Score: " + fold.score)
      }
      pw.println("Average Score: " + cross.getScore)
      pw.println()
      pw.println(java.util.Arrays.toString(bestNetwork.getLongTermMemory))
      pw.close()
      // 2) Write the submission CSV itself.
      val fos = new FileOutputStream(submitPath)
      val csv = new CSVWriter(new OutputStreamWriter(fos))
      csv.writeNext(Array[String]("PassengerId", "Survived"))
      // Normalization statistics are gathered over both training and test data.
      val stats = new TitanicStats
      NormalizeTitanic.analyze(stats, trainingPath)
      NormalizeTitanic.analyze(stats, testPath)
      // ids receives the passenger ids in the same order as the normalized rows.
      val ids: java.util.List[String] = new java.util.ArrayList[String]
      val training = NormalizeTitanic.normalize(stats, testPath, ids, TitanicConfig.InputNormalizeLow, TitanicConfig.InputNormalizeHigh, TitanicConfig.PredictSurvive, TitanicConfig.PredictPerish)
      var idx: Int = 0
      for (data <- training) {
        // Threshold the single regression output at 0.5 to get a binary label.
        val output = bestNetwork.computeRegression(data.input)
        val survived: Int = if (output(0) > 0.5) 1 else 0
        val line: Array[String] = Array(ids.get(idx), "" + survived)
        csv.writeNext(line)
        idx += 1
      }
      csv.close()
      fos.close()
    }
    catch {
      case ex: IOException =>
        ex.printStackTrace()
    }
  }
} | PeterLauris/aifh | vol2/vol2-scala-examples/src/main/scala/com/heatonresearch/aifh/examples/capstone/model/milestone3/SubmitTitanic.scala | Scala | apache-2.0 | 4,764 |
package advanced
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import scala.concurrent.duration._
import Headers._
/** Gatling load-test scenario: visit the site, log in with credentials from a
  * CSV feeder, then log out. Expected redirects are asserted via status 302. */
object SomeOtherScenario {

  val otherScn = scenario("Other Scenario Name")
    // Landing page; the server is expected to redirect (302).
    .exec(
      http("other_request_1")
        .get("/")
        .check(status.is(302)))
    .pause(0 milliseconds, 100 milliseconds)
    .exec(
      http("other_request_2")
        .get("/public/login.html"))
    .pause(12, 13)
    // Each virtual user draws a (username, password) pair from the CSV file.
    .feed(csv("user_credentials.csv"))
    .exec(
      http("other_request_3")
        .post("/login")
        .param("username", "${username}")
        .param("password", "${password}")
        .check(status.is(302)))
    .pause(0 milliseconds, 100 milliseconds)
    // Log out and land back on the login page.
    .exec(
      http("other_request_9")
        .get("/logout")
        .headers(headers_1)
        .check(status.is(302)))
    .pause(0 milliseconds, 100 milliseconds)
    .exec(
      http("other_request_10")
        .get("/public/login.html"))
}
| honghaier2020/chess-https | srv/gatling2.0.0/user-files/simulations/advanced/SomeOtherScenario.scala | Scala | mit | 878 |
// NOTE(review): this appears to be an IntelliJ change-signature refactoring
// fixture (the expected "_after" output) — presumably compared verbatim by the
// test harness, so verify before modifying anything here.
object StaticMethodScala {
  val x = StaticMethod.bar(1, true)
  val f = (i: Int) => StaticMethod.foo(i, true)
} | LPTK/intellij-scala | testdata/changeSignature/fromJava/StaticMethod_after.scala | Scala | apache-2.0 | 113 |
/*
* Copyright (c) 2012 Dame Ningen.
* All rights reserved.
*
* This file is part of Gausel.
*
* Gausel is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Gausel is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Gausel. If not, see <http://www.gnu.org/licenses/>.
*/
package gausel.test
import gausel.data._
/** Tests basic System features.
 *
 * Exercises matrix construction, line reorganization, line arithmetic
 * (multiplication and subtraction) and in-place line updates, printing the
 * system after each step.
 *
 * @author dameNingen <dame.ningen@mail.com>
 * @version $Revision$
 * $Id$
 */
object TestSystem extends App with gausel.lib.Verb {

  // Verbose related stuff.
  val name = "TestSystem"
  val verbLevel = 1
  val color = Colors.cyan

  // Creating a matrix as a List[List[Option[Arith]]].
  // Easily done by a parser reading a file.
  // Note that the matrix is filled, zeros are None-s but
  // they are present.
  val (a13,a21,a22,a32,a33) =
    (Ident("a13"),Ident("a21"),Ident("a22"),Ident("a32"),Ident("a33"))
  val (b1,b2,b3) =
    (Ident("b1"),Ident("b2"),Ident("b3"))
  val matrix: List[List[Option[Arith]]] =
    (None :: None :: Some(a13) :: Nil) ::
    (Some(a21) :: Some(a22) :: None :: Nil) ::
    (None :: Some(a32) :: Some(a33) :: Nil) :: Nil

  // Creating the vector as a List[Option[Arith]].
  val vector : List[Option[Arith]] = Some(b1) :: Some(b2) :: Some(b3) :: Nil

  // Creating the actual system class.
  val system = new System(matrix,vector)

  /** Prints the system with each line prefixed by two spaces. */
  def verbSystem() = {
    verbln("System:")
    // verbln("  " + system.toStringList)
    verbList(system.toStringList.map("  " +_),1)
    verbln(1)
  }

  verbln(1)
  verbSystem()

  verbln("Testing cardFirstZeros.")
  verbln("  " + system.cardFirstZeros())
  verbln(1)

  // Reorder the system's lines and verify the leading-zero counts change accordingly.
  val newOrder = 2::3::1::Nil
  verbln("Testing line reorganization: " + newOrder + ".")
  system reorganizeLines newOrder
  verbSystem()

  verbln("Retesting cardFirstZeros.")
  verbln("  " + system.cardFirstZeros())
  verbln(1)

  // Line arithmetic: scale line 1 by a symbolic factor, subtract it from line 2.
  val (line1,line2) = (system.getLine(1),system.getLine(2))
  val factor = Ident("[factor]")
  verbln("Testing line multiplication:")
  verbln("  " + line1)
  verbln("  times " + factor)
  val factorLine1 = line1 mult factor
  verbln("  result: " + factorLine1)
  verbln(1)

  verbln("Testing line substraction:")
  verbln("  " + line2)
  verbln("  minus " + factorLine1)
  val subLine = line2 minus factorLine1
  verbln("  result: " + subLine)
  verbln(1)

  // Write the computed line back into the system in place.
  verbln("Now updating line 2 of the system with the result.")
  system.updateLine(2,subLine)
  verbSystem

  verbln("That was pretty nice, I'm done.")
  verbln("See ya.")
  verbln(1)
}
| Uchuu/Gausel | trunk/src/test/scala/TestSystem.scala | Scala | gpl-3.0 | 3,031 |
package scoverage
import scoverage.Platform.File
import org.scalatest.{BeforeAndAfter, FunSuite}
/**
 * Verify that [[Invoker.invoked()]] can handle a multi-module project
 */
class InvokerMultiModuleTest extends FunSuite with BeforeAndAfter {

  // Two distinct measurement directories simulate two instrumented modules
  // sharing one JVM.
  val measurementDir = Array(
    new File("target/invoker-test.measurement0"),
    new File("target/invoker-test.measurement1")
  )

  before {
    deleteMeasurementFiles()
    measurementDir.foreach(_.mkdirs())
  }

  test("calling Invoker.invoked on with different directories puts measurements in different directories") {
    val testIds: Set[Int] = (1 to 10).toSet

    // Even ids are recorded in directory 0, odd ids in directory 1.
    testIds.map { i: Int => Invoker.invoked(i, measurementDir(i % 2).toString) }

    // Verify measurements went to correct directory.
    // BUG FIX: the comparisons were previously bare `===` expressions whose
    // boolean results were discarded, so the test could never fail; they are
    // now wrapped in assert.
    val measurementFiles0 = Invoker.findMeasurementFiles(measurementDir(0))
    val idsFromFile0 = Invoker.invoked(measurementFiles0.toIndexedSeq)
    assert(idsFromFile0 === testIds.filter { i: Int => i % 2 == 0 })

    // BUG FIX: this previously re-read measurementDir(0) instead of measurementDir(1).
    val measurementFiles1 = Invoker.findMeasurementFiles(measurementDir(1))
    val idsFromFile1 = Invoker.invoked(measurementFiles1.toIndexedSeq)
    assert(idsFromFile1 === testIds.filter { i: Int => i % 2 == 1 })
  }

  after {
    deleteMeasurementFiles()
    measurementDir.foreach(_.delete)
  }

  /** Removes every file inside both measurement directories (but not the directories). */
  private def deleteMeasurementFiles(): Unit = {
    measurementDir.foreach((md) => {
      if (md.isDirectory)
        md.listFiles().foreach(_.delete())
    })
  }
}
| gslowikowski/scalac-scoverage-plugin | scalac-scoverage-runtime/shared/src/test/scala/scoverage/InvokerMultiModuleTest.scala | Scala | apache-2.0 | 1,428 |
package com.twitter.finagle.ssl
/**
 * SslConfigurationException is an exception which is thrown by
 * a particular engine factory when the engine factory does not
 * support the collection of parameters specified by the
 * [[SslClientConfiguration]] or the [[SslServerConfiguration]].
 *
 * @param msg description of the unsupported configuration.
 * @param cause underlying cause; may be null when there is none
 *   (see the `notSupported` factory in the companion object).
 *
 * NOTE(review): a case class extending Exception inherits structural
 * equals/hashCode, so two distinct exception instances with the same message
 * compare equal — confirm nothing relies on exception reference identity.
 */
case class SslConfigurationException(
  msg: String,
  cause: Throwable)
  extends Exception(msg, cause)
private[finagle] object SslConfigurationException {

  /**
   * Builds the standard "not supported" configuration error reported by an
   * engine factory for a feature it cannot honor. The cause is left null.
   */
  def notSupported(featureName: String, engineFactoryName: String): SslConfigurationException =
    SslConfigurationException(
      featureName + " is not supported at this time for " + engineFactoryName,
      null)
}
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/ssl/SslConfigurationException.scala | Scala | apache-2.0 | 665 |
package cilib
import scalaz._
import Scalaz._
object Hypothesis {
  /**
   * Pearson chi-squared goodness-of-fit test that `sample` is drawn from the
   * uniform distribution on [0, 1).
   *
   * @param bins     number of equal-width histogram bins to test against.
   * @param critical critical value of the chi-squared statistic; the test
   *                 passes when the computed statistic is below it.
   * @param sample   values to test; every value must lie in [0, 1) for the
   *                 test to pass.
   * @return true when the statistic is under `critical` AND all samples are
   *         in [0, 1).
   */
  def chiSquared(bins: Int, critical: Double, sample: Vector[Double]) = {
    val n = sample.size
    // FIX: honour the `bins` parameter (previously hard-coded to 10).
    val b = bins

    // Under the uniform hypothesis every bin has the same expected count.
    // Note: integer division, matching the original behavior.
    val expected = Range.inclusive(1, b).map(_ => n / b).toList
    // Observed count per bin; bin index of x is floor(x * b).
    val observed = sample.groupBy(x => (x * b).toInt).toList.map(x => x._2.length)

    // Contribution of one bin to the statistic: (O - E)^2 / E.
    def calc(o: Int, e: Int): Double = {
      val dev = o - e
      (dev * dev) / e.toDouble
    }

    // Pad the shorter list with None so empty bins count as observed = 0.
    val sum = Align[List].pad(observed, expected).foldLeft(0.0)((a, c) => a + (c match {
      case (Some(o), Some(e)) => calc(o, e)
      case (None, Some(e)) => calc(0, e)
      case _ => sys.error("impossible")
    }))

    // FIX: compare against the supplied `critical` value (previously
    // hard-coded to 27.83, i.e. roughly the df=9, alpha=0.001 quantile).
    sum < critical && sample.forall(x => x >= 0.0 && x < 1.0)
  }
}
| robgarden/cilib | tests/src/test/scala/cilib/Hypothesis.scala | Scala | gpl-3.0 | 823 |
package com.grandata.commons.files
import java.nio.file.{Files}
import java.nio.file.FileSystem
import scala.util.Try
import java.nio.file.Path
/**
 * Glob-style path expansion over an abstract [[java.nio.file.FileSystem]]
 * (injected via [[FileSystemComponent]] so tests can swap file systems).
 */
trait FileSystemComponent {
  /** The file system that glob patterns are resolved against. */
  def fileSystem: FileSystem
}

trait GlobImpl {
  this: FileSystemComponent =>

  // Explicit converters instead of the deprecated implicit JavaConversions.
  import scala.collection.JavaConverters._

  // Characters that make a path segment a glob pattern: * ? [ {
  // FIX: the previous pattern ("""[*?\\[\\{]""") was written as if backslash
  // escaped inside a triple-quoted string; it does not, so a literal
  // backslash was also (wrongly) treated as a glob metacharacter.
  private lazy val magicRegex = """[*?\[{]""".r

  /** True when `pattern` contains no glob metacharacters. */
  private def withoutMagic(pattern: String): Boolean = magicRegex.findFirstIn(pattern).isEmpty

  /**
   * Emits `foundPath` itself when it matched the final pattern segment;
   * otherwise recurses into it with the remaining segments (directories only).
   */
  private def processPath(magic: Array[String], foundPath: Path): Iterator[String] = {
    if (magic.size <= 1)
      Iterator(foundPath.toString)
    else if (Files.isDirectory(foundPath))
      iglob(foundPath.toString, magic.tail)
    else
      Iterator.empty
  }

  /**
   * Recursive worker: expands the remaining path segments `patterns`
   * relative to `basedir`.
   */
  private def iglob(basedir: String, patterns: Array[String]): Iterator[String] = {
    if (patterns.isEmpty) Iterator.empty
    else {
      // Fold every leading literal (non-magic) segment into the base path.
      val (noMagic, magic) = patterns.span(withoutMagic)
      val newBase = fileSystem.getPath(basedir, noMagic.mkString("/"))
      magic.headOption match {
        case Some(magicHead) =>
          // List newBase filtered by the first magic segment; a missing or
          // unreadable directory simply yields no matches.
          Try(Files.newDirectoryStream(newBase, magicHead))
            .map(stream =>
              try {
                stream.asScala.map { foundPath =>
                  processPath(magic, foundPath)
                }.fold(Iterator.empty)(_ ++ _)
              } finally {
                stream.close()
              }
            ).getOrElse(Iterator.empty)
        case None =>
          // Fully literal pattern: it matches itself iff it exists.
          if (Files.exists(newBase)) {
            Iterator(newBase.toString)
          } else {
            Iterator.empty
          }
      }
    }
  }

  /**
   * Expands a glob `pattern` (segments separated by '/') into the matching
   * paths on `fileSystem`. Absolute patterns are resolved from the root.
   */
  def glob(pattern: String): Iterator[String] = {
    if (withoutMagic(pattern)) {
      if (Files.exists(fileSystem.getPath(pattern)))
        Iterator(pattern)
      else
        Iterator.empty
    } else {
      iglob(if (pattern.startsWith("/")) "/" else "", pattern.split("/"))
    }
  }
}
| GranData/grandata-commons | src/main/scala/com/grandata/commons/files/GlobImpl.scala | Scala | mit | 2,006 |
package rpgboss.player
import com.badlogic.gdx.Gdx
import com.badlogic.gdx.graphics.Texture
import rpgboss.lib._
import rpgboss.model._
import rpgboss.model.Constants._
import rpgboss.model.battle._
import rpgboss.model.resource._
import rpgboss.player.entity._
import Predef._
import com.typesafe.scalalogging.slf4j.LazyLogging
import org.mozilla.javascript.NativeObject
import rpgboss.save.SaveFile
import rpgboss.save.SaveInfo
import rpgboss.model.event.EventJavascript
import com.badlogic.gdx.graphics.Color
import com.badlogic.gdx.graphics.g2d.SpriteBatch
import com.badlogic.gdx.graphics.Color
import org.mozilla.javascript.Context
import org.mozilla.javascript.ScriptableObject
import scalaj.http.Http
import com.badlogic.gdx.graphics.glutils.ShapeRenderer.ShapeType
/**
 * Snapshot of an entity's position: (x, y) and facing `dir` are in map
 * coordinates; the screen* / width / height fields are in screen pixels
 * (see EntityInfo.apply for how they are derived).
 */
case class EntityInfo(x: Float = 0, y: Float = 0, dir: Int = 0,
screenX: Float = 0, screenY: Float = 0, screenTopLeftX: Float = 0, screenTopLeftY: Float = 0, width: Float = 0, height: Float = 0)
object EntityInfo {
  /**
   * Builds an EntityInfo for `e`, projecting its map-tile position into
   * screen pixels using the map screen's camera and tile scale.
   */
  def apply(e: Entity, mapScreen: MapScreen): EntityInfo = {
    // Pixel size of one map tile on screen, per axis.
    val tileW = mapScreen.screenW / mapScreen.screenWTiles
    val tileH = mapScreen.screenH / mapScreen.screenHTiles

    // Entity centre on screen: offset from the camera, scaled to pixels,
    // relative to the screen centre.
    val centerX = (e.x - mapScreen.camera.x) * tileW + (mapScreen.screenW / 2)
    val centerY = (e.y - mapScreen.camera.y) * tileH + (mapScreen.screenH / 2)

    // On-screen sprite extents in pixels.
    val w = tileW * e.graphicW
    val h = tileH * e.graphicH

    EntityInfo(e.x, e.y, e.dir, centerX, centerY, centerX - w / 2, centerY - h / 2, w, h)
  }
}
/**
 * Pair of battle stats used when previewing an equipment change:
 * `current` with the present gear, `proposed` with the candidate gear.
 */
case class CurrentAndProposedStats(
current: BattleStats, proposed: BattleStats)
/**
 * Constants exposed to the scripting (JavaScript) layer: text justification
 * values, keys for the persisted game state, and layout enum values.
 */
trait HasScriptConstants {
// Window text justification.
val LEFT = Window.Left
val CENTER = Window.Center
val RIGHT = Window.Right
// Keys into the persistent game state.
val PLAYER_LOC = "playerLoc"
def VEHICLE_LOC(vehicleId: Int) = "vehicleLoc-%d".format(vehicleId)
val GOLD = "gold"
val PLAYER_MOVEMENT_LOCKS = "playerMovementLocks"
val EVENTS_ENABLED = "eventsEnabled"
val MENU_ENABLED = "menuEnabled"
val PARTY = "party"
val INVENTORY_ITEM_IDS = "inventoryIdxs"
val INVENTORY_QTYS = "inventoryQtys"
val CHARACTER_NAMES = "characterNames"
val CHARACTER_LEVELS = "characterLevels"
val CHARACTER_HPS = "characterHps"
val CHARACTER_MPS = "characterMps"
val CHARACTER_EXPS = "characterExps"
val CHARACTER_ROWS = "characterRow"
// Per-character keys (parameterized by character id).
def CHARACTER_EQUIP(characterId: Int) =
"characterEquip-%d".format(characterId)
def CHARACTER_STATUS_EFFECTS(characterId: Int) =
"characterStatusEffects-%d".format(characterId)
def CHARACTER_LEARNED_SKILLS(characterId: Int) =
"characterLearnedSkills-%d".format(characterId)
// Synchronized with LayoutType RpgEnum.
val CENTERED = 0
val NORTH = 1
val EAST = 2
val SOUTH = 3
val WEST = 4
val NORTHEAST = 5
val SOUTHEAST = 6
val SOUTHWEST = 7
val NORTHWEST = 8
// Synchronized with SizeType RpgEnum.
val FIXED = 0
val SCALE_SOURCE = 1
val SCREEN = 2
val COVER = 3
val CONTAIN = 4
}
/**
* ScriptInterface is bound to a particular screen.
*
* These methods should be called only from scripting threads. Calling these
* methods on the Gdx threads will likely cause deadlocks.
*
* @param game Allowed to be null so this may be used when there is only an
* activeScreen.
*
* TODO: Eliminate the mapScreen argument. Map-related scripting commands should
* probably not even be defined when there is no map.
*/
class ScriptInterface(
game: RpgGame,
activeScreen: RpgScreen)
extends HasScriptConstants
with ThreadChecked
with LazyLogging {
assume(activeScreen != null)
private def mapScreen = game.mapScreen
private def persistent = game.persistent
def project = game.project
/**
 * Runs `op` on the bound (Gdx render) thread and returns its result.
 *
 * If the caller is already on the bound thread, `op` runs inline — posting
 * and blocking from the bound thread would deadlock the game.
 */
def syncRun[T](op: => T): T = {
def f() = {
try {
op
} catch {
// Script bodies may use `return` inside the closure passed here; scalac
// implements a non-local return by throwing NonLocalReturnControl.
// Unwrap it so the intended value is returned instead of the throwable
// escaping on the Gdx thread.
case e: scala.runtime.NonLocalReturnControl[T] => e.value
}
}
if (onBoundThread())
f()
else
GdxUtils.syncRun(f)
}
/*
* The below functions are all called from the script threads only.
*/
/*
* Accessors to various game data
*/
def getScreenH() = project.data.startup.screenH
def getScreenW() = project.data.startup.screenW
def getMap(loc: MapLoc) =
RpgMap.readFromDisk(project, loc.map)
def layout(layoutTypeId: Int, sizeTypeId: Int, wArg: Float, hArg: Float) =
Layout(layoutTypeId, sizeTypeId, wArg, hArg)
def layoutWithOffset(layoutTypeId: Int, sizeTypeId: Int,
wArg: Float, hArg: Float,
xOffset: Float, yOffset: Float) =
Layout(layoutTypeId, sizeTypeId, wArg, hArg, xOffset, yOffset)
/*
* Things to do with the player's location and camera
*/
def setPlayerLoc(mapName: String, x: Float, y: Float) = syncRun {
game.setPlayerLoc(MapLoc(mapName, x, y))
}
/**
* FIXME: This method has a lot of problems.
*
* 1. This method cannot be posted to the main Gdx thread, as it has some
* game.sleep(...) calls, which would hang the whole game.
*
* 2. Unfortunately, game.setPlayerLoc will kill any event script threads,
* which is generally the thread that this is running on.
*
* Thus the only way to use this safely is to call the setPlayerLoc method
* LAST in this whole function. That means that in transitions.js,
* setPlayerLoc must be the last call.
*
* Unfortunately this still causes a whole barf of exceptions to appear every
* time the player switches maps.
*
* This is very fragile and should be simplified / removed. Probably by
* removing the custom transitions scripts.
*/
def teleport(mapName: String, x: Float, y: Float,
transitionId: Int = Transitions.FADE.id) = {
val loc = MapLoc(mapName, x, y)
val settedTransition = getInt("useTransition")
var transition = Transitions.get(transitionId)
val fadeDuration = Transitions.fadeLength
if (settedTransition != -1) transition = Transitions.get(settedTransition)
stopSound()
// This must be last, as the actual setPlayerLoc method is called in these
// methods, and the thread is killed once that method is called.
game.mapScreen.scriptFactory.runFromFile(
ResourceConstants.transitionsScript,
"transition" + transition + "('" + mapName + "'," + x.toString() + "," + y.toString() + "," + fadeDuration.toString() + ")",
runOnNewThread = false)
}
def placeVehicle(vehicleId: Int, mapName: String, x: Float,
y: Float) = syncRun {
val loc = MapLoc(mapName, x, y)
if (!loc.isEmpty) {
mapScreen.mapAndAssetsOption.map { mapAndAssets =>
if (mapAndAssets.mapName == mapName) {
mapScreen.insertVehicleEntity(vehicleId, loc)
}
}
}
persistent.setLoc(VEHICLE_LOC(vehicleId), loc)
}
def setPlayerInVehicle(inVehicle: Boolean, vehicleId: Int) = syncRun {
mapScreen.playerEntity.setInVehicle(inVehicle, vehicleId)
}
/**
* Moves the map camera.
*/
def moveCamera(dx: Float, dy: Float, async: Boolean, duration: Float) = {
val move = syncRun { mapScreen.camera.enqueueMove(dx, dy, duration) }
if (!async)
move.awaitFinish()
}
/**
* Gets the position of the map camera.
*/
def getCameraPos() = syncRun {
mapScreen.camera.info
}
def setCameraFollowEvent(eventId: Int) = syncRun {
mapScreen.setCameraFollow(Some(eventId))
}
def setCameraFollowPlayer() = syncRun {
mapScreen.setCameraFollow(Some(EntitySpec.playerEntityId))
}
def setCameraFollowNone() = syncRun {
mapScreen.setCameraFollow(None)
}
/*
* Things to do with the screen
*/
def setTransition(
endAlpha: Float,
duration: Float) = syncRun {
activeScreen.windowManager.setTransition(endAlpha, duration)
}
def shakeScreen(xAmplitude: Float, yAmplitude: Float, frequency: Float,
duration: Float) = syncRun {
activeScreen.shakeManager.startShake(
xAmplitude, yAmplitude, frequency, duration)
}
/**
* @param r Between 0.0f and 1.0f.
* @param g Between 0.0f and 1.0f.
* @param b Between 0.0f and 1.0f.
* @param a Between 0.0f and 1.0f.
* @param fadeDuration In seconds. 0f means instantaneous
*/
def tintScreen(r: Float, g: Float, b: Float, a: Float,
fadeDuration: Float) = syncRun {
def activeScreenTint = activeScreen.windowManager.tintColor
// If no existing tint, set color immediately and tween alpha only.
if (activeScreenTint.a == 0) {
activeScreenTint.set(r, g, b, 0f)
}
activeScreen.windowManager.tintTweener.tweenTo(new Color(r, g, b, a),
fadeDuration)
}
def overrideMapBattleSettings(battleBackground: String,
battleMusic: String,
battleMusicVolume: Float,
randomEncountersOn: Boolean) = syncRun {
mapScreen.mapAndAssetsOption map {
_.setOverrideBattleSettings(battleBackground, battleMusic,
battleMusicVolume, randomEncountersOn)
}
}
def startBattle(encounterId: Int) = {
syncRun {
game.startBattle(encounterId)
}
// Blocks until the battle screen finishes on way or the other
game.battleScreen.finishChannel.read
}
def setTimer(time: Int) = {
setInt("timer", time)
}
def clearTimer() = {
// set it way below 0 to does not make problems with conditions
setInt("timer", -5000)
}
def endBattleBackToMap() = {
setTransition(1, 0.5f)
sleep(0.5f)
syncRun {
game.setScreen(game.mapScreen)
// TODO fix hack of manipulating mapScreen directly
game.mapScreen.windowManager.setTransition(0, 0.5f)
game.battleScreen.endBattle()
}
}
def showPicture(slot: Int, name: String, layout: Layout) = syncRun {
activeScreen.windowManager.showPictureByName(slot, name, layout, 1.0f)
}
def showPicture(slot: Int, name: String, layout: Layout, alpha: Float) = syncRun {
activeScreen.windowManager.showPictureByName(slot, name, layout, alpha)
}
def showPictureLoop(slot: Int, folderPath: String, layout: Layout,
alpha: Float, fps: Int) = syncRun {
activeScreen.windowManager.showPictureLoop(slot, folderPath, layout, alpha, fps)
}
def hidePicture(slot: Int) = syncRun {
activeScreen.windowManager.hidePicture(slot)
}
def playMusic(slot: Int, specOpt: Option[SoundSpec],
loop: Boolean, fadeDuration: Float) = syncRun {
activeScreen.playMusic(slot, specOpt, loop, fadeDuration)
}
def playMusic(slot: Int, music: String, volume: Float, loop: Boolean,
fadeDuration: Float) = syncRun {
activeScreen.playMusic(
slot, Some(SoundSpec(music, volume)), loop, fadeDuration)
}
def stopMusic(slot: Int, fadeDuration: Float) = syncRun {
activeScreen.playMusic(
slot, None, false, fadeDuration)
}
def playAnimation(animationId: Int, screenX: Float, screenY: Float,
speedScale: Float, sizeScale: Float) = syncRun {
activeScreen.playAnimation(animationId,
new FixedAnimationTarget(screenX, screenY), speedScale, sizeScale)
}
def playAnimationOnEvent(animationId: Int, eventId: Int, speedScale: Float,
sizeScale: Float) = {
mapScreen.allEntities.get(eventId) map { entity =>
activeScreen.playAnimation(animationId,
new MapEntityAnimationTarget(mapScreen, entity),
speedScale, sizeScale)
}
}
def playAnimationOnPlayer(animationId: Int, speedScale: Float,
sizeScale: Float) = {
activeScreen.playAnimation(animationId,
new MapEntityAnimationTarget(mapScreen, mapScreen.playerEntity),
speedScale, sizeScale)
}
def playSound(sound: String) = syncRun {
activeScreen.playSound(SoundSpec(sound))
}
def playSound(sound: String, volume: Float, pitch: Float) = syncRun {
activeScreen.playSound(SoundSpec(sound, volume, pitch))
}
def stopSound() = syncRun {
activeScreen.stopSound()
}
/**
 * Performs a blocking HTTP GET against `url` and returns the response body.
 *
 * FIX: previously returned `result.toString()`, which is the string form of
 * the whole HttpResponse object (body, status code AND headers) rather than
 * the body text a String-returning httpRequest implies.
 *
 * Note: blocking network call — invoke from script threads only, never from
 * the Gdx render thread.
 */
def httpRequest(url: String): String = {
  Http(url).asString.body
}
/*
* Things to do with user interaction
*/
def sleep(duration: Float) = {
assert(!onBoundThread(),
"Do not use game.sleep on the main GDX thread. That hangs the game.")
Thread.sleep((duration * 1000).toInt)
}
/**
* TODO: This is named different currently to allow newChoiceWindow to call
* into this and use its default arguments. This should be renamed.
*/
def newChoiceWindow(
lines: Array[String],
layout: Layout,
options: NativeObject): ChoiceWindow#ChoiceWindowScriptInterface = {
newChoiceWindow(
lines, layout,
JsonUtils.nativeObjectToCaseClass[TextChoiceWindowOptions](options))
}
def newChoiceWindow(
lines: Array[String],
layout: Layout,
options: TextChoiceWindowOptions): ChoiceWindow#ChoiceWindowScriptInterface = {
val window = syncRun {
new TextChoiceWindow(
game.persistent,
activeScreen.windowManager,
activeScreen.inputs,
lines,
layout,
options)
}
window.scriptInterface
}
/**
 * Choices are arrays of [x, y, w, h] in screen coordinates. Returns either
 * the choice index, or -1 if the choices were invalid (empty list, a choice
 * that is not exactly 4 elements, or a non-positive width/height).
 */
def getSpatialChoice(choices: Array[Array[Int]], defaultChoice: Int): Int = {
  // Validate up-front: every choice must be [x, y, w, h] with w > 0, h > 0.
  val wellFormed =
    choices.nonEmpty &&
      choices.forall(c => c.length == 4 && c(2) > 0 && c(3) > 0)

  if (!wellFormed)
    -1
  else
    getSpatialChoice(
      choices.map(x => Set(Rect(x(0), x(1), x(2), x(3)))), defaultChoice)
}
/**
* TODO: No idea how this would be called from Javascript, but it's convenient
* from Scala.
*/
def getSpatialChoice(choices: Array[Set[Rect]],
defaultChoice: Int): Int = {
assert(!choices.isEmpty)
val window = syncRun {
new SpatialChoiceWindow(
game.persistent,
activeScreen.windowManager,
activeScreen.inputs,
choices,
defaultChoice = defaultChoice)
}
val choice = window.scriptInterface.getChoice()
window.scriptInterface.close()
choice
}
def newTextWindow(text: Array[String], layout: Layout,
options: NativeObject): PrintingTextWindow#PrintingTextWindowScriptInterface = {
newTextWindow(text, layout,
JsonUtils.nativeObjectToCaseClass[PrintingTextWindowOptions](options))
}
def newTextWindow(
text: Array[String],
layout: Layout = Layout(SOUTH, FIXED, 640, 180),
options: PrintingTextWindowOptions = PrintingTextWindowOptions()): PrintingTextWindow#PrintingTextWindowScriptInterface = {
val window = syncRun {
new PrintingTextWindow(
game.persistent,
activeScreen.windowManager,
activeScreen.inputs,
text,
layout,
options)
}
window.scriptInterface
}
def showTextScala(
text: Array[String],
options: PrintingTextWindowOptions = PrintingTextWindowOptions()): Int = {
val window = newTextWindow(text, options = options)
window.awaitClose()
}
def showTextScala(text: Array[String], options: NativeObject): Int = {
showTextScala(
text,
JsonUtils.nativeObjectToCaseClass[PrintingTextWindowOptions](options))
}
def getChoice(
question: Array[String],
choices: Array[String],
allowCancel: Boolean,
questionOptions: PrintingTextWindowOptions = PrintingTextWindowOptions()) = {
val questionWindow = newTextWindow(question, options = questionOptions)
val fontbmp = activeScreen.windowManager.fontbmp
val choicesWidth = Window.maxWidth(choices, fontbmp, TextChoiceWindow.xpad)
// Removing 0.5*xpad at the end makes it look better.
val choicesHeight =
choices.length * WindowText.DefaultLineHeight +
1.5f * TextChoiceWindow.ypad
val choiceLayout = layoutWithOffset(
SOUTHEAST, FIXED, choicesWidth, choicesHeight, 0,
-questionWindow.getRect().h)
val choiceWindow = newChoiceWindow(
choices,
choiceLayout,
TextChoiceWindowOptions(
allowCancel = allowCancel, justification = RIGHT))
val choice = choiceWindow.getChoice()
choiceWindow.close()
questionWindow.close()
choice
}
def getChoice(
question: Array[String],
choices: Array[String],
allowCancel: Boolean,
questionOptions: NativeObject): Int = {
getChoice(question, choices, allowCancel,
JsonUtils.nativeObjectToCaseClass[PrintingTextWindowOptions](
questionOptions))
}
def setWindowskin(windowskinPath: String) = syncRun {
game.setWindowskin(windowskinPath)
}
def getPlayerEntityInfo(): EntityInfo = syncRun {
mapScreen.getPlayerEntityInfo()
}
def getEventEntityInfo(id: Int): EntityInfo = {
mapScreen.allEntities.get(id).map(EntityInfo.apply(_, mapScreen)).orNull
}
/**
* Returns -1 on error. Life percentage rounded to integer (0-100) otherwise.
*/
def getEnemyLifePercentage(enemyId: Int): Int = syncRun {
if (activeScreen != game.battleScreen || game.battleScreen.battle.isEmpty)
return -1
val battle = game.battleScreen.battle.get
if (enemyId < 0 || enemyId >= battle.enemyStatus.length)
return -1
val enemy = battle.enemyStatus(enemyId)
val result = ((enemy.hp.toFloat / enemy.stats.mhp) * 100).round.toInt
assert(result >= 0)
assert(result <= 100)
return result
}
/**
* Returns true if successful, false otherwise.
*/
def setEnemyVitals(enemyId: Int, hpPercentage: Float,
mpPercentage: Float): Boolean = syncRun {
if (activeScreen != game.battleScreen || game.battleScreen.battle.isEmpty)
return false
val battle = game.battleScreen.battle.get
if (enemyId < 0 || enemyId >= battle.enemyStatus.length)
return false
val enemy = battle.enemyStatus(enemyId)
enemy.hp = (enemy.stats.mhp * hpPercentage).round
enemy.mp = (enemy.stats.mmp * mpPercentage).round
return true
}
def activateEvent(id: Int, awaitFinish: Boolean) = {
val eventOpt = mapScreen.allEntities.get(id)
if (eventOpt.isEmpty)
logger.error("Could not activate event id: %d".format(id))
val scriptOpt = eventOpt.flatMap(_.activate(SpriteSpec.Directions.NONE))
if (awaitFinish)
scriptOpt.map(_.awaitFinish())
scriptOpt.isDefined
}
def moveEvent(id: Int, dx: Float, dy: Float,
affixDirection: Boolean = false,
async: Boolean = false) = {
val move = syncRun {
mapScreen.moveEvent(id, dx, dy, affixDirection)
}
if (move != null && !async)
move.awaitFinish()
}
def movePlayer(dx: Float, dy: Float,
affixDirection: Boolean = false,
async: Boolean = false) = {
val move = syncRun {
mapScreen.movePlayer(dx, dy, affixDirection)
}
if (move != null && !async)
move.awaitFinish()
}
/**
 * Attempts to let the player exit the current vehicle.
 *
 * Probes for a standable spot along the player's facing direction, from 0.0
 * up to 1.0 tiles away in 0.1-tile steps. At the first standable spot it
 * parks the vehicle at the player's current location (so it can be
 * re-entered later), detaches the player from the vehicle, and moves the
 * player there with collision temporarily disabled so the parked vehicle
 * does not block the move.
 *
 * Returns true if succeeds.
 */
def exitVehicle(): Boolean = {
def playerEntity = mapScreen.playerEntity
val (ux, uy) = playerEntity.getDirectionUnitVector()
for (i <- 0 to 10) {
val dx = ux * i * 0.1f
val dy = uy * i * 0.1f
if (playerEntity.canStandAt(dx, dy)) {
syncRun {
// Record where the vehicle is left behind, keyed by its id.
setLoc(
VEHICLE_LOC(playerEntity.inVehicleId),
MapLoc(playerEntity.mapName.get, playerEntity.x, playerEntity.y))
playerEntity.setInVehicle(false, -1)
}
setPlayerCollision(false)
movePlayer(dx, dy)
setPlayerCollision(true)
return true
}
}
// No standable spot within one tile in the facing direction.
return false
}
def setPlayerCollision(collisionOn: Boolean) = syncRun {
mapScreen.playerEntity._collisionOn = collisionOn
}
def setEventSpeed(id: Int, speed: Float) = syncRun {
mapScreen.allEntities.get(id).map(_.speed = speed)
}
def setPlayerSpeed(speed: Float) = syncRun {
mapScreen.allEntities.get(EntitySpec.playerEntityId).map(_.speed = speed)
}
def getEventState(mapName: String, eventId: Int) = syncRun {
persistent.getEventState(mapName, eventId)
}
def setEventState(mapName: String, eventId: Int, newState: Int) = syncRun {
persistent.setEventState(mapName, eventId, newState)
}
def incrementEventState(eventId: Int) = syncRun {
mapScreen.mapName.map { mapName =>
val newState = persistent.getEventState(mapName, eventId) + 1
persistent.setEventState(mapName, eventId, newState)
}
}
def getParty() = syncRun {
persistent.getIntArray(PARTY)
}
def modifyParty(add: Boolean, characterId: Int): Boolean = syncRun {
if (characterId >= project.data.enums.characters.size)
return false
persistent.modifyParty(add, characterId)
}
def giveExperience(
characterIds: Array[Int],
experience: Int,
showNotifications: Boolean) = {
val leveled = syncRun {
game.persistent.giveExperience(
project.data,
characterIds,
experience)
}
if (showNotifications) {
val leveledCharacterNames = leveled.map(getCharacterName(_))
showTextScala(Array("Received %d XP.".format(experience)))
for (name <- leveledCharacterNames) {
showTextScala(Array("%s leveled!".format(name)))
}
}
}
def giveCharacterExperience(characterId: Int, experience: Int,
showNotifications: Boolean) = {
giveExperience(Array(characterId), experience, showNotifications)
}
def givePartyExperience(experience: Int, showNotifications: Boolean) = {
giveExperience(getParty(), experience, showNotifications)
}
def setLevels(characterIds: Array[Int], level: Int) = syncRun {
game.persistent.setCharacterLevels(project.data, characterIds, level)
}
def setCharacterLevel(characterId: Int, level: Int) = {
setLevels(Array(characterId), level)
}
def setPartyLevel(level: Int) = {
setLevels(getParty(), level)
}
def openStore(itemIdsSold: Array[Int], buyPriceMultiplier: Float,
sellPriceMultiplier: Float) = {
assert(activeScreen == game.mapScreen)
val finishable = syncRun {
val statement = EventJavascript.jsStatement(
"openStore", itemIdsSold, buyPriceMultiplier, sellPriceMultiplier)
game.mapScreen.scriptFactory.runFromFile(
"sys/store.js",
statement,
None)
}
finishable.awaitFinish()
}
def addRemoveItem(itemId: Int, qtyDelta: Int) = syncRun {
persistent.addRemoveItem(itemId, qtyDelta)
}
def countItems(itemId: Int) = syncRun {
persistent.countItems(itemId)
}
def addRemoveGold(delta: Int) = syncRun {
persistent.addRemoveGold(delta)
}
def addRemoveSkill(add: Boolean, characterId: Int, skillId: Int) = syncRun {
persistent.addRemoveLearnedSkills(add, characterId, skillId)
}
def getKnownSkills(characterId: Int): Array[Int] = syncRun {
val characterStatus = BattleStatus.fromCharacter(
project.data,
persistent.getPartyParameters(project.data.enums.characters),
characterId, index = -1)
val allSkills = project.data.enums.skills
characterStatus.knownSkillIds
}
def useItemInMenu(itemId: Int, characterId: Int) = syncRun {
if (persistent.addRemoveItem(itemId, -1)) {
val item = project.data.enums.items(itemId)
val characterStatus = BattleStatus.fromCharacter(
project.data,
persistent.getPartyParameters(project.data.enums.characters),
characterId, index = -1)
val damages = item.effects.flatMap(_.applyAsSkillOrItem(characterStatus))
for (damage <- damages) {
logger.debug("Character %d took %d damage from item.".format(
characterId, damage.value))
}
characterStatus.clampVitals()
persistent.saveCharacterVitals(characterId, characterStatus.hp,
characterStatus.mp, characterStatus.tempStatusEffectIds)
}
}
def useSkillInMenu(casterCharacterId: Int, skillId: Int,
targetCharacterId: Int) = syncRun {
assert(skillId < project.data.enums.skills.length)
val skill = project.data.enums.skills(skillId)
val casterStatus = BattleStatus.fromCharacter(
project.data,
persistent.getPartyParameters(project.data.enums.characters),
casterCharacterId, index = -1)
// Reuse the existing object if it's a self-cast. Otherwise, saving the
// character vitals below has unexpected results.
val targetStatus =
if (targetCharacterId != casterCharacterId) {
BattleStatus.fromCharacter(
project.data,
persistent.getPartyParameters(project.data.enums.characters),
targetCharacterId, index = -1)
} else {
casterStatus
}
assert(skill.cost <= casterStatus.mp)
casterStatus.mp -= skill.cost
skill.applySkill(casterStatus, targetStatus)
persistent.saveCharacterVitals(casterCharacterId, casterStatus.hp,
casterStatus.mp, casterStatus.tempStatusEffectIds)
if (targetCharacterId != casterCharacterId) {
persistent.saveCharacterVitals(targetCharacterId, targetStatus.hp,
targetStatus.mp, targetStatus.tempStatusEffectIds)
}
}
/**
* @param hpPercentage Between 0.0f and 1.0f.
* @param mpPercentage Between 0.0f and 1.0f.
*/
def healCharacter(characterId: Int, hpPercentage: Float,
mpPercentage: Float, removeStatusEffects: Boolean = false) = syncRun {
val characterStatus = BattleStatus.fromCharacter(
project.data,
persistent.getPartyParameters(project.data.enums.characters),
characterId, index = -1)
if (removeStatusEffects) {
characterStatus.updateTempStatusEffectIds(Array.empty)
}
characterStatus.hp +=
(characterStatus.stats.mhp * hpPercentage).round
characterStatus.mp +=
(characterStatus.stats.mmp * mpPercentage).round
characterStatus.clampVitals()
persistent.saveCharacterVitals(characterId, characterStatus.hp,
characterStatus.mp, characterStatus.tempStatusEffectIds)
}
def healParty(hpPercentage: Float, mpPercentage: Float,
removeStatusEffects: Boolean = false) = syncRun {
for (characterId <- persistent.getIntArray(PARTY)) {
healCharacter(characterId, hpPercentage, mpPercentage,
removeStatusEffects)
}
}
def damageCharacter(characterId: Int, hpPercentage: Float,
mpPercentage: Float) =
healCharacter(characterId, -hpPercentage, -mpPercentage)
def damageParty(hpPercentage: Float, mpPercentage: Float) =
healParty(-hpPercentage, -mpPercentage)
def getBattleStats(characterId: Int, proposedSlotId: Int,
proposedItemId: Int) = {
val partyParams = syncRun {
persistent.getPartyParameters(project.data.enums.characters)
}
val currentBattleStats = BattleStatus.fromCharacter(
project.data, partyParams, characterId)
if (proposedSlotId > 0 && proposedItemId > 0) {
partyParams.characterEquip(characterId).update(
proposedSlotId, proposedItemId)
}
val proposedBattleStats =
BattleStatus.fromCharacter(project.data, partyParams, characterId)
CurrentAndProposedStats(currentBattleStats.stats, proposedBattleStats.stats)
}
def getInt(key: String): Int = syncRun {
persistent.getInt(key)
}
def setInt(key: String, value: Int) = syncRun {
persistent.setInt(key, value)
}
/** Adds `value` to the persisted integer stored under `key`. */
def addInt(key: String, value: Int) = syncRun {
  setInt(key, getInt(key) + value)
}

/**
 * Subtracts `value` from the persisted integer stored under `key`.
 * (Method name kept as-is for script compatibility.)
 */
def substractInt(key: String, value: Int) = syncRun {
  setInt(key, getInt(key) - value)
}

/** Multiplies the persisted integer stored under `key` by `value`. */
def multiplyInt(key: String, value: Int) = syncRun {
  setInt(key, getInt(key) * value)
}

/** Divides the persisted integer under `key` by `value` (integer division). */
def divideInt(key: String, value: Int) = syncRun {
  setInt(key, getInt(key) / value)
}

/** Replaces the persisted integer under `key` with its remainder mod `value`. */
def modInt(key: String, value: Int) = syncRun {
  setInt(key, getInt(key) % value)
}
def getString(key: String) = syncRun {
persistent.getString(key)
}
def setString(key: String, value: String) = syncRun {
persistent.setString(key, value)
}
def getIntArray(key: String): Array[Int] = syncRun {
persistent.getIntArray(key)
}
def setIntArray(key: String, value: Array[Int]) = syncRun {
persistent.setIntArray(key, value)
}
def getStringArray(key: String): Array[String] = syncRun {
persistent.getStringArray(key)
}
def setStringArray(key: String, value: Array[String]) = syncRun {
persistent.setStringArray(key, value)
}
def setStringArrayElement(key: String, index: Int, value: String) = syncRun {
val array = persistent.getStringArray(key)
array.update(index, value)
persistent.setStringArray(key, array)
}
def getLoc(key: String) = syncRun {
persistent.getLoc(key)
}
def setLoc(key: String, loc: MapLoc) = syncRun {
persistent.setLoc(key, loc)
}
def getCharacterName(characterId: Int) = syncRun {
persistent.getCharacterName(project.data, characterId)
}
def getEquippableItems(characterId: Int, equipTypeId: Int) = syncRun {
persistent.getEquippableItems(project.data, characterId, equipTypeId)
}
def equipItem(characterId: Int, slotId: Int, itemId: Int) = syncRun {
persistent.equipItem(characterId, slotId, itemId)
}
def startNewGame() = syncRun {
game.startNewGame()
}
def getSaveInfos(maxSlots: Int): Array[SaveInfo] = {
val seq = for (slot <- 0 until maxSlots) yield {
SaveFile.readInfo(project, slot)
}
seq.toArray
}
def loadFromSaveSlot(slot: Int) = syncRun {
game.loadGame(slot)
}
def saveToSaveSlot(slot: Int) = syncRun {
game.saveGame(slot)
}
def quit() = syncRun {
game.quit()
}
def toTitleScreen() = syncRun {
game.gameOver()
}
def runScript(scriptPath: String, functionToCall: String) = {
game.mapScreen.scriptFactory.runFromFile(
scriptPath, functionToCall, runOnNewThread = false)
}
def drawText(id: Int, text: String, x: Int, y: Int, color: Color = new Color(255, 255, 255, 1), size: Int = 12) = syncRun {
logger.debug("drawText: " + id + ", text: " + text + " on " + x + ", " + y + ", size:"+size);
mapScreen.windowManager.addDrawText(new ScreenText(id, text, x, y, color, size))
}
/**
 * Queues rectangle `id` for drawing at (x, y) with the given size.
 *
 * @param recttype "line" draws an outline; any other value (including the
 *                 default "filled") draws a filled shape.
 * @param radius   0 draws a plain rectangle; a positive value draws a
 *                 rounded rectangle with that corner radius.
 */
def drawRectangle(id: Int, x: Int, y: Int, width: Int, height: Int, color: Color = new Color(255, 255, 255, 1), recttype: String = "filled", radius: Int = 0) = syncRun {
  logger.debug("drawRectangle: " + id + ", size: " + width + "x" + height + " on " + x + ", " + y);
  val shape = if (recttype == "line") ShapeType.Line else ShapeType.Filled
  val rect =
    if (radius == 0) new Rectangle(id, x, y, width, height, color, shape)
    else new RoundedRectangle(id, radius, x, y, width, height, color, shape)
  mapScreen.windowManager.addDrawRectangle(rect)
}
def removeDrawedText(id: Int) = syncRun {
mapScreen.windowManager.removeDrawText(id)
}
def removeDrawedRectangle(id: Int) = syncRun {
mapScreen.windowManager.removeDrawRectangle(id)
}
/**
 * Converts 0-255 RGB components plus a 0.0-1.0 alpha into a libGDX Color
 * (whose channels are 0.0-1.0 floats).
 */
def color(r: Float, g: Float, b: Float, alpha: Float): Color =
  new Color(r / 255, g / 255, b / 255, alpha)
/** Writes a debug line to the engine log from script code. */
def log(text: String) = syncRun {
  logger.debug(text)
}
/**
 * Applies hp/mp damage to a character: loads its battle status from the
 * persisted party parameters, subtracts, clamps the vitals to legal bounds,
 * and writes the result back to persistent storage.
 */
def takeDamage(characterId: Int, hp: Int, mp: Int) = syncRun {
  val characterStatus = BattleStatus.fromCharacter(
    project.data,
    persistent.getPartyParameters(project.data.enums.characters),
    characterId, index = -1)
  characterStatus.hp -= hp
  characterStatus.mp -= mp
  // Keep hp/mp within their valid ranges before persisting.
  characterStatus.clampVitals()
  persistent.saveCharacterVitals(characterId, characterStatus.hp,
    characterStatus.mp, characterStatus.tempStatusEffectIds)
}
/**
 * Blocks the calling (script) thread until one of `capturedKeys` is pressed
 * and returns that key. Handler registration and removal run on the render
 * thread (syncRun); only awaitFinish() blocks the caller's own thread.
 */
def getKeyInput(capturedKeys: Array[Int]): Int = {
  // Register a one-shot handler at the front of the input chain.
  val inputHandler = syncRun {
    val inputHandler = new OneTimeInputHandler(capturedKeys.toSet)
    activeScreen.inputs.prepend(inputHandler)
    inputHandler
  }
  // Block off the render thread until a captured key arrives.
  val result = inputHandler.awaitFinish()
  // Unregister so subsequent input flows to the normal handlers again.
  syncRun {
    activeScreen.inputs.remove(inputHandler)
  }
  return result
}
/**
 * Returns the value associated with the message key, or "$key$" otherwise.
 * The "$key$" fallback makes missing message keys visible in the UI.
 */
def getMessage(key: String) = {
  project.data.messages.get(key) match {
    case Some(message) => message
    case None          => "$%s$".format(key)
  }
}
/** Name of the currently active map. Fails (Option.get) if no map is loaded. */
def getMapName(): String = {
  return mapScreen.mapName.get
}
/** Reads the given script file from disk and returns its full source text. */
def getScriptAsString(scriptPath: String): String = {
  Script.readFromDisk(project, scriptPath).readAsString
}
/** Registers a JavaScript callback with the map screen's script hooks. */
def addScriptHook(jsFunction: org.mozilla.javascript.Function) = syncRun {
  mapScreen.scriptHooks.addScriptHook(jsFunction)
}
/**
 * Should be used for testing only.
 *
 * Simulates holding `key` for `duration` seconds: key-down, sleep, key-up,
 * with the down/up events dispatched on the render thread.
 */
def mapScreenKeyPress(key: Int, duration: Float) = {
  GdxUtils.syncRun { mapScreen.inputs.myKeyDown(key) }
  sleep(duration)
  GdxUtils.syncRun { mapScreen.inputs.myKeyUp(key) }
}
/** Simulates a short (0.1 s) key press. Testing only. */
def mapScreenKeyPress(key: Int): Unit = {
  mapScreenKeyPress(key, 0.1f)
}
}
object ScriptInterfaceConstants extends HasScriptConstants {
} | hendrik-weiler/rpgboss | common/src/main/scala/rpgboss/player/ScriptInterface.scala | Scala | agpl-3.0 | 33,676 |
package com.giyeok.jparser.tests.metalang
import com.giyeok.jparser.Inputs.ConcreteSource
import com.giyeok.jparser.examples.basics.TrickyLookaheadsTests1
import com.giyeok.jparser.examples.metalang.{ExpressionGrammars, LexicalGrammars, MetaLangGrammar}
import com.giyeok.jparser.examples.{GrammarWithExamples, MetaLang1Example, StringExamples}
import com.giyeok.jparser.metalang.{GrammarGrammar, MetaGrammar}
import com.giyeok.jparser.tests.BasicParseTest
import com.giyeok.jparser.{Grammar, Inputs}
/**
 * Tests GrammarGrammar by feeding it a grammar definition written in the meta
 * grammar notation itself. The grammar text is runtime test data and must stay
 * byte-exact.
 */
object GrammarGrammarTests1 extends GrammarWithExamples with StringExamples {
  val grammar: GrammarGrammar.type = GrammarGrammar

  private val paperGrammar =
    """S = `Stmt+
      |Stmt = LetStmt
      | | ExprStmt
      |LetStmt = Let ' ' Id ' ' Expr ';'
      |Let = Let0&Name
      |Let0 = 'l' 'e' 't'
      |Name = L(`[a-z]+)
      |Id = Name-Let
      |ExprStmt = Expr ';' la(LetStmt)
      |Token = '+' | Id
      |Expr = `Token+
      |`Stmt+ = `Stmt+ Stmt | Stmt
      |`Token+ = `Token+ Token | Token
      |`[a-z]+ = `[a-z]+ `[a-z] | `[a-z]
      |`[a-z] = [a-z]
      |""".stripMargin('|')

  // The 10x repetition stress-tests parsing of a longer input of the same shape.
  val correctExamples: Set[String] = Set(paperGrammar, paperGrammar * 10)
  val incorrectExamples: Set[String] = Set[String]()
}
/** Adapts a MetaLang1Example so it can run as a GrammarWithExamples test case. */
case class GrammarTestCasesFromMetaGram1Example(example: MetaLang1Example) extends GrammarWithExamples {
  // Lazy so a grammar that fails to translate only errors when first used.
  lazy val grammar: Grammar = MetaGrammar.translateForce(example.name, example.grammar)
  val correctExampleInputs: Set[ConcreteSource] = example.correctExamples.map(Inputs.fromString).toSet
  val incorrectExampleInputs: Set[ConcreteSource] = example.incorrectExamples.map(Inputs.fromString).toSet
}
/** All grammar/example bundles exercised by GrammarGrammarTestSuite1. */
object GrammarGrammarTests {
  val tests: Set[GrammarWithExamples] = Set(
    GrammarGrammarTests1,
    GrammarTestCasesFromMetaGram1Example(MetaLangGrammar.metaGrammar1),
    GrammarTestCasesFromMetaGram1Example(MetaLangGrammar.metaGrammar2),
    GrammarTestCasesFromMetaGram1Example(ExpressionGrammars.basic),
    GrammarTestCasesFromMetaGram1Example(ExpressionGrammars.basic1),
    GrammarTestCasesFromMetaGram1Example(TrickyLookaheadsTests1.tricky1),
    GrammarTestCasesFromMetaGram1Example(TrickyLookaheadsTests1.tricky2),
    GrammarTestCasesFromMetaGram1Example(LexicalGrammars.basic0),
    GrammarTestCasesFromMetaGram1Example(LexicalGrammars.basic1),
    GrammarTestCasesFromMetaGram1Example(LexicalGrammars.basic2)
  )
}
class GrammarGrammarTestSuite1 extends BasicParseTest(GrammarGrammarTests.tests)
| Joonsoo/moon-parser | metalang/src/test/scala/com/giyeok/jparser/tests/metalang/GrammarGrammarTests1.scala | Scala | mit | 2,583 |
package org.jetbrains.plugins.scala.codeInspection.annotations
import com.intellij.codeInspection.ProblemsHolder
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.codeInspection.AbstractInspection
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScAnnotation
/**
* User: Dmitry.Naydanov
* Date: 30.09.15.
*/
/**
 * Inspection that flags Scala annotations applied with more than one argument
 * list, which the compiler backend does not currently support.
 */
class MultipleArgLists extends AbstractInspection("ScalaAnnotMultipleArgLists", "MultipleArgListsInAnnotation") {
  override def actionFor(implicit holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
    // More than one argument list on the annotation constructor is the trigger.
    case annotation: ScAnnotation if annotation.constructor.arguments.length > 1 =>
      holder.registerProblem(annotation,
        "Implementation limitation: multiple argument lists on annotations are currently not supported")
  }
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/annotations/MultipleArgLists.scala | Scala | apache-2.0 | 786 |
package smt
import org.scalacheck.Gen
import org.scalacheck.Gen._
import GenUtil._
import smt.migration.{Script, Migration, Group}
object MigrationGen {
/** Generates a Migration with an alphabetic name and 1 to 10 random groups. */
def migGen: Gen[Migration] =
  for {
    migrationName <- alphaStr
    groupCount <- choose(1, 10)
    groups <- listOfN(groupCount, groupGen)
  } yield Migration(migrationName, groups, Seq())
/** Generates a Group with 1 to 10 up scripts and 1 to 10 down scripts. */
def groupGen: Gen[Group] =
  for {
    upCount <- choose(1, 10)
    upScripts <- listOfN(upCount, scriptGen)
    downCount <- choose(1, 10)
    downScripts <- listOfN(downCount, scriptGen)
  } yield Group(upScripts, downScripts)
/** Generates a Script with an alphabetic name and 20-100 alphabetic characters of content. */
def scriptGen: Gen[Script] =
  for {
    scriptName <- alphaStr
    contentLength <- choose(20, 100)
    contentChars <- listOfN(contentLength, alphaChar)
  } yield Script(scriptName, contentChars.mkString(""))
/** Two migrations are considered equal when their up-script contents match in order. */
def migEq(m1: Migration, m2: Migration): Boolean = {
  def upContents(m: Migration): Seq[String] =
    for (group <- m.groups; up <- group.ups) yield up.content
  upContents(m1) == upContents(m2)
}
/** Generates `l` migrations that are pairwise distinct under migEq. */
def listOfDistinctMig(l: Int): Gen[List[Migration]] = {
  listOfDistinctN(l, migGen, migEq)
}
}
| davidpeklak/smt | src/test/scala/smt/MigrationGen.scala | Scala | mit | 1,111 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.source_context
/** `SourceContext` represents information about the source of a
  * protobuf element, like the file in which it is defined.
  *
  * NOTE: generated by ScalaPB from source_context.proto — regenerate rather
  * than hand-editing.
  *
  * @param fileName
  *   The path-qualified name of the .proto file that contained the associated
  *   protobuf element. For example: `"google/protobuf/source_context.proto"`.
  */
@SerialVersionUID(0L)
final case class SourceContext(
    fileName: _root_.scala.Predef.String = "",
    unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty
    ) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[SourceContext] {
  // Cache of serializedSize; 0 means "not computed yet" (safe because an empty
  // message still serializes unknown fields, and recomputation is idempotent).
  @transient
  private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
  private[this] def __computeSerializedValue(): _root_.scala.Int = {
    var __size = 0

    {
      // Proto3 semantics: the default ("") value is not serialized at all.
      val __value = fileName
      if (!__value.isEmpty) {
        __size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(1, __value)
      }
    };
    __size += unknownFields.serializedSize
    __size
  }
  override def serializedSize: _root_.scala.Int = {
    var read = __serializedSizeCachedValue
    if (read == 0) {
      read = __computeSerializedValue()
      __serializedSizeCachedValue = read
    }
    read
  }
  def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
    {
      val __v = fileName
      if (!__v.isEmpty) {
        _output__.writeString(1, __v)
      }
    };
    unknownFields.writeTo(_output__)
  }
  def withFileName(__v: _root_.scala.Predef.String): SourceContext = copy(fileName = __v)
  def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v)
  def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty)
  // Used by reflection-style access; null signals "field at default value".
  def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
    (__fieldNumber: @_root_.scala.unchecked) match {
      case 1 => {
        val __t = fileName
        if (__t != "") __t else null
      }
    }
  }
  def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
    _root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
    (__field.number: @_root_.scala.unchecked) match {
      case 1 => _root_.scalapb.descriptors.PString(fileName)
    }
  }
  def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
  def companion = com.google.protobuf.source_context.SourceContext
}
/** Companion/codec for SourceContext. Generated by ScalaPB — do not hand-edit. */
object SourceContext extends scalapb.GeneratedMessageCompanion[com.google.protobuf.source_context.SourceContext] {
  implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.source_context.SourceContext] = this
  // Merges fields read from the wire into an existing message (proto merge semantics).
  def merge(`_message__`: com.google.protobuf.source_context.SourceContext, `_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.source_context.SourceContext = {
    var __fileName = `_message__`.fileName
    var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null
    var _done__ = false
    while (!_done__) {
      val _tag__ = _input__.readTag()
      _tag__ match {
        case 0 => _done__ = true
        // tag 10 = field number 1, wire type 2 (length-delimited string)
        case 10 =>
          __fileName = _input__.readStringRequireUtf8()
        case tag =>
          // Preserve fields this schema version does not know about.
          if (_unknownFields__ == null) {
            _unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder(_message__.unknownFields)
          }
          _unknownFields__.parseField(tag, _input__)
      }
    }
    com.google.protobuf.source_context.SourceContext(
      fileName = __fileName,
      unknownFields = if (_unknownFields__ == null) _message__.unknownFields else _unknownFields__.result()
    )
  }
  implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.source_context.SourceContext] = _root_.scalapb.descriptors.Reads{
    case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
      _root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor), "FieldDescriptor does not match message type.")
      com.google.protobuf.source_context.SourceContext(
        fileName = __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Predef.String]).getOrElse("")
      )
    case _ => throw new RuntimeException("Expected PMessage")
  }
  def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = SourceContextProto.javaDescriptor.getMessageTypes().get(0)
  def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = SourceContextProto.scalaDescriptor.messages(0)
  // This message has no message-typed or enum-typed fields, hence the MatchErrors.
  def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = throw new MatchError(__number)
  lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
  def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
  lazy val defaultInstance = com.google.protobuf.source_context.SourceContext(
    fileName = ""
  )
  // Lens support for updating SourceContext fields nested inside other messages.
  implicit class SourceContextLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.source_context.SourceContext]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.source_context.SourceContext](_l) {
    def fileName: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.fileName)((c_, f_) => c_.copy(fileName = f_))
  }
  final val FILE_NAME_FIELD_NUMBER = 1
  def of(
    fileName: _root_.scala.Predef.String
  ): _root_.com.google.protobuf.source_context.SourceContext = _root_.com.google.protobuf.source_context.SourceContext(
    fileName
  )
  // @@protoc_insertion_point(GeneratedMessageCompanion[google.protobuf.SourceContext])
}
| trueaccord/ScalaPB | scalapb-runtime/src/main/scalajs/com/google/protobuf/source_context/SourceContext.scala | Scala | apache-2.0 | 5,994 |
/* Copyright 2012 Christian Douven
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package almhirt.xml
import java.util.{UUID => JUUID}
import scala.concurrent.duration._
import scala.language.implicitConversions
import scala.xml.{Node, NodeSeq, Elem}
import scalaz.Validation
import scalaz.syntax.Ops
import almhirt.common._
import almhirt.almvalidation._
import _root_.java.time.{ZonedDateTime, LocalDateTime}
/**
 * Extraction helpers for scala.xml.Elem (made available via the implicit
 * conversion in ToXmlOps). Every `extractX` method wraps raw text/child access
 * in an AlmValidation so parse failures become values instead of exceptions;
 * the `extractOptionalX` variants distinguish "absent" from "invalid".
 *
 * Fix: the function arrows on mapOptionalFirstChild, flatMapOptionalFirstChild,
 * `\\?` and `\??` had been corrupted to a stray identifier character, which
 * does not parse as a function type; they are restored to `=>` here.
 */
trait XmlOps0 extends Ops[Elem] {
  // --- Extraction from this element's own text ---------------------------

  /** The element text, validated to be non-empty and not whitespace-only. */
  def extractString(): AlmValidation[String] =
    almhirt.almvalidation.funs.notEmptyOrWhitespace(self.text)
  def extractOptionalString(): Option[String] =
    extractString.toOption
  def extractBoolean(): AlmValidation[Boolean] =
    funs.booleanFromXmlNode(self)
  def extractOptionalBoolean(): AlmValidation[Option[Boolean]] =
    funs.optionalBooleanFromXmlNode(self)
  def extractByte(): AlmValidation[Byte] =
    funs.byteFromXmlNode(self)
  def extractOptionalByte(): AlmValidation[Option[Byte]] =
    funs.optionalByteFromXmlNode(self)
  def extractShort(): AlmValidation[Short] =
    funs.shortFromXmlNode(self)
  def extractOptionalShort(): AlmValidation[Option[Short]] =
    funs.optionalShortFromXmlNode(self)
  def extractInt(): AlmValidation[Int] =
    funs.intFromXmlNode(self)
  def extractOptionalInt(): AlmValidation[Option[Int]] =
    funs.optionalIntFromXmlNode(self)
  def extractLong(): AlmValidation[Long] =
    funs.longFromXmlNode(self)
  def extractOptionalLong(): AlmValidation[Option[Long]] =
    funs.optionalLongFromXmlNode(self)
  def extractBigInt(): AlmValidation[BigInt] =
    funs.bigIntFromXmlNode(self)
  def extractOptionalBigInt(): AlmValidation[Option[BigInt]] =
    funs.optionalBigIntFromXmlNode(self)
  def extractFloat(): AlmValidation[Float] =
    funs.floatFromXmlNode(self)
  def extractOptionalFloat(): AlmValidation[Option[Float]] =
    funs.optionalFloatFromXmlNode(self)
  def extractDouble(): AlmValidation[Double] =
    funs.doubleFromXmlNode(self)
  def extractOptionalDouble(): AlmValidation[Option[Double]] =
    funs.optionalDoubleFromXmlNode(self)
  def extractDecimal(): AlmValidation[BigDecimal] =
    funs.decimalFromXmlNode(self)
  def extractOptionalDecimal(): AlmValidation[Option[BigDecimal]] =
    funs.optionalDecimalFromXmlNode(self)
  def extractDateTime(): AlmValidation[ZonedDateTime] =
    funs.dateTimeFromXmlNode(self)
  def extractLocalDateTime(): AlmValidation[LocalDateTime] =
    funs.localDateTimeFromXmlNode(self)
  def extractOptionalDateTime(): AlmValidation[Option[ZonedDateTime]] =
    funs.optionalDateTimeFromXmlNode(self)
  def extractDuration(): AlmValidation[FiniteDuration] =
    funs.durationFromXmlNode(self)
  def extractOptionalDuration(): AlmValidation[Option[FiniteDuration]] =
    funs.optionalDurationFromXmlNode(self)
  def extractUuid(): AlmValidation[JUUID] =
    funs.uuidFromXmlNode(self)
  def extractOptionalUuid(): AlmValidation[Option[JUUID]] =
    funs.optionalUuidFromXmlNode(self)
  def extractUri(): AlmValidation[java.net.URI] =
    funs.uriFromXmlNode(self)
  def extractOptionalUri(): AlmValidation[Option[java.net.URI]] =
    funs.optionalUriFromXmlNode(self)

  // --- Extraction from a named child element -----------------------------

  def extractStringFromChild(label: String): AlmValidation[String] =
    funs.stringFromChild(self, label)
  def extractOptionalStringFromChild(label: String): Option[String] =
    funs.stringOptionFromChild(self, label)
  def extractShortFromChild(label: String): AlmValidation[Short] =
    funs.shortFromChild(self, label)
  def extractOptionalShortFromChild(label: String): AlmValidation[Option[Short]] =
    funs.shortOptionFromChild(self, label)
  def extractIntFromChild(label: String): AlmValidation[Int] =
    funs.intFromChild(self, label)
  def extractOptionalIntFromChild(label: String): AlmValidation[Option[Int]] =
    funs.intOptionFromChild(self, label)
  def extractLongFromChild(label: String): AlmValidation[Long] =
    funs.longFromChild(self, label)
  def extractOptionalLongFromChild(label: String): AlmValidation[Option[Long]] =
    funs.longOptionFromChild(self, label)
  def extractDoubleFromChild(label: String): AlmValidation[Double] =
    funs.doubleFromChild(self, label)
  def extractOptionalDoubleFromChild(label: String): AlmValidation[Option[Double]] =
    funs.doubleOptionFromChild(self, label)
  def extractDateTimeFromChild(label: String): AlmValidation[ZonedDateTime] =
    funs.dateTimeFromChild(self, label)
  def extractOptionalDateTimeFromChild(label: String): AlmValidation[Option[ZonedDateTime]] =
    funs.dateTimeOptionFromChild(self, label)
  def extractUuidFromChild(label: String): AlmValidation[JUUID] =
    funs.uuidFromChild(self, label)
  def extractOptionalUuidFromChild(label: String): AlmValidation[Option[JUUID]] =
    funs.uuidOptionFromChild(self, label)

  // --- Child navigation ---------------------------------------------------

  def firstChildNode(label: String): AlmValidation[Elem] =
    funs.firstChildNodeMandatory(self, label)
  def firstChildNode: AlmValidation[Elem] =
    funs.getFirstChildNode(self)
  def firstChildNodeExcluding(excludeLabel: String): AlmValidation[Elem] =
    funs.getFirstChildNodeExcluding(self, excludeLabel)
  /** Applies `compute` to the first child labelled `label`, if present. */
  def mapOptionalFirstChild[T](label: String, compute: Elem => AlmValidation[T]): AlmValidation[Option[T]] =
    funs.mapOptionalFirstChild(self, label, compute)
  def flatMapOptionalFirstChild[T](label: String, compute: Elem => AlmValidation[Option[T]]): AlmValidation[Option[T]] =
    funs.flatMapOptionalFirstChild(self, label, compute)

  // --- Symbolic query operators (`!` = mandatory, `?` = optional) ---------

  def \\?(label: String) = funs.elems(self)(label)
  def \\?(predicate: String => Boolean) = funs.allElems(self).filter(xml => predicate(xml.label))
  def \!(label: String): AlmValidation[Elem] = funs.getChild(self)(label)
  def \?(label: String): AlmValidation[Option[Elem]] = funs.tryGetChild(self)(label)
  /** Like \? but collapses lookup failures into None. */
  def \??(label: String): Option[Elem] = funs.tryGetChild(self)(label).fold(_ => None, succ => succ)
  def elems = funs.allElems(self)
  def \@!(name: String): AlmValidation[String] = funs.getAttributeValue(self, name)
  def \@?(name: String): Option[String] = funs.getOptionalAttributeValue(self, name)
}
import language.implicitConversions
/** Mix in (or import an instance of) this trait to enrich Elem with XmlOps0. */
trait ToXmlOps {
  // Explicit result type added: implicit defs must not rely on type inference,
  // both for compile-speed and to avoid accidental inference changes.
  implicit def FromElemToXmlOps0(a: Elem): XmlOps0 = new XmlOps0 { def self = a }
}
| chridou/almhirt | almhirt-common/src/main/scala/almhirt/xml/XmlOps.scala | Scala | apache-2.0 | 6,908 |
package sbinary;
/** Combines the standard-type and Java formats into sbinary's default wire protocol. */
trait DefaultProtocol extends StandardTypes with JavaFormats;
/** Ready-to-import singleton instance of the default protocol. */
object DefaultProtocol extends DefaultProtocol;
| mikegoatly/sbinary | core/src/defaultprotocol.scala | Scala | mit | 128 |
package ch.inventsoft.graph.layout
package spring
import scala.language.higherKinds
import scalax.collection._
import GraphPredef._
import ch.inventsoft.graph.vector._
/** Spring layout that uses the barnes hut algorithm for repulsion. Use for larger (> 1000 nodes) graphs. */
object BarnesHutLayout {
/** Builds a layout with initial positions chosen uniformly at random inside `in`. */
def apply[N, E[X] <: EdgeLikeIn[X]](graph: Graph[N, E], in: Box3, theta: Double): IncrementalLayout[N] =
  apply(graph, _ => Vector3.random(in), theta)

/**
 * Builds a layout starting from the given initial positions. `theta` is the
 * Barnes-Hut multipole acceptance criterion: larger values approximate more
 * aggressively; 0 never approximates (exact pairwise repulsion).
 */
def apply[N, E[X] <: EdgeLikeIn[X]](graph: Graph[N, E], positions: Layout[N], theta: Double): IncrementalLayout[N] = {
  val in = Box3.containing(graph.nodes.map(_.value).map(positions))
  // Spring stiffness scaled down by the heaviest edge weight.
  val springConstant = 1d / (graph.edges.map(_.weight).max * 5)
  implicit val repulsionConstant = RepulsionConstant {
    // Heuristic: (average per-node cell edge length) squared.
    val density = Math.pow(in.size.volume / graph.size, 1d / 3)
    density * density
  }
  // Guards the force denominators against division by zero at tiny distances.
  implicit val epsilon = Epsilon(in.size.length / 10000000)
  implicit val mac = MultipoleAcceptanceCriterion(theta)
  val nodes = graph.nodes.map(_.value).toVector
  val nodeMap = nodes.zipWithIndex.toMap
  val springs = graph.edges.map { e =>
    Spring(nodeMap(e._1.value), nodeMap(e._2.value), e.weight, springConstant)
  }
  val bodies = nodes.map(n => Body(positions(n.value)))
  new BarnesHutLayout(nodeMap, springs.toVector, bodies)
}
/**
 * One immutable layout state: `bodies(lookupMap(n))` holds the position of
 * node `n`. `improve` produces the next state by one force-directed step.
 */
private class BarnesHutLayout[N](
    lookupMap: Map[N, Int],
    springs: Vector[Spring],
    bodies: Vector[Body])(
    implicit repulsionConstant: RepulsionConstant,
    epsilon: Epsilon,
    mac: MultipoleAcceptanceCriterion) extends IncrementalLayout[N] {

  def apply(n: N) = bodies(lookupMap(n)).centerOfMass

  def improve = {
    // Rebuild the octree for the current positions.
    val oct = Oct.create(bodies)
    // Repulsion: one octree traversal per body, evaluated in parallel.
    val forces = bodies.toArray.par.map(body => body.centerOfMass + oct.force(body))
    // Spring attraction: applied sequentially, mutating both endpoints' entries.
    springs.foreach { spring =>
      val force = spring.force(bodies(spring.node1).centerOfMass, bodies(spring.node2).centerOfMass)
      forces(spring.node1) -= force
      forces(spring.node2) += force
    }
    new BarnesHutLayout(lookupMap, springs, forces.seq.view.map(Body).toVector)
  }
}
/** Strength of the pairwise repulsion (value class so implicit passing stays type-safe). */
private case class RepulsionConstant(value: Double) extends AnyVal
/** Small constant added to force denominators to avoid division by zero. */
private case class Epsilon(value: Double) extends AnyVal
/** Barnes-Hut theta: a subtree of size s at distance d is approximated when s/d < value. */
private case class MultipoleAcceptanceCriterion(value: Double) extends AnyVal {
  def accepts(boxSize: Double, distance: Double) = boxSize / distance < value
}
/** A node of the Barnes-Hut octree: a single Body, an Empty region, or an Oct. */
private sealed trait Node {
  def mass: Double
  def centerOfMass: Vector3
  // NOTE(review): appears unused within this file chunk — confirm before removing.
  def distance(to: Node) = (centerOfMass - to.centerOfMass).length
  /** Repulsive force this (sub)tree exerts on the given body. */
  def force(against: Body)(implicit repulsionConstant: RepulsionConstant, epsilon: Epsilon, mac: MultipoleAcceptanceCriterion): Vector3
}
/** Octree leaf: a single point of unit mass. */
private case class Body(centerOfMass: Vector3) extends Node {
  override def mass = 1
  def applyForce(f: Vector3) = copy(centerOfMass = centerOfMass + f)
  // Pairwise repulsion: dividing the direction vector by d^3 yields an
  // inverse-square law; epsilon keeps the denominator nonzero.
  override def force(against: Body)(implicit repulsionConstant: RepulsionConstant, epsilon: Epsilon, mac: MultipoleAcceptanceCriterion) = {
    val vec = against.centerOfMass - centerOfMass
    val distance = vec.length
    vec * (repulsionConstant.value / (distance * distance * distance + epsilon.value))
  }
}
/** Octree region containing no bodies: zero mass, exerts no force. */
private case object Empty extends Node {
  override def mass = 0d
  override def centerOfMass = Vector3.zero
  override def force(against: Body)(implicit repulsionConstant: RepulsionConstant, epsilon: Epsilon, mac: MultipoleAcceptanceCriterion) =
    Vector3.zero
}
/** Inner octree node: a cubic region subdivided into exactly eight children. */
private case class Oct private (
    bounds: Box3,
    children: IndexedSeq[Node]) extends Node {

  override val mass = children.foldLeft(0d)(_ + _.mass)
  // Mass-weighted average of the children's centers of mass.
  override val centerOfMass = {
    children.foldLeft(Vector3.zero) { (sum, child) =>
      sum + child.centerOfMass * child.mass
    } / mass
  }

  def size = bounds.size.x //same size in each direction

  override def force(body: Body)(implicit repulsionConstant: RepulsionConstant, epsilon: Epsilon, mac: MultipoleAcceptanceCriterion) = {
    val vec = body.centerOfMass - centerOfMass
    val distance = vec.length
    if (mac.accepts(size, distance)) {
      // Far enough away: treat the whole subtree as one cluster of mass `mass`.
      vec * (repulsionConstant.value * mass / (distance * distance * distance + epsilon.value))
    } else {
      // Too close: recurse into each of the 8 octants. Unrolled over a mutable
      // vector accumulator to avoid intermediate allocations.
      val v = children(0).force(body).toMutable
      v += children(1).force(body)
      v += children(2).force(body)
      v += children(3).force(body)
      v += children(4).force(body)
      v += children(5).force(body)
      v += children(6).force(body)
      v += children(7).force(body)
      v.toVector3
    }
  }
}
private object Oct {
  /** Builds an octree covering all bodies inside a cubic bounding box. */
  def create(contents: Traversable[Body]): Node = {
    val rawBounds = Box3.containing(contents.view.map(_.centerOfMass))
    // Expand the bounds to a cube so every subdivision stays cubic
    // (Oct.size assumes equal edge lengths).
    val size = rawBounds.size.x max rawBounds.size.y max rawBounds.size.z
    val bounds = Box3(rawBounds.origin, Vector3(size, size, size))
    create(bounds, contents)
  }
  /** Recursively partitions the bodies into the eight octants of `bounds`. */
  def create(bounds: Box3, contents: Traversable[Body]): Node = {
    if (contents.isEmpty) Empty
    else if (contents.tail.isEmpty) contents.head
    else {
      val center = bounds.center
      val array = Array[List[Body]](Nil, Nil, Nil, Nil, Nil, Nil, Nil, Nil)
      contents.foreach { body =>
        val p = body.centerOfMass
        // Octant index composed bitwise: +1 if x >= center, +2 if y, +4 if z.
        val index = (if (p.x < center.x) 0 else 1) +
          (if (p.y < center.y) 0 else 2) +
          (if (p.z < center.z) 0 else 4)
        array(index) = body :: array(index)
      }
      val size = bounds.size / 2
      // Child order mirrors the bit composition above.
      val children = Array(
        create(Box3(bounds.origin, size), array(0)),
        create(Box3(Vector3(center.x, bounds.origin.y, bounds.origin.z), size), array(1)),
        create(Box3(Vector3(bounds.origin.x, center.y, bounds.origin.z), size), array(2)),
        create(Box3(Vector3(center.x, center.y, bounds.origin.z), size), array(3)),
        create(Box3(Vector3(bounds.origin.x, bounds.origin.y, center.z), size), array(4)),
        create(Box3(Vector3(center.x, bounds.origin.y, center.z), size), array(5)),
        create(Box3(Vector3(bounds.origin.x, center.y, center.z), size), array(6)),
        create(Box3(center, size), array(7)))
      new Oct(bounds, children)
    }
  }
}
/** Attractive force along an edge between the bodies at indices node1 and node2. */
private case class Spring(node1: Int, node2: Int, strength: Double, springConstant: Double) {
  private val factor = springConstant * strength
  // Hooke-style pull proportional to the displacement between the endpoints.
  def force(nodeA: Vector3, nodeB: Vector3) = (nodeA - nodeB) * factor
}
} | msiegenthaler/graph-layout | src/main/scala/ch/inventsoft/graph/layout/spring/BarnesHutLayout.scala | Scala | apache-2.0 | 6,573 |
package demo
package components
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import org.scalajs.dom
import org.scalajs.dom.ext.PimpedNodeList
/** React component rendering a <code> block highlighted with highlight.js. */
object CodeHighlight {
  val component = ReactComponentB[String]("CodeHighLighter")
    .render_P(P => <.code(^.`class` := "scala", ^.padding := "20px", P))
    .configure(installSyntaxHighlighting)
    .build

  /** Component config that re-applies highlighting after every mount and update. */
  def installSyntaxHighlighting[P, S, B, N <: TopNode] =
    (_: ReactComponentB[P, S, B, N])
      .componentDidMount(_ => applySyntaxHighlight)
      .componentDidUpdate(_ => applySyntaxHighlight)

  /** Runs the global `hljs` highlighter over every <code> element in the document. */
  def applySyntaxHighlight = Callback {
    import scala.scalajs.js.Dynamic.{global => g}
    val nodeList = dom.document.querySelectorAll("code").toArray
    nodeList.foreach(n => g.hljs.highlightBlock(n))
  }

  def apply(code: String) = component(code)
}
| oyvindberg/scalajs-react-components | demo/src/main/scala/demo/components/CodeHighlight.scala | Scala | apache-2.0 | 848 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.util
import java.util.{ArrayList, Arrays, List}
import org.apache.hadoop.hive.ql.exec.UDF
import org.apache.spark.sql.{CarbonEnv, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.expressions.{Expression, ScalaUDF}
import org.apache.spark.sql.secondaryindex.util.{CarbonInternalScalaUtil, FileInternalUtil, IndexTableUtil}
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.metadata.schema.indextable.IndexTableInfo
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
/**
* This class contains all carbon hive metadata related utilities
*/
/**
 * Utilities for keeping the Hive metastore in sync with Carbon table and
 * secondary-index metadata.
 */
object CarbonHiveMetadataUtil {

  @transient
  val LOGGER = LogServiceFactory.getLogService(CarbonHiveMetadataUtil.getClass.getName)

  /**
   * Invalidates the table in HiveMetastoreCatalog and drops it.
   * Failures are logged and rethrown.
   *
   * @param databaseName database the table belongs to
   * @param tableName    table to drop
   * @param sparkSession active session whose catalog is updated
   */
  def invalidateAndDropTable(databaseName: String,
      tableName: String,
      sparkSession: SparkSession): Unit = {
    try {
      // Fix: local was misspelled "tabelIdentifier".
      val tableIdentifier = TableIdentifier(tableName, Some(databaseName))
      sparkSession.sessionState.catalog.dropTable(tableIdentifier, true, false)
    } catch {
      case e: Exception =>
        LOGGER.error(
          s"Error While deleting the table $databaseName.$tableName during drop carbon table" +
          e.getMessage)
        throw e
    }
  }

  /** Refreshes the session catalog's cached metadata for dbName.tableName. */
  def refreshTable(dbName: String, tableName: String, sparkSession: SparkSession): Unit = {
    val tableWithDb = dbName + "." + tableName
    val tableIdent = sparkSession.sessionState.sqlParser.parseTableIdentifier(tableWithDb)
    sparkSession.sessionState.catalog.refreshTable(tableIdent)
  }

  /**
   * Removes a dropped index table's entry from its parent carbon table.
   * Unlike invalidateAndDropTable, failures here are logged and swallowed so
   * the surrounding drop flow can proceed.
   */
  def invalidateAndUpdateIndexInfo(indexTableIdentifier: TableIdentifier,
      indexInfo: String, parentCarbonTable: CarbonTable)(sparkSession: SparkSession): Unit = {
    // NOTE(review): `catalog` is never read below; kept in case
    // CarbonEnv.getInstance has a required initialization side effect — confirm.
    val catalog = CarbonEnv.getInstance(sparkSession).carbonMetaStore
    val dbName = indexTableIdentifier.database
      .getOrElse(CarbonCommonConstants.DATABASE_DEFAULT_NAME)
    val tableName = indexTableIdentifier.table
    try {
      if (indexInfo != null) {
        removeIndexInfoFromParentTable(indexInfo,
          parentCarbonTable,
          dbName,
          tableName)(sparkSession)
      }
    } catch {
      case e: Exception =>
        LOGGER.error(
          s"Error While deleting the table $dbName.$tableName during drop carbon table" +
          e.getMessage)
    }
  }

  /**
   * Drops tableName from the serialized index info, updates the parent table's
   * serde properties, touches the schema/store timestamps and refreshes the
   * parent table in the session catalog.
   */
  def removeIndexInfoFromParentTable(indexInfo: String,
      parentCarbonTable: CarbonTable,
      dbName: String,
      tableName: String)(sparkSession: SparkSession): Unit = {
    val parentTableName = parentCarbonTable.getTableName
    val newIndexInfo = removeIndexTable(indexInfo, dbName, tableName)
    CarbonInternalScalaUtil.removeIndexTableInfo(parentCarbonTable, tableName)
    sparkSession.sql(
      s"""ALTER TABLE $dbName.$parentTableName SET SERDEPROPERTIES ('indexInfo'='$newIndexInfo')
""".stripMargin)
    FileInternalUtil.touchSchemaFileTimestamp(dbName, parentTableName,
      parentCarbonTable.getTablePath, System.currentTimeMillis())
    FileInternalUtil.touchStoreTimeStamp()
    refreshTable(dbName, parentTableName, sparkSession)
  }

  /**
   * Removes the (dbName, tableName) entry from the gson-serialized index table
   * info; returns an empty-array gson when the input deserializes to null.
   */
  def removeIndexTable(gsonData: String, dbName: String, tableName: String): String = {
    val indexTableInfos: Array[IndexTableInfo] = IndexTableInfo.fromGson(gsonData)
    if (null == indexTableInfos) {
      IndexTableInfo.toGson(Array())
    } else {
      val indexTables = indexTableInfos.toList
        .filterNot(indexTable => indexTable.getDatabaseName.equalsIgnoreCase(dbName) &&
          indexTable.getTableName.equalsIgnoreCase(tableName))
      IndexTableInfo.toGson(indexTables.toArray)
    }
  }

  /** Strips every NI ("no index") marker UDF wrapper from the expression tree. */
  def transformToRemoveNI(expression: Expression): Expression = {
    expression.transform {
      case hiveUDF: HiveSimpleUDF if hiveUDF.function.isInstanceOf[NonIndexUDFExpression] =>
        hiveUDF.asInstanceOf[HiveSimpleUDF].children.head
      case scalaUDF: ScalaUDF if "NI".equalsIgnoreCase(scalaUDF.udfName.get) =>
        scalaUDF.children.head
    }
  }

  /** True when the condition is a NI marker UDF invocation (Hive or Scala form). */
  def checkNIUDF(condition: Expression): Boolean = {
    condition match {
      case hiveUDF: HiveSimpleUDF if hiveUDF.function.isInstanceOf[NonIndexUDFExpression] => true
      case scalaUDF: ScalaUDF if "NI".equalsIgnoreCase(scalaUDF.udfName.get) => true
      case _ => false
    }
  }

  /** Unwraps the child of a NI marker UDF; callers must check checkNIUDF first. */
  def getNIChildren(condition: Expression): Expression = {
    condition.asInstanceOf[HiveSimpleUDF].children.head
  }
}
/**
 * Marker UDF backing the NI() function. Always evaluates to true; it exists
 * only so expressions can be tagged and later detected/stripped by
 * checkNIUDF / transformToRemoveNI above.
 */
private class NonIndexUDFExpression extends UDF {
  def evaluate(input: Any): Boolean = true
}
| jackylk/incubator-carbondata | integration/spark/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetadataUtil.scala | Scala | apache-2.0 | 6,101 |
package uk.gov.gds.ier.transaction.overseas.parentsAddress
import uk.gov.gds.ier.transaction.overseas.InprogressOverseas
import uk.gov.gds.ier.step.StepTemplate
/** Mustache template for the overseas "parent's last UK address" postcode-lookup page. */
trait ParentsAddressLookupMustache extends StepTemplate[InprogressOverseas] {

  val title = "What was your parent or guardian's last UK address?"
  val questionNumber = ""

  /** View model: the page question plus the single postcode input field. */
  case class LookupModel (
    question: Question,
    postcode: Field
  ) extends MustacheData

  val mustache = MustacheTemplate("overseas/parentsAddressLookup") { (form, post) =>
    implicit val progressForm = form
    LookupModel(
      question = Question(
        postUrl = post.url,
        title = title,
        errorMessages = form.globalErrors.map(_.message)
      ),
      postcode = Field(
        id = keys.parentsAddress.postcode.asId(),
        name = keys.parentsAddress.postcode.key,
        value = form(keys.parentsAddress.postcode).value.getOrElse(""),
        // Mark the field invalid so the template can style validation failures.
        classes = if (form(keys.parentsAddress.postcode).hasErrors) {
          "invalid"
        } else {
          ""
        }
      )
    )
  }
}
| alphagov/ier-frontend | app/uk/gov/gds/ier/transaction/overseas/parentsAddress/ParentsAddressLookupMustache.scala | Scala | mit | 1,068 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers.should
import org.scalatest.FailureMessages
import org.scalatest.Resources
import org.scalatest.Assertion
import org.scalatest.Assertions
import org.scalatest.Suite
import org.scalatest.UnquotedString
import org.scalatest.CompileMacro
import org.scalactic._
import org.scalatest.enablers._
import org.scalatest.matchers._
import org.scalatest.matchers.dsl._
import org.scalatest.verbs.CompileWord
import org.scalatest.verbs.TypeCheckWord
import org.scalatest.verbs.ShouldVerb
import org.scalatest.matchers.dsl.ResultOfNoElementsOfApplication
import org.scalatest.matchers.dsl.ResultOfOneElementOfApplication
import scala.collection.GenTraversable
import scala.reflect.{classTag, ClassTag}
import scala.util.matching.Regex
import DefaultEquality.areEqualComparingArraysStructurally
import org.scalatest.matchers.MatchersHelper
import MatchersHelper.transformOperatorChars
import TripleEqualsSupport.Spread
import TripleEqualsSupport.TripleEqualsInvocation
import TripleEqualsSupport.TripleEqualsInvocationOnSpread
import ArrayHelper.deep
// SKIP-SCALATESTJS,NATIVE-START
import MatchersHelper.accessProperty
import MatchersHelper.matchSymbolToPredicateMethod
// SKIP-SCALATESTJS,NATIVE-END
import scala.language.experimental.macros
import scala.language.higherKinds
import MatchersHelper.endWithRegexWithGroups
import MatchersHelper.fullyMatchRegexWithGroups
import MatchersHelper.includeRegexWithGroups
import MatchersHelper.indicateFailure
import MatchersHelper.indicateSuccess
import MatchersHelper.newTestFailedException
import MatchersHelper.startWithRegexWithGroups
import org.scalatest.exceptions._
// TODO: drop generic support for be as an equality comparison, in favor of specific ones.
// TODO: Put links from ShouldMatchers to wherever I reveal the matrix and algo of how properties are checked dynamically.
// TODO: double check that I wrote tests for (length (7)) and (size (8)) in parens
// TODO: document how to turn off the === implicit conversion
// TODO: Document you can use JMock, EasyMock, etc.
/**
* Trait that provides a domain specific language (DSL) for expressing assertions in tests
* using the word <code>should</code>.
*
* <p>
* For example, if you mix <code>Matchers</code> into
* a suite class, you can write an equality assertion in that suite like this:
* </p>
*
* <pre class="stHighlight">
* result should equal (3)
* </pre>
*
* <p>
* Here <code>result</code> is a variable, and can be of any type. If the object is an
* <code>Int</code> with the value 3, execution will continue (<em>i.e.</em>, the expression will result
* in the unit value, <code>()</code>). Otherwise, a <a href="../../exceptions/TestFailedException.html"><code>TestFailedException</code></a>
* will be thrown with a detail message that explains the problem, such as <code>"7 did not equal 3"</code>.
* This <code>TestFailedException</code> will cause the test to fail.
* </p>
*
* <p>
* Here is a table of contents for this documentation:
* </p>
*
* <ul>
* <li><a href="#matchersMigration">Matchers migration in ScalaTest 2.0</a></li>
* <li><a href="#checkingEqualityWithMatchers">Checking equality with matchers</a></li>
* <li><a href="#checkingSizeAndLength">Checking size and length</a></li>
* <li><a href="#checkingStrings">Checking strings</a></li>
* <li><a href="#greaterAndLessThan">Greater and less than</a></li>
* <li><a href="#checkingBooleanPropertiesWithBe">Checking <code>Boolean</code> properties with <code>be</code></a></li>
* <li><a href="#usingCustomBeMatchers">Using custom <code>BeMatchers</code></a></li>
* <li><a href="#checkingObjectIdentity">Checking object identity</a></li>
* <li><a href="#checkingAnObjectsClass">Checking an object's class</a></li>
* <li><a href="#checkingNumbersAgainstARange">Checking numbers against a range</a></li>
* <li><a href="#checkingForEmptiness">Checking for emptiness</a></li>
* <li><a href="#workingWithContainers">Working with "containers"</a></li>
* <li><a href="#workingWithAggregations">Working with "aggregations"</a></li>
* <li><a href="#workingWithSequences">Working with "sequences"</a></li>
* <li><a href="#workingWithSortables">Working with "sortables"</a></li>
* <li><a href="#workingWithIterators">Working with iterators</a></li>
* <li><a href="#inspectorShorthands">Inspector shorthands</a></li>
* <li><a href="#singleElementCollections">Single-element collections</a></li>
* <li><a href="#javaCollectionsAndMaps">Java collections and maps</a></li>
* <li><a href="#stringsAndArraysAsCollections"><code>String</code>s and <code>Array</code>s as collections</a></li>
* <li><a href="#beAsAnEqualityComparison">Be as an equality comparison</a></li>
* <li><a href="#beingNegative">Being negative</a></li>
* <li><a href="#checkingThatCodeDoesNotCompile">Checking that a snippet of code does not compile</a></li>
* <li><a href="#logicalExpressions">Logical expressions with <code>and</code> and <code>or</code></a></li>
* <li><a href="#workingWithOptions">Working with <code>Option</code>s</a></li>
* <li><a href="#checkingArbitraryProperties">Checking arbitrary properties with <code>have</code></a></li>
* <li><a href="#lengthSizeHavePropertyMatchers">Using <code>length</code> and <code>size</code> with <code>HavePropertyMatcher</code>s</a></li>
* <li><a href="#matchingAPattern">Checking that an expression matches a pattern</a></li>
* <li><a href="#usingCustomMatchers">Using custom matchers</a></li>
* <li><a href="#checkingForExpectedExceptions">Checking for expected exceptions</a></li>
* <li><a href="#thosePeskyParens">Those pesky parens</a></li>
* </ul>
*
* <p>
* Trait <a href="../must/Matchers.html"><code>must.Matchers</code></a> is an alternative to <!-- PRESERVE --><code>should.Matchers</code> that provides the exact same
* meaning, syntax, and behavior as <!-- PRESERVE --><code>should.Matchers</code>, but uses the verb <code>must</code> instead of <!-- PRESERVE --><code>should</code>.
* The two traits differ only in the English semantics of the verb: <!-- PRESERVE --><code>should</code>
* is informal, making the code feel like conversation between the writer and the reader; <code>must</code> is more formal, making the code feel more like
* a written specification.
* </p>
*
* <a name="checkingEqualityWithMatchers"></a>
* <h2>Checking equality with matchers</h2>
*
* <p>
* ScalaTest matchers provides five different ways to check equality, each designed to address a different need. They are:
* </p>
*
* <pre class="stHighlight">
* result should equal (3) // can customize equality
* result should === (3) // can customize equality and enforce type constraints
* result should be (3) // cannot customize equality, so fastest to compile
* result shouldEqual 3 // can customize equality, no parentheses required
* result shouldBe 3 // cannot customize equality, so fastest to compile, no parentheses required
* </pre>
*
* <p>
* The “<code>left</code> <code>should</code> <code>equal</code> <code>(right)</code>” syntax requires an
* <code>org.scalactic.Equality[L]</code> to be provided (either implicitly or explicitly), where
* <code>L</code> is the left-hand type on which <code>should</code> is invoked. In the "<code>left</code> <code>should</code> <code>equal</code> <code>(right)</code>" case,
* for example, <code>L</code> is the type of <code>left</code>. Thus if <code>left</code> is type <code>Int</code>, the "<code>left</code> <code>should</code>
* <code>equal</code> <code>(right)</code>"
* statement would require an <code>Equality[Int]</code>.
* </p>
*
* <p>
* By default, an implicit <code>Equality[T]</code> instance is available for any type <code>T</code>, in which equality is implemented
* by simply invoking <code>==</code> on the <code>left</code>
* value, passing in the <code>right</code> value, with special treatment for arrays. If either <code>left</code> or <code>right</code> is an array, <code>deep</code>
* will be invoked on it before comparing with <em>==</em>. Thus, the following expression
* will yield false, because <code>Array</code>'s <code>equals</code> method compares object identity:
* </p>
*
* <pre class="stHighlight">
* Array(1, 2) == Array(1, 2) // yields false
* </pre>
*
* <p>
* The next expression will by default <em>not</em> result in a <code>TestFailedException</code>, because default <code>Equality[Array[Int]]</code> compares
* the two arrays structurally, taking into consideration the equality of the array's contents:
* </p>
*
* <pre class="stHighlight">
* Array(1, 2) should equal (Array(1, 2)) // succeeds (i.e., does not throw TestFailedException)
* </pre>
*
* <p>
* If you ever do want to verify that two arrays are actually the same object (have the same identity), you can use the
* <code>be theSameInstanceAs</code> syntax, <a href="#checkingObjectIdentity">described below</a>.
* </p>
*
* <p>
* You can customize the meaning of equality for a type when using "<code>should</code> <code>equal</code>," "<code>should</code> <code>===</code>,"
* or <code>shouldEqual</code> syntax by defining implicit <code>Equality</code> instances that will be used instead of default <code>Equality</code>.
* You might do this to normalize types before comparing them with <code>==</code>, for instance, or to avoid calling the <code>==</code> method entirely,
* such as if you want to compare <code>Double</code>s with a tolerance.
* For an example, see the main documentation of trait <code>org.scalactic.Equality</code>.
* </p>
*
* <p>
* You can always supply implicit parameters explicitly, but in the case of implicit parameters of type <code>Equality[T]</code>, Scalactic provides a
 * simple "explicitly" DSL. For example, here's how you could explicitly supply an <code>Equality[String]</code> instance that normalizes both left and right
* sides (which must be strings), by transforming them to lowercase:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalatest.matchers.should.Matchers._
* import org.scalatest.matchers.should.Matchers._
*
* scala> import org.scalactic.Explicitly._
* import org.scalactic.Explicitly._
*
* scala> import org.scalactic.StringNormalizations._
* import org.scalactic.StringNormalizations._
*
* scala> "Hi" should equal ("hi") (after being lowerCased)
* </pre>
*
* <p>
* The <code>after</code> <code>being</code> <code>lowerCased</code> expression results in an <code>Equality[String]</code>, which is then passed
 * explicitly as the second curried parameter to <code>equal</code>. For more information on the explicitly DSL, see the main documentation
* for trait <code>org.scalactic.Explicitly</code>.
* </p>
*
* <p>
* The "<code>should</code> <code>be</code>" and <code>shouldBe</code> syntax do not take an <code>Equality[T]</code> and can therefore not be customized.
* They always use the default approach to equality described above. As a result, "<code>should</code> <code>be</code>" and <code>shouldBe</code> will
* likely be the fastest-compiling matcher syntax for equality comparisons, since the compiler need not search for
* an implicit <code>Equality[T]</code> each time.
* </p>
*
* <p>
* The <code>should</code> <code>===</code> syntax (and its complement, <code>should</code> <code>!==</code>) can be used to enforce type
* constraints at compile-time between the left and right sides of the equality comparison. Here's an example:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalatest.matchers.should.Matchers._
* import org.scalatest.matchers.should.Matchers._
*
* scala> import org.scalactic.TypeCheckedTripleEquals._
* import org.scalactic.TypeCheckedTripleEquals._
*
* scala> Some(2) should === (2)
* <console>:17: error: types Some[Int] and Int do not adhere to the equality constraint
* selected for the === and !== operators; the missing implicit parameter is of
* type org.scalactic.CanEqual[Some[Int],Int]
* Some(2) should === (2)
* ^
* </pre>
*
* <p>
* By default, the "<code>Some(2)</code> <code>should</code> <code>===</code> <code>(2)</code>" statement would fail at runtime. By mixing in
* the equality constraints provided by <code>TypeCheckedTripleEquals</code>, however, the statement fails to compile. For more information
* and examples, see the main documentation for trait <code>org.scalactic.TypeCheckedTripleEquals</code>.
* </p>
*
* <a name="checkingSizeAndLength"></a>
* <h2>Checking size and length</h2>
*
* <p>
* You can check the size or length of any type of object for which it
* makes sense. Here's how checking for length looks:
* </p>
* <pre class="stHighlight">
* result should have length 3
* </pre>
*
* <p>
* Size is similar:
* </p>
*
* <pre class="stHighlight">
* result should have size 10
* </pre>
*
* <p>
* The <code>length</code> syntax can be used with <code>String</code>, <code>Array</code>, any <code>scala.collection.GenSeq</code>,
* any <code>java.util.List</code>, and any type <code>T</code> for which an implicit <code>Length[T]</code> type class is
* available in scope.
* Similarly, the <code>size</code> syntax can be used with <code>Array</code>, any <code>scala.collection.GenTraversable</code>,
* any <code>java.util.Collection</code>, any <code>java.util.Map</code>, and any type <code>T</code> for which an implicit <code>Size[T]</code> type class is
* available in scope. You can enable the <code>length</code> or <code>size</code> syntax for your own arbitrary types, therefore,
* by defining <a href="../../enablers/Length.html"><code>Length</code></a> or <a href="../../enablers/Size.html"><code>Size</code></a> type
* classes for those types.
* </p>
*
* <p>
* In addition, the <code>length</code> syntax can be used with any object that has a field or method named <code>length</code>
* or a method named <code>getLength</code>. Similarly, the <code>size</code> syntax can be used with any
* object that has a field or method named <code>size</code> or a method named <code>getSize</code>.
* The type of a <code>length</code> or <code>size</code> field, or return type of a method, must be either <code>Int</code>
* or <code>Long</code>. Any such method must take no parameters. (The Scala compiler will ensure at compile time that
* the object on which <code>should</code> is being invoked has the appropriate structure.)
* </p>
*
* <a name="checkingStrings"></a>
* <h2>Checking strings</h2>
*
* <p>
* You can check for whether a string starts with, ends with, or includes a substring like this:
* </p>
*
* <pre class="stHighlight">
* string should startWith ("Hello")
* string should endWith ("world")
* string should include ("seven")
* </pre>
*
* <p>
* You can check for whether a string starts with, ends with, or includes a regular expression, like this:
* </p>
*
* <pre class="stHighlight">
* string should startWith regex "Hel*o"
* string should endWith regex "wo.ld"
* string should include regex "wo.ld"
* </pre>
*
* <p>
* And you can check whether a string fully matches a regular expression, like this:
* </p>
*
* <pre class="stHighlight">
* string should fullyMatch regex """(-)?(\\d+)(\\.\\d*)?"""
* </pre>
*
* <p>
* The regular expression passed following the <code>regex</code> token can be either a <code>String</code>
* or a <code>scala.util.matching.Regex</code>.
* </p>
*
* <p>
* With the <code>startWith</code>, <code>endWith</code>, <code>include</code>, and <code>fullyMatch</code>
* tokens can also be used with an optional specification of required groups, like this:
* </p>
*
* <pre class="stHighlight">
* "abbccxxx" should startWith regex ("a(b*)(c*)" withGroups ("bb", "cc"))
* "xxxabbcc" should endWith regex ("a(b*)(c*)" withGroups ("bb", "cc"))
* "xxxabbccxxx" should include regex ("a(b*)(c*)" withGroups ("bb", "cc"))
* "abbcc" should fullyMatch regex ("a(b*)(c*)" withGroups ("bb", "cc"))
* </pre>
*
* <p>
* You can check whether a string is empty with <code>empty</code>:
* </p>
*
* <pre class="stHighlight">
* s shouldBe empty
* </pre>
*
* <p>
* You can also use most of ScalaTest's matcher syntax for collections on <code>String</code> by
* treating the <code>String</code>s as collections of characters. For examples, see the
* <a href="#stringsAndArraysAsCollections"><code>String</code>s and <code>Array</code>s as collections</a> section below.
* </p>
*
* <a name="greaterAndLessThan"></a>
* <h2>Greater and less than</h2>
*
* <p>
* You can check whether any type for which an implicit <code>Ordering[T]</code> is available
* is greater than, less than, greater than or equal, or less
* than or equal to a value of type <code>T</code>. The syntax is:
* </p>
* <pre class="stHighlight">
* one should be < 7
* one should be > 0
* one should be <= 7
* one should be >= 0
* </pre>
*
* <a name="checkingBooleanPropertiesWithBe"></a>
* <h2>Checking <code>Boolean</code> properties with <code>be</code></h2>
*
* <p>
* If an object has a method that takes no parameters and returns boolean, you can check
* it by placing a <code>Symbol</code> (after <code>be</code>) that specifies the name
* of the method (excluding an optional prefix of "<code>is</code>"). A symbol literal
* in Scala begins with a tick mark and ends at the first non-identifier character. Thus,
* <code>'traversableAgain</code> results in a <code>Symbol</code> object at runtime, as does
* <code>'completed</code> and <code>'file</code>. Here's an example:
* </p>
*
* <pre class="stHighlight">
* iter shouldBe 'traversableAgain
* </pre>
*
 * Given this code, ScalaTest will use reflection to look on the object referenced from
 * <code>iter</code> for a method that takes no parameters and results in <code>Boolean</code>,
 * with either the name <code>traversableAgain</code> or <code>isTraversableAgain</code>. If found, it will invoke
 * that method. If the method returns <code>true</code>, execution will continue. But if it returns
 * <code>false</code>, a <code>TestFailedException</code> will be thrown that will contain a detail message, such as:
*
* <pre class="stHighlight">
* non-empty iterator was not traversableAgain
* </pre>
*
* <p>
* This <code>be</code> syntax can be used with any reference (<code>AnyRef</code>) type. If the object does
* not have an appropriately named predicate method, you'll get a <code>TestFailedException</code>
* at runtime with a detailed message that explains the problem.
* (For the details on how a field or method is selected during this
* process, see the documentation for <a href="matchers.dsl/BeWord.html"><code>BeWord</code></a>.)
* </p>
*
* <p>
* If you think it reads better, you can optionally put <code>a</code> or <code>an</code> after
* <code>be</code>. For example, <code>java.io.File</code> has two predicate methods,
* <code>isFile</code> and <code>isDirectory</code>. Thus with a <code>File</code> object
* named <code>temp</code>, you could write:
* </p>
*
* <pre class="stHighlight">
* temp should be a 'file
* </pre>
*
* <p>
* Or, given <code>java.awt.event.KeyEvent</code> has a method <code>isActionKey</code> that takes
* no arguments and returns <code>Boolean</code>, you could assert that a <code>KeyEvent</code> is
* an action key with:
*</p>
*
* <pre class="stHighlight">
* keyEvent should be an 'actionKey
* </pre>
*
* <p>
* If you prefer to check <code>Boolean</code> properties in a type-safe manner, you can use a <code>BePropertyMatcher</code>.
* This would allow you to write expressions such as:
* </p>
*
* <pre class="stHighlight">
* xs shouldBe traversableAgain
* temp should be a file
* keyEvent should be an actionKey
* </pre>
*
* <p>
* These expressions would fail to compile if <code>should</code> is used on an inappropriate type, as determined
* by the type parameter of the <code>BePropertyMatcher</code> being used. (For example, <code>file</code> in this example
* would likely be of type <code>BePropertyMatcher[java.io.File]</code>. If used with an appropriate type, such an expression will compile
* and at run time the <code>Boolean</code> property method or field will be accessed directly; <em>i.e.</em>, no reflection will be used.
* See the documentation for <a href="matchers/BePropertyMatcher.html"><code>BePropertyMatcher</code></a> for more information.
* </p>
*
* <a name="usingCustomBeMatchers"></a>
* <h2>Using custom <code>BeMatchers</code></h2>
*
* If you want to create a new way of using <code>be</code>, which doesn't map to an actual property on the
* type you care about, you can create a <code>BeMatcher</code>. You could use this, for example, to create <code>BeMatcher[Int]</code>
* called <code>odd</code>, which would match any odd <code>Int</code>, and <code>even</code>, which would match
* any even <code>Int</code>.
* Given this pair of <code>BeMatcher</code>s, you could check whether an <code>Int</code> was odd or even with expressions like:
* </p>
*
* <pre class="stHighlight">
* num shouldBe odd
* num should not be even
* </pre>
*
* For more information, see the documentation for <a href="matchers/BeMatcher.html"><code>BeMatcher</code></a>.
*
* <a name="checkingObjectIdentity"></a>
* <h2>Checking object identity</h2>
*
* <p>
* If you need to check that two references refer to the exact same object, you can write:
* </p>
*
* <pre class="stHighlight">
* ref1 should be theSameInstanceAs ref2
* </pre>
*
* <a name="checkingAnObjectsClass"></a>
* <h2>Checking an object's class</h2>
*
* <p>
* If you need to check that an object is an instance of a particular class or trait, you can supply the type to
* “<code>be</code> <code>a</code>” or “<code>be</code> <code>an</code>”:
* </p>
*
* <pre class="stHighlight">
* result1 shouldBe a [Tiger]
* result1 should not be an [Orangutan]
* </pre>
*
* <p>
* Because type parameters are erased on the JVM, we recommend you insert an underscore for any type parameters
* when using this syntax. Both of the following test only that the result is an instance of <code>List[_]</code>, because at
* runtime the type parameter has been erased:
* </p>
*
* <pre class="stHighlight">
* result shouldBe a [List[_]] // recommended
* result shouldBe a [List[Fruit]] // discouraged
* </pre>
*
* <a name="checkingNumbersAgainstARange"></a>
* <h2>Checking numbers against a range</h2>
*
* <p>
* Often you may want to check whether a number is within a
* range. You can do that using the <code>+-</code> operator, like this:
* </p>
*
* <pre class="stHighlight">
* sevenDotOh should equal (6.9 +- 0.2)
* sevenDotOh should === (6.9 +- 0.2)
* sevenDotOh should be (6.9 +- 0.2)
* sevenDotOh shouldEqual 6.9 +- 0.2
* sevenDotOh shouldBe 6.9 +- 0.2
* </pre>
*
* <p>
* Any of these expressions will cause a <code>TestFailedException</code> to be thrown if the floating point
* value, <code>sevenDotOh</code> is outside the range <code>6.7</code> to <code>7.1</code>.
* You can use <code>+-</code> with any type <code>T</code> for which an implicit <code>Numeric[T]</code> exists, such as integral types:
* </p>
*
* <pre class="stHighlight">
* seven should equal (6 +- 2)
* seven should === (6 +- 2)
* seven should be (6 +- 2)
* seven shouldEqual 6 +- 2
* seven shouldBe 6 +- 2
* </pre>
*
* <a name="checkingForEmptiness"></a>
* <h2>Checking for emptiness</h2>
*
* <p>
* You can check whether an object is "empty", like this:
* </p>
*
* <pre class="stHighlight">
* traversable shouldBe empty
* javaMap should not be empty
* </pre>
*
* <p>
* The <code>empty</code> token can be used with any type <code>L</code> for which an implicit <code>Emptiness[L]</code> exists.
* The <code>Emptiness</code> companion object provides implicits for <code>GenTraversable[E]</code>, <code>java.util.Collection[E]</code>,
* <code>java.util.Map[K, V]</code>, <code>String</code>, <code>Array[E]</code>, and <code>Option[E]</code>. In addition, the
* <code>Emptiness</code> companion object provides structural implicits for types that declare an <code>isEmpty</code> method that
* returns a <code>Boolean</code>. Here are some examples:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalatest.matchers.should.Matchers._
* import org.scalatest.matchers.should.Matchers._
*
* scala> List.empty shouldBe empty
*
* scala> None shouldBe empty
*
* scala> Some(1) should not be empty
*
* scala> "" shouldBe empty
*
* scala> new java.util.HashMap[Int, Int] shouldBe empty
*
* scala> new { def isEmpty = true} shouldBe empty
*
* scala> Array(1, 2, 3) should not be empty
* </pre>
*
* <a name="workingWithContainers"></a>
* <h2>Working with "containers"</h2>
*
* <p>
* You can check whether a collection contains a particular element like this:
* </p>
*
* <pre class="stHighlight">
* traversable should contain ("five")
* </pre>
*
* <p>
* The <code>contain</code> syntax shown above can be used with any type <code>C</code> that has a "containing" nature, evidenced by
* an implicit <code>org.scalatest.enablers.Containing[L]</code>, where <code>L</code> is left-hand type on
* which <code>should</code> is invoked. In the <code>Containing</code>
* companion object, implicits are provided for types <code>GenTraversable[E]</code>, <code>java.util.Collection[E]</code>,
* <code>java.util.Map[K, V]</code>, <code>String</code>, <code>Array[E]</code>, and <code>Option[E]</code>.
* Here are some examples:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalatest.matchers.should.Matchers._
* import org.scalatest.matchers.should.Matchers._
*
* scala> List(1, 2, 3) should contain (2)
*
* scala> Map('a' -> 1, 'b' -> 2, 'c' -> 3) should contain ('b' -> 2)
*
* scala> Set(1, 2, 3) should contain (2)
*
* scala> Array(1, 2, 3) should contain (2)
*
* scala> "123" should contain ('2')
*
* scala> Some(2) should contain (2)
* </pre>
*
* <p>
* ScalaTest's implicit methods that provide the <code>Containing[L]</code> type classes require an <code>Equality[E]</code>, where
* <code>E</code> is an element type. For example, to obtain a <code>Containing[Array[Int]]</code> you must supply an <code>Equality[Int]</code>,
* either implicitly or explicitly. The <code>contain</code> syntax uses this <code>Equality[E]</code> to determine containership.
* Thus if you want to change how containership is determined for an element type <code>E</code>, place an implicit <code>Equality[E]</code>
* in scope or use the explicitly DSL. Although the implicit parameter required for the <code>contain</code> syntax is of type <code>Containing[L]</code>,
* implicit conversions are provided in the <code>Containing</code> companion object from <code>Equality[E]</code> to the various
* types of containers of <code>E</code>. Here's an example:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalatest.matchers.should.Matchers._
* import org.scalatest.matchers.should.Matchers._
*
* scala> List("Hi", "Di", "Ho") should contain ("ho")
* org.scalatest.exceptions.TestFailedException: List(Hi, Di, Ho) did not contain element "ho"
* at ...
*
* scala> import org.scalactic.Explicitly._
* import org.scalactic.Explicitly._
*
* scala> import org.scalactic.StringNormalizations._
* import org.scalactic.StringNormalizations._
*
* scala> (List("Hi", "Di", "Ho") should contain ("ho")) (after being lowerCased)
* </pre>
*
* <p>
* Note that when you use the explicitly DSL with <code>contain</code> you need to wrap the entire
* <code>contain</code> expression in parentheses, as shown here.
* </p>
*
* <pre>
* (List("Hi", "Di", "Ho") should contain ("ho")) (after being lowerCased)
* ^ ^
* </pre>
*
* <p>
* In addition to determining whether an object contains another object, you can use <code>contain</code> to
* make other determinations.
* For example, the <code>contain</code> <code>oneOf</code> syntax ensures that one and only one of the specified elements are
* contained in the containing object:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3, 4, 5) should contain oneOf (5, 7, 9)
* Some(7) should contain oneOf (5, 7, 9)
* "howdy" should contain oneOf ('a', 'b', 'c', 'd')
* </pre>
*
* <p>
* Note that if multiple specified elements appear in the containing object, <code>oneOf</code> will fail:
* </p>
*
* <pre class="stREPL">
* scala> List(1, 2, 3) should contain oneOf (2, 3, 4)
* org.scalatest.exceptions.TestFailedException: List(1, 2, 3) did not contain one (and only one) of (2, 3, 4)
* at ...
* </pre>
*
* <p>
* If you really want to ensure one or more of the specified elements are contained in the containing object,
* use <code>atLeastOneOf</code>, described below, instead of <code>oneOf</code>. Keep in mind, <code>oneOf</code>
* means "<em>exactly</em> one of."
* </p>
*
* <p>
* Note also that with any <code>contain</code> syntax, you can place custom implicit <code>Equality[E]</code> instances in scope
* to customize how containership is determined, or use the explicitly DSL. Here's an example:
* </p>
*
* <pre class="stHighlight">
* (Array("Doe", "Ray", "Me") should contain oneOf ("X", "RAY", "BEAM")) (after being lowerCased)
* </pre>
*
* <p>
* If you have a collection of elements that you'd like to use in a "one of" comparison, you can use "oneElementOf," like this:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3, 4, 5) should contain oneElementOf List(5, 7, 9)
* Some(7) should contain oneElementOf Vector(5, 7, 9)
* "howdy" should contain oneElementOf Set('a', 'b', 'c', 'd')
* (Array("Doe", "Ray", "Me") should contain oneElementOf List("X", "RAY", "BEAM")) (after being lowerCased)
* </pre>
*
* <p>
* The <code>contain</code> <code>noneOf</code> syntax does the opposite of <code>oneOf</code>: it ensures none of the specified elements
* are contained in the containing object:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3, 4, 5) should contain noneOf (7, 8, 9)
* Some(0) should contain noneOf (7, 8, 9)
* "12345" should contain noneOf ('7', '8', '9')
* </pre>
*
* <p>
* If you have a collection of elements that you'd like to use in a "none of" comparison, you can use "noElementsOf," like this:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3, 4, 5) should contain noElementsOf List(7, 8, 9)
* Some(0) should contain noElementsOf Vector(7, 8, 9)
* "12345" should contain noElementsOf Set('7', '8', '9')
* </pre>
*
* <a name="workingWithAggregations"></a>
* <h2>Working with "aggregations"</h2>
*
* <p>
* As mentioned, the "<code>contain</code>," "<code>contain</code> <code>oneOf</code>," and "<code>contain</code> <code>noneOf</code>" syntax requires a
* <code>Containing[L]</code> be provided, where <code>L</code> is the left-hand type. Other <code>contain</code> syntax, which
* will be described in this section, requires an <code>Aggregating[L]</code> be provided, where again <code>L</code> is the left-hand type.
* (An <code>Aggregating[L]</code> instance defines the "aggregating nature" of a type <code>L</code>.)
* The reason, essentially, is that <code>contain</code> syntax that makes sense for <code>Option</code> is enabled by
* <code>Containing[L]</code>, whereas syntax that does <em>not</em> make sense for <code>Option</code> is enabled
* by <code>Aggregating[L]</code>. For example, it doesn't make sense to assert that an <code>Option[Int]</code> contains all of a set of integers, as it
* could only ever contain one of them. But this does make sense for a type such as <code>List[Int]</code> that can aggregate zero to many integers.
* </p>
*
* <p>
* The <code>Aggregating</code> companion object provides implicit instances of <code>Aggregating[L]</code>
* for types <code>GenTraversable[E]</code>, <code>java.util.Collection[E]</code>,
* <code>java.util.Map[K, V]</code>, <code>String</code>, <code>Array[E]</code>. Note that these are the same types as are supported with
* <code>Containing</code>, but with <code>Option[E]</code> missing.
* Here are some examples:
* </p>
*
* <p>
* The <code>contain</code> <code>atLeastOneOf</code> syntax, for example, works for any type <code>L</code> for which an <code>Aggregating[L]</code> exists. It ensures
* that at least one of (<em>i.e.</em>, one or more of) the specified objects are contained in the containing object:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain atLeastOneOf (2, 3, 4)
* Array(1, 2, 3) should contain atLeastOneOf (3, 4, 5)
* "abc" should contain atLeastOneOf ('c', 'a', 't')
* </pre>
*
* <p>
* Similar to <code>Containing[L]</code>, the implicit methods that provide the <code>Aggregating[L]</code> instances require an <code>Equality[E]</code>, where
* <code>E</code> is an element type. For example, to obtain a <code>Aggregating[Vector[String]]</code> you must supply an <code>Equality[String]</code>,
* either implicitly or explicitly. The <code>contain</code> syntax uses this <code>Equality[E]</code> to determine containership.
* Thus if you want to change how containership is determined for an element type <code>E</code>, place an implicit <code>Equality[E]</code>
* in scope or use the explicitly DSL. Although the implicit parameter required for the <code>contain</code> syntax is of type <code>Aggregating[L]</code>,
* implicit conversions are provided in the <code>Aggregating</code> companion object from <code>Equality[E]</code> to the various
* types of aggregations of <code>E</code>. Here's an example:
* </p>
*
* <pre class="stHighlight">
* (Vector(" A", "B ") should contain atLeastOneOf ("a ", "b", "c")) (after being lowerCased and trimmed)
* </pre>
*
* <p>
* If you have a collection of elements that you'd like to use in an "at least one of" comparison, you can use "atLeastOneElementOf," like this:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain atLeastOneElementOf List(2, 3, 4)
* Array(1, 2, 3) should contain atLeastOneElementOf Vector(3, 4, 5)
* "abc" should contain atLeastOneElementOf Set('c', 'a', 't')
* (Vector(" A", "B ") should contain atLeastOneElementOf List("a ", "b", "c")) (after being lowerCased and trimmed)
* </pre>
*
* <p>
* The "<code>contain</code> <code>atMostOneOf</code>" syntax lets you specify a set of objects at most one of which should be contained in the containing object:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3, 4, 5) should contain atMostOneOf (5, 6, 7)
* </pre>
*
* <p>
 * If you have a collection of elements that you'd like to use in an "at most one of" comparison, you can use "atMostOneElementOf," like this:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3, 4, 5) should contain atMostOneElementOf Vector(5, 6, 7)
* </pre>
*
* <p>
* The "<code>contain</code> <code>allOf</code>" syntax lets you specify a set of objects that should all be contained in the containing object:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3, 4, 5) should contain allOf (2, 3, 5)
* </pre>
*
* <p>
 * If you have a collection of elements that you'd like to use in an "all of" comparison, you can use "allElementsOf," like this:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3, 4, 5) should contain allElementsOf Array(2, 3, 5)
* </pre>
*
* <p>
* The "<code>contain</code> <code>only</code>" syntax lets you assert that the containing object contains <em>only</em> the specified objects, though it may
* contain more than one of each:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3, 2, 1) should contain only (1, 2, 3)
* </pre>
*
* <p>
* The "<code>contain</code> <code>theSameElementsAs</code>" and "<code>contain</code> <code>theSameElementsInOrderAs</code> syntax differ from the others
* in that the right hand side is a <code>GenTraversable[_]</code> rather than a varargs of <code>Any</code>. (Note: in a future 2.0 milestone release, possibly
* 2.0.M6, these will likely be widened to accept any type <code>R</code> for which an <code>Aggregating[R]</code> exists.)
* </p>
*
* <p>
* The "<code>contain</code> <code>theSameElementsAs</code>" syntax lets you assert that two aggregations contain the same objects:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 2, 3, 3, 3) should contain theSameElementsAs Vector(3, 2, 3, 1, 2, 3)
* </pre>
*
* <p>
 * The number of times any family of equal objects appears must also be the same in both the left and right aggregations,
 * though the order in which the elements appear is irrelevant. For example, if
 * the last 3 is left out of the right-hand list in the previous example, the expression would fail because the left side
 * has three 3's and the right hand side has only two:
* </p>
*
* <pre class="stREPL">
* List(1, 2, 2, 3, 3, 3) should contain theSameElementsAs Vector(3, 2, 3, 1, 2)
* org.scalatest.exceptions.TestFailedException: List(1, 2, 2, 3, 3, 3) did not contain the same elements as Vector(3, 2, 3, 1, 2)
* at ...
* </pre>
*
* <p>
* Note that no <code>onlyElementsOf</code> matcher is provided, because it would have the same
* behavior as <code>theSameElementsAs</code>. (<em>I.e.</em>, if you were looking for <code>onlyElementsOf</code>, please use <code>theSameElementsAs</code>
* instead.)
* </p>
*
 *
* <a name="workingWithSequences"></a>
* <h2>Working with "sequences"</h2>
*
* <p>
* The rest of the <code>contain</code> syntax, which
* will be described in this section, requires a <code>Sequencing[L]</code> be provided, where again <code>L</code> is the left-hand type.
* (A <code>Sequencing[L]</code> instance defines the "sequencing nature" of a type <code>L</code>.)
* The reason, essentially, is that <code>contain</code> syntax that implies an "order" of elements makes sense only for types that place elements in a sequence.
* For example, it doesn't make sense to assert that a <code>Map[String, Int]</code> or <code>Set[Int]</code> contains all of a set of integers in a particular
* order, as these types don't necessarily define an order for their elements. But this does make sense for a type such as <code>Seq[Int]</code> that does define
* an order for its elements.
* </p>
*
* <p>
* The <code>Sequencing</code> companion object provides implicit instances of <code>Sequencing[L]</code>
* for types <code>GenSeq[E]</code>, <code>java.util.List[E]</code>,
* <code>String</code>, and <code>Array[E]</code>.
* Here are some examples:
* </p>
*
* <p>
 * Similar to <code>Containing[L]</code>, the implicit methods that provide the <code>Sequencing[L]</code> instances require an <code>Equality[E]</code>, where
 * <code>E</code> is an element type. For example, to obtain a <code>Sequencing[Vector[String]]</code> you must supply an <code>Equality[String]</code>,
 * either implicitly or explicitly. The <code>contain</code> syntax uses this <code>Equality[E]</code> to determine containership.
 * Thus if you want to change how containership is determined for an element type <code>E</code>, place an implicit <code>Equality[E]</code>
 * in scope or use the explicitly DSL. Although the implicit parameter required for the <code>contain</code> syntax is of type <code>Sequencing[L]</code>,
 * implicit conversions are provided in the <code>Sequencing</code> companion object from <code>Equality[E]</code> to the various
 * types of sequences of <code>E</code>. Here's an example:
* </p>
*
* <p>
* The "<code>contain</code> <code>inOrderOnly</code>" syntax lets you assert that the containing object contains <em>only</em> the specified objects, in order.
* The specified objects may appear multiple times, but must appear in the order they appear in the right-hand list. Here's an example:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 2, 3, 3, 3) should contain inOrderOnly (1, 2, 3)
* </pre>
*
* <p>
 * The "<code>contain</code> <code>inOrder</code>" syntax lets you assert that the containing object contains the specified objects in order, like
 * <code>inOrderOnly</code>, but allows other objects to appear in the left-hand aggregation as well:
* </p>
*
* <pre class="stHighlight">
* List(0, 1, 2, 2, 99, 3, 3, 3, 5) should contain inOrder (1, 2, 3)
* </pre>
*
* <p>
* If you have a collection of elements that you'd like to use in a "in order" comparison, you can use "inOrderElementsOf," like this:
* </p>
*
* <pre class="stHighlight">
* List(0, 1, 2, 2, 99, 3, 3, 3, 5) should contain inOrderElementsOf Array(1, 2, 3)
* </pre>
*
* <p>
* Note that "order" in <code>inOrder</code>, <code>inOrderOnly</code>, and <code>theSameElementsInOrderAs</code> (described below)
 * in the <code>Sequencing[L]</code> instances built-in to ScalaTest is defined as "iteration order".
* </p>
*
* <p>
* Lastly, the "<code>contain</code> <code>theSameElementsInOrderAs</code>" syntax lets you assert that two aggregations contain
* the same exact elements in the same (iteration) order:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain theSameElementsInOrderAs collection.mutable.TreeSet(3, 2, 1)
* </pre>
*
* <p>
 * The previous assertion succeeds because the iteration order of a <code>TreeSet</code> is the natural
* ordering of its elements, which in this case is 1, 2, 3. An iterator obtained from the left-hand <code>List</code> will produce the same elements
* in the same order.
* </p>
*
* <p>
* Note that no <code>inOrderOnlyElementsOf</code> matcher is provided, because it would have the same
* behavior as <code>theSameElementsInOrderAs</code>. (<em>I.e.</em>, if you were looking for <code>inOrderOnlyElementsOf</code>, please use <code>theSameElementsInOrderAs</code>
* instead.)
* </p>
*
* <a name="workingWithSortables"></a>
* <h2>Working with "sortables"</h2>
*
* <p>
* You can also ask whether the elements of "sortable" objects (such as <code>Array</code>s, Java <code>List</code>s, and <code>GenSeq</code>s)
* are in sorted order, like this:
* </p>
*
* <pre class="stHighlight">
* List(1, 2, 3) shouldBe sorted
* </pre>
*
* <a name="workingWithIterators"></a>
* <h2>Working with iterators</h2>
*
* <p>
 * Although it seems desirable to provide similar matcher syntax for Scala and Java iterators to that provided for sequences like
* <code>Seq</code>s, <code>Array</code>, and <code>java.util.List</code>, the
* ephemeral nature of iterators makes this problematic. Some syntax (such as <code>should</code> <code>contain</code>) is relatively straightforward to
* support on iterators, but other syntax (such
* as, for example, <code>Inspector</code> expressions on nested iterators) is not. Rather
* than allowing inconsistencies between sequences and iterators in the API, we chose to not support any such syntax directly on iterators:
*
* <pre class="stHighlight">
* scala> val it = List(1, 2, 3).iterator
* it: Iterator[Int] = non-empty iterator
*
* scala> it should contain (2)
* <console>:15: error: could not find implicit value for parameter typeClass1: org.scalatest.enablers.Containing[Iterator[Int]]
* it should contain (2)
* ^
* </pre>
*
* <p>
* Instead, you will need to convert your iterators to a sequence explicitly before using them in matcher expressions:
* </p>
*
* <pre class="stHighlight">
* scala> it.toStream should contain (2)
* </pre>
*
* <p>
* We recommend you convert (Scala or Java) iterators to <code>Stream</code>s, as shown in the previous example, so that you can
* continue to reap any potential benefits provided by the laziness of the underlying iterator.
* </p>
*
* <a name="inspectorShorthands"></a>
* <h2>Inspector shorthands</h2>
*
* <p>
* You can use the <a href="../../Inspectors.html"><code>Inspectors</code></a> syntax with matchers as well as assertions. If you have a multi-dimensional collection, such as a
* list of lists, using <code>Inspectors</code> is your best option:
* </p>
*
* <pre class="stHighlight">
* val yss =
* List(
* List(1, 2, 3),
* List(1, 2, 3),
* List(1, 2, 3)
* )
*
* forAll (yss) { ys =>
* forAll (ys) { y => y should be > 0 }
* }
* </pre>
*
* <p>
* For assertions on one-dimensional collections, however, matchers provides "inspector shorthands." Instead of writing:
* </p>
*
* <pre class="stHighlight">
* val xs = List(1, 2, 3)
* forAll (xs) { x => x should be < 10 }
* </pre>
*
* <p>
* You can write:
* </p>
*
* <pre class="stHighlight">
* all (xs) should be < 10
* </pre>
*
* <p>
* The previous statement asserts that all elements of the <code>xs</code> list should be less than 10.
* All of the inspectors have shorthands in matchers. Here is the full list:
* </p>
*
* <ul>
* <li><code>all</code> - succeeds if the assertion holds true for every element</li>
* <li><code>atLeast</code> - succeeds if the assertion holds true for at least the specified number of elements</li>
* <li><code>atMost</code> - succeeds if the assertion holds true for at most the specified number of elements</li>
* <li><code>between</code> - succeeds if the assertion holds true for between the specified minimum and maximum number of elements, inclusive</li>
* <li><code>every</code> - same as <code>all</code>, but lists all failing elements if it fails (whereas <code>all</code> just reports the first failing element)</li>
* <li><code>exactly</code> - succeeds if the assertion holds true for exactly the specified number of elements</li>
* </ul>
*
* <p>
* Here are some examples:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalatest.matchers.should.Matchers._
* import org.scalatest.matchers.should.Matchers._
*
* scala> val xs = List(1, 2, 3, 4, 5)
* xs: List[Int] = List(1, 2, 3, 4, 5)
*
* scala> all (xs) should be > 0
*
* scala> atMost (2, xs) should be >= 4
*
* scala> atLeast (3, xs) should be < 5
*
* scala> between (2, 3, xs) should (be > 1 and be < 5)
*
* scala> exactly (2, xs) should be <= 2
*
* scala> every (xs) should be < 10
*
* scala> // And one that fails...
*
* scala> exactly (2, xs) shouldEqual 2
* org.scalatest.exceptions.TestFailedException: 'exactly(2)' inspection failed, because only 1 element
* satisfied the assertion block at index 1:
* at index 0, 1 did not equal 2,
* at index 2, 3 did not equal 2,
* at index 3, 4 did not equal 2,
* at index 4, 5 did not equal 2
* in List(1, 2, 3, 4, 5)
* at ...
* </pre>
*
* <p>
 * Like <a href="../../Inspectors.html"><code>Inspectors</code></a>, objects used with inspector shorthands can be any type <code>T</code> for which a <code>Collecting[E, T]</code>
 * is available, which by default includes <code>GenTraversable</code>,
* Java <code>Collection</code>, Java <code>Map</code>, <code>Array</code>s, and <code>String</code>s.
* Here are some examples:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalatest._
* import org.scalatest._
*
* scala> import matchers.should.Matchers._
* import matchers.should.Matchers._
*
* scala> all (Array(1, 2, 3)) should be < 5
*
* scala> import collection.JavaConverters._
* import collection.JavaConverters._
*
* scala> val js = List(1, 2, 3).asJava
* js: java.util.List[Int] = [1, 2, 3]
*
* scala> all (js) should be < 5
*
* scala> val jmap = Map("a" -> 1, "b" -> 2).asJava
* jmap: java.util.Map[String,Int] = {a=1, b=2}
*
* scala> atLeast(1, jmap) shouldBe Entry("b", 2)
*
* scala> atLeast(2, "hello, world!") shouldBe 'o'
* </pre>
*
* <a name="singleElementCollections"></a>
* <h2>Single-element collections</h2>
*
* <p>
* To assert both that a collection contains just one "lone" element as well as something else about that element, you can use
* the <code>loneElement</code> syntax provided by trait <a href="../../LoneElement.html"><code>LoneElement</code></a>. For example, if a
* <code>Set[Int]</code> should contain just one element, an <code>Int</code>
* less than or equal to 10, you could write:
* </p>
*
* <pre class="stHighlight">
* import LoneElement._
* set.loneElement should be <= 10
* </pre>
*
* <p>
* You can invoke <code>loneElement</code> on any type <code>T</code> for which an implicit <a href="../../enablers/Collecting.html"><code>Collecting[E, T]</code></a>
* is available, where <code>E</code> is the element type returned by the <code>loneElement</code> invocation. By default, you can use <code>loneElement</code>
* on <code>GenTraversable</code>, Java <code>Collection</code>, Java <code>Map</code>, <code>Array</code>, and <code>String</code>.
* </p>
*
* <a name="javaCollectionsAndMaps"></a>
* <h2>Java collections and maps</h2>
*
* <p>
* You can use similar syntax on Java collections (<code>java.util.Collection</code>) and maps (<code>java.util.Map</code>).
* For example, you can check whether a Java <code>Collection</code> or <code>Map</code> is <code>empty</code>,
* like this:
* </p>
*
* <pre class="stHighlight">
* javaCollection should be ('empty)
* javaMap should be ('empty)
* </pre>
*
* <p>
* Even though Java's <code>List</code> type doesn't actually have a <code>length</code> or <code>getLength</code> method,
* you can nevertheless check the length of a Java <code>List</code> (<code>java.util.List</code>) like this:
* </p>
*
* <pre class="stHighlight">
* javaList should have length 9
* </pre>
*
* <p>
* You can check the size of any Java <code>Collection</code> or <code>Map</code>, like this:
* </p>
*
* <pre class="stHighlight">
* javaMap should have size 20
* javaSet should have size 90
* </pre>
*
* <p>
* In addition, you can check whether a Java <code>Collection</code> contains a particular
* element, like this:
* </p>
*
* <pre class="stHighlight">
* javaCollection should contain ("five")
* </pre>
*
* <p>
* One difference to note between the syntax supported on Java and Scala collections is that
* in Java, <code>Map</code> is not a subtype of <code>Collection</code>, and does not
* actually define an element type. You can ask a Java <code>Map</code> for an "entry set"
* via the <code>entrySet</code> method, which will return the <code>Map</code>'s key/value pairs
* wrapped in a set of <code>java.util.Map.Entry</code>, but a <code>Map</code> is not actually
* a collection of <code>Entry</code>. To make Java <code>Map</code>s easier to work with, however,
* ScalaTest matchers allows you to treat a Java <code>Map</code> as a collection of <code>Entry</code>,
* and defines a convenience implementation of <code>java.util.Map.Entry</code> in
* <a href="../../Entry.html"><code>org.scalatest.Entry</code></a>. Here's how you use it:
* </p>
*
* <pre class="stHighlight">
* javaMap should contain (Entry(2, 3))
* javaMap should contain oneOf (Entry(2, 3), Entry(3, 4))
* </pre>
*
 * You can also just check whether a Java <code>Map</code> contains a particular key, or value, like this:
*
* <pre class="stHighlight">
* javaMap should contain key 1
* javaMap should contain value "Howdy"
* </pre>
*
* <a name="stringsAndArraysAsCollections"></a>
* <h2><code>String</code>s and <code>Array</code>s as collections</h2>
*
* <p>
* You can also use all the syntax described above for Scala and Java collections on <code>Array</code>s and
* <code>String</code>s. Here are some examples:
* </p>
*
* <pre class="stREPL">
* scala> import org.scalatest._
* import org.scalatest._
*
* scala> import matchers.should.Matchers._
* import matchers.should.Matchers._
*
* scala> atLeast (2, Array(1, 2, 3)) should be > 1
*
* scala> atMost (2, "halloo") shouldBe 'o'
*
* scala> Array(1, 2, 3) shouldBe sorted
*
* scala> "abcdefg" shouldBe sorted
*
* scala> Array(1, 2, 3) should contain atMostOneOf (3, 4, 5)
*
* scala> "abc" should contain atMostOneOf ('c', 'd', 'e')
* </pre>
*
* <a name="beAsAnEqualityComparison"></a>
* <h2><code>be</code> as an equality comparison</h2>
*
* <p>
* All uses of <code>be</code> other than those shown previously perform an equality comparison. They work
 * the same as <code>equal</code> when it is used with default equality. This redundancy between <code>be</code> and <code>equal</code> exists in part
* because it enables syntax that sometimes sounds more natural. For example, instead of writing:
* </p>
*
* <pre class="stHighlight">
* result should equal (null)
* </pre>
*
* <p>
* You can write:
* </p>
*
* <pre class="stHighlight">
* result should be (null)
* </pre>
*
* <p>
* (Hopefully you won't write that too much given <code>null</code> is error prone, and <code>Option</code>
* is usually a better, well, option.)
* As mentioned <a href="#checkingEqualityWithMatchers">previously</a>, the other difference between <code>equal</code>
* and <code>be</code> is that <code>equal</code> delegates the equality check to an <code>Equality</code> typeclass, whereas
* <code>be</code> always uses default equality.
* Here are some other examples of <code>be</code> used for equality comparison:
* </p>
*
* <pre class="stHighlight">
* sum should be (7.0)
* boring should be (false)
* fun should be (true)
* list should be (Nil)
* option should be (None)
* option should be (Some(1))
* </pre>
*
* <p>
* As with <code>equal</code> used with default equality, using <code>be</code> on arrays results in <code>deep</code> being called on both arrays prior to
* calling <code>equal</code>. As a result,
* the following expression would <em>not</em> throw a <a href="../../exceptions/TestFailedException.html"><code>TestFailedException</code></a>:
* </p>
*
* <pre class="stHighlight">
* Array(1, 2) should be (Array(1, 2)) // succeeds (i.e., does not throw TestFailedException)
* </pre>
*
* <p>
* Because <code>be</code> is used in several ways in ScalaTest matcher syntax, just as it is used in many ways in English, one
* potential point of confusion in the event of a failure is determining whether <code>be</code> was being used as an equality comparison or
* in some other way, such as a property assertion. To make it more obvious when <code>be</code> is being used for equality, the failure
* messages generated for those equality checks will include the word <code>equal</code> in them. For example, if this expression fails with a
* <code>TestFailedException</code>:
* </p>
*
* <pre class="stHighlight">
* option should be (Some(1))
* </pre>
*
* <p>
* The detail message in that <code>TestFailedException</code> will include the words <code>"equal to"</code> to signify <code>be</code>
* was in this case being used for equality comparison:
* </p>
*
* <pre class="stHighlight">
* Some(2) was not equal to Some(1)
* </pre>
*
* <a name="beingNegative"></a>
* <h2>Being negative</h2>
*
* <p>
* If you wish to check the opposite of some condition, you can simply insert <code>not</code> in the expression.
* Here are a few examples:
* </p>
*
* <pre class="stHighlight">
* result should not be (null)
* sum should not be <= (10)
* mylist should not equal (yourList)
* string should not startWith ("Hello")
* </pre>
*
* <a name="checkingThatCodeDoesNotCompile"></a>
* <h2>Checking that a snippet of code does not compile</h2>
*
* <p>
* Often when creating libraries you may wish to ensure that certain arrangements of code that
* represent potential “user errors” do not compile, so that your library is more error resistant.
* ScalaTest <code>Matchers</code> trait includes the following syntax for that purpose:
* </p>
*
* <pre class="stHighlight">
* "val a: String = 1" shouldNot compile
* </pre>
*
* <p>
* If you want to ensure that a snippet of code does not compile because of a type error (as opposed
* to a syntax error), use:
* </p>
*
* <pre class="stHighlight">
* "val a: String = 1" shouldNot typeCheck
* </pre>
*
* <p>
* Note that the <code>shouldNot</code> <code>typeCheck</code> syntax will only succeed if the given snippet of code does not
 * compile because of a type error. A syntax error will still result in a thrown <code>TestFailedException</code>.
* </p>
*
* <p>
* If you want to state that a snippet of code <em>does</em> compile, you can make that
* more obvious with:
* </p>
*
* <pre class="stHighlight">
* "val a: Int = 1" should compile
* </pre>
*
* <p>
* Although the previous three constructs are implemented with macros that determine at compile time whether
* the snippet of code represented by the string does or does not compile, errors
* are reported as test failures at runtime.
* </p>
*
* <a name="logicalExpressions"></a>
* <h2>Logical expressions with <code>and</code> and <code>or</code></h2>
*
* <p>
* You can also combine matcher expressions with <code>and</code> and/or <code>or</code>, however,
* you must place parentheses or curly braces around the <code>and</code> or <code>or</code> expression. For example,
* this <code>and</code>-expression would not compile, because the parentheses are missing:
* </p>
*
* <pre class="stHighlight">
* map should contain key ("two") and not contain value (7) // ERROR, parentheses missing!
* </pre>
*
* <p>
* Instead, you need to write:
* </p>
*
* <pre class="stHighlight">
* map should (contain key ("two") and not contain value (7))
* </pre>
*
* <p>
* Here are some more examples:
* </p>
*
* <pre class="stHighlight">
* number should (be > (0) and be <= (10))
* option should (equal (Some(List(1, 2, 3))) or be (None))
* string should (
* equal ("fee") or
* equal ("fie") or
* equal ("foe") or
* equal ("fum")
* )
* </pre>
*
* <p>
* Two differences exist between expressions composed of these <code>and</code> and <code>or</code> operators and the expressions you can write
* on regular <code>Boolean</code>s using its <code>&&</code> and <code>||</code> operators. First, expressions with <code>and</code>
* and <code>or</code> do not short-circuit. The following contrived expression, for example, would print <code>"hello, world!"</code>:
* </p>
*
* <pre class="stHighlight">
* "yellow" should (equal ("blue") and equal { println("hello, world!"); "green" })
* </pre>
*
* <p>
* In other words, the entire <code>and</code> or <code>or</code> expression is always evaluated, so you'll see any side effects
* of the right-hand side even if evaluating
* only the left-hand side is enough to determine the ultimate result of the larger expression. Failure messages produced by these
* expressions will "short-circuit," however,
* mentioning only the left-hand side if that's enough to determine the result of the entire expression. This "short-circuiting" behavior
* of failure messages is intended
* to make it easier and quicker for you to ascertain which part of the expression caused the failure. The failure message for the previous
* expression, for example, would be:
* </p>
*
* <pre class="stHighlight">
* "yellow" did not equal "blue"
* </pre>
*
* <p>
* Most likely this lack of short-circuiting would rarely be noticeable, because evaluating the right hand side will usually not
* involve a side effect. One situation where it might show up, however, is if you attempt to <code>and</code> a <code>null</code> check on a variable with an expression
* that uses the variable, like this:
* </p>
*
* <pre class="stHighlight">
* map should (not be (null) and contain key ("ouch"))
* </pre>
*
* <p>
* If <code>map</code> is <code>null</code>, the test will indeed fail, but with a <code>NullArgumentException</code>, not a
* <code>TestFailedException</code>. Here, the <code>NullArgumentException</code> is the visible right-hand side effect. To get a
* <code>TestFailedException</code>, you would need to check each assertion separately:
* </p>
*
* <pre class="stHighlight">
* map should not be (null)
* map should contain key ("ouch")
* </pre>
*
* <p>
* If <code>map</code> is <code>null</code> in this case, the <code>null</code> check in the first expression will fail with
* a <code>TestFailedException</code>, and the second expression will never be executed.
* </p>
*
* <p>
* The other difference with <code>Boolean</code> operators is that although <code>&&</code> has a higher precedence than <code>||</code>,
* <code>and</code> and <code>or</code>
* have the same precedence. Thus although the <code>Boolean</code> expression <code>(a || b && c)</code> will evaluate the <code>&&</code> expression
* before the <code>||</code> expression, like <code>(a || (b && c))</code>, the following expression:
* </p>
*
* <pre class="stHighlight">
* traversable should (contain (7) or contain (8) and have size (9))
* </pre>
*
* <p>
* Will evaluate left to right, as:
* </p>
*
* <pre class="stHighlight">
* traversable should ((contain (7) or contain (8)) and have size (9))
* </pre>
*
* <p>
* If you really want the <code>and</code> part to be evaluated first, you'll need to put in parentheses, like this:
* </p>
*
* <pre class="stHighlight">
* traversable should (contain (7) or (contain (8) and have size (9)))
* </pre>
*
* <a name="workingWithOptions"></a>
* <h2>Working with <code>Option</code>s</h2>
*
* <p>
* You can work with options using ScalaTest's equality, <code>empty</code>,
* <code>defined</code>, and <code>contain</code> syntax.
* For example, if you wish to check whether an option is <code>None</code>, you can write any of:
* </p>
*
* <pre class="stHighlight">
* option shouldEqual None
* option shouldBe None
* option should === (None)
* option shouldBe empty
* </pre>
*
* <p>
* If you wish to check an option is defined, and holds a specific value, you can write any of:
* </p>
*
* <pre class="stHighlight">
* option shouldEqual Some("hi")
* option shouldBe Some("hi")
* option should === (Some("hi"))
* </pre>
*
* <p>
* If you only wish to check that an option is defined, but don't care what it's value is, you can write:
* </p>
*
* <pre class="stHighlight">
* option shouldBe defined
* </pre>
*
* <p>
* If you mix in (or import the members of) <a href="../../OptionValues.html"><code>OptionValues</code></a>,
* you can write one statement that indicates you believe an option should be defined and then say something else about its value. Here's an example:
* </p>
*
* <pre class="stHighlight">
* import org.scalatest.OptionValues._
* option.value should be < 7
* </pre>
*
* <p>
* As mentioned previously, you can use also use ScalaTest's <code>contain</code>, <code>contain oneOf</code>, and
* <code>contain noneOf</code> syntax with options:
* </p>
*
* <pre class="stHighlight">
* Some(2) should contain (2)
* Some(7) should contain oneOf (5, 7, 9)
* Some(0) should contain noneOf (7, 8, 9)
* </pre>
 *
*
* <a name="checkingArbitraryProperties"></a>
* <h2>Checking arbitrary properties with <code>have</code></h2>
*
* <p>
* Using <code>have</code>, you can check properties of any type, where a <em>property</em> is an attribute of any
* object that can be retrieved either by a public field, method, or JavaBean-style <code>get</code>
* or <code>is</code> method, like this:
* </p>
*
* <pre class="stHighlight">
* book should have (
* 'title ("Programming in Scala"),
* 'author (List("Odersky", "Spoon", "Venners")),
* 'pubYear (2008)
* )
* </pre>
*
* <p>
* This expression will use reflection to ensure the <code>title</code>, <code>author</code>, and <code>pubYear</code> properties of object <code>book</code>
* are equal to the specified values. For example, it will ensure that <code>book</code> has either a public Java field or method
* named <code>title</code>, or a public method named <code>getTitle</code>, that when invoked (or accessed in the field case) results
* in a the string <code>"Programming in Scala"</code>. If all specified properties exist and have their expected values, respectively,
* execution will continue. If one or more of the properties either does not exist, or exists but results in an unexpected value,
* a <code>TestFailedException</code> will be thrown that explains the problem. (For the details on how a field or method is selected during this
* process, see the documentation for <a href="Matchers$HavePropertyMatcherGenerator.html"><code>HavePropertyMatcherGenerator</code></a>.)
* </p>
*
* <p>
 * When you use this syntax, you must place one or more property values in parentheses after <code>have</code>, separated by commas, where a <em>property
 * value</em> is a symbol indicating the name of the property followed by the expected value in parentheses. The only exception to this rule is the syntax
 * for checking size and length shown previously, which does not require parentheses. If you forget and put parentheses in, however, everything will
 * still work as you'd expect. Thus instead of writing:
* </p>
*
* <pre class="stHighlight">
* array should have length (3)
* set should have size (90)
* </pre>
*
* <p>
 * You can, alternatively, write:
* </p>
*
* <pre class="stHighlight">
* array should have (length (3))
* set should have (size (90))
* </pre>
*
* <p>
 * If a property has a value different from the specified expected value, a <code>TestFailedException</code> will be thrown
* with a detailed message that explains the problem. For example, if you assert the following on
* a <code>book</code> whose title is <code>Moby Dick</code>:
* </p>
*
* <pre class="stHighlight">
* book should have ('title ("A Tale of Two Cities"))
* </pre>
*
* <p>
* You'll get a <code>TestFailedException</code> with this detail message:
* </p>
*
* <pre>
* The title property had value "Moby Dick", instead of its expected value "A Tale of Two Cities",
* on object Book("Moby Dick", "Melville", 1851)
* </pre>
*
* <p>
* If you prefer to check properties in a type-safe manner, you can use a <code>HavePropertyMatcher</code>.
* This would allow you to write expressions such as:
* </p>
*
* <pre class="stHighlight">
* book should have (
* title ("Programming in Scala"),
* author (List("Odersky", "Spoon", "Venners")),
* pubYear (2008)
* )
* </pre>
*
* <p>
* These expressions would fail to compile if <code>should</code> is used on an inappropriate type, as determined
* by the type parameter of the <code>HavePropertyMatcher</code> being used. (For example, <code>title</code> in this example
* might be of type <code>HavePropertyMatcher[org.publiclibrary.Book]</code>. If used with an appropriate type, such an expression will compile
* and at run time the property method or field will be accessed directly; <em>i.e.</em>, no reflection will be used.
* See the documentation for <a href="matchers/HavePropertyMatcher.html"><code>HavePropertyMatcher</code></a> for more information.
* </p>
*
* <a name="lengthSizeHavePropertyMatchers"></a>
* <h2>Using <code>length</code> and <code>size</code> with <code>HavePropertyMatcher</code>s</h2>
*
* <p>
* If you want to use <code>length</code> or <code>size</code> syntax with your own custom <code>HavePropertyMatcher</code>s, you
* can do so, but you must write <code>(of [“the type”])</code> afterwards. For example, you could write:
* </p>
*
* <pre class="stHighlight">
* book should have (
* title ("A Tale of Two Cities"),
* length (220) (of [Book]),
* author ("Dickens")
* )
* </pre>
*
* <p>
* Prior to ScalaTest 2.0, “<code>length</code> <code>(22)</code>” yielded a <code>HavePropertyMatcher[Any, Int]</code> that used reflection to dynamically look
* for a <code>length</code> field or <code>getLength</code> method. In ScalaTest 2.0, “<code>length</code> <code>(22)</code>” yields a
* <code>MatcherFactory1[Any, Length]</code>, so it is no longer a <code>HavePropertyMatcher</code>. The <code>(of [<type>])</code> syntax converts
* the <code>MatcherFactory1[Any, Length]</code> to a <code>HavePropertyMatcher[<type>, Int]</code>.
* </p>
*
* <a name="matchingAPattern"></a>
* <h2>Checking that an expression matches a pattern</h2>
*
* <p>
* ScalaTest's <a href="../../Inside.html"><code>Inside</code></a> trait allows you to make assertions after a pattern match.
* Here's an example:
* </p>
*
* <pre class="stHighlight">
* case class Name(first: String, middle: String, last: String)
*
* val name = Name("Jane", "Q", "Programmer")
*
* inside(name) { case Name(first, _, _) =>
* first should startWith ("S")
* }
* </pre>
*
* <p>
* You can use <code>inside</code> to just ensure a pattern is matched, without making any further assertions, but a better
* alternative for that kind of assertion is <code>matchPattern</code>. The <code>matchPattern</code> syntax allows you
* to express that you expect a value to match a particular pattern, no more and no less:
* </p>
*
* <pre class="stHighlight">
* name should matchPattern { case Name("Sarah", _, _) => }
* </pre>
*
* <a name="usingCustomMatchers"></a>
* <h2>Using custom matchers</h2>
*
* <p>
* If none of the built-in matcher syntax (or options shown so far for extending the syntax) satisfy a particular need you have, you can create
* custom <code>Matcher</code>s that allow
* you to place your own syntax directly after <code>should</code>. For example, class <code>java.io.File</code> has a method <code>isHidden</code>, which
* indicates whether a file of a certain path and name is hidden. Because the <code>isHidden</code> method takes no parameters and returns <code>Boolean</code>,
* you can call it using <code>be</code> with a symbol or <code>BePropertyMatcher</code>, yielding assertions like:
* </p>
*
* <pre class="stHighlight">
* file should be ('hidden) // using a symbol
* file should be (hidden) // using a BePropertyMatcher
* </pre>
*
* <p>
* If it doesn't make sense to have your custom syntax follow <code>be</code>, you might want to create a custom <code>Matcher</code>
* instead, so your syntax can follow <code>should</code> directly. For example, you might want to be able to check whether
* a <code>java.io.File</code>'s name ends with a particular extension, like this:
* </p>
*
* <pre class="stHighlight">
* // using a plain-old Matcher
* file should endWithExtension ("txt")
* </pre>
*
* <p>
* ScalaTest provides several mechanism to make it easy to create custom matchers, including ways to compose new matchers
* out of existing ones complete with new error messages. For more information about how to create custom
* <code>Matcher</code>s, please see the documentation for the <a href="matchers/Matcher.html"><code>Matcher</code></a> trait.
* </p>
*
* <a name="checkingForExpectedExceptions"></a>
* <h2>Checking for expected exceptions</h2>
*
* <p>
* Sometimes you need to test whether a method throws an expected exception under certain circumstances, such
* as when invalid arguments are passed to the method. With <code>Matchers</code> mixed in, you can
* check for an expected exception like this:
* </p>
*
* <pre class="stHighlight">
* an [IndexOutOfBoundsException] should be thrownBy s.charAt(-1)
* </pre>
*
* <p>
* If <code>charAt</code> throws an instance of <code>StringIndexOutOfBoundsException</code>,
* this expression will result in that exception. But if <code>charAt</code> completes normally, or throws a different
* exception, this expression will complete abruptly with a <code>TestFailedException</code>.
*
* <p>
* If you need to further inspect an expected exception, you can capture it using this syntax:
* </p>
*
* <pre class="stHighlight">
* val thrown = the [IndexOutOfBoundsException] thrownBy s.charAt(-1)
* </pre>
*
* <p>
* This expression returns the caught exception so that you can inspect it further if you wish, for
* example, to ensure that data contained inside the exception has the expected values. Here's an
* example:
* </p>
*
* <pre class="stHighlight">
* thrown.getMessage should equal ("String index out of range: -1")
* </pre>
*
* <p>
* If you prefer you can also capture and inspect an expected exception in one statement, like this:
* </p>
*
* <pre class="stHighlight">
* the [ArithmeticException] thrownBy 1 / 0 should have message "/ by zero"
* the [IndexOutOfBoundsException] thrownBy {
* s.charAt(-1)
* } should have message "String index out of range: -1"
* </pre>
*
* <p>
* You can also state that no exception should be thrown by some code, like this:
* </p>
*
* <pre class="stHighlight">
* noException should be thrownBy 0 / 1
* </pre>
*
* <a name="thosePeskyParens"></a>
* <h2>Those pesky parens</h2>
*
* <p>
* Perhaps the most tricky part of writing assertions using ScalaTest matchers is remembering
* when you need or don't need parentheses, but bearing in mind a few simple rules <!-- PRESERVE -->should help.
* It is also reassuring to know that if you ever leave off a set of parentheses when they are
* required, your code will not compile. Thus the compiler will help you remember when you need the parens.
* That said, the rules are:
* </p>
*
* <p>
* 1. Although you don't always need them, you may choose to always put parentheses
* around right-hand values, such as the <code>7</code> in <code>num should equal (7)</code>:
* </p>
*
* <pre>
* result should equal <span class="stRed">(</span>4<span class="stRed">)</span>
* array should have length <span class="stRed">(</span>3<span class="stRed">)</span>
* book should have (
* 'title <span class="stRed">(</span>"Programming in Scala"<span class="stRed">)</span>,
* 'author <span class="stRed">(</span>List("Odersky", "Spoon", "Venners")<span class="stRed">)</span>,
* 'pubYear <span class="stRed">(</span>2008<span class="stRed">)</span>
* )
* option should be <span class="stRed">(</span>'defined<span class="stRed">)</span>
* catMap should (contain key <span class="stRed">(</span>9<span class="stRed">)</span> and contain value <span class="stRed">(</span>"lives"<span class="stRed">)</span>)
* keyEvent should be an <span class="stRed">(</span>'actionKey<span class="stRed">)</span>
* javaSet should have size <span class="stRed">(</span>90<span class="stRed">)</span>
* </pre>
*
* <p>
* 2. Except for <code>length</code>, <code>size</code> and <code>message</code>, you must always put parentheses around
* the list of one or more property values following a <code>have</code>:
* </p>
*
* <pre>
* file should (exist and have <span class="stRed">(</span>'name ("temp.txt")<span class="stRed">)</span>)
* book should have <span class="stRed">(</span>
* title ("Programming in Scala"),
* author (List("Odersky", "Spoon", "Venners")),
* pubYear (2008)
* <span class="stRed">)</span>
* javaList should have length (9) // parens optional for length and size
* </pre>
*
* <p>
* 3. You must always put parentheses around <code>and</code> and <code>or</code> expressions, as in:
* </p>
*
* <pre>
* catMap should <span class="stRed">(</span>contain key (9) and contain value ("lives")<span class="stRed">)</span>
* number should <span class="stRed">(</span>equal (2) or equal (4) or equal (8)<span class="stRed">)</span>
* </pre>
*
* <p>
* 4. Although you don't always need them, you may choose to always put parentheses
* around custom <code>Matcher</code>s when they appear directly after <code>not</code>:
* </p>
*
* <pre>
* file should exist
* file should not <span class="stRed">(</span>exist<span class="stRed">)</span>
* file should (exist and have ('name ("temp.txt")))
* file should (not <span class="stRed">(</span>exist<span class="stRed">)</span> and have ('name ("temp.txt")))
* file should (have ('name ("temp.txt")) or exist)
* file should (have ('name ("temp.txt")) or not <span class="stRed">(</span>exist<span class="stRed">)</span>)
* </pre>
*
* <p>
* That's it. With a bit of practice it <!-- PRESERVE -->should become natural to you, and the compiler will always be there to tell you if you
* forget a set of needed parentheses.
* </p>
*
* <p>
* <em>Note: ScalaTest's matchers are in part inspired by the matchers of <a href="http://rspec.info" target="_blank">RSpec</a>,
* <a href="https://github.com/hamcrest/JavaHamcrest" target="_blank">Hamcrest</a>, and
* <a href="http://etorreborre.github.io/specs2/" target="_blank">specs2</a>, and its “<code>shouldNot compile</code>” syntax
* by the <code>illTyped</code> macro of <a href="https://github.com/milessabin/shapeless" target="_blank">shapeless</a>.</em>
* </p>
*
* @author Bill Venners
* @author Chua Chee Seng
*/
trait Matchers extends Assertions with Tolerance with ShouldVerb with MatcherWords with Explicitly { matchers =>
import scala.language.implicitConversions
// SKIP-SCALATESTJS,NATIVE-START
// This guy is generally done through an implicit conversion from a symbol. It takes that symbol, and
// then represents an object with an apply method. So it gives an apply method to symbols.
// book should have ('author ("Gibson"))
// ^ // Basically this 'author symbol gets converted into this class, and its apply method takes "Gibson"
// TODO, put the documentation of the details of the algo for selecting a method or field to use here.
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* <p>
* This class is used as the result of an implicit conversion from class <code>Symbol</code>, to enable symbols to be
* used in <code>have ('author ("Dickens"))</code> syntax. The name of the implicit conversion method is
* <code>convertSymbolToHavePropertyMatcherGenerator</code>.
* </p>
*
* <p>
* Class <code>HavePropertyMatcherGenerator</code>'s primary constructor takes a <code>Symbol</code>. The
* <code>apply</code> method uses reflection to find and access a property that has the name specified by the
* <code>Symbol</code> passed to the constructor, so it can determine if the property has the expected value
* passed to <code>apply</code>.
* If the symbol passed is <code>'title</code>, for example, the <code>apply</code> method
* will use reflection to look for a public Java field named
* "title", a public method named "title", or a public method named "getTitle".
* If a method, it must take no parameters. If multiple candidates are found,
* the <code>apply</code> method will select based on the following algorithm:
* </p>
*
* <table class="stTable">
* <tr><th class="stHeadingCell">Field</th><th class="stHeadingCell">Method</th><th class="stHeadingCell">"get" Method</th><th class="stHeadingCell">Result</th></tr>
* <tr><td class="stTableCell"> </td><td class="stTableCell"> </td><td class="stTableCell"> </td><td class="stTableCell">Throws <code>TestFailedException</code>, because no candidates found</td></tr>
* <tr><td class="stTableCell"> </td><td class="stTableCell"> </td><td class="stTableCell"><code>getTitle()</code></td><td class="stTableCell">Invokes <code>getTitle()</code></td></tr>
* <tr><td class="stTableCell"> </td><td class="stTableCell"><code>title()</code></td><td class="stTableCell"> </td><td class="stTableCell">Invokes <code>title()</code></td></tr>
* <tr><td class="stTableCell"> </td><td class="stTableCell"><code>title()</code></td><td class="stTableCell"><code>getTitle()</code></td><td class="stTableCell">Invokes <code>title()</code> (this can occur when <code>BeanProperty</code> annotation is used)</td></tr>
* <tr><td class="stTableCell"><code>title</code></td><td class="stTableCell"> </td><td class="stTableCell"> </td><td class="stTableCell">Accesses field <code>title</code></td></tr>
* <tr><td class="stTableCell"><code>title</code></td><td class="stTableCell"> </td><td class="stTableCell"><code>getTitle()</code></td><td class="stTableCell">Invokes <code>getTitle()</code></td></tr>
* <tr><td class="stTableCell"><code>title</code></td><td class="stTableCell"><code>title()</code></td><td class="stTableCell"> </td><td class="stTableCell">Invokes <code>title()</code></td></tr>
* <tr><td class="stTableCell"><code>title</code></td><td class="stTableCell"><code>title()</code></td><td class="stTableCell"><code>getTitle()</code></td><td class="stTableCell">Invokes <code>title()</code> (this can occur when <code>BeanProperty</code> annotation is used)</td></tr>
* </table>
*
* @author Bill Venners
*/
final class HavePropertyMatcherGenerator(symbol: Symbol, prettifer: Prettifier, pos: source.Position) {
// NOTE(review): the constructor parameter "prettifer" looks misspelled ("prettifier")
// and is not referenced anywhere in this class — confirm before renaming, since the
// implicit conversion below passes it positionally.
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* book should have ('title ("A Tale of Two Cities"))
* ^
* </pre>
*
* <p>
* This class has an <code>apply</code> method that will produce a <code>HavePropertyMatcher[AnyRef, Any]</code>.
* The implicit conversion method, <code>convertSymbolToHavePropertyMatcherGenerator</code>, will cause the
* above line of code to be eventually transformed into:
* </p>
*
* <pre class="stHighlight">
* book should have (convertSymbolToHavePropertyMatcherGenerator('title).apply("A Tale of Two Cities"))
* </pre>
*/
def apply(expectedValue: Any): HavePropertyMatcher[AnyRef, Any] =
new HavePropertyMatcher[AnyRef, Any] {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* book should have ('title ("A Tale of Two Cities"))
* </pre>
*
* <p>
* This method uses reflection to discover a field or method with a name that indicates it represents
* the value of the property with the name contained in the <code>Symbol</code> passed to the
* <code>HavePropertyMatcherGenerator</code>'s constructor. The field or method must be public. To be a
* candidate, a field must have the name <code>symbol.name</code>, so if <code>symbol</code> is <code>'title</code>,
* the field name sought will be <code>"title"</code>. To be a candidate, a method must either have the name
* <code>symbol.name</code>, or have a JavaBean-style <code>get</code> or <code>is</code>. If the type of the
* passed <code>expectedValue</code> is <code>Boolean</code>, <code>"is"</code> is prepended, else <code>"get"</code>
* is prepended. Thus if <code>'title</code> is passed as <code>symbol</code>, and the type of the <code>expectedValue</code> is
* <code>String</code>, a method named <code>getTitle</code> will be considered a candidate (the return type
* of <code>getTitle</code> will not be checked, so it need not be <code>String</code>. By contrast, if <code>'defined</code>
* is passed as <code>symbol</code>, and the type of the <code>expectedValue</code> is <code>Boolean</code>, a method
* named <code>isTitle</code> will be considered a candidate so long as its return type is <code>Boolean</code>.
* </p>
* TODO continue the story
*/
def apply(objectWithProperty: AnyRef): HavePropertyMatchResult[Any] = {
// If 'empty passed, propertyName would be "empty"
val propertyName = symbol.name
// The runtime class of expectedValue decides whether JavaBean "is"-prefixed
// accessors are also considered during the reflective lookup (see scaladoc above).
val isBooleanProperty =
expectedValue match {
case o: Boolean => true
case _ => false
}
// accessProperty (defined elsewhere in this file) performs the reflective
// field/method lookup described in the scaladoc above.
accessProperty(objectWithProperty, symbol, isBooleanProperty) match {
case None =>
// No matching field or method was found; build the candidate names for the failure message.
// if propertyName is '>, mangledPropertyName would be "$greater"
val mangledPropertyName = transformOperatorChars(propertyName)
// methodNameToInvoke would also be "title"
val methodNameToInvoke = mangledPropertyName
// methodNameToInvokeWithGet would be "getTitle"
val methodNameToInvokeWithGet = "get"+ mangledPropertyName(0).toUpper + mangledPropertyName.substring(1)
throw newTestFailedException(Resources.propertyNotFound(methodNameToInvoke, expectedValue.toString, methodNameToInvokeWithGet), None, pos)
case Some(result) =>
// Property found: report match success via == against the expected value.
new HavePropertyMatchResult[Any](
result == expectedValue,
propertyName,
expectedValue,
result
)
}
}
/**
* Overrides to return pretty toString.
*/
override def toString: String = "HavePropertyMatcher[AnyRef, Any](expectedValue = " + Prettifier.default(expectedValue) + ")"
}
}
/**
* This implicit conversion method converts a <code>Symbol</code> to a
* <code>HavePropertyMatcherGenerator</code>, to enable the symbol to be used with the <code>have ('author ("Dickens"))</code> syntax.
*
* @param symbol the property-name symbol to convert
* @param prettifier an implicit <code>Prettifier</code> passed through to the generator
* @param pos the implicit source position of the call site, used in failure reporting
* @return a <code>HavePropertyMatcherGenerator</code> wrapping <code>symbol</code>
*/
implicit def convertSymbolToHavePropertyMatcherGenerator(symbol: Symbol)(implicit prettifier: Prettifier, pos: source.Position): HavePropertyMatcherGenerator = new HavePropertyMatcherGenerator(symbol, prettifier, pos)
// SKIP-SCALATESTJS,NATIVE-END
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
class ResultOfBeWordForAny[T](left: T, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
// shouldBeTrue is false when this word follows "should not"; every check below
// compares the raw match result against it rather than against a literal true.
/**
* This method enables the following syntax (positiveNumber is an <code>AMatcher</code>):
*
* <pre class="stHighlight">
* 1 should be a positiveNumber
* ^
* </pre>
*/
def a(aMatcher: AMatcher[T]): Assertion = {
val matcherResult = aMatcher(left)
if (matcherResult.matches != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) matcherResult.failureMessage(prettifier) else matcherResult.negatedFailureMessage(prettifier), None, pos)
} else indicateSuccess(shouldBeTrue, matcherResult.negatedFailureMessage(prettifier), matcherResult.failureMessage(prettifier))
}
/**
* This method enables the following syntax (oddNumber is an <code>AnMatcher</code>):
*
* <pre class="stHighlight">
* 1 should be an oddNumber
* ^
* </pre>
*/
def an(anMatcher: AnMatcher[T]): Assertion = {
val matcherResult = anMatcher(left)
if (matcherResult.matches != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) matcherResult.failureMessage(prettifier) else matcherResult.negatedFailureMessage(prettifier), None, pos)
} else indicateSuccess(shouldBeTrue, matcherResult.negatedFailureMessage(prettifier), matcherResult.failureMessage(prettifier))
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* result should be theSameInstanceAs anotherObject
* ^
* </pre>
*/
def theSameInstanceAs(right: AnyRef)(implicit toAnyRef: T <:< AnyRef): Assertion = {
// Reference identity (eq), not value equality (==).
if ((toAnyRef(left) eq right) != shouldBeTrue)
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotSameInstanceAs(prettifier, left, right) else FailureMessages.wasSameInstanceAs(prettifier, left, right), None, pos)
else indicateSuccess(shouldBeTrue, FailureMessages.wasSameInstanceAs(prettifier, left, right), FailureMessages.wasNotSameInstanceAs(prettifier, left, right))
}
/* *
* This method enables the following syntax:
*
* <pre class="stHighlight">
* result should be a [String]
* ^
* </pre>
def a[EXPECTED : ClassManifest] {
val clazz = implicitly[ClassManifest[EXPECTED]].erasure.asInstanceOf[Class[EXPECTED]]
if (clazz.isAssignableFrom(left.getClass)) {
throw newTestFailedException(
if (shouldBeTrue)
FailureMessages.wasNotAnInstanceOf(prettifier, left, UnquotedString(clazz.getName), UnquotedString(left.getClass.getName))
else
FailureMessages.wasAnInstanceOf
)
}
}
*/
// SKIP-SCALATESTJS,NATIVE-START
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* fileMock should be a ('file)
* ^
* </pre>
*/
def a(symbol: Symbol)(implicit toAnyRef: T <:< AnyRef): Assertion = {
// NOTE(review): the two literal boolean flags presumably select "a" (vs "an")
// article phrasing for the reflective predicate lookup — confirm against the
// signature of matchSymbolToPredicateMethod (defined elsewhere in this file).
val matcherResult = matchSymbolToPredicateMethod(toAnyRef(left), symbol, true, true, prettifier, pos)
if (matcherResult.matches != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) matcherResult.failureMessage(prettifier) else matcherResult.negatedFailureMessage(prettifier), None, pos)
} else indicateSuccess(shouldBeTrue, matcherResult.negatedFailureMessage(prettifier), matcherResult.failureMessage(prettifier))
}
// SKIP-SCALATESTJS,NATIVE-END
// TODO: Check the shouldBeTrues, are they sometimes always false or true?
/**
* This method enables the following syntax, where <code>badBook</code> is, for example, of type <code>Book</code> and
* <code>goodRead</code> refers to a <code>BePropertyMatcher[Book]</code>:
*
* <pre class="stHighlight">
* badBook should be a (goodRead)
* ^
* </pre>
*/
def a(bePropertyMatcher: BePropertyMatcher[T])(implicit ev: T <:< AnyRef): Assertion = { // TODO: Try expanding this to 2.10 AnyVals
val result = bePropertyMatcher(left)
if (result.matches != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotA(prettifier, left, UnquotedString(result.propertyName)) else FailureMessages.wasA(prettifier, left, UnquotedString(result.propertyName)), None, pos)
} else indicateSuccess(shouldBeTrue, FailureMessages.wasA(prettifier, left, UnquotedString(result.propertyName)), FailureMessages.wasNotA(prettifier, left, UnquotedString(result.propertyName)))
}
// SKIP-SCALATESTJS,NATIVE-START
// TODO, in both of these, the failure message doesn't have a/an
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* fruit should be an ('orange)
* ^
* </pre>
*/
def an(symbol: Symbol)(implicit toAnyRef: T <:< AnyRef): Assertion = {
// Same as a(Symbol) above, except the fourth flag is false ("an" phrasing).
val matcherResult = matchSymbolToPredicateMethod(toAnyRef(left), symbol, true, false, prettifier, pos)
if (matcherResult.matches != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) matcherResult.failureMessage(prettifier) else matcherResult.negatedFailureMessage(prettifier), None, pos)
} else indicateSuccess(shouldBeTrue, matcherResult.negatedFailureMessage(prettifier), matcherResult.failureMessage(prettifier))
}
// SKIP-SCALATESTJS,NATIVE-END
/**
* This method enables the following syntax, where <code>badBook</code> is, for example, of type <code>Book</code> and
* <code>excellentRead</code> refers to a <code>BePropertyMatcher[Book]</code>:
*
* <pre class="stHighlight">
* book should be an (excellentRead)
* ^
* </pre>
*/
def an(beTrueMatcher: BePropertyMatcher[T])(implicit ev: T <:< AnyRef): Assertion = { // TODO: Try expanding this to 2.10 AnyVals
val beTrueMatchResult = beTrueMatcher(left)
if (beTrueMatchResult.matches != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotAn(prettifier, left, UnquotedString(beTrueMatchResult.propertyName)) else FailureMessages.wasAn(prettifier, left, UnquotedString(beTrueMatchResult.propertyName)), None, pos)
} else indicateSuccess(shouldBeTrue, FailureMessages.wasAn(prettifier, left, UnquotedString(beTrueMatchResult.propertyName)), FailureMessages.wasNotAn(prettifier, left, UnquotedString(beTrueMatchResult.propertyName)))
}
/**
* This method enables the following syntax, where <code>fraction</code> is, for example, of type <code>PartialFunction</code>:
*
* <pre class="stHighlight">
* fraction should be definedAt (6)
* ^
* </pre>
*/
def definedAt[U](right: U)(implicit ev: T <:< PartialFunction[U, _]): Assertion = {
if (left.isDefinedAt(right) != shouldBeTrue)
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotDefinedAt(prettifier, left, right) else FailureMessages.wasDefinedAt(prettifier, left, right), None, pos)
else indicateSuccess(shouldBeTrue, FailureMessages.wasDefinedAt(prettifier, left, right), FailureMessages.wasNotDefinedAt(prettifier, left, right))
}
/**
* Overrides to return pretty toString.
*
* @return "ResultOfBeWordForAny([left], [shouldBeTrue])"
*/
override def toString: String = "ResultOfBeWordForAny(" + Prettifier.default(left) + ", " + Prettifier.default(shouldBeTrue) + ")"
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class RegexWord {

  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * "eight" should not fullyMatch regex ("""(-)?(\\d+)(\\.\\d*)?""")
   *                               ^
   * </pre>
   *
   * @param regexString the regular expression, as a pattern string
   * @return a <code>ResultOfRegexWordApplication</code> wrapping the pattern with no expected groups
   */
  def apply(regexString: String): ResultOfRegexWordApplication = new ResultOfRegexWordApplication(regexString, IndexedSeq.empty)

  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * "eight" should not fullyMatch regex ("""(-)?(\\d+)(\\.\\d*)?""".r)
   *                               ^
   * </pre>
   *
   * @param regex the compiled regular expression
   * @return a <code>ResultOfRegexWordApplication</code> wrapping the regex with no expected groups
   */
  def apply(regex: Regex): ResultOfRegexWordApplication = new ResultOfRegexWordApplication(regex, IndexedSeq.empty)

  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * string should not fullyMatch regex ("a(b*)c" withGroup "bb")
   *                              ^
   * </pre>
   *
   * @param regexWithGroups the regular expression together with the capture-group values expected to match
   * @return a <code>ResultOfRegexWordApplication</code> carrying the regex and its expected groups
   */
  // Explicit result type added for consistency with the two sibling overloads
  // (public API members should declare their result types).
  def apply(regexWithGroups: RegexWithGroups): ResultOfRegexWordApplication =
    new ResultOfRegexWordApplication(regexWithGroups.regex, regexWithGroups.groups)

  /**
   * Overrides to return "regex"
   */
  override def toString: String = "regex"
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class ResultOfIncludeWordForString(left: String, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {

  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * string should include regex ("world")
   *                       ^
   * </pre>
   *
   * Compiles the pattern string and delegates to the <code>Regex</code> overload.
   */
  def regex(rightRegexString: String): Assertion = regex(rightRegexString.r)

  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * string should include regex ("a(b*)c" withGroup "bb")
   *                       ^
   * </pre>
   *
   * Checks that <code>left</code> contains a match of the regex whose capture
   * groups carry the expected values.
   */
  def regex(regexWithGroups: RegexWithGroups): Assertion = {
    val groupResult = includeRegexWithGroups(left, regexWithGroups.regex, regexWithGroups.groups)
    if (groupResult.matches == shouldBeTrue)
      indicateSuccess(shouldBeTrue, groupResult.negatedFailureMessage(prettifier), groupResult.failureMessage(prettifier))
    else
      indicateFailure(
        if (shouldBeTrue) groupResult.failureMessage(prettifier)
        else groupResult.negatedFailureMessage(prettifier),
        None,
        pos
      )
  }

  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * string should include regex ("wo.ld".r)
   *                       ^
   * </pre>
   *
   * Succeeds (when not negated) if the regex matches anywhere inside <code>left</code>.
   */
  def regex(rightRegex: Regex): Assertion = {
    // A match anywhere in the string counts as inclusion.
    val included = rightRegex.findFirstIn(left).isDefined
    if (included == shouldBeTrue)
      indicateSuccess(shouldBeTrue, FailureMessages.includedRegex(prettifier, left, rightRegex), FailureMessages.didNotIncludeRegex(prettifier, left, rightRegex))
    else
      indicateFailure(
        if (shouldBeTrue) FailureMessages.didNotIncludeRegex(prettifier, left, rightRegex)
        else FailureMessages.includedRegex(prettifier, left, rightRegex),
        None,
        pos
      )
  }

  /**
   * Overrides to return pretty toString.
   *
   * @return "ResultOfIncludeWordForString([left], [shouldBeTrue])"
   */
  override def toString: String = s"ResultOfIncludeWordForString(${Prettifier.default(left)}, ${Prettifier.default(shouldBeTrue)})"
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class ResultOfStartWithWordForString(left: String, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {

  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * string should startWith regex ("Hel*o")
   *                         ^
   * </pre>
   *
   * Compiles the pattern string and delegates to the <code>Regex</code> overload.
   */
  def regex(rightRegexString: String): Assertion = regex(rightRegexString.r)

  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * string should startWith regex ("a(b*)c" withGroup "bb")
   *                         ^
   * </pre>
   *
   * Checks that <code>left</code> starts with a match of the regex whose capture
   * groups carry the expected values.
   */
  def regex(regexWithGroups: RegexWithGroups): Assertion = {
    val groupResult = startWithRegexWithGroups(left, regexWithGroups.regex, regexWithGroups.groups)
    if (groupResult.matches == shouldBeTrue)
      indicateSuccess(shouldBeTrue, groupResult.negatedFailureMessage(prettifier), groupResult.failureMessage(prettifier))
    else
      indicateFailure(
        if (shouldBeTrue) groupResult.failureMessage(prettifier)
        else groupResult.negatedFailureMessage(prettifier),
        None,
        pos
      )
  }

  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * string should startWith regex ("Hel*o".r)
   *                         ^
   * </pre>
   *
   * Succeeds (when not negated) if the regex matches a prefix of <code>left</code>.
   */
  def regex(rightRegex: Regex): Assertion = {
    // lookingAt anchors the match at the start of the input without requiring
    // it to consume the whole string.
    val matchesPrefix = rightRegex.pattern.matcher(left).lookingAt
    if (matchesPrefix == shouldBeTrue)
      indicateSuccess(shouldBeTrue, FailureMessages.startedWithRegex(prettifier, left, rightRegex), FailureMessages.didNotStartWithRegex(prettifier, left, rightRegex))
    else
      indicateFailure(
        if (shouldBeTrue) FailureMessages.didNotStartWithRegex(prettifier, left, rightRegex)
        else FailureMessages.startedWithRegex(prettifier, left, rightRegex),
        None,
        pos
      )
  }

  /**
   * Overrides to return pretty toString.
   *
   * @return "ResultOfStartWithWordForString([left], [shouldBeTrue])"
   */
  override def toString: String = s"ResultOfStartWithWordForString(${Prettifier.default(left)}, ${Prettifier.default(shouldBeTrue)})"
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class ResultOfEndWithWordForString(left: String, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* string should endWith regex ("wor.d")
* ^
* </pre>
*/
def regex(rightRegexString: String): Assertion = regex(rightRegexString.r)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* string should endWith regex ("a(b*)c" withGroup "bb")
* ^
* </pre>
*/
def regex(regexWithGroups: RegexWithGroups): Assertion = {
val result = endWithRegexWithGroups(left, regexWithGroups.regex, regexWithGroups.groups)
if (result.matches != shouldBeTrue)
indicateFailure(if (shouldBeTrue) result.failureMessage(prettifier) else result.negatedFailureMessage(prettifier), None, pos)
else indicateSuccess(shouldBeTrue, result.negatedFailureMessage(prettifier), result.failureMessage(prettifier))
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* string should endWith regex ("wor.d".r)
* ^
* </pre>
*/
def regex(rightRegex: Regex): Assertion = {
val allMatches = rightRegex.findAllIn(left)
// hasNext forces the MatchIterator to compute a match, after which allMatches.end
// is that match's end offset; the short-circuit && keeps .end from being read
// when there is no match at all.
// NOTE(review): only the first (leftmost) match is examined here, so a later,
// non-overlapping match that does end at left.length would go undetected —
// confirm this is the intended "ends with" semantics.
if ((allMatches.hasNext && (allMatches.end == left.length)) != shouldBeTrue)
indicateFailure(if (shouldBeTrue) FailureMessages.didNotEndWithRegex(prettifier, left, rightRegex) else FailureMessages.endedWithRegex(prettifier, left, rightRegex), None, pos)
else indicateSuccess(shouldBeTrue, FailureMessages.endedWithRegex(prettifier, left, rightRegex), FailureMessages.didNotEndWithRegex(prettifier, left, rightRegex))
}
/**
* Overrides to return pretty toString.
*
* @return "ResultOfEndWithWordForString([left], [shouldBeTrue])"
*/
override def toString: String = "ResultOfEndWithWordForString(" + Prettifier.default(left) + ", " + Prettifier.default(shouldBeTrue) + ")"
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class ResultOfFullyMatchWordForString(left: String, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* string should fullMatch regex ("Hel*o world")
* ^
* </pre>
*/
def regex(rightRegexString: String): Assertion = regex(rightRegexString.r)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* string should fullMatch regex ("a(b*)c" withGroup "bb")
* ^
* </pre>
*/
def regex(regexWithGroups: RegexWithGroups): Assertion = {
val result = fullyMatchRegexWithGroups(left, regexWithGroups.regex, regexWithGroups.groups)
if (result.matches != shouldBeTrue)
indicateFailure(if (shouldBeTrue) result.failureMessage(prettifier) else result.negatedFailureMessage(prettifier), None, pos)
else indicateSuccess(shouldBeTrue, result.negatedFailureMessage(prettifier), result.failureMessage(prettifier))
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* string should fullymatch regex ("Hel*o world".r)
* ^
* </pre>
*/
def regex(rightRegex: Regex): Assertion = {
if (rightRegex.pattern.matcher(left).matches != shouldBeTrue)
indicateFailure(if (shouldBeTrue) FailureMessages.didNotFullyMatchRegex(prettifier, left, rightRegex) else FailureMessages.fullyMatchedRegex(prettifier, left, rightRegex), None, pos)
else indicateSuccess(shouldBeTrue, FailureMessages.fullyMatchedRegex(prettifier, left, rightRegex), FailureMessages.didNotFullyMatchRegex(prettifier, left, rightRegex))
}
/**
* Overrides to return pretty toString.
*
* @return "ResultOfFullyMatchWordForString([left], [shouldBeTrue])"
*/
override def toString: String = "ResultOfFullyMatchWordForString(" + Prettifier.default(left) + ", " + Prettifier.default(shouldBeTrue) + ")"
}
// Going back to original, legacy one to get to a good place to check in.
/*
def equal(right: Any): Matcher[Any] =
new Matcher[Any] {
def apply(left: Any): MatchResult = {
val (leftee, rightee) = Suite.getObjectsForFailureMessage(left, right)
MatchResult(
areEqualComparingArraysStructurally(left, right),
FailureMessages.didNotEqual(prettifier, leftee, rightee),
FailureMessages.equaled(prettifier, left, right)
)
}
}
*/
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* result should equal (100 +- 1)
* ^
* </pre>
*/
def equal[T](spread: Spread[T]): Matcher[T] = {
new Matcher[T] {
def apply(left: T): MatchResult = {
MatchResult(
spread.isWithin(left),
Resources.rawDidNotEqualPlusOrMinus,
Resources.rawEqualedPlusOrMinus,
Vector(left, spread.pivot, spread.tolerance)
)
}
override def toString: String = "equal (" + Prettifier.default(spread) + ")"
}
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* result should equal (null)
* ^
* </pre>
*/
def equal(o: Null): Matcher[AnyRef] =
new Matcher[AnyRef] {
def apply(left: AnyRef): MatchResult = {
MatchResult(
left == null,
Resources.rawDidNotEqualNull,
Resources.rawEqualedNull,
Resources.rawDidNotEqualNull,
Resources.rawMidSentenceEqualedNull,
Vector(left),
Vector.empty
)
}
override def toString: String = "equal (" + Prettifier.default(o) + ")"
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class KeyWord {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* map should not contain key (10)
* ^
* </pre>
*/
def apply(expectedKey: Any): ResultOfKeyWordApplication = new ResultOfKeyWordApplication(expectedKey)
/**
* Overrides to return pretty toString.
*
* @return "key"
*/
override def toString: String = "key"
}
/**
* This field enables the following syntax:
*
* <pre class="stHighlight">
* map should not contain key (10)
* ^
* </pre>
*/
val key = new KeyWord
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class ValueWord {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* map should not contain key (10)
* ^
* </pre>
*/
def apply(expectedValue: Any): ResultOfValueWordApplication = new ResultOfValueWordApplication(expectedValue)
/**
* Overrides to return pretty toString.
*
* @return "value"
*/
override def toString: String = "value"
}
/**
* This field enables the following syntax:
*
* <pre class="stHighlight">
* map should not contain value (10)
* ^
* </pre>
*/
val value = new ValueWord
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class AWord {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* badBook should not be a ('goodRead)
* ^
* </pre>
*/
def apply(symbol: Symbol): ResultOfAWordToSymbolApplication = new ResultOfAWordToSymbolApplication(symbol)
/**
* This method enables the following syntax, where, for example, <code>badBook</code> is of type <code>Book</code> and <code>goodRead</code>
* is a <code>BePropertyMatcher[Book]</code>:
*
* <pre class="stHighlight">
* badBook should not be a (goodRead)
* ^
* </pre>
*/
def apply[T](beTrueMatcher: BePropertyMatcher[T]): ResultOfAWordToBePropertyMatcherApplication[T] = new ResultOfAWordToBePropertyMatcherApplication(beTrueMatcher)
/**
* This method enables the following syntax, where, <code>positiveNumber</code> is an <code>AMatcher[Book]</code>:
*
* <pre class="stHighlight">
* result should not be a (positiveNumber)
* ^
* </pre>
*/
def apply[T](aMatcher: AMatcher[T]): ResultOfAWordToAMatcherApplication[T] = new ResultOfAWordToAMatcherApplication(aMatcher)
/**
* Overrides to return pretty toString.
*
* @return "a"
*/
override def toString: String = "a"
}
/**
* This field enables the following syntax:
*
* <pre class="stHighlight">
* badBook should not be a ('goodRead)
* ^
* </pre>
*/
val a = new AWord
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class AnWord {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* badBook should not be an ('excellentRead)
* ^
* </pre>
*/
def apply(symbol: Symbol): ResultOfAnWordToSymbolApplication = new ResultOfAnWordToSymbolApplication(symbol)
/**
* This method enables the following syntax, where, for example, <code>badBook</code> is of type <code>Book</code> and <code>excellentRead</code>
* is a <code>BePropertyMatcher[Book]</code>:
*
* <pre class="stHighlight">
* badBook should not be an (excellentRead)
* ^
* </pre>
*/
def apply[T](beTrueMatcher: BePropertyMatcher[T]): ResultOfAnWordToBePropertyMatcherApplication[T] = new ResultOfAnWordToBePropertyMatcherApplication(beTrueMatcher)
/**
* This method enables the following syntax, where, <code>positiveNumber</code> is an <code>AnMatcher[Book]</code>:
*
* <pre class="stHighlight">
* result should not be an (positiveNumber)
* ^
* </pre>
*/
def apply[T](anMatcher: AnMatcher[T]): ResultOfAnWordToAnMatcherApplication[T] = new ResultOfAnWordToAnMatcherApplication(anMatcher)
/**
* Overrides to return pretty toString.
*
* @return "an"
*/
override def toString: String = "an"
}
/**
* This field enables the following syntax:
*
* <pre class="stHighlight">
* badBook should not be an (excellentRead)
* ^
* </pre>
*/
val an = new AnWord
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class TheSameInstanceAsPhrase {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* oneString should not be theSameInstanceAs (anotherString)
* ^
* </pre>
*/
def apply(anyRef: AnyRef): ResultOfTheSameInstanceAsApplication = new ResultOfTheSameInstanceAsApplication(anyRef)
/**
* Overrides to return pretty toString.
*
* @return "theSameInstanceAs"
*/
override def toString: String = "theSameInstanceAs"
}
/**
* This field enables the following syntax:
*
* <pre class="stHighlight">
* oneString should not be theSameInstanceAs (anotherString)
* ^
* </pre>
*/
val theSameInstanceAs: TheSameInstanceAsPhrase = new TheSameInstanceAsPhrase
/**
* This field enables the following syntax:
*
* <pre class="stHighlight">
* "eight" should not fullyMatch regex ("""(-)?(\\d+)(\\.\\d*)?""".r)
* ^
* </pre>
*/
val regex = new RegexWord
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class ResultOfHaveWordForExtent[A](left: A, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* obj should have length (2L)
* ^
* </pre>
*
* <p>
* This method is ultimately invoked for objects that have a <code>length</code> property structure
* of type <code>Long</code>,
* but is of a type that is not handled by implicit conversions from nominal types such as
* <code>scala.Seq</code>, <code>java.lang.String</code>, and <code>java.util.List</code>.
* </p>
*/
def length(expectedLength: Long)(implicit len: Length[A]): Assertion = {
val leftLength = len.lengthOf(left)
if ((leftLength == expectedLength) != shouldBeTrue)
indicateFailure(if (shouldBeTrue) FailureMessages.hadLengthInsteadOfExpectedLength(prettifier, left, leftLength, expectedLength) else FailureMessages.hadLength(prettifier, left, expectedLength), None, pos)
else indicateSuccess(shouldBeTrue, FailureMessages.hadLength(prettifier, left, expectedLength), FailureMessages.hadLengthInsteadOfExpectedLength(prettifier, left, leftLength, expectedLength))
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* obj should have size (2L)
* ^
* </pre>
*
* <p>
* This method is ultimately invoked for objects that have a <code>size</code> property structure
* of type <code>Long</code>,
* but is of a type that is not handled by implicit conversions from nominal types such as
* <code>Traversable</code> and <code>java.util.Collection</code>.
* </p>
*/
def size(expectedSize: Long)(implicit sz: Size[A]): Assertion = {
val leftSize = sz.sizeOf(left)
if ((leftSize == expectedSize) != shouldBeTrue)
indicateFailure(if (shouldBeTrue) FailureMessages.hadSizeInsteadOfExpectedSize(prettifier, left, leftSize, expectedSize) else FailureMessages.hadSize(prettifier, left, expectedSize), None, pos)
else indicateSuccess(shouldBeTrue, FailureMessages.hadSize(prettifier, left, expectedSize), FailureMessages.hadSizeInsteadOfExpectedSize(prettifier, left, leftSize, expectedSize))
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* exception should have message ("file not found")
* ^
* </pre>
*/
def message(expectedMessage: String)(implicit messaging: Messaging[A]): Assertion = {
val actualMessage = messaging.messageOf(left)
if ((actualMessage== expectedMessage) != shouldBeTrue)
indicateFailure(if (shouldBeTrue) FailureMessages.hadMessageInsteadOfExpectedMessage(prettifier, left, actualMessage, expectedMessage) else FailureMessages.hadExpectedMessage(prettifier, left, expectedMessage), None, pos)
else indicateSuccess(shouldBeTrue, FailureMessages.hadExpectedMessage(prettifier, left, expectedMessage), FailureMessages.hadMessageInsteadOfExpectedMessage(prettifier, left, actualMessage, expectedMessage))
}
/**
* Overrides to return pretty toString.
*
* @return "ResultOfHaveWordForExtent([left], [shouldBeTrue])"
*/
override def toString: String = "ResultOfHaveWordForExtent(" + Prettifier.default(left) + ", " + Prettifier.default(shouldBeTrue) + ")"
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* num should (not be < (10) and not be > (17))
* ^
* </pre>
*/
def <[T : Ordering] (right: T): ResultOfLessThanComparison[T] =
new ResultOfLessThanComparison(right)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* num should (not be > (10) and not be < (7))
* ^
* </pre>
*/
def >[T : Ordering] (right: T): ResultOfGreaterThanComparison[T] =
new ResultOfGreaterThanComparison(right)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* num should (not be <= (10) and not be > (17))
* ^
* </pre>
*/
def <=[T : Ordering] (right: T): ResultOfLessThanOrEqualToComparison[T] =
new ResultOfLessThanOrEqualToComparison(right)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* num should (not be >= (10) and not be < (7))
* ^
* </pre>
*/
def >=[T : Ordering] (right: T): ResultOfGreaterThanOrEqualToComparison[T] =
new ResultOfGreaterThanOrEqualToComparison(right)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* list should (not be definedAt (7) and not be definedAt (9))
* ^
* </pre>
*/
def definedAt[T](right: T): ResultOfDefinedAt[T] =
new ResultOfDefinedAt(right)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (oneOf(1, 2))
* ^
* </pre>
*/
def oneOf(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit pos: source.Position) = {
val xs = firstEle :: secondEle :: remainingEles.toList
if (xs.distinct.size != xs.size)
throw new NotAllowedException(FailureMessages.oneOfDuplicate, pos)
new ResultOfOneOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (oneElementOf (List(1, 2)))
* ^
* </pre>
*/
def oneElementOf(elements: GenTraversable[Any]) = {
val xs = elements.toList
new ResultOfOneElementOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (atLeastOneOf(1, 2))
* ^
* </pre>
*/
def atLeastOneOf(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit pos: source.Position) = {
val xs = firstEle :: secondEle :: remainingEles.toList
if (xs.distinct.size != xs.size)
throw new NotAllowedException(FailureMessages.atLeastOneOfDuplicate, pos)
new ResultOfAtLeastOneOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (atLeastOneElementOf (List(1, 2)))
* ^
* </pre>
*/
def atLeastOneElementOf(elements: GenTraversable[Any]) = {
val xs = elements.toList
new ResultOfAtLeastOneElementOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (noneOf(1, 2))
* ^
* </pre>
*/
def noneOf(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit pos: source.Position) = {
val xs = firstEle :: secondEle :: remainingEles.toList
if (xs.distinct.size != xs.size)
throw new NotAllowedException(FailureMessages.noneOfDuplicate, pos)
new ResultOfNoneOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (noElementsOf List(1, 2))
* ^
* </pre>
*/
def noElementsOf(elements: GenTraversable[Any]) = {
val xs = elements.toList
new ResultOfNoElementsOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (theSameElementsAs(List(1, 2, 3)))
* ^
* </pre>
*/
def theSameElementsAs(xs: GenTraversable[_]) = new ResultOfTheSameElementsAsApplication(xs)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (theSameElementsInOrderAs(List(1, 2)))
* ^
* </pre>
*/
def theSameElementsInOrderAs(xs: GenTraversable[_]) = new ResultOfTheSameElementsInOrderAsApplication(xs)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (only(1, 2))
* ^
* </pre>
*/
def only(xs: Any*)(implicit pos: source.Position) = {
if (xs.isEmpty)
throw new NotAllowedException(FailureMessages.onlyEmpty, pos)
if (xs.distinct.size != xs.size)
throw new NotAllowedException(FailureMessages.onlyDuplicate, pos)
new ResultOfOnlyApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (inOrderOnly(1, 2))
* ^
* </pre>
*/
def inOrderOnly[T](firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit pos: source.Position) = {
val xs = firstEle :: secondEle :: remainingEles.toList
if (xs.distinct.size != xs.size)
throw new NotAllowedException(FailureMessages.inOrderOnlyDuplicate, pos)
new ResultOfInOrderOnlyApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (allOf(1, 2))
* ^
* </pre>
*/
def allOf(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit pos: source.Position) = {
val xs = firstEle :: secondEle :: remainingEles.toList
if (xs.distinct.size != xs.size)
throw new NotAllowedException(FailureMessages.allOfDuplicate, pos)
new ResultOfAllOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (allElementsOf(1, 2))
* ^
* </pre>
*/
def allElementsOf[R](elements: GenTraversable[R]) = {
val xs = elements.toList
new ResultOfAllElementsOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (inOrder(1, 2))
* ^
* </pre>
*/
def inOrder(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit pos: source.Position) = {
val xs = firstEle :: secondEle :: remainingEles.toList
if (xs.distinct.size != xs.size)
throw new NotAllowedException(FailureMessages.inOrderDuplicate, pos)
new ResultOfInOrderApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (inOrderElementsOf List(1, 2))
* ^
* </pre>
*/
def inOrderElementsOf[R](elements: GenTraversable[R]) = {
val xs = elements.toList
new ResultOfInOrderElementsOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (atMostOneOf(1, 2))
* ^
* </pre>
*/
def atMostOneOf(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit pos: source.Position) = {
val xs = firstEle :: secondEle :: remainingEles.toList
if (xs.distinct.size != xs.size)
throw new NotAllowedException(FailureMessages.atMostOneOfDuplicate, pos)
new ResultOfAtMostOneOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* List(1, 2, 3) should contain (atMostOneElementOf (List(1, 2)))
* ^
* </pre>
*/
def atMostOneElementOf[R](elements: GenTraversable[R]) = {
val xs = elements.toList
new ResultOfAtMostOneElementOfApplication(xs)
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* a [RuntimeException] should be thrownBy {...}
* ^
* </pre>
*/
def thrownBy(fun: => Any) = new ResultOfThrownByApplication(fun)
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* exception should not have message ("file not found")
* ^
* </pre>
*/
def message(expectedMessage: String) = new ResultOfMessageWordApplication(expectedMessage)
/*
// For safe keeping
private implicit def nodeToCanonical(node: scala.xml.Node) = new Canonicalizer(node)
private class Canonicalizer(node: scala.xml.Node) {
def toCanonical: scala.xml.Node = {
node match {
case elem: scala.xml.Elem =>
val canonicalizedChildren =
for (child <- node.child if !child.toString.trim.isEmpty) yield {
child match {
case elem: scala.xml.Elem => elem.toCanonical
case other => other
}
}
new scala.xml.Elem(elem.prefix, elem.label, elem.attributes, elem.scope, canonicalizedChildren: _*)
case other => other
}
}
}
*/
/*
class AType[T : ClassManifest] {
private val clazz = implicitly[ClassManifest[T]].erasure.asInstanceOf[Class[T]]
def isAssignableFromClassOf(o: Any): Boolean = clazz.isAssignableFrom(o.getClass)
def className: String = clazz.getName
}
def a[T : ClassManifest]: AType[T] = new AType[T]
*/
  // This is where InspectorShorthands started
  /** Marker describing which quantifier a collected inspection uses; `name` supplies its `toString`. */
  protected sealed class Collected(name: String) extends Serializable {
    override def toString: String = name
  }
  // Quantifier instances/cases; `doCollected` below pattern-matches on these to pick
  // the matching InspectorAsserting combinator (forAll, forEvery, forBetween, ...).
  private val AllCollected = new Collected("AllCollected")
  private val EveryCollected = new Collected("EveryCollected")
  // NOTE(review): for the case classes the compiler-generated toString (e.g. "BetweenCollected(1,2)")
  // presumably wins over the `name`-based one above — confirm if toString output matters.
  private case class BetweenCollected(from: Int, to: Int) extends Collected("BetweenCollected")
  private case class AtLeastCollected(num: Int) extends Collected("AtLeastCollected")
  private case class AtMostCollected(num: Int) extends Collected("AtMostCollected")
  private val NoCollected = new Collected("NoCollected")
  private case class ExactlyCollected(num: Int) extends Collected("ExactlyCollected")
private[scalatest] def doCollected[T](collected: Collected, xs: scala.collection.GenTraversable[T], original: Any, prettifier: Prettifier, pos: source.Position)(fun: T => Assertion): Assertion = {
val asserting = InspectorAsserting.assertingNatureOfAssertion
collected match {
case AllCollected =>
asserting.forAll(xs, original, true, prettifier, pos) { e =>
fun(e)
}
case AtLeastCollected(num) =>
asserting.forAtLeast(num, xs, original, true, prettifier, pos) { e =>
fun(e)
}
case EveryCollected =>
asserting.forEvery(xs, original, true, prettifier, pos) { e =>
fun(e)
}
case ExactlyCollected(num) =>
asserting.forExactly(num, xs, original, true, prettifier, pos) { e =>
fun(e)
}
case NoCollected =>
asserting.forNo(xs, original, true, prettifier, pos) { e =>
fun(e)
}
case BetweenCollected(from, to) =>
asserting.forBetween(from, to, xs, original, true, prettifier, pos) { e =>
fun(e)
}
case AtMostCollected(num) =>
asserting.forAtMost(num, xs, original, true, prettifier, pos) { e =>
fun(e)
}
}
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="InspectorsMatchers.html"><code>InspectorsMatchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
* @author Chee Seng
*/
final class ResultOfNotWordForCollectedAny[T](collected: Collected, xs: scala.collection.GenTraversable[T], original: Any, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) should not equal (7)
* ^
* </pre>
*/
def equal(right: Any)(implicit equality: Equality[T]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if ((equality.areEqual(e, right)) != shouldBeTrue)
indicateFailure(if (shouldBeTrue) FailureMessages.didNotEqual(prettifier, e, right) else FailureMessages.equaled(prettifier, e, right), None, pos)
else indicateSuccess(shouldBeTrue, FailureMessages.equaled(prettifier, e, right), FailureMessages.didNotEqual(prettifier, e, right))
}
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) should not be (7)
* ^
* </pre>
*/
def be(right: Any): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if ((e == right) != shouldBeTrue)
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotEqualTo(prettifier, e, right) else FailureMessages.wasEqualTo(prettifier, e, right), None, pos)
else indicateSuccess(shouldBeTrue, FailureMessages.wasEqualTo(prettifier, e, right), FailureMessages.wasNotEqualTo(prettifier, e, right))
}
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) should not be <= (7)
* ^
* </pre>
*/
def be(comparison: ResultOfLessThanOrEqualToComparison[T]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if (comparison(e) != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotLessThanOrEqualTo(prettifier, e, comparison.right) else FailureMessages.wasLessThanOrEqualTo(prettifier, e, comparison.right), None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.wasLessThanOrEqualTo(prettifier, e, comparison.right), FailureMessages.wasNotLessThanOrEqualTo(prettifier, e, comparison.right))
}
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) should not be >= (7)
* ^
* </pre>
*/
def be(comparison: ResultOfGreaterThanOrEqualToComparison[T]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if (comparison(e) != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotGreaterThanOrEqualTo(prettifier, e, comparison.right) else FailureMessages.wasGreaterThanOrEqualTo(prettifier, e, comparison.right), None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.wasGreaterThanOrEqualTo(prettifier, e, comparison.right), FailureMessages.wasNotGreaterThanOrEqualTo(prettifier, e, comparison.right))
}
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) should not be < (7)
* ^
* </pre>
*/
def be(comparison: ResultOfLessThanComparison[T]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if (comparison(e) != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotLessThan(prettifier, e, comparison.right) else FailureMessages.wasLessThan(prettifier, e, comparison.right), None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.wasLessThan(prettifier, e, comparison.right), FailureMessages.wasNotLessThan(prettifier, e, comparison.right))
}
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) should not be > (7)
* ^
* </pre>
*/
def be(comparison: ResultOfGreaterThanComparison[T]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if (comparison(e) != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotGreaterThan(prettifier, e, comparison.right) else FailureMessages.wasGreaterThan(prettifier, e, comparison.right), None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.wasGreaterThan(prettifier, e, comparison.right), FailureMessages.wasNotGreaterThan(prettifier, e, comparison.right))
}
}
/**
* <strong>
* The deprecation period for the "be ===" syntax has expired, and the syntax
* will now throw <code>NotAllowedException</code>. Please use should equal, should ===, shouldEqual,
* should be, or shouldBe instead.
* </strong>
*
* <p>
* Note: usually syntax will be removed after its deprecation period. This was left in because otherwise the syntax could in some
* cases still compile, but silently wouldn't work.
* </p>
*/
@deprecated("The deprecation period for the be === syntax has expired. Please use should equal, should ===, shouldEqual, should be, or shouldBe instead.")
def be(comparison: TripleEqualsInvocation[_]): Nothing = {
throw new NotAllowedException(FailureMessages.beTripleEqualsNotAllowed, pos)
}
/**
* This method enables the following syntax, where <code>odd</code> refers to
* a <code>BeMatcher[Int]</code>:
*
* <pre class="stHighlight">
* all(xs) should not be (odd)
* ^
* </pre>
*/
def be(beMatcher: BeMatcher[T]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
val result = beMatcher(e)
if (result.matches != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) result.failureMessage(prettifier) else result.negatedFailureMessage(prettifier), None, pos)
}
else indicateSuccess(shouldBeTrue, result.negatedFailureMessage(prettifier), result.failureMessage(prettifier))
}
}
/**
* This method enables the following syntax, where <code>stack</code> is, for example, of type <code>Stack</code> and
* <code>empty</code> refers to a <code>BePropertyMatcher[Stack]</code>:
*
* <pre class="stHighlight">
* all(xs) should not be (empty)
* ^
* </pre>
*/
def be(bePropertyMatcher: BePropertyMatcher[T]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
val result = bePropertyMatcher(e)
if (result.matches != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNot(prettifier, e, UnquotedString(result.propertyName)) else FailureMessages.was(prettifier, e, UnquotedString(result.propertyName)), None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.was(prettifier, e, UnquotedString(result.propertyName)), FailureMessages.wasNot(prettifier, e, UnquotedString(result.propertyName)))
}
}
/**
* This method enables the following syntax, where <code>notFileMock</code> is, for example, of type <code>File</code> and
* <code>file</code> refers to a <code>BePropertyMatcher[File]</code>:
*
* <pre class="stHighlight">
* all(xs) should not be a (file)
* ^
* </pre>
*/
def be[U >: T](resultOfAWordApplication: ResultOfAWordToBePropertyMatcherApplication[U]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
val result = resultOfAWordApplication.bePropertyMatcher(e)
if (result.matches != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotA(prettifier, e, UnquotedString(result.propertyName)) else FailureMessages.wasA(prettifier, e, UnquotedString(result.propertyName)), None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.wasA(prettifier, e, UnquotedString(result.propertyName)), FailureMessages.wasNotA(prettifier, e, UnquotedString(result.propertyName)))
}
}
/**
* This method enables the following syntax, where <code>keyEvent</code> is, for example, of type <code>KeyEvent</code> and
* <code>actionKey</code> refers to a <code>BePropertyMatcher[KeyEvent]</code>:
*
* <pre class="stHighlight">
* all(keyEvents) should not be an (actionKey)
* ^
* </pre>
*/
def be[U >: T](resultOfAnWordApplication: ResultOfAnWordToBePropertyMatcherApplication[U]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
val result = resultOfAnWordApplication.bePropertyMatcher(e)
if (result.matches != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotAn(prettifier, e, UnquotedString(result.propertyName)) else FailureMessages.wasAn(prettifier, e, UnquotedString(result.propertyName)), None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.wasAn(prettifier, e, UnquotedString(result.propertyName)), FailureMessages.wasNotAn(prettifier, e, UnquotedString(result.propertyName)))
}
}
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(xs) should not be theSameInstanceAs (string)
     *                    ^
     * </pre>
     */
    def be(resultOfSameInstanceAsApplication: ResultOfTheSameInstanceAsApplication): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        e match {
          case ref: AnyRef =>
            // Reference identity (eq) is compared, not equals; shouldBeTrue reflects any
            // outer negation of the `not`.
            if ((resultOfSameInstanceAsApplication.right eq ref) != shouldBeTrue) {
              indicateFailure(if (shouldBeTrue) FailureMessages.wasNotSameInstanceAs(prettifier, e, resultOfSameInstanceAsApplication.right) else FailureMessages.wasSameInstanceAs(prettifier, e, resultOfSameInstanceAsApplication.right), None, pos)
            }
            else indicateSuccess(shouldBeTrue, FailureMessages.wasSameInstanceAs(prettifier, e, resultOfSameInstanceAsApplication.right), FailureMessages.wasNotSameInstanceAs(prettifier, e, resultOfSameInstanceAsApplication.right))
          case _ =>
            // Identity comparison only makes sense for references; reject primitives at runtime.
            throw new IllegalArgumentException("theSameInstanceAs should only be used for AnyRef")
        }
      }
    }
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) should not be definedAt ("apple")
* ^
* </pre>
*/
def be[U](resultOfDefinedAt: ResultOfDefinedAt[U])(implicit ev: T <:< PartialFunction[U, _]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if (e.isDefinedAt(resultOfDefinedAt.right) != shouldBeTrue)
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotDefinedAt(prettifier, e, resultOfDefinedAt.right) else FailureMessages.wasDefinedAt(prettifier, e, resultOfDefinedAt.right), None, pos)
else indicateSuccess(shouldBeTrue, FailureMessages.wasDefinedAt(prettifier, e, resultOfDefinedAt.right), FailureMessages.wasNotDefinedAt(prettifier, e, resultOfDefinedAt.right))
}
}
// TODO: Write tests and implement cases for:
// have(length (9), title ("hi")) (this one we'll use this have method but add a HavePropertyMatcher* arg)
// have(size (9), title ("hi")) (this one we'll use the next have method but add a HavePropertyMatcher* arg)
// have(length(9), size (9), title ("hi")) (for this one we'll need a new overloaded have(ROLWA, ROSWA, HPM*))
// have(size(9), length (9), title ("hi")) (for this one we'll need a new overloaded have(ROSWA, ROLWA, HPM*))
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(xs) should not have length (0)
     *                    ^
     * </pre>
     *
     */
    def have(resultOfLengthWordApplication: ResultOfLengthWordApplication)(implicit len: Length[T]): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Compare the element's actual length (via the Length type class) with the expected one.
        val right = resultOfLengthWordApplication.expectedLength
        val leftLength = len.lengthOf(e)
        if ((leftLength == right) != shouldBeTrue) {
          indicateFailure(if (shouldBeTrue) FailureMessages.hadLengthInsteadOfExpectedLength(prettifier, e, leftLength, right) else FailureMessages.hadLength(prettifier, e, right), None, pos)
        }
        else indicateSuccess(shouldBeTrue, FailureMessages.hadLength(prettifier, e, right), FailureMessages.hadLengthInsteadOfExpectedLength(prettifier, e, leftLength, right))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(xs) should not have size (0)
     *                    ^
     * </pre>
     *
     */
    def have(resultOfSizeWordApplication: ResultOfSizeWordApplication)(implicit sz: Size[T]): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Compare the element's actual size (via the Size type class) with the expected one.
        val right = resultOfSizeWordApplication.expectedSize
        val leftSize = sz.sizeOf(e)
        if ((leftSize == right) != shouldBeTrue) {
          indicateFailure(if (shouldBeTrue) FailureMessages.hadSizeInsteadOfExpectedSize(prettifier, e, leftSize, right) else FailureMessages.hadSize(prettifier, e, right), None, pos)
        }
        else indicateSuccess(shouldBeTrue, FailureMessages.hadSize(prettifier, e, right), FailureMessages.hadSizeInsteadOfExpectedSize(prettifier, e, leftSize, right))
      }
    }
    /**
     * This method enables the following syntax, where <code>badBook</code> is, for example, of type <code>Book</code> and
     * <code>title ("One Hundred Years of Solitude")</code> results in a <code>HavePropertyMatcher[Book]</code>:
     *
     * <pre class="stHighlight">
     * all(books) should not have (title ("One Hundred Years of Solitude"))
     *                       ^
     * </pre>
     */
    def have[U >: T](firstPropertyMatcher: HavePropertyMatcher[U, _], propertyMatchers: HavePropertyMatcher[U, _]*): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Apply every supplied property matcher to this element and collect the results.
        val results =
          for (propertyVerifier <- firstPropertyMatcher :: propertyMatchers.toList) yield
            propertyVerifier(e)
        // The first non-matching property (if any) drives the failure/success decision below.
        val firstFailureOption = results.find(pv => !pv.matches)
        val justOneProperty = propertyMatchers.isEmpty
        // if shouldBeTrue is false, then it is like "not have ()", and should throw TFE if firstFailureOption.isDefined is false
        // if shouldBeTrue is true, then it is like "not (not have ()), which should behave like have ()", and should throw TFE if firstFailureOption.isDefined is true
        if (firstFailureOption.isDefined == shouldBeTrue) {
          firstFailureOption match {
            case Some(firstFailure) =>
              // This is one of these cases, thus will only get here if shouldBeTrue is true
              // 0 0 | 0 | 1
              // 0 1 | 0 | 1
              // 1 0 | 0 | 1
              indicateFailure(
                FailureMessages.propertyDidNotHaveExpectedValue(prettifier,
                  UnquotedString(firstFailure.propertyName),
                  firstFailure.expectedValue,
                  firstFailure.actualValue,
                  e
                ),
                None,
                pos
              )
            case None =>
              // This is this cases, thus will only get here if shouldBeTrue is false
              // 1 1 | 1 | 0
              // With a single property the message names it specifically; with several,
              // a generic "all properties had expected values" message is used.
              val failureMessage =
                if (justOneProperty) {
                  val firstPropertyResult = results.head // know this will succeed, because firstPropertyMatcher was required
                  FailureMessages.propertyHadExpectedValue(prettifier,
                    UnquotedString(firstPropertyResult.propertyName),
                    firstPropertyResult.expectedValue,
                    e
                  )
                }
                else FailureMessages.allPropertiesHadExpectedValues(prettifier, e)
              indicateFailure(failureMessage, None, pos)
          }
        }
        else {
          // Success path: mirror the message selection above for reporting purposes.
          if (shouldBeTrue)
            indicateSuccess(FailureMessages.allPropertiesHadExpectedValues(prettifier, e))
          else {
            firstFailureOption match {
              case Some(firstFailure) =>
                indicateSuccess(
                  FailureMessages.propertyDidNotHaveExpectedValue(prettifier,
                    UnquotedString(firstFailure.propertyName),
                    firstFailure.expectedValue,
                    firstFailure.actualValue,
                    e
                  )
                )
              case None =>
                val message =
                  if (justOneProperty) {
                    val firstPropertyResult = results.head // know this will succeed, because firstPropertyMatcher was required
                    FailureMessages.propertyHadExpectedValue(prettifier,
                      UnquotedString(firstPropertyResult.propertyName),
                      firstPropertyResult.expectedValue,
                      e
                    )
                  }
                  else FailureMessages.allPropertiesHadExpectedValues(prettifier, e)
                indicateSuccess(message)
            }
          }
        }
      }
    }
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) should not be (null)
* ^
* </pre>
*/
def be(o: Null)(implicit ev: T <:< AnyRef): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if ((e == null) != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotNull(prettifier, e) else FailureMessages.wasNull, None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.wasNull, FailureMessages.wasNotNull(prettifier, e))
}
}
// SKIP-SCALATESTJS,NATIVE-START
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(xs) should not be ('empty)
     *                    ^
     * </pre>
     */
    def be(symbol: Symbol)(implicit toAnyRef: T <:< AnyRef): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Resolves the symbol to a Boolean predicate method on the element
        // (e.g. 'empty -> isEmpty); the two false flags select the plain (no a/an) form.
        val matcherResult = matchSymbolToPredicateMethod(toAnyRef(e), symbol, false, false, prettifier, pos)
        if (matcherResult.matches != shouldBeTrue) {
          indicateFailure(if (shouldBeTrue) matcherResult.failureMessage(prettifier) else matcherResult.negatedFailureMessage(prettifier), None, pos)
        }
        else indicateSuccess(shouldBeTrue, matcherResult.negatedFailureMessage(prettifier), matcherResult.failureMessage(prettifier))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(xs) should not be a ('file)
     *                    ^
     * </pre>
     */
    def be(resultOfAWordApplication: ResultOfAWordToSymbolApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Same symbol-to-predicate resolution as the bare-symbol form, but the
        // (true, true) flags request "a"-article phrasing in the generated messages.
        val matcherResult = matchSymbolToPredicateMethod(toAnyRef(e), resultOfAWordApplication.symbol, true, true, prettifier, pos)
        if (matcherResult.matches != shouldBeTrue) {
          indicateFailure(if (shouldBeTrue) matcherResult.failureMessage(prettifier) else matcherResult.negatedFailureMessage(prettifier), None, pos)
        }
        else indicateSuccess(shouldBeTrue, matcherResult.negatedFailureMessage(prettifier), matcherResult.failureMessage(prettifier))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(xs) should not be an ('actionKey)
     *                    ^
     * </pre>
     */
    def be(resultOfAnWordApplication: ResultOfAnWordToSymbolApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // (true, false) flags request "an"-article phrasing in the generated messages.
        val matcherResult = matchSymbolToPredicateMethod(toAnyRef(e), resultOfAnWordApplication.symbol, true, false, prettifier, pos)
        if (matcherResult.matches != shouldBeTrue) {
          indicateFailure(if (shouldBeTrue) matcherResult.failureMessage(prettifier) else matcherResult.negatedFailureMessage(prettifier), None, pos)
        }
        else indicateSuccess(shouldBeTrue, matcherResult.negatedFailureMessage(prettifier), matcherResult.failureMessage(prettifier))
      }
    }
// SKIP-SCALATESTJS,NATIVE-END
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) should not be sorted
* ^
* </pre>
*/
def be(sortedWord: SortedWord)(implicit sortable: Sortable[T]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if (sortable.isSorted(e) != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.wasNotSorted(prettifier, e) else FailureMessages.wasSorted(prettifier, e), None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.wasSorted(prettifier, e), FailureMessages.wasNotSorted(prettifier, e))
}
}
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(xs) should not be readable
     *                    ^
     * </pre>
     */
    def be(readableWord: ReadableWord)(implicit readability: Readability[T]): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Readability is delegated to the Readability type class instance.
        if (readability.isReadable(e) != shouldBeTrue) {
          indicateFailure(if (shouldBeTrue) FailureMessages.wasNotReadable(prettifier, e) else FailureMessages.wasReadable(prettifier, e), None, pos)
        }
        else indicateSuccess(shouldBeTrue, FailureMessages.wasReadable(prettifier, e), FailureMessages.wasNotReadable(prettifier, e))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(xs) should not be writable
     *                    ^
     * </pre>
     */
    def be(writableWord: WritableWord)(implicit writability: Writability[T]): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Writability is delegated to the Writability type class instance.
        if (writability.isWritable(e) != shouldBeTrue) {
          indicateFailure(if (shouldBeTrue) FailureMessages.wasNotWritable(prettifier, e) else FailureMessages.wasWritable(prettifier, e), None, pos)
        }
        else indicateSuccess(shouldBeTrue, FailureMessages.wasWritable(prettifier, e), FailureMessages.wasNotWritable(prettifier, e))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(xs) should not be empty
     *                    ^
     * </pre>
     */
    def be(emptyWord: EmptyWord)(implicit emptiness: Emptiness[T]): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Emptiness is delegated to the Emptiness type class instance.
        if (emptiness.isEmpty(e) != shouldBeTrue) {
          indicateFailure(if (shouldBeTrue) FailureMessages.wasNotEmpty(prettifier, e) else FailureMessages.wasEmpty(prettifier, e), None, pos)
        }
        else indicateSuccess(shouldBeTrue, FailureMessages.wasEmpty(prettifier, e), FailureMessages.wasNotEmpty(prettifier, e))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(xs) should not be defined
     *                    ^
     * </pre>
     */
    def be(definedWord: DefinedWord)(implicit definition: Definition[T]): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Definedness is delegated to the Definition type class instance.
        if (definition.isDefined(e) != shouldBeTrue) {
          indicateFailure(if (shouldBeTrue) FailureMessages.wasNotDefined(prettifier, e) else FailureMessages.wasDefined(prettifier, e), None, pos)
        }
        else indicateSuccess(shouldBeTrue, FailureMessages.wasDefined(prettifier, e), FailureMessages.wasNotDefined(prettifier, e))
      }
    }
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all (xs) should not contain (null)
* ^
* </pre>
*/
def contain(nullValue: Null)(implicit containing: Containing[T]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if ((containing.contains(e, null)) != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.didNotContainNull(prettifier, e) else FailureMessages.containedNull(prettifier, e), None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.containedNull(prettifier, e), FailureMessages.didNotContainNull(prettifier, e))
}
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all (xs) should not contain ("one")
* ^
* </pre>
*/
def contain(expectedElement: Any)(implicit containing: Containing[T]): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
val right = expectedElement
if ((containing.contains(e, right)) != shouldBeTrue) {
indicateFailure(if (shouldBeTrue) FailureMessages.didNotContainExpectedElement(prettifier, e, right) else FailureMessages.containedExpectedElement(prettifier, e, right), None, pos)
}
else indicateSuccess(shouldBeTrue, FailureMessages.containedExpectedElement(prettifier, e, right), FailureMessages.didNotContainExpectedElement(prettifier, e, right))
}
}
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain oneOf ("one")
     *                     ^
     * </pre>
     */
    def contain(oneOf: ResultOfOneOfApplication)(implicit containing: Containing[T]): Assertion = {
      // Extract the expected elements once, before iterating the collection.
      val right = oneOf.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        if (containing.containsOneOf(e, right) != shouldBeTrue)
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainOneOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
            else
              FailureMessages.containedOneOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            None,
            pos)
        else indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedOneOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.didNotContainOneOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain oneElementOf ("one")
     *                     ^
     * </pre>
     */
    def contain(oneElementOf: ResultOfOneElementOfApplication)(implicit containing: Containing[T]): Assertion = {
      val right = oneElementOf.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // distinct: duplicate expected elements are collapsed before the containment check.
        if (containing.containsOneOf(e, right.distinct) != shouldBeTrue)
          indicateFailure(if (shouldBeTrue) FailureMessages.didNotContainOneElementOf(prettifier, e, right) else FailureMessages.containedOneElementOf(prettifier, e, right), None, pos)
        else indicateSuccess(shouldBeTrue, FailureMessages.containedOneElementOf(prettifier, e, right), FailureMessages.didNotContainOneElementOf(prettifier, e, right))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain atLeastOneOf ("one")
     *                     ^
     * </pre>
     */
    def contain(atLeastOneOf: ResultOfAtLeastOneOfApplication)(implicit aggregating: Aggregating[T]): Assertion = {
      // Extract the expected elements once, before iterating the collection.
      val right = atLeastOneOf.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        if (aggregating.containsAtLeastOneOf(e, right) != shouldBeTrue)
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
            else
              FailureMessages.containedAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            None,
            pos)
        else
          indicateSuccess(
            shouldBeTrue,
            FailureMessages.containedAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            FailureMessages.didNotContainAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain atLeastOneElementOf ("one")
     *                     ^
     * </pre>
     */
    def contain(atLeastOneElementOf: ResultOfAtLeastOneElementOfApplication)(implicit evidence: Aggregating[T]): Assertion = {
      val right = atLeastOneElementOf.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // distinct: duplicate expected elements are collapsed before the containment check.
        if (evidence.containsAtLeastOneOf(e, right.distinct) != shouldBeTrue)
          indicateFailure(if (shouldBeTrue) FailureMessages.didNotContainAtLeastOneElementOf(prettifier, e, right) else FailureMessages.containedAtLeastOneElementOf(prettifier, e, right), None, pos)
        else indicateSuccess(shouldBeTrue, FailureMessages.containedAtLeastOneElementOf(prettifier, e, right), FailureMessages.didNotContainAtLeastOneElementOf(prettifier, e, right))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain noneOf ("one")
     *                     ^
     * </pre>
     */
    def contain(noneOf: ResultOfNoneOfApplication)(implicit containing: Containing[T]): Assertion = {
      val right = noneOf.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Note the messages are intentionally inverted relative to other contain variants:
        // failing "not contain noneOf" means the element DID contain at least one of them.
        if (containing.containsNoneOf(e, right) != shouldBeTrue)
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.containedAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
            else
              FailureMessages.didNotContainAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            None,
            pos
          )
        else indicateSuccess(
          shouldBeTrue,
          FailureMessages.didNotContainAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.containedAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain noElementsOf ("one")
     *                     ^
     * </pre>
     */
    def contain(noElementsOf: ResultOfNoElementsOfApplication)(implicit evidence: Containing[T]): Assertion = {
      val right = noElementsOf.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // distinct collapses duplicates; messages are inverted like noneOf, since failing
        // "not contain noElementsOf" means at least one element was in fact contained.
        if (evidence.containsNoneOf(e, right.distinct) != shouldBeTrue)
          indicateFailure(if (shouldBeTrue) FailureMessages.containedAtLeastOneElementOf(prettifier, e, right) else FailureMessages.didNotContainAtLeastOneElementOf(prettifier, e, right), None, pos)
        else indicateSuccess(shouldBeTrue, FailureMessages.didNotContainAtLeastOneElementOf(prettifier, e, right), FailureMessages.containedAtLeastOneElementOf(prettifier, e, right))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain theSameElementsAs ("one")
     *                     ^
     * </pre>
     */
    def contain(theSameElementsAs: ResultOfTheSameElementsAsApplication)(implicit aggregating: Aggregating[T]): Assertion = {
      val right = theSameElementsAs.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Multiset equality (order-insensitive) via the Aggregating type class.
        if (aggregating.containsTheSameElementsAs(e, right) != shouldBeTrue)
          indicateFailure(if (shouldBeTrue) FailureMessages.didNotContainSameElements(prettifier, e, right) else FailureMessages.containedSameElements(prettifier, e, right), None, pos)
        else indicateSuccess(shouldBeTrue, FailureMessages.containedSameElements(prettifier, e, right), FailureMessages.didNotContainSameElements(prettifier, e, right))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain theSameElementsInOrderAs ("one")
     *                     ^
     * </pre>
     */
    def contain(theSameElementsInOrderAs: ResultOfTheSameElementsInOrderAsApplication)(implicit sequencing: Sequencing[T]): Assertion = {
      val right = theSameElementsInOrderAs.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Order-sensitive equality via the Sequencing type class.
        if (sequencing.containsTheSameElementsInOrderAs(e, right) != shouldBeTrue)
          indicateFailure(if (shouldBeTrue) FailureMessages.didNotContainSameElementsInOrder(prettifier, e, right) else FailureMessages.containedSameElementsInOrder(prettifier, e, right), None, pos)
        else indicateSuccess(shouldBeTrue, FailureMessages.containedSameElementsInOrder(prettifier, e, right), FailureMessages.didNotContainSameElementsInOrder(prettifier, e, right))
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain only ("one")
     *                     ^
     * </pre>
     */
    def contain(only: ResultOfOnlyApplication)(implicit aggregating: Aggregating[T]): Assertion = {
      val right = only.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        if (aggregating.containsOnly(e, right) != shouldBeTrue) {
          // If the user passed a single collection as the sole argument, they likely meant
          // to splat its elements; the "friendly reminder" message variants point that out.
          val withFriendlyReminder = right.size == 1 && (right(0).isInstanceOf[scala.collection.GenTraversable[_]] || right(0).isInstanceOf[Every[_]])
          indicateFailure(
            if (shouldBeTrue)
              if (withFriendlyReminder)
                FailureMessages.didNotContainOnlyElementsWithFriendlyReminder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
              else
                FailureMessages.didNotContainOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
            else
              if (withFriendlyReminder)
                FailureMessages.containedOnlyElementsWithFriendlyReminder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
              else
                FailureMessages.containedOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            None,
            pos
          )
        }
        else indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.didNotContainOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain inOrderOnly ("one", "two")
     *                     ^
     * </pre>
     */
    def contain(only: ResultOfInOrderOnlyApplication)(implicit sequencing: Sequencing[T]): Assertion = {
      val right = only.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // Order-sensitive "contains exactly these" check via the Sequencing type class.
        if (sequencing.containsInOrderOnly(e, right) != shouldBeTrue)
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainInOrderOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
            else
              FailureMessages.containedInOrderOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            None,
            pos)
        else indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedInOrderOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.didNotContainInOrderOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain allOf ("one")
     *                     ^
     * </pre>
     */
    def contain(only: ResultOfAllOfApplication)(implicit aggregating: Aggregating[T]): Assertion = {
      // Extract the expected elements once, before iterating the collection.
      val right = only.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        if (aggregating.containsAllOf(e, right) != shouldBeTrue)
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainAllOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
            else
              FailureMessages.containedAllOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            None,
            pos
          )
        else
          indicateSuccess(
            shouldBeTrue,
            FailureMessages.containedAllOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            FailureMessages.didNotContainAllOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain allElementsOf ("one")
     *                     ^
     * </pre>
     */
    def contain(only: ResultOfAllElementsOfApplication)(implicit evidence: Aggregating[T]): Assertion = {
      val right = only.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // distinct: duplicate expected elements are collapsed before the containment check.
        if (evidence.containsAllOf(e, right.distinct) != shouldBeTrue)
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainAllElementsOf(prettifier, e, right)
            else
              FailureMessages.containedAllElementsOf(prettifier, e, right),
            None,
            pos
          )
        else
          indicateSuccess(
            shouldBeTrue,
            FailureMessages.containedAllElementsOf(prettifier, e, right),
            FailureMessages.didNotContainAllElementsOf(prettifier, e, right)
          )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain inOrder ("one")
     *                     ^
     * </pre>
     */
    def contain(inOrder: ResultOfInOrderApplication)(implicit sequencing: Sequencing[T]): Assertion = {
      // Extract the expected elements once, before iterating the collection.
      val right = inOrder.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        if (sequencing.containsInOrder(e, right) != shouldBeTrue)
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainAllOfElementsInOrder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
            else
              FailureMessages.containedAllOfElementsInOrder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            None,
            pos
          )
        else
          indicateSuccess(
            shouldBeTrue,
            FailureMessages.containedAllOfElementsInOrder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            FailureMessages.didNotContainAllOfElementsInOrder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain inOrderElementsOf (List("one"))
     *                     ^
     * </pre>
     */
    def contain(inOrderElementsOf: ResultOfInOrderElementsOfApplication)(implicit evidence: Sequencing[T]): Assertion = {
      val right = inOrderElementsOf.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // distinct: duplicate expected elements are collapsed before the ordered check.
        if (evidence.containsInOrder(e, right.distinct) != shouldBeTrue)
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainAllElementsOfInOrder(prettifier, e, right)
            else
              FailureMessages.containedAllElementsOfInOrder(prettifier, e, right),
            None,
            pos
          )
        else
          indicateSuccess(
            shouldBeTrue,
            FailureMessages.containedAllElementsOfInOrder(prettifier, e, right),
            FailureMessages.didNotContainAllElementsOfInOrder(prettifier, e, right)
          )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain atMostOneOf ("one")
     *                     ^
     * </pre>
     */
    def contain(atMostOneOf: ResultOfAtMostOneOfApplication)(implicit aggregating: Aggregating[T]): Assertion = {
      // Extract the expected elements once, before iterating the collection.
      val right = atMostOneOf.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        if (aggregating.containsAtMostOneOf(e, right) != shouldBeTrue)
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainAtMostOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
            else
              FailureMessages.containedAtMostOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            None,
            pos
          )
        else
          indicateSuccess(
            shouldBeTrue,
            FailureMessages.containedAtMostOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
            FailureMessages.didNotContainAtMostOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all (xs) should not contain atMostOneElementOf List("one")
     *                     ^
     * </pre>
     */
    def contain(atMostOneElementOf: ResultOfAtMostOneElementOfApplication)(implicit evidence: Aggregating[T]): Assertion = {
      val right = atMostOneElementOf.right
      doCollected(collected, xs, original, prettifier, pos) { e =>
        // distinct: duplicate expected elements are collapsed before the check.
        if (evidence.containsAtMostOneOf(e, right.distinct) != shouldBeTrue)
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainAtMostOneElementOf(prettifier, e, right)
            else
              FailureMessages.containedAtMostOneElementOf(prettifier, e, right),
            None,
            pos
          )
        else
          indicateSuccess(
            shouldBeTrue,
            FailureMessages.containedAtMostOneElementOf(prettifier, e, right),
            FailureMessages.didNotContainAtMostOneElementOf(prettifier, e, right)
          )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(colOfMap) should not contain key ("three")
     *                          ^
     * </pre>
     */
    def contain(resultOfKeyWordApplication: ResultOfKeyWordApplication)(implicit keyMapping: KeyMapping[T]): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { map =>
        // Key membership is decided by the KeyMapping type class for T.
        val expectedKey = resultOfKeyWordApplication.expectedKey
        if ((keyMapping.containsKey(map, expectedKey)) != shouldBeTrue) {
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainKey(prettifier, map, expectedKey)
            else
              FailureMessages.containedKey(prettifier, map, expectedKey),
            None,
            pos
          )
        }
        else
          indicateSuccess(
            shouldBeTrue,
            FailureMessages.containedKey(prettifier, map, expectedKey),
            FailureMessages.didNotContainKey(prettifier, map, expectedKey)
          )
      }
    }
    /**
     * This method enables the following syntax:
     *
     * <pre class="stHighlight">
     * all(colOfMap) should not contain value (3)
     *                          ^
     * </pre>
     */
    def contain(resultOfValueWordApplication: ResultOfValueWordApplication)(implicit valueMapping: ValueMapping[T]): Assertion = {
      doCollected(collected, xs, original, prettifier, pos) { map =>
        // Value membership is decided by the ValueMapping type class for T.
        val expectedValue = resultOfValueWordApplication.expectedValue
        if ((valueMapping.containsValue(map, expectedValue)) != shouldBeTrue) {
          indicateFailure(
            if (shouldBeTrue)
              FailureMessages.didNotContainValue(prettifier, map, expectedValue)
            else
              FailureMessages.containedValue(prettifier, map, expectedValue),
            None,
            pos
          )
        }
        else
          indicateSuccess(
            shouldBeTrue,
            FailureMessages.containedValue(prettifier, map, expectedValue),
            FailureMessages.didNotContainValue(prettifier, map, expectedValue)
          )
      }
    }
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(string) should not startWith ("1.7")
* ^
* </pre>
*/
def startWith(right: String)(implicit ev: T <:< String): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
if ((e.indexOf(right) == 0) != shouldBeTrue)
indicateFailure(
if (shouldBeTrue)
FailureMessages.didNotStartWith(prettifier, e, right)
else
FailureMessages.startedWith(prettifier, e, right),
None,
pos
)
else
indicateSuccess(
shouldBeTrue,
FailureMessages.startedWith(prettifier, e, right),
FailureMessages.didNotStartWith(prettifier, e, right)
)
}
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(string) should not startWith regex ("Hel*o")
* ^
* </pre>
*
* <p>
* The regular expression passed following the <code>regex</code> token can be either a <code>String</code>
* or a <code>scala.util.matching.Regex</code>.
* </p>
*/
def startWith(resultOfRegexWordApplication: ResultOfRegexWordApplication)(implicit ev: T <:< String): Assertion = {
  doCollected(collected, xs, original, prettifier, pos) { e =>
    // The helper returns a match result whose messages already account for any
    // regex group expectations carried by the application.
    val result = startWithRegexWithGroups(e, resultOfRegexWordApplication.regex, resultOfRegexWordApplication.groups)
    if (result.matches != shouldBeTrue)
      indicateFailure(
        if (shouldBeTrue)
          result.failureMessage(prettifier)
        else
          result.negatedFailureMessage(prettifier),
        None,
        pos
      )
    else
      indicateSuccess(
        shouldBeTrue,
        result.negatedFailureMessage(prettifier),
        result.failureMessage(prettifier)
      )
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(string) should not endWith ("1.7")
* ^
* </pre>
*/
def endWith(expectedSubstring: String)(implicit ev: T <:< String): Assertion = {
  doCollected(collected, xs, original, prettifier, pos) { e =>
    // Compare the actual suffix check against the expected polarity.
    val endsAsExpected = e.endsWith(expectedSubstring)
    if (endsAsExpected == shouldBeTrue)
      indicateSuccess(
        shouldBeTrue,
        FailureMessages.endedWith(prettifier, e, expectedSubstring),
        FailureMessages.didNotEndWith(prettifier, e, expectedSubstring)
      )
    else {
      val message =
        if (shouldBeTrue) FailureMessages.didNotEndWith(prettifier, e, expectedSubstring)
        else FailureMessages.endedWith(prettifier, e, expectedSubstring)
      indicateFailure(message, None, pos)
    }
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(string) should not endWith regex ("wor.d")
* ^
* </pre>
*/
def endWith(resultOfRegexWordApplication: ResultOfRegexWordApplication)(implicit ev: T <:< String): Assertion = {
  doCollected(collected, xs, original, prettifier, pos) { e =>
    // The helper returns a match result whose messages already account for any
    // regex group expectations carried by the application.
    val result = endWithRegexWithGroups(e, resultOfRegexWordApplication.regex, resultOfRegexWordApplication.groups)
    if (result.matches != shouldBeTrue)
      indicateFailure(
        if (shouldBeTrue)
          result.failureMessage(prettifier)
        else
          result.negatedFailureMessage(prettifier),
        None,
        pos
      )
    else
      indicateSuccess(
        shouldBeTrue,
        result.negatedFailureMessage(prettifier),
        result.failureMessage(prettifier)
      )
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(string) should not include regex ("wo.ld")
* ^
* </pre>
*
* <p>
* The regular expression passed following the <code>regex</code> token can be either a <code>String</code>
* or a <code>scala.util.matching.Regex</code>.
* </p>
*/
def include(resultOfRegexWordApplication: ResultOfRegexWordApplication)(implicit ev: T <:< String): Assertion = {
  doCollected(collected, xs, original, prettifier, pos) { e =>
    // Evaluate the regex (with any group expectations) once, then branch on
    // whether the outcome agrees with the expected polarity.
    val result = includeRegexWithGroups(e, resultOfRegexWordApplication.regex, resultOfRegexWordApplication.groups)
    if (result.matches == shouldBeTrue)
      indicateSuccess(
        shouldBeTrue,
        result.negatedFailureMessage(prettifier),
        result.failureMessage(prettifier)
      )
    else {
      val message =
        if (shouldBeTrue) result.failureMessage(prettifier)
        else result.negatedFailureMessage(prettifier)
      indicateFailure(message, None, pos)
    }
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(string) should not include ("world")
* ^
* </pre>
*/
def include(expectedSubstring: String)(implicit ev: T <:< String): Assertion = {
  doCollected(collected, xs, original, prettifier, pos) { e =>
    // Idiom fix: `contains` states the intent directly; it is equivalent to the
    // previous `indexOf(expectedSubstring) >= 0`. `e` converts to String via `ev`.
    if ((e contains expectedSubstring) != shouldBeTrue)
      indicateFailure(
        if (shouldBeTrue)
          FailureMessages.didNotIncludeSubstring(prettifier, e, expectedSubstring)
        else
          FailureMessages.includedSubstring(prettifier, e, expectedSubstring),
        None,
        pos
      )
    else
      indicateSuccess(
        shouldBeTrue,
        FailureMessages.includedSubstring(prettifier, e, expectedSubstring),
        FailureMessages.didNotIncludeSubstring(prettifier, e, expectedSubstring)
      )
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(string) should not fullyMatch regex ("""(-)?(\d+)(\.\d*)?""")
* ^
* </pre>
*
* <p>
* The regular expression passed following the <code>regex</code> token can be either a <code>String</code>
* or a <code>scala.util.matching.Regex</code>.
* </p>
*/
def fullyMatch(resultOfRegexWordApplication: ResultOfRegexWordApplication)(implicit ev: T <:< String): Assertion = {
  doCollected(collected, xs, original, prettifier, pos) { e =>
    // Full-string match (anchored at both ends), including any regex group
    // expectations carried by the application.
    val result = fullyMatchRegexWithGroups(e, resultOfRegexWordApplication.regex, resultOfRegexWordApplication.groups)
    if (result.matches != shouldBeTrue)
      indicateFailure(
        if (shouldBeTrue)
          result.failureMessage(prettifier)
        else
          result.negatedFailureMessage(prettifier),
        None,
        pos
      )
    else
      indicateSuccess(
        shouldBeTrue,
        result.negatedFailureMessage(prettifier),
        result.failureMessage(prettifier)
      )
  }
}
/**
* Overrides to return pretty toString.
*
* @return "ResultOfNotWordForCollectedAny([collected], [xs], [shouldBeTrue])"
*/
override def toString: String = "ResultOfNotWordForCollectedAny(" + Prettifier.default(collected) + ", " + Prettifier.default(xs) + ", " + Prettifier.default(shouldBeTrue) + ")"
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="InspectorsMatchers.html"><code>InspectorsMatchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
* @author Chee Seng
*/
// Target of `all(xs) should contain ...` style syntax. Each method delegates the
// actual containment semantics to a type-class instance (Containing, Aggregating,
// Sequencing, KeyMapping, ValueMapping) and reports per-element results through
// doCollected. `shouldBeTrue` is false when this node was reached via `should not`.
final class ResultOfContainWordForCollectedAny[T](collected: Collected, xs: scala.collection.GenTraversable[T], original: Any, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain oneOf (1, 2)
   *                        ^
   * </pre>
   */
  def oneOf(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit containing: Containing[T]): Assertion = {
    val right = firstEle :: secondEle :: remainingEles.toList
    // Duplicates in an explicit oneOf list are a usage error, rejected eagerly.
    if (right.distinct.size != right.size)
      throw new NotAllowedException(FailureMessages.oneOfDuplicate, pos)
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (containing.containsOneOf(e, right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainOneOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          else
            FailureMessages.containedOneOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedOneOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.didNotContainOneOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain oneElementOf List(1, 2)
   *                        ^
   * </pre>
   */
  def oneElementOf(elements: GenTraversable[Any])(implicit containing: Containing[T]): Assertion = {
    val right = elements.toList
    doCollected(collected, xs, original, prettifier, pos) { e =>
      // Unlike oneOf, duplicates in a supplied collection are tolerated and
      // simply collapsed before the check.
      if (containing.containsOneOf(e, right.distinct) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainOneElementOf(prettifier, e, right)
          else
            FailureMessages.containedOneElementOf(prettifier, e, right),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedOneElementOf(prettifier, e, right),
          FailureMessages.didNotContainOneElementOf(prettifier, e, right)
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain atLeastOneOf (1, 2)
   *                        ^
   * </pre>
   */
  def atLeastOneOf(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit aggregating: Aggregating[T]): Assertion = {
    val right = firstEle :: secondEle :: remainingEles.toList
    if (right.distinct.size != right.size)
      throw new NotAllowedException(FailureMessages.atLeastOneOfDuplicate, pos)
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (aggregating.containsAtLeastOneOf(e, right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          else
            FailureMessages.containedAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.didNotContainAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain atLeastOneElementOf List(1, 2)
   *                        ^
   * </pre>
   */
  def atLeastOneElementOf(elements: GenTraversable[Any])(implicit aggregating: Aggregating[T]): Assertion = {
    val right = elements.toList
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (aggregating.containsAtLeastOneOf(e, right.distinct) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainAtLeastOneElementOf(prettifier, e, right)
          else
            FailureMessages.containedAtLeastOneElementOf(prettifier, e, right),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedAtLeastOneElementOf(prettifier, e, right),
          FailureMessages.didNotContainAtLeastOneElementOf(prettifier, e, right)
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain noneOf (1, 2)
   *                        ^
   * </pre>
   */
  def noneOf(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit containing: Containing[T]): Assertion = {
    val right = firstEle :: secondEle :: remainingEles.toList
    if (right.distinct.size != right.size)
      throw new NotAllowedException(FailureMessages.noneOfDuplicate, pos)
    doCollected(collected, xs, original, prettifier, pos) { e =>
      // Note the deliberately inverted messages: failing `noneOf` means the
      // collection DID contain at least one of the given elements.
      if (containing.containsNoneOf(e, right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.containedAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          else
            FailureMessages.didNotContainAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.didNotContainAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.containedAtLeastOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain noElementsOf List(1, 2)
   *                        ^
   * </pre>
   */
  def noElementsOf(elements: GenTraversable[Any])(implicit containing: Containing[T]): Assertion = {
    val right = elements.toList
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (containing.containsNoneOf(e, right.distinct) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.containedAtLeastOneElementOf(prettifier, e, right)
          else
            FailureMessages.didNotContainAtLeastOneElementOf(prettifier, e, right),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.didNotContainAtLeastOneElementOf(prettifier, e, right),
          FailureMessages.containedAtLeastOneElementOf(prettifier, e, right)
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain theSameElementsAs List(1, 2)
   *                        ^
   * </pre>
   */
  def theSameElementsAs(right: GenTraversable[_])(implicit aggregating: Aggregating[T]): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (aggregating.containsTheSameElementsAs(e, right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainSameElements(prettifier, e, right)
          else
            FailureMessages.containedSameElements(prettifier, e, right),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedSameElements(prettifier, e, right),
          FailureMessages.didNotContainSameElements(prettifier, e, right)
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain theSameElementsInOrderAs List(1, 2)
   *                        ^
   * </pre>
   */
  def theSameElementsInOrderAs(right: GenTraversable[_])(implicit sequencing: Sequencing[T]): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (sequencing.containsTheSameElementsInOrderAs(e, right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainSameElementsInOrder(prettifier, e, right)
          else
            FailureMessages.containedSameElementsInOrder(prettifier, e, right),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedSameElementsInOrder(prettifier, e, right),
          FailureMessages.didNotContainSameElementsInOrder(prettifier, e, right)
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain only (1, 2)
   *                        ^
   * </pre>
   */
  def only(right: Any*)(implicit aggregating: Aggregating[T]): Assertion = {
    if (right.isEmpty)
      throw new NotAllowedException(FailureMessages.onlyEmpty, pos)
    if (right.distinct.size != right.size)
      throw new NotAllowedException(FailureMessages.onlyDuplicate, pos)
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (aggregating.containsOnly(e, right) != shouldBeTrue) {
        // A single collection argument is usually a mistake (the user probably
        // meant theSameElementsAs); add a friendly reminder to the message.
        val withFriendlyReminder = right.size == 1 && (right(0).isInstanceOf[scala.collection.GenTraversable[_]] || right(0).isInstanceOf[Every[_]])
        indicateFailure(
          if (shouldBeTrue)
            if (withFriendlyReminder)
              FailureMessages.didNotContainOnlyElementsWithFriendlyReminder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
            else
              FailureMessages.didNotContainOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          else
            if (withFriendlyReminder)
              FailureMessages.containedOnlyElementsWithFriendlyReminder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
            else
              FailureMessages.containedOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          None,
          pos
        )
      }
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.didNotContainOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain inOrderOnly (1, 2)
   *                        ^
   * </pre>
   */
  def inOrderOnly(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit sequencing: Sequencing[T]): Assertion = {
    val right = firstEle :: secondEle :: remainingEles.toList
    if (right.distinct.size != right.size)
      throw new NotAllowedException(FailureMessages.inOrderOnlyDuplicate, pos)
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (sequencing.containsInOrderOnly(e, right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainInOrderOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          else
            FailureMessages.containedInOrderOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedInOrderOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.didNotContainInOrderOnlyElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain allOf (1, 2)
   *                        ^
   * </pre>
   */
  def allOf(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit aggregating: Aggregating[T]): Assertion = {
    val right = firstEle :: secondEle :: remainingEles.toList
    if (right.distinct.size != right.size)
      throw new NotAllowedException(FailureMessages.allOfDuplicate, pos)
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (aggregating.containsAllOf(e, right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainAllOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          else
            FailureMessages.containedAllOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedAllOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.didNotContainAllOfElements(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain allElementsOf List(1, 2)
   *                        ^
   * </pre>
   */
  def allElementsOf(elements: GenTraversable[Any])(implicit aggregating: Aggregating[T]): Assertion = {
    val right = elements.toList
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (aggregating.containsAllOf(e, right.distinct) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainAllElementsOf(prettifier, e, right)
          else
            FailureMessages.containedAllElementsOf(prettifier, e, right),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedAllElementsOf(prettifier, e, right),
          FailureMessages.didNotContainAllElementsOf(prettifier, e, right)
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain inOrder (1, 2)
   *                        ^
   * </pre>
   */
  def inOrder(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit sequencing: Sequencing[T]): Assertion = {
    val right = firstEle :: secondEle :: remainingEles.toList
    if (right.distinct.size != right.size)
      throw new NotAllowedException(FailureMessages.inOrderDuplicate, pos)
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (sequencing.containsInOrder(e, right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainAllOfElementsInOrder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          else
            FailureMessages.containedAllOfElementsInOrder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedAllOfElementsInOrder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.didNotContainAllOfElementsInOrder(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain inOrderElementsOf List(1, 2)
   *                        ^
   * </pre>
   */
  def inOrderElementsOf(elements: GenTraversable[Any])(implicit sequencing: Sequencing[T]): Assertion = {
    val right = elements.toList
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (sequencing.containsInOrder(e, right.distinct) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainAllElementsOfInOrder(prettifier, e, right)
          else
            FailureMessages.containedAllElementsOfInOrder(prettifier, e, right),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedAllElementsOfInOrder(prettifier, e, right),
          FailureMessages.didNotContainAllElementsOfInOrder(prettifier, e, right)
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain atMostOneOf (1, 2)
   *                        ^
   * </pre>
   */
  def atMostOneOf(firstEle: Any, secondEle: Any, remainingEles: Any*)(implicit aggregating: Aggregating[T]): Assertion = {
    val right = firstEle :: secondEle :: remainingEles.toList
    if (right.distinct.size != right.size)
      throw new NotAllowedException(FailureMessages.atMostOneOfDuplicate, pos)
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (aggregating.containsAtMostOneOf(e, right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainAtMostOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
          else
            FailureMessages.containedAtMostOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedAtMostOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", "))),
          FailureMessages.didNotContainAtMostOneOf(prettifier, e, UnquotedString(right.map(r => FailureMessages.decorateToStringValue(prettifier, r)).mkString(", ")))
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should contain atMostOneElementOf List(1, 2)
   *                        ^
   * </pre>
   */
  def atMostOneElementOf(elements: GenTraversable[Any])(implicit aggregating: Aggregating[T]): Assertion = {
    val right = elements.toList
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (aggregating.containsAtMostOneOf(e, right.distinct) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainAtMostOneElementOf(prettifier, e, right)
          else
            FailureMessages.containedAtMostOneElementOf(prettifier, e, right),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedAtMostOneElementOf(prettifier, e, right),
          FailureMessages.didNotContainAtMostOneElementOf(prettifier, e, right)
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(colOfMap) should contain key ("one")
   *                              ^
   * </pre>
   */
  def key(expectedKey: Any)(implicit keyMapping: KeyMapping[T]): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { map =>
      if (keyMapping.containsKey(map, expectedKey) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainKey(prettifier, map, expectedKey)
          else
            FailureMessages.containedKey(prettifier, map, expectedKey),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedKey(prettifier, map, expectedKey),
          FailureMessages.didNotContainKey(prettifier, map, expectedKey)
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(colOfMap) should contain value (1)
   *                              ^
   * </pre>
   */
  def value(expectedValue: Any)(implicit valueMapping: ValueMapping[T]): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { map =>
      if (valueMapping.containsValue(map, expectedValue) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.didNotContainValue(prettifier, map, expectedValue)
          else
            FailureMessages.containedValue(prettifier, map, expectedValue),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.containedValue(prettifier, map, expectedValue),
          FailureMessages.didNotContainValue(prettifier, map, expectedValue)
        )
    }
  }
  /**
   * Overrides to return pretty toString.
   *
   * @return "ResultOfContainWordForCollectedAny([collected], [xs], [shouldBeTrue])"
   */
  override def toString: String = "ResultOfContainWordForCollectedAny(" + Prettifier.default(collected) + ", " + Prettifier.default(xs) + ", " + Prettifier.default(shouldBeTrue) + ")"
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="InspectorsMatchers.html"><code>InspectorsMatchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
* @author Chee Seng
*/
// Target of `all(xs) should be ...` style syntax.
sealed class ResultOfBeWordForCollectedAny[T](collected: Collected, xs: scala.collection.GenTraversable[T], original: Any, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
  // TODO: Missing should(AMatcher) and should(AnMatcher)
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should be theSameInstanceAs anotherObject
   *                   ^
   * </pre>
   */
  def theSameInstanceAs(right: AnyRef)(implicit toAnyRef: T <:< AnyRef): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { e =>
      // Reference identity (eq), not equality.
      if ((toAnyRef(e) eq right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.wasNotSameInstanceAs(prettifier, e, right)
          else
            FailureMessages.wasSameInstanceAs(prettifier, e, right),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.wasSameInstanceAs(prettifier, e, right),
          FailureMessages.wasNotSameInstanceAs(prettifier, e, right)
        )
    }
  }
  // SKIP-SCALATESTJS,NATIVE-START
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should be a ('file)
   *                   ^
   * </pre>
   */
  def a(symbol: Symbol)(implicit toAnyRef: T <:< AnyRef): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { e =>
      // Reflectively looks up a Boolean predicate method named after the symbol.
      val matcherResult = matchSymbolToPredicateMethod(toAnyRef(e), symbol, true, true, prettifier, pos)
      if (matcherResult.matches != shouldBeTrue) {
        indicateFailure(
          if (shouldBeTrue)
            matcherResult.failureMessage(prettifier)
          else
            matcherResult.negatedFailureMessage(prettifier),
          None,
          pos
        )
      }
      else
        indicateSuccess(
          shouldBeTrue,
          matcherResult.negatedFailureMessage(prettifier),
          matcherResult.failureMessage(prettifier)
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(xs) should be an ('orange)
   *                   ^
   * </pre>
   */
  def an(symbol: Symbol)(implicit toAnyRef: T <:< AnyRef): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { e =>
      val matcherResult = matchSymbolToPredicateMethod(toAnyRef(e), symbol, true, false, prettifier, pos)
      if (matcherResult.matches != shouldBeTrue) {
        indicateFailure(
          if (shouldBeTrue)
            matcherResult.failureMessage(prettifier)
          else
            matcherResult.negatedFailureMessage(prettifier),
          None,
          pos
        )
      }
      else
        indicateSuccess(
          shouldBeTrue,
          matcherResult.negatedFailureMessage(prettifier),
          matcherResult.failureMessage(prettifier)
        )
    }
  }
  // SKIP-SCALATESTJS,NATIVE-END
  /**
   * This method enables the following syntax, where <code>badBook</code> is, for example, of type <code>Book</code> and
   * <code>goodRead</code> refers to a <code>BePropertyMatcher[Book]</code>:
   *
   * <pre class="stHighlight">
   * all(books) should be a (goodRead)
   *                      ^
   * </pre>
   */
  def a[U <: T](bePropertyMatcher: BePropertyMatcher[U])(implicit ev: T <:< AnyRef): Assertion = { // TODO: Try supporting 2.10 AnyVals
    doCollected(collected, xs, original, prettifier, pos) { e =>
      val result = bePropertyMatcher(e.asInstanceOf[U])
      if (result.matches != shouldBeTrue) {
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.wasNotA(prettifier, e, UnquotedString(result.propertyName))
          else
            FailureMessages.wasA(prettifier, e, UnquotedString(result.propertyName)),
          None,
          pos
        )
      }
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.wasA(prettifier, e, UnquotedString(result.propertyName)),
          FailureMessages.wasNotA(prettifier, e, UnquotedString(result.propertyName))
        )
    }
  }
  /**
   * This method enables the following syntax, where <code>badBook</code> is, for example, of type <code>Book</code> and
   * <code>excellentRead</code> refers to a <code>BePropertyMatcher[Book]</code>:
   *
   * <pre class="stHighlight">
   * all(books) should be an (excellentRead)
   *                      ^
   * </pre>
   */
  def an[U <: T](beTrueMatcher: BePropertyMatcher[U])(implicit ev: T <:< AnyRef): Assertion = { // TODO: Try supporting 2.10 AnyVals
    doCollected(collected, xs, original, prettifier, pos) { e =>
      val beTrueMatchResult = beTrueMatcher(e.asInstanceOf[U])
      if (beTrueMatchResult.matches != shouldBeTrue) {
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.wasNotAn(prettifier, e, UnquotedString(beTrueMatchResult.propertyName))
          else
            FailureMessages.wasAn(prettifier, e, UnquotedString(beTrueMatchResult.propertyName)),
          None,
          pos
        )
      }
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.wasAn(prettifier, e, UnquotedString(beTrueMatchResult.propertyName)),
          FailureMessages.wasNotAn(prettifier, e, UnquotedString(beTrueMatchResult.propertyName))
        )
    }
  }
  /**
   * This method enables the following syntax, where <code>fraction</code> is, for example, of type <code>PartialFunction</code>:
   *
   * <pre class="stHighlight">
   * all(xs) should be definedAt (6)
   *                   ^
   * </pre>
   */
  def definedAt[U](right: U)(implicit ev: T <:< PartialFunction[U, _]): Assertion = {
    // Consistency fix: pass `original` (the user's collection, used for error
    // reporting) as the third argument, as every other method in this class does,
    // rather than repeating `xs`.
    doCollected(collected, xs, original, prettifier, pos) { e =>
      if (e.isDefinedAt(right) != shouldBeTrue)
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.wasNotDefinedAt(prettifier, e, right)
          else
            FailureMessages.wasDefinedAt(prettifier, e, right),
          None,
          pos
        )
      else
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.wasDefinedAt(prettifier, e, right),
          FailureMessages.wasNotDefinedAt(prettifier, e, right)
        )
    }
  }
  /**
   * Overrides to return pretty toString.
   *
   * @return "ResultOfBeWordForCollectedAny([collected], [xs], [shouldBeTrue])"
   */
  override def toString: String = "ResultOfBeWordForCollectedAny(" + Prettifier.default(collected) + ", " + Prettifier.default(xs) + ", " + Prettifier.default(shouldBeTrue) + ")"
}
// SKIP-SCALATESTJS,NATIVE-START
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="InspectorsMatchers.html"><code>InspectorsMatchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
* @author Chee Seng
*/
// Array-specialized variant of the collected `be` DSL node.
final class ResultOfBeWordForCollectedArray[T](collected: Collected, xs: scala.collection.GenTraversable[Array[T]], original: Any, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position)
  extends ResultOfBeWordForCollectedAny(collected, xs, original, shouldBeTrue, prettifier, pos) {

  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(colOfArray) should be ('empty)
   *                           ^
   * </pre>
   */
  def apply(right: Symbol): Matcher[Array[T]] = {
    // Produce a matcher that reflectively invokes the predicate named by the
    // symbol against a deep view of the array.
    new Matcher[Array[T]] {
      def apply(left: Array[T]): MatchResult =
        matchSymbolToPredicateMethod(deep(left), right, false, false, prettifier, pos)
    }
  }

  /**
   * Overrides to return pretty toString.
   *
   * @return "ResultOfBeWordForCollectedArray([collected], [xs], [shouldBeTrue])"
   */
  override def toString: String =
    s"ResultOfBeWordForCollectedArray(${Prettifier.default(collected)}, ${Prettifier.default(xs)}, ${Prettifier.default(shouldBeTrue)})"
}
// SKIP-SCALATESTJS,NATIVE-END
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="InspectorsMatchers.html"><code>InspectorsMatchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
* @author Chee Seng
*/
final class ResultOfCollectedAny[T](collected: Collected, xs: scala.collection.GenTraversable[T], original: Any, prettifier: Prettifier, pos: source.Position) {
// TODO: shouldBe null works, but should be (null) does not when type is Any:
/*
scala> val ys = List(null, null, 1)
ys: List[Any] = List(null, null, 1)
scala> all (ys) shouldBe null
<console>:15: error: ambiguous reference to overloaded definition,
both method shouldBe in class ResultOfCollectedAny of type (spread: org.scalactic.Spread[Any])Unit
and method shouldBe in class ResultOfCollectedAny of type (beMatcher: org.scalatest.matchers.BeMatcher[Any])Unit
match argument types (Null)
all (ys) shouldBe null
^
scala> all (ys) should be (null)
org.scalatest.exceptions.TestFailedException: org.scalatest.Matchers$ResultOfCollectedAny@18515783 was not null
at org.scalatest.MatchersHelper$.newTestFailedException(MatchersHelper.scala:163)
at org.scalatest.Matchers$ShouldMethodHelper$.shouldMatcher(Matchers.scala:5529)
at org.scalatest.Matchers$AnyShouldWrapper.should(Matchers.scala:5563)
at .<init>(<console>:15)
at .<clinit>(<console>)
*/
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) should be (3)
* ^
* </pre>
*/
def should(rightMatcher: Matcher[T]): Assertion = {
  // Applies the matcher to every selected element; the first failing element
  // short-circuits via indicateFailure (behavior of doCollected, defined elsewhere).
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val result = rightMatcher(e)
    result match {
      // EqualMatchResult additionally carries a difference analysis, which is
      // passed along so the failure report can show it.
      case equalMatchResult: EqualMatchResult =>
        if (equalMatchResult.matches)
          indicateSuccess(result.negatedFailureMessage(prettifier))
        else {
          val failureMessage = equalMatchResult.failureMessage(prettifier)
          val analysis = equalMatchResult.analysis
          indicateFailure(failureMessage, None, pos, analysis)
        }
      case _ =>
        // MatchFailed extracts the failure message iff the match did not succeed.
        MatchFailed.unapply(result)(prettifier) match {
          case Some(failureMessage) => indicateFailure(failureMessage, None, pos)
          case None => indicateSuccess(result.negatedFailureMessage(prettifier))
        }
    }
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all (xs) shouldEqual 7
* ^
* </pre>
*/
def shouldEqual(right: Any)(implicit equality: Equality[T]): Assertion = {
  // Equality is decided by the implicit Equality[T] type class, not ==.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (equality.areEqual(e, right))
      indicateSuccess(FailureMessages.equaled(prettifier, e, right))
    else {
      val prettyPair = prettifier(e, right)
      indicateFailure(Resources.formatString(Resources.rawDidNotEqual, Array(prettyPair.left, prettyPair.right)), None, pos, prettyPair.analysis)
    }
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) shouldEqual 7.1 +- 0.2
* ^
* </pre>
*/
def shouldEqual(spread: Spread[T]): Assertion = {
  // Tolerance check: succeeds when each element is within spread.pivot +- spread.tolerance.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (spread.isWithin(e))
      indicateSuccess(FailureMessages.equaledPlusOrMinus(prettifier, e, spread.pivot, spread.tolerance))
    else
      indicateFailure(FailureMessages.didNotEqualPlusOrMinus(prettifier, e, spread.pivot, spread.tolerance), None, pos)
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe sorted
* ^
* </pre>
*/
def shouldBe(sortedWord: SortedWord)(implicit sortable: Sortable[T]): Assertion = {
  // Sortedness is defined by the implicit Sortable[T] type class.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (sortable.isSorted(e)) indicateSuccess(FailureMessages.wasSorted(prettifier, e))
    else indicateFailure(FailureMessages.wasNotSorted(prettifier, e), None, pos)
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe readable
* ^
* </pre>
*/
def shouldBe(readableWord: ReadableWord)(implicit readability: Readability[T]): Assertion = {
  // Readability is defined by the implicit Readability[T] type class.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (readability.isReadable(e)) indicateSuccess(FailureMessages.wasReadable(prettifier, e))
    else indicateFailure(FailureMessages.wasNotReadable(prettifier, e), None, pos)
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe writable
* ^
* </pre>
*/
def shouldBe(writableWord: WritableWord)(implicit writability: Writability[T]): Assertion = {
  // Writability is defined by the implicit Writability[T] type class.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (writability.isWritable(e)) indicateSuccess(FailureMessages.wasWritable(prettifier, e))
    else indicateFailure(FailureMessages.wasNotWritable(prettifier, e), None, pos)
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe empty
* ^
* </pre>
*/
def shouldBe(emptyWord: EmptyWord)(implicit emptiness: Emptiness[T]): Assertion = {
  // Emptiness is defined by the implicit Emptiness[T] type class.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (emptiness.isEmpty(e)) indicateSuccess(FailureMessages.wasEmpty(prettifier, e))
    else indicateFailure(FailureMessages.wasNotEmpty(prettifier, e), None, pos)
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe defined
* ^
* </pre>
*/
def shouldBe(definedWord: DefinedWord)(implicit definition: Definition[T]): Assertion = {
  // Definedness is defined by the implicit Definition[T] type class.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (definition.isDefined(e)) indicateSuccess(FailureMessages.wasDefined(prettifier, e))
    else indicateFailure(FailureMessages.wasNotDefined(prettifier, e), None, pos)
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe a [Type]
* ^
* </pre>
*/
def shouldBe(aType: ResultOfATypeInvocation[_]): Assertion = {
  // Runtime instance-of check via Class.isAssignableFrom against each element's class.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val clazz = aType.clazz
    if (clazz.isAssignableFrom(e.getClass))
      indicateSuccess(FailureMessages.wasAnInstanceOf(prettifier, e, UnquotedString(clazz.getName)))
    else
      indicateFailure(FailureMessages.wasNotAnInstanceOf(prettifier, e, UnquotedString(clazz.getName), UnquotedString(e.getClass.getName)), None, pos)
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe an [Type]
* ^
* </pre>
*/
def shouldBe(anType: ResultOfAnTypeInvocation[_]): Assertion = {
  // Same as the "a [Type]" form; the word only changes the DSL reading, not the check.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val clazz = anType.clazz
    if (clazz.isAssignableFrom(e.getClass))
      indicateSuccess(FailureMessages.wasAnInstanceOf(prettifier, e, UnquotedString(clazz.getName)))
    else
      indicateFailure(FailureMessages.wasNotAnInstanceOf(prettifier, e, UnquotedString(clazz.getName), UnquotedString(e.getClass.getName)), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) shouldEqual null
* ^
* </pre>
*/
def shouldEqual(right: Null)(implicit ev: T <:< AnyRef): Assertion = {
  // The <:< evidence restricts this overload to reference types, where null is legal.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (e == null) indicateSuccess(FailureMessages.equaledNull)
    else indicateFailure(FailureMessages.didNotEqualNull(prettifier, e), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) should equal (3)
* ^
* </pre>
*/
def should[TYPECLASS1[_]](rightMatcherFactory1: MatcherFactory1[T, TYPECLASS1])(implicit typeClass1: TYPECLASS1[T]): Assertion = {
  // Resolve the factory to a concrete Matcher once, before iterating the elements.
  val rightMatcher = rightMatcherFactory1.matcher
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val result = rightMatcher(e)
    result match {
      // EqualMatchResult additionally carries a difference analysis, forwarded on failure.
      case equalMatchResult: EqualMatchResult =>
        if (equalMatchResult.matches)
          indicateSuccess(result.negatedFailureMessage(prettifier))
        else {
          val failureMessage = equalMatchResult.failureMessage(prettifier)
          val analysis = equalMatchResult.analysis
          indicateFailure(failureMessage, None, pos, analysis)
        }
      case _ =>
        // MatchFailed extracts the failure message iff the match did not succeed.
        MatchFailed.unapply(result)(prettifier) match {
          case Some(failureMessage) => indicateFailure(failureMessage, None, pos)
          case None => indicateSuccess(result.negatedFailureMessage(prettifier))
        }
    }
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) should (equal (expected) and have length 12)
* ^
* </pre>
*/
def should[TYPECLASS1[_], TYPECLASS2[_]](rightMatcherFactory2: MatcherFactory2[T, TYPECLASS1, TYPECLASS2])(implicit typeClass1: TYPECLASS1[T], typeClass2: TYPECLASS2[T]): Assertion = {
  // Two-type-class variant of the MatcherFactory1 overload above; logic is identical
  // once the factory has been resolved to a concrete Matcher.
  val rightMatcher = rightMatcherFactory2.matcher
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val result = rightMatcher(e)
    result match {
      // EqualMatchResult additionally carries a difference analysis, forwarded on failure.
      case equalMatchResult: EqualMatchResult =>
        if (equalMatchResult.matches)
          indicateSuccess(result.negatedFailureMessage(prettifier))
        else {
          val failureMessage = equalMatchResult.failureMessage(prettifier)
          val analysis = equalMatchResult.analysis
          indicateFailure(failureMessage, None, pos, analysis)
        }
      case _ =>
        // MatchFailed extracts the failure message iff the match did not succeed.
        MatchFailed.unapply(result)(prettifier) match {
          case Some(failureMessage) => indicateFailure(failureMessage, None, pos)
          case None => indicateSuccess(result.negatedFailureMessage(prettifier))
        }
    }
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) should be theSameInstanceAs anotherObject
* ^
* </pre>
*/
def should(beWord: BeWord): ResultOfBeWordForCollectedAny[T] = {
  // Continues "all(xs) should be ..."; true selects the positive expectation.
  val expectingTrue = true
  new ResultOfBeWordForCollectedAny[T](collected, xs, original, expectingTrue, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) should not equal (3)
* ^
* </pre>
*/
def should(notWord: NotWord): ResultOfNotWordForCollectedAny[T] = {
  // Continues "all(xs) should not ..."; false selects the negated expectation.
  val expectingTrue = false
  new ResultOfNotWordForCollectedAny(collected, xs, original, expectingTrue, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all (results) should have length (3)
* ^
* all (results) should have size (3)
* ^
* </pre>
*/
def should(haveWord: HaveWord): ResultOfHaveWordForCollectedExtent[T] = {
  // Continues "all(xs) should have length/size ..." with a positive expectation.
  val expectingTrue = true
  new ResultOfHaveWordForCollectedExtent(collected, xs, original, expectingTrue, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all (xs) shouldBe 7
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldBe(right: Any): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY def shouldBe[R](right: R)(implicit caneq: scala.CanEqual[T, R]): Assertion = {
  // Plain == comparison of each element against `right`.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (e != right) {
      // Decorates both values (e.g. with type info) when their string forms
      // would otherwise be indistinguishable in the failure message.
      val (eee, rightee) = Suite.getObjectsForFailureMessage(e, right)
      indicateFailure(FailureMessages.wasNot(prettifier, eee, rightee), None, pos)
    }
    else indicateSuccess(FailureMessages.was(prettifier, e, right))
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(4, 5, 6) shouldBe < (7)
* ^
* </pre>
*/
def shouldBe(comparison: ResultOfLessThanComparison[T]): Assertion = {
  // comparison(e) applies the underlying < test against comparison.right.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (comparison(e))
      indicateSuccess(FailureMessages.wasLessThan(prettifier, e, comparison.right))
    else
      indicateFailure(FailureMessages.wasNotLessThan(prettifier, e, comparison.right), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(4, 5, 6) shouldBe <= (7)
* ^
* </pre>
*/
def shouldBe(comparison: ResultOfLessThanOrEqualToComparison[T]): Assertion = {
  // comparison(e) applies the underlying <= test against comparison.right.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (comparison(e))
      indicateSuccess(FailureMessages.wasLessThanOrEqualTo(prettifier, e, comparison.right))
    else
      indicateFailure(FailureMessages.wasNotLessThanOrEqualTo(prettifier, e, comparison.right), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(8, 9, 10) shouldBe > (7)
* ^
* </pre>
*/
def shouldBe(comparison: ResultOfGreaterThanComparison[T]): Assertion = {
  // comparison(e) applies the underlying > test against comparison.right.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (comparison(e))
      indicateSuccess(FailureMessages.wasGreaterThan(prettifier, e, comparison.right))
    else
      indicateFailure(FailureMessages.wasNotGreaterThan(prettifier, e, comparison.right), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(8, 9, 10) shouldBe >= (7)
* ^
* </pre>
*/
def shouldBe(comparison: ResultOfGreaterThanOrEqualToComparison[T]): Assertion = {
  // comparison(e) applies the underlying >= test against comparison.right.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (comparison(e))
      indicateSuccess(FailureMessages.wasGreaterThanOrEqualTo(prettifier, e, comparison.right))
    else
      indicateFailure(FailureMessages.wasNotGreaterThanOrEqualTo(prettifier, e, comparison.right), None, pos)
  }
}
/**
* This method enables the following syntax, where <code>odd</code> refers to a <code>BeMatcher[Int]</code>:
*
* <pre class="stHighlight">
* all(xs) shouldBe odd
* ^
* </pre>
*/
def shouldBe(beMatcher: BeMatcher[T]): Assertion = {
  // Delegates the check to the user-supplied BeMatcher and reports its own messages.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val result = beMatcher.apply(e)
    if (result.matches) indicateSuccess(result.negatedFailureMessage(prettifier))
    else indicateFailure(result.failureMessage(prettifier), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) shouldBe 7.1 +- 0.2
* ^
* </pre>
*/
def shouldBe(spread: Spread[T]): Assertion = {
  // Tolerance check: element must be within spread.pivot +- spread.tolerance.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (spread.isWithin(e))
      indicateSuccess(FailureMessages.wasPlusOrMinus(prettifier, e, spread.pivot, spread.tolerance))
    else
      indicateFailure(FailureMessages.wasNotPlusOrMinus(prettifier, e, spread.pivot, spread.tolerance), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) shouldBe theSameInstanceAs (anotherObject)
* ^
* </pre>
*/
def shouldBe(resultOfSameInstanceAsApplication: ResultOfTheSameInstanceAsApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
  // Reference identity (eq), not ==; the <:< evidence restricts T to reference types.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (toAnyRef(e) eq resultOfSameInstanceAsApplication.right)
      indicateSuccess(FailureMessages.wasSameInstanceAs(prettifier, e, resultOfSameInstanceAsApplication.right))
    else
      indicateFailure(FailureMessages.wasNotSameInstanceAs(prettifier, e, resultOfSameInstanceAsApplication.right), None, pos)
  }
}
// SKIP-SCALATESTJS,NATIVE-START
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe 'empty
* ^
* </pre>
*/
def shouldBe(symbol: Symbol)(implicit toAnyRef: T <:< AnyRef): Assertion = {
  // Reflectively invokes the Symbol-named predicate method on each element.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val matcherResult = matchSymbolToPredicateMethod(toAnyRef(e), symbol, false, true, prettifier, pos)
    if (matcherResult.matches) indicateSuccess(matcherResult.negatedFailureMessage(prettifier))
    else indicateFailure(matcherResult.failureMessage(prettifier), None, pos)
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe a ('empty)
* ^
* </pre>
*/
def shouldBe(resultOfAWordApplication: ResultOfAWordToSymbolApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
  // "a ('sym)" form; boolean flags (true, true) differ from the bare-Symbol overload —
  // presumably they control the article wording in the message (defined elsewhere).
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val matcherResult = matchSymbolToPredicateMethod(toAnyRef(e), resultOfAWordApplication.symbol, true, true, prettifier, pos)
    if (matcherResult.matches) indicateSuccess(matcherResult.negatedFailureMessage(prettifier))
    else indicateFailure(matcherResult.failureMessage(prettifier), None, pos)
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe an ('empty)
* ^
* </pre>
*/
def shouldBe(resultOfAnWordApplication: ResultOfAnWordToSymbolApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
  // "an ('sym)" form; flags (true, false) distinguish it from the "a ('sym)" overload.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val matcherResult = matchSymbolToPredicateMethod(toAnyRef(e), resultOfAnWordApplication.symbol, true, false, prettifier, pos)
    if (matcherResult.matches) indicateSuccess(matcherResult.negatedFailureMessage(prettifier))
    else indicateFailure(matcherResult.failureMessage(prettifier), None, pos)
  }
}
// SKIP-SCALATESTJS,NATIVE-END
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldBe null
* ^
* </pre>
*/
def shouldBe(o: Null)(implicit ev: T <:< AnyRef): Assertion = {
  // Null check; the <:< evidence restricts this overload to reference types.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (e == null) indicateSuccess(FailureMessages.wasNull)
    else indicateFailure(FailureMessages.wasNotNull(prettifier, e), None, pos)
  }
}
/**
* This method enables the following syntax, where <code>excellentRead</code> refers to a <code>BePropertyMatcher[Book]</code>:
*
* <pre class="stHighlight">
* all(xs) shouldBe excellentRead
* ^
* </pre>
*/
def shouldBe[U <: T](bePropertyMatcher: BePropertyMatcher[U])(implicit ev: T <:< AnyRef): Assertion = { // TODO: Try supporting this with 2.10 AnyVals
  // The asInstanceOf is required because the matcher is typed at a subtype U of T.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val result = bePropertyMatcher(e.asInstanceOf[U])
    if (result.matches)
      indicateSuccess(FailureMessages.was(prettifier, e, UnquotedString(result.propertyName)))
    else
      indicateFailure(FailureMessages.wasNot(prettifier, e, UnquotedString(result.propertyName)), None, pos)
  }
}
/**
* This method enables the following syntax, where <code>goodRead</code> refers to a <code>BePropertyMatcher[Book]</code>:
*
* <pre class="stHighlight">
* all(xs) shouldBe a (goodRead)
* ^
* </pre>
*/
def shouldBe[U <: T](resultOfAWordApplication: ResultOfAWordToBePropertyMatcherApplication[U])(implicit ev: T <:< AnyRef): Assertion = {// TODO: Try supporting this with 2.10 AnyVals
  // "a (propertyMatcher)" form; on failure the message uses the "was not a" wording.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val result = resultOfAWordApplication.bePropertyMatcher(e.asInstanceOf[U])
    if (result.matches)
      indicateSuccess(FailureMessages.was(prettifier, e, UnquotedString(result.propertyName)))
    else
      indicateFailure(FailureMessages.wasNotA(prettifier, e, UnquotedString(result.propertyName)), None, pos)
  }
}
/**
* This method enables the following syntax, where <code>excellentRead</code> refers to a <code>BePropertyMatcher[Book]</code>:
*
* <pre class="stHighlight">
* all(xs) shouldBe an (excellentRead)
* ^
* </pre>
*/
def shouldBe[U <: T](resultOfAnWordApplication: ResultOfAnWordToBePropertyMatcherApplication[U])(implicit ev: T <:< AnyRef): Assertion = {// TODO: Try supporting this with 2.10 AnyVals
  // "an (propertyMatcher)" form; messages use the "an" wording on both outcomes.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val result = resultOfAnWordApplication.bePropertyMatcher(e.asInstanceOf[U])
    if (result.matches)
      indicateSuccess(FailureMessages.wasAn(prettifier, e, UnquotedString(result.propertyName)))
    else
      indicateFailure(FailureMessages.wasNotAn(prettifier, e, UnquotedString(result.propertyName)), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) shouldNot (be (3))
* ^
* </pre>
*/
def shouldNot[U <: T](rightMatcherX1: Matcher[U]): Assertion = {
  // Negated check: a *matching* result is the failure here, reported with the
  // matcher's negated failure message.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    try {
      val result = rightMatcherX1.apply(e.asInstanceOf[U])
      if (result.matches)
        indicateFailure(result.negatedFailureMessage(prettifier), None, pos)
      else indicateSuccess(result.failureMessage(prettifier))
    }
    catch {
      // A matcher that itself throws TestFailedException is rewrapped so the
      // reported position is this call site rather than the matcher's interior.
      case tfe: TestFailedException =>
        indicateFailure(tfe.getMessage, tfe.cause, pos)
    }
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) shouldNot (equal (3))
* ^
* </pre>
*/
def shouldNot[TYPECLASS1[_]](rightMatcherFactory1: MatcherFactory1[T, TYPECLASS1])(implicit typeClass1: TYPECLASS1[T]): Assertion = {
  // Negated factory form: success when MatchSucceeded does NOT extract.
  val rightMatcher = rightMatcherFactory1.matcher
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val result = rightMatcher(e)
    MatchSucceeded.unapply(result)(prettifier) match {
      case None => indicateSuccess(result.failureMessage(prettifier))
      case Some(negatedFailureMessage) => indicateFailure(negatedFailureMessage, None, pos)
    }
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all (xs) should === (b)
* ^
* </pre>
*/
def should[U](inv: TripleEqualsInvocation[U])(implicit constraint: T CanEqual U): Assertion = {
  // === / !== support: inv.expectingEqual is true for ===, false for !==.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val isEqual = constraint.areEqual(e, inv.right)
    if (isEqual == inv.expectingEqual)
      indicateSuccess(inv.expectingEqual, FailureMessages.equaled(prettifier, e, inv.right), FailureMessages.didNotEqual(prettifier, e, inv.right))
    else
      indicateFailure(
        if (inv.expectingEqual)
          FailureMessages.didNotEqual(prettifier, e, inv.right)
        else
          FailureMessages.equaled(prettifier, e, inv.right),
        None,
        pos
      )
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all (xs) should === (100 +- 1)
* ^
* </pre>
*/
def should(inv: TripleEqualsInvocationOnSpread[T])(implicit ev: Numeric[T]): Assertion = {
  // === (pivot +- tolerance) support; expectingEqual is false for the !== form.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val within = inv.spread.isWithin(e)
    if (within == inv.expectingEqual)
      indicateSuccess(inv.expectingEqual, FailureMessages.equaledPlusOrMinus(prettifier, e, inv.spread.pivot, inv.spread.tolerance), FailureMessages.didNotEqualPlusOrMinus(prettifier, e, inv.spread.pivot, inv.spread.tolerance))
    else
      indicateFailure(
        if (inv.expectingEqual)
          FailureMessages.didNotEqualPlusOrMinus(prettifier, e, inv.spread.pivot, inv.spread.tolerance)
        else
          FailureMessages.equaledPlusOrMinus(prettifier, e, inv.spread.pivot, inv.spread.tolerance),
        None,
        pos
      )
  }
}
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* all(xs) shouldNot be theSameInstanceAs anotherInstance
* ^
* </pre>
*/
def shouldNot(beWord: BeWord): ResultOfBeWordForCollectedAny[T] = {
  // Continues "all(xs) shouldNot be ..."; false selects the negated expectation.
  val expectingTrue = false
  new ResultOfBeWordForCollectedAny[T](collected, xs, original, expectingTrue, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all (xs) should contain oneOf (1, 2, 3)
* ^
* </pre>
*/
def should(containWord: ContainWord): ResultOfContainWordForCollectedAny[T] = {
  // Continues "all(xs) should contain ..." with a positive expectation.
  val expectingTrue = true
  new ResultOfContainWordForCollectedAny(collected, xs, original, expectingTrue, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all (xs) shouldNot contain (oneOf (1, 2, 3))
* ^
* </pre>
*/
def shouldNot(containWord: ContainWord): ResultOfContainWordForCollectedAny[T] = {
  // Continues "all(xs) shouldNot contain ..." with a negated expectation.
  val expectingTrue = false
  new ResultOfContainWordForCollectedAny(collected, xs, original, expectingTrue, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) should exist
* ^
* </pre>
*/
def should(existWord: ExistWord)(implicit existence: Existence[T]): Assertion = {
  // Existence is defined by the implicit Existence[T] type class.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    if (existence.exists(e)) indicateSuccess(FailureMessages.exists(prettifier, e))
    else indicateFailure(FailureMessages.doesNotExist(prettifier, e), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) should not (exist)
* ^
* </pre>
*/
def should(notExist: ResultOfNotExist)(implicit existence: Existence[T]): Assertion = {
  // Negated existence: succeeds when the element does NOT exist.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val found = existence.exists(e)
    if (!found) indicateSuccess(FailureMessages.doesNotExist(prettifier, e))
    else indicateFailure(FailureMessages.exists(prettifier, e), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(xs) shouldNot exist
* ^
* </pre>
*/
def shouldNot(existWord: ExistWord)(implicit existence: Existence[T]): Assertion = {
  // Same semantics as `should not (exist)`: succeeds when the element does not exist.
  doCollected(collected, xs, original, prettifier, pos) { e =>
    val found = existence.exists(e)
    if (!found) indicateSuccess(FailureMessages.doesNotExist(prettifier, e))
    else indicateFailure(FailureMessages.exists(prettifier, e), None, pos)
  }
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(string) should startWith regex ("Hel*o")
* ^
* </pre>
*/
def should(startWithWord: StartWithWord)(implicit ev: T <:< String): ResultOfStartWithWordForCollectedString = {
  // Safe cast: ev witnesses that T is a String subtype.
  val strings = xs.asInstanceOf[GenTraversable[String]]
  new ResultOfStartWithWordForCollectedString(collected, strings, original, true, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(string) should endWith regex ("wo.ld")
* ^
* </pre>
*/
def should(endWithWord: EndWithWord)(implicit ev: T <:< String): ResultOfEndWithWordForCollectedString = {
  // Safe cast: ev witnesses that T is a String subtype.
  val strings = xs.asInstanceOf[GenTraversable[String]]
  new ResultOfEndWithWordForCollectedString(collected, strings, original, true, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(string) should include regex ("wo.ld")
* ^
* </pre>
*/
def should(includeWord: IncludeWord)(implicit ev: T <:< String): ResultOfIncludeWordForCollectedString = {
  // Safe cast: ev witnesses that T is a String subtype.
  val strings = xs.asInstanceOf[GenTraversable[String]]
  new ResultOfIncludeWordForCollectedString(collected, strings, original, true, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(string) should fullyMatch regex ("""(-)?(\\d+)(\\.\\d*)?""")
* ^
* </pre>
*/
def should(fullyMatchWord: FullyMatchWord)(implicit ev: T <:< String): ResultOfFullyMatchWordForCollectedString = {
  // Safe cast: ev witnesses that T is a String subtype.
  val strings = xs.asInstanceOf[GenTraversable[String]]
  new ResultOfFullyMatchWordForCollectedString(collected, strings, original, true, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(string) shouldNot fullyMatch regex ("""(-)?(\\d+)(\\.\\d*)?""")
* ^
* </pre>
*/
def shouldNot(fullyMatchWord: FullyMatchWord)(implicit ev: T <:< String): ResultOfFullyMatchWordForCollectedString = {
  // Negated form: shouldBeTrue = false.
  val strings = xs.asInstanceOf[GenTraversable[String]]
  new ResultOfFullyMatchWordForCollectedString(collected, strings, original, false, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(string) shouldNot startWith regex ("Hel*o")
* ^
* </pre>
*/
def shouldNot(startWithWord: StartWithWord)(implicit ev: T <:< String): ResultOfStartWithWordForCollectedString = {
  // Negated form: shouldBeTrue = false.
  val strings = xs.asInstanceOf[GenTraversable[String]]
  new ResultOfStartWithWordForCollectedString(collected, strings, original, false, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(string) shouldNot endWith regex ("wo.ld")
* ^
* </pre>
*/
def shouldNot(endWithWord: EndWithWord)(implicit ev: T <:< String): ResultOfEndWithWordForCollectedString = {
  // Negated form: shouldBeTrue = false.
  val strings = xs.asInstanceOf[GenTraversable[String]]
  new ResultOfEndWithWordForCollectedString(collected, strings, original, false, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* all(string) shouldNot include regex ("wo.ld")
* ^
* </pre>
*/
def shouldNot(includeWord: IncludeWord)(implicit ev: T <:< String): ResultOfIncludeWordForCollectedString = {
  // Negated form: shouldBeTrue = false.
  val strings = xs.asInstanceOf[GenTraversable[String]]
  new ResultOfIncludeWordForCollectedString(collected, strings, original, false, prettifier, pos)
}
/**
* Overrides to return pretty toString.
*
* @return "ResultOfCollectedAny([collected], [xs])"
*/
override def toString: String = {
  // Pretty, human-readable rendering of this intermediate DSL object.
  val pretty = Prettifier.default
  "ResultOfCollectedAny(" + pretty(collected) + ", " + pretty(xs) + ")"
}
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
*/
final class ResultOfHaveWordForCollectedExtent[A](collected: Collected, xs: scala.collection.GenTraversable[A], original: Any, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all (xs) should have length (12)
   *                         ^
   * </pre>
   */
  def length(expectedLength: Long)(implicit len: Length[A]): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { e =>
      // Length is supplied by the implicit Length[A] type class.
      val eLength = len.lengthOf(e)
      if ((eLength == expectedLength) == shouldBeTrue)
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.hadLength(prettifier, e, expectedLength),
          FailureMessages.hadLengthInsteadOfExpectedLength(prettifier, e, eLength, expectedLength)
        )
      else
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.hadLengthInsteadOfExpectedLength(prettifier, e, eLength, expectedLength)
          else
            FailureMessages.hadLength(prettifier, e, expectedLength),
          None,
          pos
        )
    }
  }
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all (xs) should have size (12)
   *                         ^
   * </pre>
   */
  def size(expectedSize: Long)(implicit sz: Size[A]): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { e =>
      // Size is supplied by the implicit Size[A] type class.
      val eSize = sz.sizeOf(e)
      if ((eSize == expectedSize) == shouldBeTrue)
        indicateSuccess(
          shouldBeTrue,
          FailureMessages.hadSize(prettifier, e, expectedSize),
          FailureMessages.hadSizeInsteadOfExpectedSize(prettifier, e, eSize, expectedSize)
        )
      else
        indicateFailure(
          if (shouldBeTrue)
            FailureMessages.hadSizeInsteadOfExpectedSize(prettifier, e, eSize, expectedSize)
          else
            FailureMessages.hadSize(prettifier, e, expectedSize),
          None,
          pos
        )
    }
  }
  /**
   * Overrides to return pretty toString.
   *
   * @return "ResultOfHaveWordForCollectedExtent([collected], [xs], [shouldBeTrue])"
   */
  override def toString: String = {
    val pretty = Prettifier.default
    "ResultOfHaveWordForCollectedExtent(" + pretty(collected) + ", " + pretty(xs) + ", " + pretty(shouldBeTrue) + ")"
  }
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="InspectorsMatchers.html"><code>InspectorsMatchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
* @author Chee Seng
*/
final class ResultOfStartWithWordForCollectedString(collected: Collected, xs: scala.collection.GenTraversable[String], original: Any, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(string) should startWith regex ("Hel*o")
   *                              ^
   * </pre>
   */
  def regex(rightRegexString: String): Assertion = checkRegex(rightRegexString.r)
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(string) should startWith regex ("a(b*)c" withGroup "bb")
   *                              ^
   * </pre>
   */
  def regex(regexWithGroups: RegexWithGroups): Assertion = checkRegex(regexWithGroups.regex, regexWithGroups.groups)
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(string) should startWith regex ("Hel*o".r)
   *                              ^
   * </pre>
   */
  def regex(rightRegex: Regex): Assertion = checkRegex(rightRegex)
  // Shared implementation: matches each collected string against the regex
  // (and expected groups, if any) and reports per shouldBeTrue.
  private def checkRegex(rightRegex: Regex, groups: IndexedSeq[String] = IndexedSeq.empty): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { e =>
      val result = startWithRegexWithGroups(e, rightRegex, groups)
      if (result.matches == shouldBeTrue)
        indicateSuccess(
          shouldBeTrue,
          result.negatedFailureMessage(prettifier),
          result.failureMessage(prettifier)
        )
      else
        indicateFailure(
          if (shouldBeTrue) result.failureMessage(prettifier)
          else result.negatedFailureMessage(prettifier),
          None,
          pos
        )
    }
  }
  /**
   * Overrides to return pretty toString.
   *
   * @return "ResultOfStartWithWordForCollectedString([collected], [xs], [shouldBeTrue])"
   */
  override def toString: String = {
    val pretty = Prettifier.default
    "ResultOfStartWithWordForCollectedString(" + pretty(collected) + ", " + pretty(xs) + ", " + pretty(shouldBeTrue) + ")"
  }
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="InspectorsMatchers.html"><code>InspectorsMatchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
* @author Chee Seng
*/
final class ResultOfIncludeWordForCollectedString(collected: Collected, xs: scala.collection.GenTraversable[String], original: Any, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(string) should include regex ("world")
   *                            ^
   * </pre>
   */
  def regex(rightRegexString: String): Assertion = checkRegex(rightRegexString.r)
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(string) should include regex ("a(b*)c" withGroup "bb")
   *                            ^
   * </pre>
   */
  def regex(regexWithGroups: RegexWithGroups): Assertion = checkRegex(regexWithGroups.regex, regexWithGroups.groups)
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(string) should include regex ("wo.ld".r)
   *                            ^
   * </pre>
   */
  def regex(rightRegex: Regex): Assertion = checkRegex(rightRegex)
  // Shared implementation: checks each collected string for a substring match
  // of the regex (and expected groups, if any) and reports per shouldBeTrue.
  private def checkRegex(rightRegex: Regex, groups: IndexedSeq[String] = IndexedSeq.empty): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { e =>
      val result = includeRegexWithGroups(e, rightRegex, groups)
      if (result.matches == shouldBeTrue)
        indicateSuccess(
          shouldBeTrue,
          result.negatedFailureMessage(prettifier),
          result.failureMessage(prettifier)
        )
      else
        indicateFailure(
          if (shouldBeTrue) result.failureMessage(prettifier)
          else result.negatedFailureMessage(prettifier),
          None,
          pos
        )
    }
  }
  /**
   * Overrides to return pretty toString.
   *
   * @return "ResultOfIncludeWordForCollectedString([collected], [xs], [shouldBeTrue])"
   */
  override def toString: String = {
    val pretty = Prettifier.default
    "ResultOfIncludeWordForCollectedString(" + pretty(collected) + ", " + pretty(xs) + ", " + pretty(shouldBeTrue) + ")"
  }
}
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="InspectorsMatchers.html"><code>InspectorsMatchers</code></a> for an overview of
* the matchers DSL.
*
* @author Bill Venners
* @author Chee Seng
*/
final class ResultOfEndWithWordForCollectedString(collected: Collected, xs: scala.collection.GenTraversable[String], original: Any, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(string) should endWith regex ("wor.d")
   *                            ^
   * </pre>
   */
  def regex(rightRegexString: String): Assertion = checkRegex(rightRegexString.r)
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(string) should endWith regex ("a(b*)c" withGroup "bb")
   *                            ^
   * </pre>
   */
  def regex(regexWithGroups: RegexWithGroups): Assertion = checkRegex(regexWithGroups.regex, regexWithGroups.groups)
  /**
   * This method enables the following syntax:
   *
   * <pre class="stHighlight">
   * all(string) should endWith regex ("wor.d".r)
   *                            ^
   * </pre>
   */
  def regex(rightRegex: Regex): Assertion = checkRegex(rightRegex)
  // Shared implementation: matches the end of each collected string against the
  // regex (and expected groups, if any) and reports per shouldBeTrue.
  private def checkRegex(rightRegex: Regex, groups: IndexedSeq[String] = IndexedSeq.empty): Assertion = {
    doCollected(collected, xs, original, prettifier, pos) { e =>
      val result = endWithRegexWithGroups(e, rightRegex, groups)
      if (result.matches == shouldBeTrue)
        indicateSuccess(
          shouldBeTrue,
          result.negatedFailureMessage(prettifier),
          result.failureMessage(prettifier)
        )
      else
        indicateFailure(
          if (shouldBeTrue) result.failureMessage(prettifier)
          else result.negatedFailureMessage(prettifier),
          None,
          pos
        )
    }
  }
  /**
   * Overrides to return pretty toString.
   *
   * @return "ResultOfEndWithWordForCollectedString([collected], [xs], [shouldBeTrue])"
   */
  override def toString: String = {
    val pretty = Prettifier.default
    "ResultOfEndWithWordForCollectedString(" + pretty(collected) + ", " + pretty(xs) + ", " + pretty(shouldBeTrue) + ")"
  }
}
/**
 * This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="InspectorsMatchers.html"><code>InspectorsMatchers</code></a> for an overview of
 * the matchers DSL.
 *
 * @author Bill Venners
 * @author Chee Seng
 */
final class ResultOfFullyMatchWordForCollectedString(collected: Collected, xs: scala.collection.GenTraversable[String], original: Any, shouldBeTrue: Boolean, prettifier: Prettifier, pos: source.Position) {
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * all(string) should fullyMatch regex ("Hel*o world")
 * ^
 * </pre>
 */
def regex(rightRegexString: String): Assertion = { checkRegex(rightRegexString.r) }
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * all(string) should fullyMatch regex ("a(b*)c" withGroup "bb")
 * ^
 * </pre>
 */
def regex(regexWithGroups: RegexWithGroups): Assertion = { checkRegex(regexWithGroups.regex, regexWithGroups.groups) }
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * all(string) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def regex(rightRegex: Regex): Assertion = { checkRegex(rightRegex) }
// Shared implementation for the three `regex` overloads above: applies the
// "fully matches regex (with expected capture groups)" check to every collected string.
private def checkRegex(rightRegex: Regex, groups: IndexedSeq[String] = IndexedSeq.empty): Assertion = {
doCollected(collected, xs, original, prettifier, pos) { e =>
val result = fullyMatchRegexWithGroups(e, rightRegex, groups)
// Fails when the match outcome disagrees with shouldBeTrue
// (shouldBeTrue is false when the expectation is negated).
if (result.matches != shouldBeTrue)
indicateFailure(
if (shouldBeTrue)
result.failureMessage(prettifier)
else
result.negatedFailureMessage(prettifier),
None,
pos
)
else
indicateSuccess(
shouldBeTrue,
result.negatedFailureMessage(prettifier),
result.failureMessage(prettifier)
)
}
}
/**
 * Overrides to return pretty toString.
 *
 * @return "ResultOfFullyMatchWordForCollectedString([collected], [xs], [shouldBeTrue])"
 */
override def toString: String = "ResultOfFullyMatchWordForCollectedString(" + Prettifier.default(collected) + ", " + Prettifier.default(xs) + ", " + Prettifier.default(shouldBeTrue) + ")"
}
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * all(xs) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def all[E, C[_]](xs: C[E])(implicit collecting: Collecting[E, C[E]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[E] =
new ResultOfCollectedAny(AllCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>scala.collection.GenMap</code>:
 *
 * <pre class="stHighlight">
 * all(map) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def all[K, V, MAP[k, v] <: scala.collection.GenMap[k, v]](xs: MAP[K, V])(implicit collecting: Collecting[(K, V), scala.collection.GenTraversable[(K, V)]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[(K, V)] =
new ResultOfCollectedAny(AllCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>java.util.Map</code>:
 *
 * <pre class="stHighlight">
 * all(jmap) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def all[K, V, JMAP[k, v] <: java.util.Map[k, v]](xs: JMAP[K, V])(implicit collecting: Collecting[org.scalatest.Entry[K, V], JMAP[K, V]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[org.scalatest.Entry[K, V]] =
new ResultOfCollectedAny(AllCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>String</code>:
 *
 * <pre class="stHighlight">
 * all(str) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def all(xs: String)(implicit collecting: Collecting[Char, String], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[Char] =
new ResultOfCollectedAny(AllCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * atLeast(1, xs) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def atLeast[E, C[_]](num: Int, xs: C[E])(implicit collecting: Collecting[E, C[E]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[E] =
new ResultOfCollectedAny(AtLeastCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>scala.collection.GenMap</code>:
 *
 * <pre class="stHighlight">
 * atLeast(1, map) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def atLeast[K, V, MAP[k, v] <: scala.collection.GenMap[k, v]](num: Int, xs: MAP[K, V])(implicit collecting: Collecting[(K, V), scala.collection.GenTraversable[(K, V)]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[(K, V)] =
new ResultOfCollectedAny(AtLeastCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>java.util.Map</code>:
 *
 * <pre class="stHighlight">
 * atLeast(1, jmap) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def atLeast[K, V, JMAP[k, v] <: java.util.Map[k, v]](num: Int, xs: JMAP[K, V])(implicit collecting: Collecting[org.scalatest.Entry[K, V], JMAP[K, V]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[org.scalatest.Entry[K, V]] =
new ResultOfCollectedAny(AtLeastCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>String</code>:
 *
 * <pre class="stHighlight">
 * atLeast(1, str) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def atLeast(num: Int, xs: String)(implicit collecting: Collecting[Char, String], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[Char] =
new ResultOfCollectedAny(AtLeastCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * every(xs) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def every[E, C[_]](xs: C[E])(implicit collecting: Collecting[E, C[E]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[E] =
new ResultOfCollectedAny(EveryCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
// Bound widened from scala.collection.Map to scala.collection.GenMap for
// consistency with the all/atLeast/exactly/atMost map overloads. Backward
// compatible: GenMap is a supertype of Map, so strictly more types are accepted.
/**
 * This method enables the following syntax for <code>scala.collection.GenMap</code>:
 *
 * <pre class="stHighlight">
 * every(map) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def every[K, V, MAP[k, v] <: scala.collection.GenMap[k, v]](xs: MAP[K, V])(implicit collecting: Collecting[(K, V), scala.collection.GenTraversable[(K, V)]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[(K, V)] =
new ResultOfCollectedAny(EveryCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>java.util.Map</code>:
 *
 * <pre class="stHighlight">
 * every(jmap) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def every[K, V, JMAP[k, v] <: java.util.Map[k, v]](xs: JMAP[K, V])(implicit collecting: Collecting[org.scalatest.Entry[K, V], JMAP[K, V]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[org.scalatest.Entry[K, V]] =
new ResultOfCollectedAny(EveryCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>String</code>:
 *
 * <pre class="stHighlight">
 * every(str) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def every(xs: String)(implicit collecting: Collecting[Char, String], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[Char] =
new ResultOfCollectedAny(EveryCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * exactly(2, xs) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def exactly[E, C[_]](num: Int, xs: C[E])(implicit collecting: Collecting[E, C[E]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[E] =
new ResultOfCollectedAny(ExactlyCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>scala.collection.GenMap</code>:
 *
 * <pre class="stHighlight">
 * exactly(2, map) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def exactly[K, V, MAP[k, v] <: scala.collection.GenMap[k, v]](num: Int, xs: MAP[K, V])(implicit collecting: Collecting[(K, V), scala.collection.GenTraversable[(K, V)]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[(K, V)] =
new ResultOfCollectedAny(ExactlyCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>java.util.Map</code>:
 *
 * <pre class="stHighlight">
 * exactly(2, jmap) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def exactly[K, V, JMAP[k, v] <: java.util.Map[k, v]](num: Int, xs: JMAP[K, V])(implicit collecting: Collecting[org.scalatest.Entry[K, V], JMAP[K, V]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[org.scalatest.Entry[K, V]] =
new ResultOfCollectedAny(ExactlyCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>String</code>:
 *
 * <pre class="stHighlight">
 * exactly(2, str) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def exactly(num: Int, xs: String)(implicit collecting: Collecting[Char, String], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[Char] =
new ResultOfCollectedAny(ExactlyCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * no(xs) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def no[E, C[_]](xs: C[E])(implicit collecting: Collecting[E, C[E]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[E] =
new ResultOfCollectedAny(NoCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>java.util.Map</code>:
 *
 * <pre class="stHighlight">
 * no(jmap) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def no[K, V, JMAP[k, v] <: java.util.Map[k, v]](xs: JMAP[K, V])(implicit collecting: Collecting[org.scalatest.Entry[K, V], JMAP[K, V]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[org.scalatest.Entry[K, V]] =
new ResultOfCollectedAny(NoCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>String</code>:
 *
 * <pre class="stHighlight">
 * no(str) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def no(xs: String)(implicit collecting: Collecting[Char, String], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[Char] =
new ResultOfCollectedAny(NoCollected, collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * between(1, 3, xs) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def between[E, C[_]](from: Int, upTo:Int, xs: C[E])(implicit collecting: Collecting[E, C[E]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[E] =
new ResultOfCollectedAny(BetweenCollected(from, upTo), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>java.util.Map</code>:
 *
 * <pre class="stHighlight">
 * between(1, 3, jmap) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def between[K, V, JMAP[k, v] <: java.util.Map[k, v]](from: Int, upTo:Int, xs: JMAP[K, V])(implicit collecting: Collecting[org.scalatest.Entry[K, V], JMAP[K, V]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[org.scalatest.Entry[K, V]] =
new ResultOfCollectedAny(BetweenCollected(from, upTo), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>String</code>:
 *
 * <pre class="stHighlight">
 * between(1, 3, str) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def between(from: Int, upTo:Int, xs: String)(implicit collecting: Collecting[Char, String], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[Char] =
new ResultOfCollectedAny(BetweenCollected(from, upTo), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * atMost(3, xs) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def atMost[E, C[_]](num: Int, xs: C[E])(implicit collecting: Collecting[E, C[E]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[E] =
new ResultOfCollectedAny(AtMostCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>scala.collection.GenMap</code>:
 *
 * <pre class="stHighlight">
 * atMost(3, map) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def atMost[K, V, MAP[k, v] <: scala.collection.GenMap[k, v]](num: Int, xs: MAP[K, V])(implicit collecting: Collecting[(K, V), scala.collection.GenTraversable[(K, V)]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[(K, V)] =
new ResultOfCollectedAny(AtMostCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>java.util.Map</code>:
 *
 * <pre class="stHighlight">
 * atMost(3, jmap) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def atMost[K, V, JMAP[k, v] <: java.util.Map[k, v]](num: Int, xs: JMAP[K, V])(implicit collecting: Collecting[org.scalatest.Entry[K, V], JMAP[K, V]], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[org.scalatest.Entry[K, V]] =
new ResultOfCollectedAny(AtMostCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax for <code>String</code>:
 *
 * <pre class="stHighlight">
 * atMost(3, str) should fullyMatch regex ("Hel*o world".r)
 * ^
 * </pre>
 */
def atMost(num: Int, xs: String)(implicit collecting: Collecting[Char, String], prettifier: Prettifier, pos: source.Position): ResultOfCollectedAny[Char] =
new ResultOfCollectedAny(AtMostCollected(num), collecting.genTraversableFrom(xs), xs, prettifier, pos)
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * a [RuntimeException] should be thrownBy { ... }
 * ^
 * </pre>
 */
def a[T: ClassTag]: ResultOfATypeInvocation[T] =
// `classTag` resolves to the implicit ClassTag[T] supplied by the context bound.
new ResultOfATypeInvocation(classTag)
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * an [Exception] should be thrownBy { ... }
 * ^
 * </pre>
 */
def an[T : ClassTag]: ResultOfAnTypeInvocation[T] =
new ResultOfAnTypeInvocation(classTag)
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * the [FileNotFoundException] should be thrownBy { ... }
 * ^
 * </pre>
 */
def the[T : ClassTag](implicit pos: source.Position): ResultOfTheTypeInvocation[T] =
// Also captures the source position so the failure can point at the call site.
new ResultOfTheTypeInvocation(classTag, pos)
// This is where ShouldMatchers.scala started
// 13 Feb 2019: Current dotty does not seem to like inner objects; this is a workaround until the problem is fixed.
private class ShouldMethodHelperClass {
// Applies rightMatcher to left and reports the outcome. EqualMatchResult is
// special-cased so its difference analysis can be attached to the failure.
def shouldMatcher[T](left: T, rightMatcher: Matcher[T], prettifier: Prettifier, pos: source.Position): Assertion = {
val result = rightMatcher(left)
result match {
case equalMatchResult: EqualMatchResult =>
if (equalMatchResult.matches)
indicateSuccess(result.negatedFailureMessage(prettifier))
else {
val failureMessage = equalMatchResult.failureMessage(prettifier)
val analysis = equalMatchResult.analysis
indicateFailure(failureMessage, None, pos, analysis)
}
case _ =>
MatchFailed.unapply(result)(prettifier) match {
case Some(failureMessage) => indicateFailure(failureMessage, None, pos)
case None => indicateSuccess(result.negatedFailureMessage(prettifier))
}
}
}
// Negated form: a match that SUCCEEDS is reported as a failure here.
def shouldNotMatcher[T](left: T, rightMatcher: Matcher[T], prettifier: Prettifier, pos: source.Position): Assertion = {
val result = rightMatcher(left)
MatchSucceeded.unapply(result)(prettifier) match {
case Some(negatedFailureMessage) => indicateFailure(negatedFailureMessage, None, pos)
case None => indicateSuccess(result.failureMessage(prettifier))
}
}
}
private val ShouldMethodHelper = new ShouldMethodHelperClass
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* <p>
* This class is used in conjunction with an implicit conversion to enable <code>should</code> methods to
* be invoked on objects of type <code>Any</code>.
* </p>
*
* @author Bill Venners
*/
sealed class AnyShouldWrapper[T](val leftSideValue: T, val pos: source.Position, val prettifier: Prettifier) {
//DOTTY-ONLY } // We need an empty AnyShouldWrapper for now.
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result should be (3)
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def should(rightMatcherX1: Matcher[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(rightMatcherX1: Matcher[T]): Assertion = {
// Delegate to the shared helper, which converts the MatchResult into pass/fail.
ShouldMethodHelper.shouldMatcher(leftSideValue, rightMatcherX1, prettifier, pos)
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result should equal (3)
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def should[TYPECLASS1[_]](rightMatcherFactory1: MatcherFactory1[T, TYPECLASS1])(implicit typeClass1: TYPECLASS1[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T, TYPECLASS1[_]](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(rightMatcherFactory1: MatcherFactory1[T, TYPECLASS1])(implicit typeClass1: TYPECLASS1[T]): Assertion = {
// Materialize a concrete Matcher[T] from the factory before delegating.
ShouldMethodHelper.shouldMatcher(leftSideValue, rightMatcherFactory1.matcher, prettifier, pos)
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result should (equal (expected) and have length 3)
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def should[TYPECLASS1[_], TYPECLASS2[_]](rightMatcherFactory2: MatcherFactory2[T, TYPECLASS1, TYPECLASS2])(implicit typeClass1: TYPECLASS1[T], typeClass2: TYPECLASS2[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T, TYPECLASS1[_], TYPECLASS2[_]](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(rightMatcherFactory2: MatcherFactory2[T, TYPECLASS1, TYPECLASS2])(implicit typeClass1: TYPECLASS1[T], typeClass2: TYPECLASS2[T]): Assertion = {
// Two-type-class variant of the overload above.
ShouldMethodHelper.shouldMatcher(leftSideValue, rightMatcherFactory2.matcher, prettifier, pos)
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * a shouldEqual b
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldEqual(right: Any)(implicit equality: Equality[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldEqual(right: Any)(implicit equality: Equality[T]): Assertion = {
// Equality comes from the in-scope Equality[T]; the pretty pair from the
// prettifier also carries optional difference analysis for the failure.
if (!equality.areEqual(leftSideValue, right)) {
val prettyPair = prettifier(leftSideValue, right)
indicateFailure(Resources.formatString(Resources.rawDidNotEqual, Array(prettyPair.left, prettyPair.right)), None, pos, prettyPair.analysis)
}
else indicateSuccess(FailureMessages.equaled(prettifier, leftSideValue, right))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result shouldEqual 7.1 +- 0.2
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldEqual(spread: Spread[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldEqual(spread: Spread[T]): Assertion = {
// Spread check: leftSideValue must lie within pivot +- tolerance.
if (!spread.isWithin(leftSideValue)) {
indicateFailure(FailureMessages.didNotEqualPlusOrMinus(prettifier, leftSideValue, spread.pivot, spread.tolerance), None, pos)
}
else indicateSuccess(FailureMessages.equaledPlusOrMinus(prettifier, leftSideValue, spread.pivot, spread.tolerance))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result shouldEqual null
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldEqual(right: Null)(implicit ev: T <:< AnyRef): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldEqual(right: Null)(implicit ev: T <:< AnyRef): Assertion = {
// Direct null reference check; `ev` witnesses that T is an AnyRef so null makes sense.
if (leftSideValue != null) {
indicateFailure(FailureMessages.didNotEqualNull(prettifier, leftSideValue), None, pos)
}
else indicateSuccess(FailureMessages.equaledNull)
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result should not equal (3)
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def should(notWord: NotWord): ResultOfNotWordForAny[T] =
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(notWord: NotWord): ResultOfNotWordForAny[T] =
// false => the expectations built from this result object are negated.
new ResultOfNotWordForAny[T](leftSideValue, false, prettifier, pos)
// In 2.10, will work with AnyVals. TODO: Also, Need to ensure Char works
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * a should === (b)
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def should[U](inv: TripleEqualsInvocation[U])(implicit constraint: T CanEqual U): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T, U](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(inv: TripleEqualsInvocation[U])(implicit constraint: T CanEqual U): Assertion = {
// inv.expectingEqual is true for === and false for !==; equality is decided
// by the implicit CanEqual constraint between T and U.
if ((constraint.areEqual(leftSideValue, inv.right)) != inv.expectingEqual)
indicateFailure(
if (inv.expectingEqual)
FailureMessages.didNotEqual(prettifier, leftSideValue, inv.right)
else
FailureMessages.equaled(prettifier, leftSideValue, inv.right),
None,
pos
)
else
indicateSuccess(
inv.expectingEqual,
FailureMessages.equaled(prettifier, leftSideValue, inv.right),
FailureMessages.didNotEqual(prettifier, leftSideValue, inv.right)
)
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result should === (100 +- 1)
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def should(inv: TripleEqualsInvocationOnSpread[T])(implicit ev: Numeric[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(inv: TripleEqualsInvocationOnSpread[T])(implicit ev: Numeric[T]): Assertion = {
// Same pattern as above, but the comparison is "within pivot +- tolerance".
if ((inv.spread.isWithin(leftSideValue)) != inv.expectingEqual)
indicateFailure(
if (inv.expectingEqual)
FailureMessages.didNotEqualPlusOrMinus(prettifier, leftSideValue, inv.spread.pivot, inv.spread.tolerance)
else
FailureMessages.equaledPlusOrMinus(prettifier, leftSideValue, inv.spread.pivot, inv.spread.tolerance),
None,
pos
)
else
indicateSuccess(
inv.expectingEqual,
FailureMessages.equaledPlusOrMinus(prettifier, leftSideValue, inv.spread.pivot, inv.spread.tolerance),
FailureMessages.didNotEqualPlusOrMinus(prettifier, leftSideValue, inv.spread.pivot, inv.spread.tolerance)
)
}
// TODO: Need to make sure this works in inspector shorthands. I moved this
// up here from NumericShouldWrapper.
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result should be a aMatcher
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def should(beWord: BeWord): ResultOfBeWordForAny[T] =
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(beWord: BeWord): ResultOfBeWordForAny[T] =
// true => non-negated expectation (the NotWord overload passes false instead).
new ResultOfBeWordForAny(leftSideValue, true, prettifier, pos)
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * aDouble shouldBe 8.8
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(right: Any): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T, R](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(right: R)(implicit caneq: scala.CanEqual[T, R]): Assertion = {
// NOTE(review): the helper name suggests Arrays are compared by contents
// rather than by reference — confirm semantics in the Suite helpers.
if (!areEqualComparingArraysStructurally(leftSideValue, right)) {
val (leftee, rightee) = Suite.getObjectsForFailureMessage(leftSideValue, right)
val localPrettifier = prettifier // Grabbing a local copy so we don't attempt to serialize AnyShouldWrapper (since first param to indicateFailure is a by-name)
indicateFailure(FailureMessages.wasNotEqualTo(localPrettifier, leftee, rightee), None, pos)
}
else indicateSuccess(FailureMessages.wasEqualTo(prettifier, leftSideValue, right))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * 5 shouldBe < (7)
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(comparison: ResultOfLessThanComparison[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(comparison: ResultOfLessThanComparison[T]): Assertion = {
// comparison(leftSideValue) is true when leftSideValue < comparison.right.
if (!comparison(leftSideValue)) {
indicateFailure(
FailureMessages.wasNotLessThan(prettifier,
leftSideValue,
comparison.right
),
None,
pos
)
}
else indicateSuccess(FailureMessages.wasLessThan(prettifier, leftSideValue, comparison.right))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * 8 shouldBe > (7)
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(comparison: ResultOfGreaterThanComparison[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(comparison: ResultOfGreaterThanComparison[T]): Assertion = {
// comparison(leftSideValue) is true when leftSideValue > comparison.right.
if (!comparison(leftSideValue)) {
indicateFailure(
FailureMessages.wasNotGreaterThan(prettifier,
leftSideValue,
comparison.right
),
None,
pos
)
}
else indicateSuccess(FailureMessages.wasGreaterThan(prettifier, leftSideValue, comparison.right))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * 5 shouldBe <= (7)
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(comparison: ResultOfLessThanOrEqualToComparison[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(comparison: ResultOfLessThanOrEqualToComparison[T]): Assertion = {
// comparison(leftSideValue) is true when leftSideValue <= comparison.right.
if (!comparison(leftSideValue)) {
indicateFailure(
FailureMessages.wasNotLessThanOrEqualTo(prettifier,
leftSideValue,
comparison.right
),
None,
pos
)
}
else indicateSuccess(FailureMessages.wasLessThanOrEqualTo(prettifier, leftSideValue, comparison.right))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * 8 shouldBe >= (7)
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(comparison: ResultOfGreaterThanOrEqualToComparison[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(comparison: ResultOfGreaterThanOrEqualToComparison[T]): Assertion = {
// comparison(leftSideValue) is true when leftSideValue >= comparison.right.
if (!comparison(leftSideValue)) {
indicateFailure(
FailureMessages.wasNotGreaterThanOrEqualTo(prettifier,
leftSideValue,
comparison.right
),
None,
pos
)
}
else indicateSuccess(FailureMessages.wasGreaterThanOrEqualTo(prettifier, leftSideValue, comparison.right))
}
/**
 * This method enables the following syntax, where <code>odd</code> refers to a <code>BeMatcher[Int]</code>:
 *
 * <pre class="stHighlight">
 * 1 shouldBe odd
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(beMatcher: BeMatcher[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(beMatcher: BeMatcher[T]): Assertion = {
// The BeMatcher produces a MatchResult directly; report it as-is.
val result = beMatcher.apply(leftSideValue)
if (!result.matches)
indicateFailure(result.failureMessage(prettifier), None, pos)
else indicateSuccess(result.negatedFailureMessage(prettifier))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result shouldBe 7.1 +- 0.2
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(spread: Spread[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(spread: Spread[T]): Assertion = {
// Same +- check as shouldEqual(spread), but reported with "was (not)" wording.
if (!spread.isWithin(leftSideValue)) {
indicateFailure(FailureMessages.wasNotPlusOrMinus(prettifier, leftSideValue, spread.pivot, spread.tolerance), None, pos)
}
else indicateSuccess(FailureMessages.wasPlusOrMinus(prettifier, leftSideValue, spread.pivot, spread.tolerance))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result shouldBe sorted
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(right: SortedWord)(implicit sortable: Sortable[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(right: SortedWord)(implicit sortable: Sortable[T]): Assertion = {
// The Sortable[T] type class decides what "sorted" means for T.
if (!sortable.isSorted(leftSideValue))
indicateFailure(FailureMessages.wasNotSorted(prettifier, leftSideValue), None, pos)
else indicateSuccess(FailureMessages.wasSorted(prettifier, leftSideValue))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * aDouble shouldBe a [Book]
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(aType: ResultOfATypeInvocation[_]): Assertion = macro TypeMatcherMacro.shouldBeATypeImpl
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) inline def shouldBe(aType: ResultOfATypeInvocation[_]): Assertion = ${ org.scalatest.matchers.should.TypeMatcherMacro.shouldBeATypeImpl('{leftSideValue}, '{aType}, '{pos}, '{prettifier}) }
// Implemented as a Scala 2 macro; the Dotty build uses an inline method with
// quoted arguments instead (see the DOTTY-ONLY line above).
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * aDouble shouldBe an [Book]
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(anType: ResultOfAnTypeInvocation[_]): Assertion = macro TypeMatcherMacro.shouldBeAnTypeImpl
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) inline def shouldBe(anType: ResultOfAnTypeInvocation[_]): Assertion = ${ org.scalatest.matchers.should.TypeMatcherMacro.shouldBeAnTypeImpl('{leftSideValue}, '{anType}, '{pos}, '{prettifier}) }
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result shouldBe readable
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(right: ReadableWord)(implicit readability: Readability[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(right: ReadableWord)(implicit readability: Readability[T]): Assertion = {
// The Readability[T] type class decides whether the value counts as readable.
if (!readability.isReadable(leftSideValue))
indicateFailure(FailureMessages.wasNotReadable(prettifier, leftSideValue), None, pos)
else indicateSuccess(FailureMessages.wasReadable(prettifier, leftSideValue))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result shouldBe writable
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(right: WritableWord)(implicit writability: Writability[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(right: WritableWord)(implicit writability: Writability[T]): Assertion = {
// The Writability[T] type class decides whether the value counts as writable.
if (!writability.isWritable(leftSideValue))
indicateFailure(FailureMessages.wasNotWritable(prettifier, leftSideValue), None, pos)
else indicateSuccess(FailureMessages.wasWritable(prettifier, leftSideValue))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result shouldBe empty
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(right: EmptyWord)(implicit emptiness: Emptiness[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(right: EmptyWord)(implicit emptiness: Emptiness[T]): Assertion = {
// The Emptiness[T] type class decides what "empty" means for T.
if (!emptiness.isEmpty(leftSideValue))
indicateFailure(FailureMessages.wasNotEmpty(prettifier, leftSideValue), None, pos)
else indicateSuccess(FailureMessages.wasEmpty(prettifier, leftSideValue))
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result shouldBe defined
 * ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(right: DefinedWord)(implicit definition: Definition[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(right: DefinedWord)(implicit definition: Definition[T]): Assertion = {
// The Definition[T] type class decides whether the value counts as defined.
if (!definition.isDefined(leftSideValue))
indicateFailure(FailureMessages.wasNotDefined(prettifier, leftSideValue), None, pos)
else indicateSuccess(FailureMessages.wasDefined(prettifier, leftSideValue))
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* result shouldNot be (3)
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(beWord: BeWord): ResultOfBeWordForAny[T] =
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldNot(beWord: BeWord): ResultOfBeWordForAny[T] =
new ResultOfBeWordForAny(leftSideValue, false, prettifier, pos)
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* result shouldNot (be (3))
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(rightMatcherX1: Matcher[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldNot(rightMatcherX1: Matcher[T]): Assertion = {
ShouldMethodHelper.shouldNotMatcher(leftSideValue, rightMatcherX1, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* result shouldNot (be readable)
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot[TYPECLASS1[_]](rightMatcherFactory1: MatcherFactory1[T, TYPECLASS1])(implicit typeClass1: TYPECLASS1[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T, TYPECLASS1[_]](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldNot(rightMatcherFactory1: MatcherFactory1[T, TYPECLASS1])(implicit typeClass1: TYPECLASS1[T]): Assertion = {
ShouldMethodHelper.shouldNotMatcher(leftSideValue, rightMatcherFactory1.matcher, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* result shouldNot have length (3)
* ^
* result shouldNot have size (3)
* ^
* exception shouldNot have message ("file not found")
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(haveWord: HaveWord): ResultOfHaveWordForExtent[T] =
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldNot(haveWord: HaveWord): ResultOfHaveWordForExtent[T] =
new ResultOfHaveWordForExtent(leftSideValue, false, prettifier, pos)
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* result should have length (3)
* ^
* result should have size (3)
* ^
* </pre>
*/
// SKIP-DOTTY-START
def should(haveWord: HaveWord): ResultOfHaveWordForExtent[T] =
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(haveWord: HaveWord): ResultOfHaveWordForExtent[T] =
new ResultOfHaveWordForExtent(leftSideValue, true, prettifier, pos)
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result shouldBe null
 *        ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(right: Null)(implicit ev: T <:< AnyRef): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(right: Null)(implicit ev: T <:< AnyRef): Assertion = {
  // The T <:< AnyRef evidence restricts this overload to reference types;
  // an AnyVal can never meaningfully be compared to null.
  if (leftSideValue != null) {
    indicateFailure(FailureMessages.wasNotNull(prettifier, leftSideValue), None, pos)
  }
  // wasNull takes no arguments: the left side is known to be null here.
  else indicateSuccess(FailureMessages.wasNull)
}
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * result shouldBe theSameInstanceAs (anotherObject)
 *        ^
 * </pre>
 */
// SKIP-DOTTY-START
def shouldBe(resultOfSameInstanceAsApplication: ResultOfTheSameInstanceAsApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(resultOfSameInstanceAsApplication: ResultOfTheSameInstanceAsApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
  // `ne` compares object identity (reference inequality), not equality;
  // toAnyRef upcasts the left side via the implicit evidence so the
  // reference comparison typechecks for any T known to be an AnyRef.
  if (resultOfSameInstanceAsApplication.right ne toAnyRef(leftSideValue)) {
    indicateFailure(
      FailureMessages.wasNotSameInstanceAs(prettifier,
        leftSideValue,
        resultOfSameInstanceAsApplication.right
      ),
      None,
      pos
    )
  }
  else indicateSuccess(FailureMessages.wasSameInstanceAs(prettifier, leftSideValue, resultOfSameInstanceAsApplication.right))
}
// SKIP-SCALATESTJS,NATIVE-START
// TODO: Remember to write tests for inspector shorthands uncovering the bug below, always a empty because always true true passed to matchSym
/**
 * This method enables the following syntax:
 *
 * <pre class="stHighlight">
 * list shouldBe 'empty
 *      ^
 * </pre>
 */
// SKIP-SCALATESTJS,NATIVE-END
// SKIP-SCALATESTJS,NATIVE-START
// SKIP-DOTTY-START
def shouldBe(symbol: Symbol)(implicit toAnyRef: T <:< AnyRef): Assertion = {
// SKIP-DOTTY-END
// SKIP-SCALATESTJS,NATIVE-END
// SKIP-SCALATESTJS,NATIVE-START
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(symbol: Symbol)(implicit toAnyRef: T <:< AnyRef): Assertion = {
  // Presumably resolves the symbol to a predicate method on the target via
  // reflection, hence the SKIP-SCALATESTJS,NATIVE guards — TODO confirm.
  // NOTE(review): the Boolean args are passed as (false, true) here; see the
  // TODO above about what matchSymbolToPredicateMethod expects — verify
  // against its signature before relying on this call.
  val matcherResult = matchSymbolToPredicateMethod(toAnyRef(leftSideValue), symbol, false, true, prettifier, pos)
  if (!matcherResult.matches)
    indicateFailure(matcherResult.failureMessage(prettifier), None, pos)
  else indicateSuccess(matcherResult.negatedFailureMessage(prettifier))
}
// SKIP-SCALATESTJS,NATIVE-START
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* list shouldBe a ('empty)
* ^
* </pre>
*/
// SKIP-SCALATESTJS,NATIVE-END
// SKIP-SCALATESTJS,NATIVE-START
// SKIP-DOTTY-START
def shouldBe(resultOfAWordApplication: ResultOfAWordToSymbolApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
// SKIP-DOTTY-END
// SKIP-SCALATESTJS,NATIVE-END
// SKIP-SCALATESTJS,NATIVE-START
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(resultOfAWordApplication: ResultOfAWordToSymbolApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
val matcherResult = matchSymbolToPredicateMethod(toAnyRef(leftSideValue), resultOfAWordApplication.symbol, true, true, prettifier, pos)
if (!matcherResult.matches) {
indicateFailure(
matcherResult.failureMessage(prettifier),
None,
pos
)
}
else indicateSuccess(matcherResult.negatedFailureMessage(prettifier))
}
// SKIP-SCALATESTJS,NATIVE-END
// SKIP-SCALATESTJS,NATIVE-START
/**
* This method enables the following syntax:
*
* <pre class="stHighlight">
* list shouldBe an ('empty)
* ^
* </pre>
*/
// SKIP-SCALATESTJS,NATIVE-END
// SKIP-SCALATESTJS,NATIVE-START
// SKIP-DOTTY-START
def shouldBe(resultOfAnWordApplication: ResultOfAnWordToSymbolApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
// SKIP-DOTTY-END
// SKIP-SCALATESTJS,NATIVE-END
// SKIP-SCALATESTJS,NATIVE-START
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(resultOfAnWordApplication: ResultOfAnWordToSymbolApplication)(implicit toAnyRef: T <:< AnyRef): Assertion = {
val matcherResult = matchSymbolToPredicateMethod(toAnyRef(leftSideValue), resultOfAnWordApplication.symbol, true, false, prettifier, pos)
if (!matcherResult.matches) {
indicateFailure(
matcherResult.failureMessage(prettifier),
None,
pos
)
}
else indicateSuccess(matcherResult.negatedFailureMessage(prettifier))
}
// SKIP-SCALATESTJS,NATIVE-END
/**
* This method enables the following syntax, where <code>excellentRead</code> refers to a <code>BePropertyMatcher[Book]</code>:
*
* <pre class="stHighlight">
* programmingInScala shouldBe excellentRead
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldBe(bePropertyMatcher: BePropertyMatcher[T])(implicit ev: T <:< AnyRef): Assertion = { // TODO: Try expanding this to 2.10 AnyVal
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(bePropertyMatcher: BePropertyMatcher[T])(implicit ev: T <:< AnyRef): Assertion = {
val result = bePropertyMatcher(leftSideValue)
if (!result.matches)
indicateFailure(FailureMessages.wasNot(prettifier, leftSideValue, UnquotedString(result.propertyName)), None, pos)
else indicateSuccess(FailureMessages.was(prettifier, leftSideValue, UnquotedString(result.propertyName)))
}
/**
* This method enables the following syntax, where <code>goodRead</code> refers to a <code>BePropertyMatcher[Book]</code>:
*
* <pre class="stHighlight">
* programmingInScala shouldBe a (goodRead)
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldBe[U >: T](resultOfAWordApplication: ResultOfAWordToBePropertyMatcherApplication[U])(implicit ev: T <:< AnyRef): Assertion = {// TODO: Try expanding this to 2.10 AnyVal
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T, U >: T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(resultOfAWordApplication: ResultOfAWordToBePropertyMatcherApplication[U])(implicit ev: T <:< AnyRef): Assertion = {
val result = resultOfAWordApplication.bePropertyMatcher(leftSideValue)
if (!result.matches) {
indicateFailure(FailureMessages.wasNotA(prettifier, leftSideValue, UnquotedString(result.propertyName)), None, pos)
}
else indicateSuccess(FailureMessages.wasA(prettifier, leftSideValue, UnquotedString(result.propertyName)))
}
/**
* This method enables the following syntax, where <code>excellentRead</code> refers to a <code>BePropertyMatcher[Book]</code>:
*
* <pre class="stHighlight">
* programmingInScala shouldBe an (excellentRead)
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldBe[U >: T](resultOfAnWordApplication: ResultOfAnWordToBePropertyMatcherApplication[U])(implicit ev: T <:< AnyRef): Assertion = {// TODO: Try expanding this to 2.10 AnyVal
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T, U >: T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldBe(resultOfAnWordApplication: ResultOfAnWordToBePropertyMatcherApplication[U])(implicit ev: T <:< AnyRef): Assertion = {
val result = resultOfAnWordApplication.bePropertyMatcher(leftSideValue)
if (!result.matches) {
indicateFailure(FailureMessages.wasNotAn(prettifier, leftSideValue, UnquotedString(result.propertyName)), None, pos)
}
else indicateSuccess(FailureMessages.wasAn(prettifier, leftSideValue, UnquotedString(result.propertyName)))
}
/*
def shouldBe[U](right: AType[U]) {
if (!right.isAssignableFromClassOf(leftSideValue)) {
throw newTestFailedException(FailureMessages.wasNotAnInstanceOf(prettifier, leftSideValue, UnquotedString(right.className), UnquotedString(leftSideValue.getClass.getName)))
}
}
*/
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* xs should contain oneOf (1, 2, 3)
* ^
* </pre>
*/
// SKIP-DOTTY-START
def should(containWord: ContainWord): ResultOfContainWord[T] = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(containWord: ContainWord): ResultOfContainWord[T] = {
new ResultOfContainWord(leftSideValue, true, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* xs shouldNot contain (oneOf (1, 2, 3))
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(contain: ContainWord): ResultOfContainWord[T] =
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldNot(contain: ContainWord): ResultOfContainWord[T] =
new ResultOfContainWord(leftSideValue, false, prettifier, pos)
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* file should exist
* ^
* </pre>
*/
// SKIP-DOTTY-START
def should(existWord: ExistWord)(implicit existence: Existence[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(existWord: ExistWord)(implicit existence: Existence[T]): Assertion = {
if (!existence.exists(leftSideValue))
indicateFailure(FailureMessages.doesNotExist(prettifier, leftSideValue), None, pos)
else indicateSuccess(FailureMessages.exists(prettifier, leftSideValue))
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* file should not (exist)
* ^
* </pre>
*/
// SKIP-DOTTY-START
def should(notExist: ResultOfNotExist)(implicit existence: Existence[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(notExist: ResultOfNotExist)(implicit existence: Existence[T]): Assertion = {
if (existence.exists(leftSideValue))
indicateFailure(FailureMessages.exists(prettifier, leftSideValue), None, pos)
else indicateSuccess(FailureMessages.doesNotExist(prettifier, leftSideValue))
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* file shouldNot exist
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(existWord: ExistWord)(implicit existence: Existence[T]): Assertion = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldNot(existWord: ExistWord)(implicit existence: Existence[T]): Assertion = {
if (existence.exists(leftSideValue))
indicateFailure(FailureMessages.exists(prettifier, leftSideValue), None, pos)
else indicateSuccess(FailureMessages.doesNotExist(prettifier, leftSideValue))
}
// From StringShouldWrapper
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string should include regex ("hi")
* ^
* </pre>
*/
// SKIP-DOTTY-START
def should(includeWord: IncludeWord)(implicit ev: T <:< String): ResultOfIncludeWordForString = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(includeWord: IncludeWord)(implicit ev: T <:< String): ResultOfIncludeWordForString = {
new ResultOfIncludeWordForString(leftSideValue, true, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string should startWith regex ("hello")
* ^
* </pre>
*/
// SKIP-DOTTY-START
def should(startWithWord: StartWithWord)(implicit ev: T <:< String): ResultOfStartWithWordForString = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(startWithWord: StartWithWord)(implicit ev: T <:< String): ResultOfStartWithWordForString = {
new ResultOfStartWithWordForString(leftSideValue, true, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string should endWith regex ("world")
* ^
* </pre>
*/
// SKIP-DOTTY-START
def should(endWithWord: EndWithWord)(implicit ev: T <:< String): ResultOfEndWithWordForString = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def should(endWithWord: EndWithWord)(implicit ev: T <:< String): ResultOfEndWithWordForString = {
new ResultOfEndWithWordForString(leftSideValue, true, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string shouldNot startWith regex ("hello")
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(startWithWord: StartWithWord)(implicit ev: T <:< String): ResultOfStartWithWordForString =
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldNot(startWithWord: StartWithWord)(implicit ev: T <:< String): ResultOfStartWithWordForString =
new ResultOfStartWithWordForString(leftSideValue, false, prettifier, pos)
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string shouldNot endWith regex ("world")
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(endWithWord: EndWithWord)(implicit ev: T <:< String): ResultOfEndWithWordForString =
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldNot(endWithWord: EndWithWord)(implicit ev: T <:< String): ResultOfEndWithWordForString =
new ResultOfEndWithWordForString(leftSideValue, false, prettifier, pos)
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string shouldNot include regex ("hi")
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(includeWord: IncludeWord)(implicit ev: T <:< String): ResultOfIncludeWordForString =
// SKIP-DOTTY-END
//DOTTY-ONLY extension [T](leftSideValue: T)(using pos: source.Position, prettifier: Prettifier) def shouldNot(includeWord: IncludeWord)(implicit ev: T <:< String): ResultOfIncludeWordForString =
new ResultOfIncludeWordForString(leftSideValue, false, prettifier, pos)
// SKIP-DOTTY-START
}
// SKIP-DOTTY-END
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* <p>
* This class is used in conjunction with an implicit conversion to enable <code>should</code> methods to
* be invoked on <code>String</code>s.
* </p>
*
* @author Bill Venners
*/
// SKIP-DOTTY-START
final class StringShouldWrapper(val leftSideString: String, pos: source.Position, prettifier: Prettifier) extends AnyShouldWrapper(leftSideString, pos, prettifier) with StringShouldWrapperForVerb {
// SKIP-DOTTY-END
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string should fullyMatch regex ("a(b*)c" withGroup "bb")
* ^
* </pre>
*/
// SKIP-DOTTY-START
def withGroup(group: String): RegexWithGroups =
// SKIP-DOTTY-END
//DOTTY-ONLY extension (leftSideString: String)(using pos: source.Position, prettifier: Prettifier) def withGroup(group: String): RegexWithGroups =
new RegexWithGroups(leftSideString.r, IndexedSeq(group))
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string should fullyMatch regex ("a(b*)(c*)" withGroups ("bb", "cc"))
* ^
* </pre>
*/
// SKIP-DOTTY-START
def withGroups(groups: String*): RegexWithGroups =
// SKIP-DOTTY-END
//DOTTY-ONLY extension (leftSideString: String)(using pos: source.Position, prettifier: Prettifier) def withGroups(groups: String*): RegexWithGroups =
new RegexWithGroups(leftSideString.r, IndexedSeq(groups: _*))
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string should fullyMatch regex ("""(-)?(\\d+)(\\.\\d*)?""")
* ^
* </pre>
*/
// SKIP-DOTTY-START
def should(fullyMatchWord: FullyMatchWord): ResultOfFullyMatchWordForString = {
// SKIP-DOTTY-END
//DOTTY-ONLY extension (leftSideString: String)(using pos: source.Position, prettifier: Prettifier) def should(fullyMatchWord: FullyMatchWord): ResultOfFullyMatchWordForString = {
new ResultOfFullyMatchWordForString(leftSideString, true, prettifier, pos)
}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string shouldNot fullyMatch regex ("""(-)?(\\d+)(\\.\\d*)?""")
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(fullyMatchWord: FullyMatchWord): ResultOfFullyMatchWordForString =
// SKIP-DOTTY-END
//DOTTY-ONLY extension (leftSideString: String)(using pos: source.Position, prettifier: Prettifier) def shouldNot(fullyMatchWord: FullyMatchWord): ResultOfFullyMatchWordForString =
new ResultOfFullyMatchWordForString(leftSideString, false, prettifier, pos)
//DOTTY-ONLY import scala.compiletime.testing.{typeChecks,typeCheckErrors}
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string should compile
* ^
* </pre>
*/
// SKIP-DOTTY-START
def should(compileWord: CompileWord)(implicit pos: source.Position): Assertion = macro CompileMacro.shouldCompileImpl
// SKIP-DOTTY-END
//DOTTY-ONLY extension (inline leftSideString: String)(using pos: source.Position, prettifier: Prettifier) transparent inline def should(compileWord: CompileWord): Assertion = ${ org.scalatest.matchers.should.CompileMacro.shouldCompileImpl('{leftSideString}, '{typeChecks(leftSideString)}, '{compileWord})('{pos}) }
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string shouldNot compile
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(compileWord: CompileWord)(implicit pos: source.Position): Assertion = macro CompileMacro.shouldNotCompileImpl
// SKIP-DOTTY-END
//DOTTY-ONLY extension (inline leftSideString: String)(using pos: source.Position, prettifier: Prettifier) transparent inline def shouldNot(compileWord: CompileWord): Assertion = ${ org.scalatest.matchers.should.CompileMacro.shouldNotCompileImpl('{leftSideString}, '{typeChecks(leftSideString)}, '{compileWord})('{pos}) }
/**
* This method enables syntax such as the following:
*
* <pre class="stHighlight">
* string shouldNot typeCheck
* ^
* </pre>
*/
// SKIP-DOTTY-START
def shouldNot(typeCheckWord: TypeCheckWord)(implicit pos: source.Position): Assertion = macro CompileMacro.shouldNotTypeCheckImpl
// SKIP-DOTTY-END
//DOTTY-ONLY extension (inline leftSideString: String)(using pos: source.Position, prettifier: Prettifier) transparent inline def shouldNot(typeCheckWord: TypeCheckWord): Assertion = ${ org.scalatest.matchers.should.CompileMacro.shouldNotTypeCheckImpl('{leftSideString}, '{typeCheckErrors(leftSideString)}, '{typeCheckWord})('{pos}) }
// SKIP-DOTTY-START
}
// SKIP-DOTTY-END
// SKIP-DOTTY-START
/**
* This class is part of the ScalaTest matchers DSL. Please see the documentation for <a href="Matchers.html"><code>Matchers</code></a> for an overview of
* the matchers DSL.
*
* <p>
* This class is used in conjunction with an implicit conversion to enable <code>withGroup</code> and <code>withGroups</code> methods to
* be invoked on <code>Regex</code>s.
* </p>
*
* @author Bill Venners
*/
final class RegexWrapper(regex: Regex) {
// SKIP-DOTTY-END
  /**
   * This method enables syntax such as the following:
   *
   * <pre class="stHighlight">
   * regex should fullyMatch regex ("a(b*)c" withGroup "bb")
   *                                         ^
   * </pre>
   */
  // SKIP-DOTTY-START
  def withGroup(group: String): RegexWithGroups =
  // SKIP-DOTTY-END
  //DOTTY-ONLY extension (regex: Regex) def withGroup(group: String): RegexWithGroups =
    // Pairs this regex with the single expected capture-group value.
    new RegexWithGroups(regex, IndexedSeq(group))
  /**
   * This method enables syntax such as the following:
   *
   * <pre class="stHighlight">
   * regex should fullyMatch regex ("a(b*)(c*)" withGroups ("bb", "cc"))
   *                                            ^
   * </pre>
   */
  // SKIP-DOTTY-START
  def withGroups(groups: String*): RegexWithGroups =
  // SKIP-DOTTY-END
  //DOTTY-ONLY extension (regex: Regex) def withGroups(groups: String*): RegexWithGroups =
    // The varargs are copied into an IndexedSeq, one entry per expected group.
    new RegexWithGroups(regex, IndexedSeq(groups: _*))
// SKIP-DOTTY-START
}
// SKIP-DOTTY-END
// SKIP-DOTTY-START
/**
 * Implicitly converts an object of type <code>T</code> to a <code>AnyShouldWrapper[T]</code>,
 * to enable <code>should</code> methods to be invokable on that object.
 *
 * @param o the value to wrap
 * @param pos the call-site source position, captured implicitly for failure reporting
 * @param prettifier the prettifier used to render values in assertion messages
 */
implicit def convertToAnyShouldWrapper[T](o: T)(implicit pos: source.Position, prettifier: Prettifier): AnyShouldWrapper[T] = new AnyShouldWrapper(o, pos, prettifier)
/**
 * Implicitly converts an object of type <code>java.lang.String</code> to a <code>StringShouldWrapper</code>,
 * to enable <code>should</code> methods to be invokable on that object.
 *
 * @param o the string to wrap
 * @param pos the call-site source position, captured implicitly for failure reporting
 * @param prettifier the prettifier used to render values in assertion messages
 */
implicit def convertToStringShouldWrapper(o: String)(implicit pos: source.Position, prettifier: Prettifier): StringShouldWrapper = new StringShouldWrapper(o, pos, prettifier)
/**
 * Implicitly converts an object of type <code>scala.util.matching.Regex</code> to a <code>RegexWrapper</code>,
 * to enable <code>withGroup</code> and <code>withGroups</code> methods to be invokable on that object.
 *
 * @param o the regex to wrap
 */
implicit def convertToRegexWrapper(o: Regex): RegexWrapper = new RegexWrapper(o)
// SKIP-DOTTY-END
/**
 * This method enables syntax such as the following:
 *
 * <pre class="stHighlight">
 * book should have (message ("A TALE OF TWO CITIES") (of [Book]), title ("A Tale of Two Cities"))
 *                                                    ^
 * </pre>
 *
 * @tparam T the type whose property is being matched
 */
// The ClassTag evidence presumably lets ResultOfOfTypeInvocation capture T's
// runtime class — confirm against that class's definition.
def of[T](implicit ev: ClassTag[T]): ResultOfOfTypeInvocation[T] = new ResultOfOfTypeInvocation[T]
}
/**
* Companion object that facilitates the importing of <code>Matchers</code> members as
* an alternative to mixing it the trait. One use case is to import <code>Matchers</code> members so you can use
* them in the Scala interpreter.
*
* @author Bill Venners
*/
object Matchers extends Matchers
| scalatest/scalatest | jvm/shouldmatchers/src/main/scala/org/scalatest/matchers/should/Matchers.scala | Scala | apache-2.0 | 322,269 |
package com.joehalliwell.sp
import scala.collection.immutable.Map
/** *****************************************************************
*
* ******************************************************************/
/*
* Bindings
* TODO: Occurs check? Merge with State?
*/
/**
 * Result of a unification attempt: either [[Fail]] or a [[Success]] carrying
 * the variable bindings accumulated so far.
 */
sealed trait Env {
  // Explicit public return type (was inferred). `Success` already overrides
  // this with `: Env`, so the annotation is interface-compatible. For `Fail`
  // the default applies: unifying under a failed environment stays failed.
  def unify(terms: (Term, Term)): Env = this
}
/** Terminal failure environment: unification has failed and no bindings exist. */
case object Fail extends Env
/**
 * A (so far) successful unification environment.
 *
 * @param binding the substitution built up so far, mapping variables to the
 *                terms they are bound to
 */
case class Success(binding: Map[Variable, Term] = Map[Variable, Term]()) extends Env {
  /**
   * Binds variable `v` to term `t`, following any existing chain of
   * variable-to-variable bindings.
   *
   * @param orig the variable that started the chain; used to detect a direct
   *             cycle (`v -> ... -> orig`) and return [[Fail]] rather than
   *             recurse forever
   */
  def bind(orig: Variable, v: Variable, t: Term): Env = {
    //println("B: " + v + "=" + t)
    binding.get(v) match {
      // v is unbound: extend the substitution with v -> t.
      case None => Success(binding + (v -> t))
      case Some(v2) => v2 match {
        // Disallow circularities
        case v2: Variable => if (orig.equals(v2)) Fail else bind(orig, v2, t)
        // v is already bound to an equal term: nothing to do.
        case t2: Term if t.equals(t2) => this
        // v is bound to a different non-variable term: that term must unify
        // with t. (`default` is just a binder name here — this is a catch-all.)
        case default => unify(v2, t)
      }
    }
  }
  // Unify two terms, extending our bindings
  // TODO: Add note on case ordering (it's important!)
  // NOTE(review): the `(t1, t2: Variable)` case precedes `(t1: Variable, t2)`,
  // so when both sides are variables the right-hand one is the one bound.
  override def unify(terms: (Term, Term)): Env = terms match {
    case (t1: Atom, t2: Atom) if t1 == t2 => this
    case (t1, t2: Variable) => bind(t2, t2, t1)
    case (t1: Variable, t2) => bind(t1, t1, t2)
    case (t1: Predicate, t2: Predicate) if t1.name == t2.name && t1.arity == t2.arity
    // This is neat, but inefficient! Is there a nice way to write it?
    // Unify argument lists pairwise, threading the environment left to right.
    => t1.args.zip(t2.args).foldLeft(this: Env)((x, y) => x.unify(y))
    case _ => Fail
  }
  //@tailrec
  /** Resolves `t` under the current bindings, substituting recursively. */
  def extract(t: Term): Term = t match {
    case Predicate(name, arity, args) => Predicate(name, arity, args.map(x => extract(x)))
    case v: Variable => binding.get(v) match {
      case None => v
      case Some(t) => extract(t)
    }
    case t => t
  }
  // Renders only top-level (level == 0) variables, one binding per line.
  override def toString = {
    for {
      (variable, value) <- binding
      if variable.level == 0
    } yield variable.name + "=" + extract(value)
  } mkString "\\n"
}
| joehalliwell/scala-prolog | src/main/scala/com/joehalliwell/sp/Env.scala | Scala | mit | 2,041 |
package de.uniulm.dds.base
/**
 * Represents a variable in a decision diagram.
 * <p/>
 * User: felix
 * Date: 27.03.13
 * Time: 16:12
 *
 * @tparam V the type of the elements of a variable's domain.
 */
// Constructor is private[base]: instances are obtained through the companion
// apply, which delegates to a Context.
final class Variable[V] private[base](val name: String, val domain: Set[V]) {
  // A variable with fewer than two values carries no decision.
  require(domain.size >= 2, "Domain size must be two or more.")
  override def toString: String = name
  /**
   * Creates an indicator diagram that maps `indicatedValue` to 1 and all other values to 0.
   *
   * @param indicatedValue the indicated value
   * @param context the context to use for constructing the indicator diagram
   * @param n the numeric that defines what 1 and 0 are.
   * @tparam T the number type
   * @return the indicator diagram
   */
  def indicator[T](indicatedValue: V)(implicit context: Context[V, T], n: Numeric[T]): DecisionDiagram[V, T] =
    // collection.breakOut builds the Map directly, without an intermediate
    // Set[(V, T)] (Scala 2.12-era idiom).
    DecisionDiagram(this, domain.map[(V, T), Map[V, T]](x => if (x == indicatedValue) x -> n.one else x -> n.zero)(collection.breakOut))
}
object Variable {
  /**
   * Constructs a new variable.
   *
   * @param name The name of the variable
   * @param domain The domain of the variable, i.e., the values it can take. Must be a finite set of cardinality of two or more.
   * @param context the (possibly implicitly given) context that creates the actual variable
   * @tparam V the type of the elements of a variable's domain
   * @tparam T the number type of diagrams managed by the context
   * @return the new variable
   */
  def apply[V, T](name: String, domain: Set[V])(implicit context: Context[V, T]): Variable[V] = context.getVariable(name, domain)
}
| uulm-ai/scadd | src/main/scala/de/uniulm/dds/base/Variable.scala | Scala | mit | 1,536 |
package cafesat.common
import org.scalatest.funsuite.AnyFunSuite
/** Behavioral tests for [[FixedIntStack]], a fixed-capacity stack of Ints. */
class FixedIntStackSuite extends AnyFunSuite {

  // A freshly constructed stack holds nothing.
  test("a new stack is empty") {
    val stack = new FixedIntStack(5)
    assert(stack.isEmpty)
    assert(stack.size === 0)
  }

  // A single push is observable via size and top.
  test("pushing on empty stack contains exactly that element") {
    val stack = new FixedIntStack(5)
    stack.push(42)
    assert(stack.size === 1)
    assert(stack.top === 42)
  }

  // pop yields the most recent push.
  test("pop returns last pushed element") {
    val stack = new FixedIntStack(5)
    stack.push(17)
    assert(stack.pop() === 17)
  }

  // Interleaved pushes and pops observe last-in-first-out ordering.
  test("stack behaves in LIFO") {
    val stack = new FixedIntStack(5)
    stack.push(17)
    stack.push(18)
    stack.push(19)
    assert(stack.pop() === 19)
    assert(stack.pop() === 18)
    stack.push(42)
    assert(stack.pop() === 42)
    assert(stack.pop() === 17)
  }

  // size increments once per push.
  test("size growths with each push") {
    val stack = new FixedIntStack(5)
    assert(stack.size === 0)
    stack.push(17)
    assert(stack.size === 1)
    stack.push(18)
    assert(stack.size === 2)
    stack.push(19)
    assert(stack.size === 3)
  }

  // size decrements once per pop.
  test("size shrinks with each pop") {
    val stack = new FixedIntStack(5)
    stack.push(1)
    stack.push(2)
    stack.push(3)
    assert(stack.size === 3)
    stack.pop()
    assert(stack.size === 2)
    stack.pop()
    assert(stack.size === 1)
    stack.pop()
    assert(stack.size === 0)
  }
}
| regb/cafesat | src/test/scala/cafesat/common/FixedIntStackSuite.scala | Scala | mit | 1,245 |
package controllers
import helpers.UnitSpec
import helpers.TestWithApplication
import pages.vrm_assign.TermsAndConditionsPage
import play.api.test.FakeRequest
import play.api.test.Helpers.OK
import play.api.test.Helpers.contentAsString
import play.api.test.Helpers.defaultAwaitTimeout
import play.api.test.Helpers.status
/** Unit spec for the TermsAndConditions controller's `present` action. */
class TermsAndConditionsUnitSpec extends UnitSpec {
  "present" should {
    // The action should respond 200 OK and the body should contain the
    // expected page title.
    "display the page" in new TestWithApplication {
      val result = termsAndConditions.present(FakeRequest())
      status(result) should equal(OK)
      contentAsString(result) should include(TermsAndConditionsPage.title)
    }
  }
  // Resolves the controller under test from the test injector.
  private def termsAndConditions = testInjector().getInstance(classOf[TermsAndConditions])
}
| dvla/vrm-assign-online | test/controllers/TermsAndConditionsUnitSpec.scala | Scala | mit | 727 |
/*
Copyright 2011 the original author or authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package net.gumbix.bioinf.string.seq.test
import net.gumbix.bioinf.string.seq.{BWT, BWTOutput}
import org.junit.{Ignore, Test}
/**
 * Prints the intermediate tables produced by the Burrows-Wheeler transform
 * helpers for a few sample inputs. All cases are currently @Ignore'd.
 *
 * @author Markus Gumbel (m.gumbel@hs-mannheim.de)
 */
class BWTTest {
  @Test
  @Ignore
  // Procedure syntax (`def f() { }`) is deprecated; use an explicit Unit result.
  def banana(): Unit = {
    doBWT("banana#")
  }
  @Test
  @Ignore
  def googol(): Unit = {
    doBWT("googol#")
  }
  @Test
  @Ignore
  def acaacg(): Unit = {
    doBWT("ACAACG#")
  }
  /**
   * Runs the BWT machinery on `s` and prints the rotation list, its sorted
   * form, the suffix array, the transform itself, and LaTeX renderings.
   *
   * @param s the input string; the sample inputs end with a '#' sentinel —
   *          TODO confirm whether BWTOutput requires a terminator
   */
  def doBWT(s: String): Unit = {
    val bwt = new BWTOutput(s) // renamed from misspelled `btw`
    println("rotation list:")
    println(bwt.rotationList.mkString(","))
    println("sorted rotation list:")
    println(bwt.sortedRotationList.mkString(","))
    println("suffix array (list):")
    println(bwt.suffixArrayList.mkString(","))
    println("BWT:")
    println(bwt.transformation)
    println()
    println(bwt.rotationToLaTeX)
    println()
    println(bwt.suffixArrayToLaTeX)
  }
}
} | markusgumbel/scalabioalg | core/src/test/scala/net/gumbix/bioinf/string/seq/test/BWTTest.scala | Scala | apache-2.0 | 1,447 |
package scuff.concurrent
import java.util.concurrent._
/**
 * Thrown when a task is rejected by a `ThreadPoolExecutor` because its bounded
 * work queue is full. The message captures the task, the executor, and the
 * queue's runtime class plus the configured `queueCapacity` for diagnosis.
 */
class QueueFullRejection(r: Runnable, e: ThreadPoolExecutor, queueCapacity: Int)
  extends RejectedExecutionException(s"Task $r rejected from $e because ${e.getQueue.getClass.getSimpleName}(capacity=$queueCapacity) is full")
| nilskp/scuff | src/main/scala/scuff/concurrent/QueueFullRejection.scala | Scala | mit | 280 |
package io.bst
import akka.actor.{ActorLogging, Actor}
import akka.util.Timeout
import scala.concurrent.duration._
import spray.can.Http
import spray.http.HttpMethods._
import spray.http.MediaTypes._
import spray.http._
/**
* The REST interface
*/
class Api extends Actor with ActorLogging {

  implicit val timeout: Timeout = 1.second // for the actor 'asks'

  // Response for the root path; the XML is rendered to a string once, lazily.
  lazy val index = HttpResponse(
    entity = HttpEntity(`text/html`,
      <html>
        <body>
          <h1>Say hello to
            <i>BST</i>
            !</h1>
        </body>
      </html>.toString()
    )
  )

  override def receive = {
    // Register this actor as the handler for each new HTTP connection.
    case _: Http.Connected =>
      sender ! Http.Register(self)

    case HttpRequest(GET, Uri.Path("/"), _, _, _) =>
      log.debug("Sending index...")
      sender ! index

    // Any other request is not served.
    case _: HttpRequest =>
      sender ! HttpResponse(status = 404, entity = "WTF!")
  }
}
| bst-cave/core | src/main/scala/io/bst/Api.scala | Scala | apache-2.0 | 870 |
package io.taskr
import com.owlike.genson.defaultGenson.{fromJson, toJson}
// A single step of a task; `toString` produces its JSON representation.
case class Event(name: String, description: String) {
  override def toString(): String = toJson(this)
}
// A task: an ordered sequence of events plus the index of the event currently
// in progress; `toString` produces its JSON representation.
case class Task(id: String, events: Seq[Event], current_event: Int) {
  override def toString(): String = toJson(this)
}
/**
 * Derived progress figures for a task: totals, remaining count and a
 * completion ratio.
 */
case class TaskProgress(task: Task) {
  val totalEvents = task.events.length
  val currentEvent = task.current_event
  val remainingEvents = totalEvents - currentEvent
  // Guard against division by zero for a task with no events.
  val progress =
    if (totalEvents == 0) 0 else currentEvent.toFloat / totalEvents.toFloat

  def toMap() = {
    val hasCurrent = currentEvent < task.events.length
    Map(
      "total_events" -> totalEvents,
      "current_event" -> currentEvent,
      "remaining_events" -> remainingEvents,
      "progress" -> progress,
      // null (deliberately, not None) once the task has run past its last event
      "event" -> (if (hasCurrent) task.events(currentEvent) else null)
    )
  }
}
| mcross1882/taskr | src/main/scala/io/taskr/Task.scala | Scala | mit | 934 |
package com.seanshubin.web.sync.domain
import java.nio.charset.{Charset, StandardCharsets}
import java.nio.file.StandardOpenOption.{APPEND, CREATE}
import java.nio.file.{Files, Path}
import scala.collection.JavaConverters._
class FileSystemImpl extends FileSystem {
  // All explicit text conversions in this implementation use UTF-8; the
  // java.nio line-based helpers below also default to UTF-8.
  private val utf8: Charset = StandardCharsets.UTF_8

  private def encode(s: String): Array[Byte] = s.getBytes(utf8)

  private def decode(bytes: Array[Byte]): String = new String(bytes, utf8)

  override def readFileIntoString(path: Path): String = decode(Files.readAllBytes(path))

  override def readFileIntoBytes(path: Path): Seq[Byte] = Files.readAllBytes(path)

  override def readFileIntoLines(path: Path): Seq[String] = Files.readAllLines(path).asScala

  override def writeStringToFile(s: String, path: Path): Unit = Files.write(path, encode(s))

  override def writeBytesToFile(bytes: Seq[Byte], path: Path): Unit = Files.write(path, bytes.toArray)

  // Appends one line (with terminator) to the file, creating it if absent.
  override def appendLine(path: Path, line: String): Unit = Files.write(path, Seq(line).asJava, APPEND, CREATE)

  override def createMissingDirectories(path: Path): Unit = Files.createDirectories(path)

  override def fileExists(path: Path): Boolean = Files.exists(path)

  override def deleteIfExists(path: Path): Unit = Files.deleteIfExists(path)
}
| SeanShubin/web-sync | domain/src/main/scala/com/seanshubin/web/sync/domain/FileSystemImpl.scala | Scala | unlicense | 1,319 |
object Prob44 {
  /** n-th pentagonal number: P(n) = n(3n - 1) / 2. */
  def pentagonal(n: Int) = n*(3*n - 1)/2

  /** All pentagonal numbers, lazily, starting at P(0) = 0. */
  val pentagonals = Stream.from(0).map(pentagonal)

  // Materialized prefix of the pentagonal numbers, bounded well below the
  // point where the Int arithmetic in `pentagonal` would overflow.
  val pentaList = pentagonals.takeWhile(_ < (Int.MaxValue >> 2)).toList

  // Indexed view of pentaList for the search: List(i) is O(i) per access,
  // Vector(i) is effectively constant time.
  private val pentaVec = pentaList.toVector

  val pentaSet = pentaList.toSet

  /** True iff n is a pentagonal number (within the precomputed range). */
  def isPenta(n: Int) =
    pentaSet.contains(n)

  /**
   * Searches for indices (j, k) with j < k such that P(j) + P(k) is
   * pentagonal and P(k) - P(j) == goal. Returns None when either index runs
   * off the end of the precomputed table or the pointers meet.
   */
  private def exec(goal: Int) = {
    // Single tail-recursive search. The original mutual recursion between
    // `f` and an inner `next` helper was not tail-call-optimized by scalac
    // and could overflow the stack on long searches; the traversal order
    // here is identical.
    @annotation.tailrec
    def f(j: Int, k: Int): Option[(Int, Int)] = {
      if (j == k) None
      else {
        require(j < k)
        val pj = pentaVec(j)
        val pk = pentaVec(k)
        val diff = pk - pj
        if (isPenta(pj + pk) && diff == goal) Some((j, k))
        else if (diff < goal) f(j, k + 1)      // difference too small: widen upward
        else if (diff > goal) f(j + 1, k)      // difference too large: shrink from below
        else f(j + 1, k + 1)                   // right difference but sum not pentagonal
      }
    }
    f(1, 2)
  }

  def main(args: Array[String]) {
    // Try each pentagonal number as the target difference; the first goal that
    // admits a valid pair wins. Each candidate goal is echoed for progress.
    val result = pentagonals.map{i => println(i); i}.map(exec).flatten.head
    println(result)
  }
}
| ponkotuy/ProjectEular | src/main/scala/Prob44.scala | Scala | mit | 917 |
import edu.uta.diql._
import org.apache.spark._
import org.apache.spark.rdd._
import org.apache.log4j._
object Test {
  // Flat records matching the comma-separated layouts of the two input files.
  case class Customer ( name: String, cid: Int, account: Float )
  case class Order ( oid: Int, cid: Int, price: Float )
  /**
   * Runs a DIQL nested query on Spark: emits the names of customers whose
   * account balance is below the sum (`+/`) of their order prices.
   * args: (0) customers CSV path, (1) orders CSV path, (2) output path.
   */
  def main ( args: Array[String] ) {
    val CF = args(0)
    val OF = args(1)
    val output_file = args(2)
    val conf = new SparkConf().setAppName("Nested")
    val sc = new SparkContext(conf)
    // Quiet Spark's own logging so the timing line below stays readable.
    conf.set("spark.logConf","false")
    conf.set("spark.eventLog.enabled","false")
    LogManager.getRootLogger().setLevel(Level.WARN)
    //explain(true)
    val t: Long = System.currentTimeMillis()
    // Parse the CSV inputs into case-class RDDs; rows are assumed to have
    // exactly three fields (anything else is a MatchError at runtime).
    val customers = sc.textFile(CF).map{ line => line.split(",")
                 match { case Array(a,b,c) => Customer(a,b.toInt,c.toFloat) } }
    val orders = sc.textFile(OF).map{ line => line.split(",")
                 match { case Array(a,b,c) => Order(a.toInt,b.toInt,c.toFloat) } }
    // The q(...) macro compiles the DIQL query into Spark operations.
    q("""
      select c.name
      from c <- customers
      where c.account < +/(select o.price from o <- orders where o.cid == c.cid)
      """).saveAsTextFile(output_file)
    sc.stop()
    println("**** DIQL Spark run time: "+(System.currentTimeMillis()-t)/1000.0+" secs")
  }
}
| fegaras/DIQL | benchmarks/NestedSpark.scala | Scala | apache-2.0 | 1,236 |
import _root_.db.DB
import play.api._
import play.api.mvc.Handler
import java.io.File
import com.typesafe.config.ConfigFactory
import slick.driver.PostgresDriver.api._
object Application {
  /** Builds a FakeApplication from the development configuration file. */
  def getDev : FakeApplication = {
    getApp(new File("conf/application.conf"))
  }
  /*
  def getProd : FakeApplication = {
    getApp(new File("conf/application.prod.conf"))
  }
  def getTest : FakeApplication = {
    getApp(new File("conf/application.test.conf"))
  }
  */
  // Parses the given config file into a Play Configuration and wraps it in a
  // FakeApplication. Fails fast when the file is missing, so a bad path is
  // reported instead of silently falling back to defaults.
  private def getApp(configFile: File) : FakeApplication = {
    if (!configFile.exists())
      throw new Exception("Config file does not exist: " + configFile.getAbsolutePath)
    val conf = Configuration(ConfigFactory.parseFile(configFile))
    FakeApplication(configuration = conf)
  }
}
/**
 * Minimal Play application for tools/tests, wired from an explicitly supplied
 * Configuration (always runs in Dev mode, see the early initializer below).
 */
case class FakeApplication(
  override val path: java.io.File = new java.io.File("."),
  override val classloader: ClassLoader = classOf[FakeApplication].getClassLoader,
  val additionalPlugins: Seq[String] = Nil,
  val withoutPlugins: Seq[String] = Nil,
  override val configuration: Configuration,
  val withGlobal: Option[play.api.GlobalSettings] = None,
  val withRoutes: PartialFunction[(String, String), Handler] = PartialFunction.empty) extends {
  override val sources = None
  override val mode = play.api.Mode.Dev
} with Application with WithDefaultConfiguration with WithDefaultGlobal with WithDefaultPlugins {
  // Slick Database built from the `db.default` config section.
  // NOTE: each getOrElse(???) throws scala.NotImplementedError when its key is
  // missing -- the configuration must provide db.default.{jdbcUrl, username,
  // password, driverClassName}.
  lazy val database : Database = {
    val dbConfig = configuration.getConfig("db.default").getOrElse(???)
    Database.forURL(
      dbConfig.getString("jdbcUrl").getOrElse(???),
      dbConfig.getString("username").getOrElse(???),
      dbConfig.getString("password").getOrElse(???),
      driver = dbConfig.getString("driverClassName").getOrElse(???)
    )
  }
  // Publishes the database to the global DB holder (forces lazy init).
  def connectDatabase(): Unit = {
    DB.dataSource = Some(database)
  }
}
| papauschek/cointape | tools/src/main/scala/TestConfig.scala | Scala | mit | 2,014 |
/* __ *\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2010, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\___/_/ |_/____/_/ | | **
** |/ **
\* */
package scala.xml
import Utility.sbToString
import annotation.tailrec
/**
* Copyright 2008 Google Inc. All Rights Reserved.
* @author Burak Emir <bqe@google.com>
*/
object MetaData {
  /**
   * appends all attributes from new_tail to attribs, without attempting to detect
   * or remove duplicates. The method guarantees that all attributes from attribs come before
   * the attributes in new_tail, but does not guarantee to preserve the relative order of attribs.
   * Duplicates can be removed with normalize.
   */
  @tailrec
  def concatenate(attribs: MetaData, new_tail: MetaData): MetaData =
    if (attribs eq Null) new_tail
    else concatenate(attribs.next, attribs copy new_tail)
  /**
   * returns normalized MetaData, with all duplicates removed and namespace prefixes resolved to
   * namespace URIs via the given scope. When a key occurs more than once, the
   * first occurrence is kept.
   */
  def normalize(attribs: MetaData, scope: NamespaceBinding): MetaData = {
    def iterate(md: MetaData, normalized_attribs: MetaData, set: Set[String]): MetaData = {
      // lazy: only evaluated after the `md eq Null` check below, since
      // getUniversalKey cannot handle the Null terminator
      lazy val key = getUniversalKey(md, scope)
      if (md eq Null) normalized_attribs
      else if (set(key)) iterate(md.next, normalized_attribs, set)
      else iterate(md.next, md copy normalized_attribs, set + key)
    }
    iterate(attribs, Null, Set())
  }
  /**
   * returns key if md is unprefixed, uri+key if md is prefixed (the prefix is
   * resolved to its namespace URI via the given scope)
   */
  def getUniversalKey(attrib: MetaData, scope: NamespaceBinding) = attrib match {
    case prefixed: PrefixedAttribute => scope.getURI(prefixed.pre) + prefixed.key
    case unprefixed: UnprefixedAttribute => unprefixed.key
  }
  /**
   * returns MetaData with attributes updated from given MetaData
   */
  def update(attribs: MetaData, scope: NamespaceBinding, updates: MetaData): MetaData =
    normalize(concatenate(updates, attribs), scope)
}
/** <p>
* This class represents an attribute and at the same time a linked list of attributes.
* Every instance of this class is either an instance of UnprefixedAttribute <code>key,value</code>
* or an instance of PrefixedAttribute <code>namespace_prefix,key,value</code> or Null, the empty
* attribute list. Namespace URIs are obtained by using the namespace scope of the element owning
* this attribute (see <code>getNamespace</code>)
* </p>
*
* Copyright 2008 Google Inc. All Rights Reserved.
* @author Burak Emir <bqe@google.com>
*/
@serializable
abstract class MetaData extends Iterable[MetaData] with Equality
{
  /** Updates this MetaData with the MetaData given as argument. All attributes that occur in updates
   *  are part of the resulting MetaData. If an attribute occurs in both this instance and
   *  updates, only the one in updates is part of the result (avoiding duplicates). For prefixed
   *  attributes, namespaces are resolved using the given scope, which defaults to TopScope.
   *
   *  @param updates MetaData with new and updated attributes
   *  @return a new MetaData instance that contains old, new and updated attributes
   */
  def append(updates: MetaData, scope: NamespaceBinding = TopScope): MetaData =
    MetaData.update(this, scope, updates)
  /**
   * Gets value of unqualified (unprefixed) attribute with given key, null if not found
   *
   * @param key
   * @return value as Seq[Node] if key is found, null otherwise
   */
  def apply(key: String): Seq[Node]
  /** convenience method, same as <code>apply(namespace, owner.scope, key)</code>.
   *
   *  @param namespace_uri namespace uri of key
   *  @param owner the element owning this attribute list
   *  @param key the attribute key
   *  @return value as Seq[Node] if key is found, null otherwise
   */
  final def apply(namespace_uri: String, owner: Node, key: String): Seq[Node] =
    apply(namespace_uri, owner.scope, key)
  /**
   * Gets value of prefixed attribute with given key and namespace, null if not found
   *
   * @param namespace_uri namespace uri of key
   * @param scp a namespace scope (usually of the element owning this attribute list)
   * @param k the attribute key to look for
   * @return value as Seq[Node] if key is found, null otherwise
   */
  def apply(namespace_uri:String, scp:NamespaceBinding, k:String): Seq[Node]
  /** returns a copy of this MetaData item with next field set to argument.
   *
   *  @param next the tail of the returned linked list
   *  @return a copy of this attribute whose next field is `next`
   */
  def copy(next: MetaData): MetaData
  /** if owner is the element of this metadata item, returns namespace */
  def getNamespace(owner: Node): String
  def hasNext = (Null != next)
  def length: Int = length(0)
  def length(i: Int): Int = next.length(i + 1)
  def isPrefixed: Boolean
  override def canEqual(other: Any) = other match {
    case _: MetaData => true
    case _ => false
  }
  override def strict_==(other: Equality) = other match {
    // order-insensitive comparison: equality over the set of attributes
    case m: MetaData => this.toSet == m.toSet
    case _ => false
  }
  def basisForHashCode: Seq[Any] = List(this.toSet)
  /** Returns an iterator on attributes */
  def iterator: Iterator[MetaData] = Iterator.single(this) ++ next.iterator
  override def size: Int = 1 + iterator.length
  /** filters this sequence of meta data */
  override def filter(f: MetaData => Boolean): MetaData =
    if (f(this)) copy(next filter f)
    else next filter f
  /** returns key of this MetaData item */
  def key: String
  /** returns value of this MetaData item */
  def value: Seq[Node]
  /** Returns a String containing "prefix:key" if the first key is
   *  prefixed, and "key" otherwise.
   */
  def prefixedKey = this match {
    case x: Attribute if x.isPrefixed => x.pre + ":" + key
    case _ => key
  }
  /** Returns a Map containing the attributes stored as key/value pairs.
   */
  def asAttrMap: Map[String, String] =
    iterator map (x => (x.prefixedKey, x.value.text)) toMap
  /** returns Null or the next MetaData item */
  def next: MetaData
  /**
   * Gets value of unqualified (unprefixed) attribute with given key, None if not found
   *
   * @param key
   * @return value in Some(Seq[Node]) if key is found, None otherwise
   */
  final def get(key: String): Option[Seq[Node]] = Option(apply(key))
  /** same as get(uri, owner.scope, key) */
  final def get(uri: String, owner: Node, key: String): Option[Seq[Node]] =
    get(uri, owner.scope, key)
  /** gets value of qualified (prefixed) attribute with given key.
   *
   * @param uri namespace of key
   * @param scope a namespace scope (usually of the element owning this attribute list)
   * @param key to be looked for
   * @return value as Some[Seq[Node]] if key is found, None otherwise
   */
  final def get(uri: String, scope: NamespaceBinding, key: String): Option[Seq[Node]] =
    Option(apply(uri, scope, key))
  def toString1(): String = sbToString(toString1)
  // appends string representations of single attribute to StringBuilder
  def toString1(sb: StringBuilder): Unit
  override def toString(): String = sbToString(buildString)
  def buildString(sb: StringBuilder): StringBuilder = {
    sb.append(' ')
    toString1(sb)
    next.buildString(sb)
  }
  /**
   * @param scope the namespace scope to check this attribute list against
   * @return <code>true</code> iff this attribute list is well-formed with
   *         respect to the given scope (implementation-defined per subclass)
   */
  def wellformed(scope: NamespaceBinding): Boolean
  /**
   * @param key key of the unprefixed attribute to remove
   * @return this attribute list without the matching attribute
   */
  def remove(key: String): MetaData
  /**
   * @param namespace namespace uri of the prefixed attribute to remove
   * @param scope a namespace scope used to resolve the attribute's prefix
   * @param key key of the prefixed attribute to remove
   * @return this attribute list without the matching attribute
   */
  def remove(namespace: String, scope: NamespaceBinding, key: String): MetaData
  /**
   * @param namespace namespace uri of the prefixed attribute to remove
   * @param owner the element owning this attribute list (provides the scope)
   * @param key key of the prefixed attribute to remove
   * @return this attribute list without the matching attribute
   */
  final def remove(namespace: String, owner: Node, key: String): MetaData =
    remove(namespace, owner.scope, key)
}
| cran/rkafkajars | java/scala/xml/MetaData.scala | Scala | apache-2.0 | 8,231 |
package sttp.client3.examples
import cats.effect.IO
import cats.effect.unsafe.IORuntime
import fs2._
import sttp.capabilities.fs2.Fs2Streams
import sttp.client3._
import sttp.client3.asynchttpclient.fs2.AsyncHttpClientFs2Backend
import sttp.ws.WebSocketFrame
object WebSocketStreamFs2 extends App {
  implicit val runtime: IORuntime = cats.effect.unsafe.implicits.global
  // Pipe from incoming data frames to outgoing frames: starts the exchange by
  // emitting "1", then replies to each numeric text frame with its successor,
  // and emits a close frame once "10" has been received. Non-text frames are
  // dropped.
  def webSocketFramePipe: Pipe[IO, WebSocketFrame.Data[_], WebSocketFrame] = { input =>
    Stream.emit(WebSocketFrame.text("1")) ++ input.flatMap {
      case WebSocketFrame.Text("10", _, _) =>
        println("Received 10 messages, sending close frame")
        Stream.emit(WebSocketFrame.close)
      case WebSocketFrame.Text(n, _, _) =>
        println(s"Received $n messages, replying with $n+1")
        Stream.emit(WebSocketFrame.text((n.toInt + 1).toString))
      case _ => Stream.empty // ignoring
    }
  }
  // Open a backend, run the pipe against the public echo server, and block
  // until the exchange finishes; the resource bracket closes the backend.
  AsyncHttpClientFs2Backend
    .resource[IO]()
    .use { backend =>
      basicRequest
        .response(asWebSocketStream(Fs2Streams[IO])(webSocketFramePipe))
        .get(uri"wss://echo.websocket.org")
        .send(backend)
        .void
    }
    .unsafeRunSync()
}
| softwaremill/sttp | examples/src/main/scala/sttp/client3/examples/WebSocketStreamFs2.scala | Scala | apache-2.0 | 1,165 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.submission
import org.orbeon.dom._
import org.orbeon.dom.saxon.DocumentWrapper
import org.orbeon.oxf.http.HttpMethod
import org.orbeon.oxf.util.CollectionUtils._
import org.orbeon.oxf.util.PathUtils.decodeSimpleQuery
import org.orbeon.oxf.util.StringUtils._
import org.orbeon.oxf.util.{IndentedLogger, XPath}
import org.orbeon.oxf.xforms.XFormsConstants._
import org.orbeon.oxf.xforms.XFormsContainingDocument
import org.orbeon.oxf.xforms.analysis.model.ValidationLevel
import org.orbeon.oxf.xforms.analysis.model.ValidationLevel._
import org.orbeon.oxf.xforms.control.XFormsSingleNodeControl
import org.orbeon.oxf.xforms.event.events.{ErrorType, XFormsSubmitErrorEvent}
import org.orbeon.oxf.xforms.event.{Dispatch, ListenersTrait, XFormsEventObserver, XFormsEventTarget}
import org.orbeon.oxf.xforms.model.{BindNode, InstanceData, XFormsInstance, XFormsModel}
import org.orbeon.oxf.xml.TransformerUtils
import org.orbeon.oxf.xml.dom4j.Dom4jUtils
import org.orbeon.saxon.om.{NodeInfo, VirtualNode}
import scala.collection.JavaConverters._
import scala.collection.mutable
// NOTE(review): Unicode arrows appear mis-encoded as 'β' in this copy of the
// file (likely '⇒'); confirm against the upstream source before compiling.
abstract class XFormsModelSubmissionBase
  extends ListenersTrait
     with XFormsEventTarget
     with XFormsEventObserver {
  thisSubmission: XFormsModelSubmission β
  import XFormsModelSubmissionBase._
  def getModel: XFormsModel
  // Dispatches xforms-submit-error built from the submission result's
  // connection details, unless the throwable carries its own error event.
  protected def sendSubmitError(throwable: Throwable, submissionResult: SubmissionResult): Unit =
    sendSubmitErrorWithDefault(
      throwable,
      new XFormsSubmitErrorEvent(thisSubmission, ErrorType.XXFormsInternalError, submissionResult.getConnectionResult)
    )
  // Same as above, but built from the resolved action/resource URI.
  protected def sendSubmitError(throwable: Throwable, resolvedActionOrResource: String): Unit =
    sendSubmitErrorWithDefault(
      throwable,
      new XFormsSubmitErrorEvent(thisSubmission, Option(resolvedActionOrResource), ErrorType.XXFormsInternalError, 0)
    )
  private def sendSubmitErrorWithDefault(throwable: Throwable, default: β XFormsSubmitErrorEvent): Unit = {
    // After a submission, the context might have changed
    getModel.resetAndEvaluateVariables()
    // Try to get error event from exception and if not possible create default event
    val submitErrorEvent =
      (throwable collect { case se: XFormsSubmissionException β Option(se.getXFormsSubmitErrorEvent) } flatten) getOrElse default
    // Dispatch event
    submitErrorEvent.logMessage(throwable)
    Dispatch.dispatchEvent(submitErrorEvent)
  }
  // Builds the document to submit from the submission's binding: optionally
  // revalidates, re-roots, applies relevance handling/annotations, and checks
  // validity, throwing XFormsSubmissionException when the check fails.
  protected def createDocumentToSubmit(
    currentNodeInfo   : NodeInfo,
    currentInstance   : Option[XFormsInstance],
    validate          : Boolean,
    relevanceHandling : RelevanceHandling,
    annotateWith      : Set[String])(implicit
    indentedLogger    : IndentedLogger
  ): Document = {
    // Revalidate instance
    // NOTE: We need to do this before pruning so that bind/@type works correctly. XForms 1.1 seems to say that this
    // must be done after pruning, but then it is not clear how XML Schema validation would work then.
    // Also, if validate="false" or if serialization="none", then we do not revalidate. Now whether this optimization
    // is acceptable depends on whether validate="false" only means "don't check the instance's validity" or also
    // don't even recalculate. If the latter, then this also means that type annotations won't be updated, which
    // can impact serializations that use type information, for example multipart. But in that case, here we decide
    // the optimization is worth it anyway.
    if (validate)
      currentInstance foreach (_.model.doRecalculateRevalidate())
    // Get selected nodes (re-root and handle relevance)
    val documentToSubmit =
      prepareXML(
        containingDocument,
        currentNodeInfo,
        relevanceHandling,
        Dom4jUtils.getNamespaceContext(getSubmissionElement).asScala.toMap,
        annotateWith
      )
    // Check that there are no validation errors
    // NOTE: If the instance is read-only, it can't have MIPs at the moment, and can't fail validation/requiredness,
    // so we don't go through the process at all.
    val instanceSatisfiesValidRequired =
      currentInstance.exists(_.readonly) ||
      ! validate ||
      isSatisfiesValidity(documentToSubmit, relevanceHandling)
    if (! instanceSatisfiesValidRequired) {
      if (indentedLogger.isDebugEnabled) {
        val documentString = TransformerUtils.tinyTreeToString(currentNodeInfo)
        indentedLogger.logDebug("", "instance document or subset thereof cannot be submitted", "document", documentString)
      }
      throw new XFormsSubmissionException(
        thisSubmission,
        "xf:submission: instance to submit does not satisfy valid and/or required model item properties.",
        "checking instance validity",
        new XFormsSubmitErrorEvent(thisSubmission, ErrorType.ValidationError, null)
      )
    }
    documentToSubmit
  }
}
// NOTE(review): Unicode arrows appear mis-encoded as 'β' in this copy of the
// file (likely '⇒'/'→'); confirm against the upstream source before compiling.
object XFormsModelSubmissionBase {
  import Private._
  import RelevanceHandling._
  // Prepare XML for submission
  //
  // - re-root if `ref` points to an element other than the root element
  // - annotate with `xxf:id` if requested
  // - prune or blank non-relevant nodes if requested
  // - annotate with alerts if requested
  def prepareXML(
    xfcd              : XFormsContainingDocument,
    ref               : NodeInfo,
    relevanceHandling : RelevanceHandling,
    namespaceContext  : Map[String, String],
    annotateWith      : Set[String]
  ): Document =
    ref match {
      case virtualNode: VirtualNode β
        // "A node from the instance data is selected, based on attributes on the submission
        // element. The indicated node and all nodes for which it is an ancestor are considered for
        // the remainder of the submit process. "
        val copy =
          virtualNode.getUnderlyingNode match {
            case e: Element β Dom4jUtils.createDocumentCopyParentNamespaces(e)
            case n: Node    β Dom4jUtils.createDocumentCopyElement(n.getDocument.getRootElement)
            case _          β throw new IllegalStateException
          }
        // Parse the `xxf:annotate` tokens: each token is `name` or `name=qname`,
        // mapping an annotation kind to the attribute QName used for it.
        val attributeNamesForTokens =
          annotateWith.iterator map { token β
            decodeSimpleQuery(token).headOption match {
              case Some((name, value)) β
                name β {
                  value.trimAllToOpt map
                    (Dom4jUtils.extractTextValueQName(namespaceContext.asJava, _, true)) getOrElse
                    QName(name, XXFORMS_NAMESPACE_SHORT)
                }
              case None β
                throw new IllegalArgumentException(s"invalid format for `xxf:annotate` value: `$annotateWith`")
            }
          } toMap
        // Annotate ids before pruning so that it is easier for other code (Form Runner) to infer the same ids
        attributeNamesForTokens.get("id") foreach
          (annotateWithHashes(copy, _))
        relevanceHandling match {
          case RelevanceHandling.Keep β
            attributeNamesForTokens.get("relevant") foreach
              (annotateNonRelevantElements(copy, _))
          case RelevanceHandling.Remove β
            pruneNonRelevantNodes(copy)
          case RelevanceHandling.Empty β
            blankNonRelevantNodes(copy)
            attributeNamesForTokens.get("relevant") foreach
              (annotateNonRelevantElements(copy, _))
        }
        annotateWithAlerts(
          xfcd  = xfcd,
          doc   = copy,
          levelsToAnnotate =
            attributeNamesForTokens.keySet collect
              LevelByName map { level β
                level β attributeNamesForTokens(level.entryName)
              } toMap
        )
        copy
      // Submitting read-only instance backed by TinyTree (no MIPs to check)
      // TODO: What about re-rooting and annotations?
      case ref if ref.getNodeKind == org.w3c.dom.Node.ELEMENT_NODE β
        TransformerUtils.tinyTreeToDom4j(ref)
      case ref β
        TransformerUtils.tinyTreeToDom4j(ref.getRoot)
    }
  // Repeatedly find and detach non-relevant elements/attributes until none remain.
  def pruneNonRelevantNodes(doc: Document): Unit =
    Iterator.iterateWhileDefined(findFirstNonRelevantElementOrAttribute(doc)) foreach (_.detach())
  // Clear the values of non-relevant attributes and leaf elements, keeping the nodes in place.
  def blankNonRelevantNodes(doc: Document): Unit = {
    def processElement(e: Element): Unit = {
      e.attributes.asScala foreach { a β
        if (! InstanceData.getInheritedRelevant(a))
          a.setValue("")
      }
      if (e.containsElement)
        e.elements.asScala foreach processElement
      else if (! InstanceData.getInheritedRelevant(e))
        e.setText("")
    }
    processElement(doc.getRootElement)
  }
  // Stamp every element with an attribute carrying the hash of its data node.
  def annotateWithHashes(doc: Document, attQName: QName): Unit = {
    val wrapper = new DocumentWrapper(doc, null, XPath.GlobalConfiguration)
    var annotated = false
    doc.accept(new VisitorSupport {
      override def visit(element: Element): Unit = {
        val hash = SubmissionUtils.dataNodeHash(wrapper.wrap(element))
        element.addAttribute(attQName, hash)
        annotated = true
      }
    })
    if (annotated)
      addRootElementNamespace(doc)
  }
  // Annotate elements which have failed constraints with an xxf:error, xxf:warning or xxf:info attribute containing
  // the alert message. Only the levels passed in `annotate` are handled.
  def annotateWithAlerts(
    xfcd             : XFormsContainingDocument,
    doc              : Document,
    levelsToAnnotate : Map[ValidationLevel, QName]
  ): Unit =
    if (levelsToAnnotate.nonEmpty) {
      val elementsToAnnotate = mutable.Map[ValidationLevel, mutable.Map[Set[String], Element]]()
      // Iterate data to gather elements with failed constraints
      doc.accept(new VisitorSupport {
        override def visit(element: Element): Unit = {
          val failedValidations = BindNode.failedValidationsForAllLevelsPrioritizeRequired(element)
          for (level β levelsToAnnotate.keys) {
            // NOTE: Annotate all levels specified. If we decide to store only one level of validation
            // in bind nodes, then we would have to change this to take the highest level only and ignore
            // the other levels.
            val failedValidationsForLevel = failedValidations.getOrElse(level, Nil)
            if (failedValidationsForLevel.nonEmpty) {
              val map = elementsToAnnotate.getOrElseUpdate(level, mutable.Map[Set[String], Element]())
              map += (failedValidationsForLevel map (_.id) toSet) β element
            }
          }
        }
      })
      if (elementsToAnnotate.nonEmpty) {
        val controls = xfcd.getControls.getCurrentControlTree.effectiveIdsToControls
        val relevantLevels = elementsToAnnotate.keySet
        def controlsIterator =
          controls.iterator collect {
            case (_, control: XFormsSingleNodeControl)
              if control.isRelevant && control.alertLevel.toList.toSet.subsetOf(relevantLevels) β control
          }
        var annotated = false
        def annotateElementIfPossible(control: XFormsSingleNodeControl) = {
          // NOTE: We check on the whole set of constraint ids. Since the control reads in all the failed
          // constraints for the level, the sets of ids must match.
          for {
            level β control.alertLevel
            controlAlert β Option(control.getAlert)
            failedValidationsIds = control.failedValidations.map(_.id).toSet
            elementsMap β elementsToAnnotate.get(level)
            element β elementsMap.get(failedValidationsIds)
            qName β levelsToAnnotate.get(level)
          } locally {
            // There can be an existing attribute if more than one control bind to the same element
            Option(element.attribute(qName)) match {
              case Some(existing) β existing.setValue(existing.getValue + controlAlert)
              case None β element.addAttribute(qName, controlAlert)
            }
            annotated = true
          }
        }
        // Iterate all controls with warnings and try to annotate the associated element nodes
        controlsIterator foreach annotateElementIfPossible
        // If there is any annotation, make sure the attribute's namespace prefix is in scope on the root
        // element
        if (annotated)
          addRootElementNamespace(doc)
      }
    }
  // True when no element/attribute fails validity; with Empty relevance
  // handling, non-relevant (blanked) nodes are excused from the check.
  def isSatisfiesValidity(
    startNode        : Node,
    relevantHandling : RelevanceHandling)(implicit
    indentedLogger   : IndentedLogger
  ): Boolean =
    findFirstElementOrAttributeWith(
      startNode,
      relevantHandling match {
        case Keep | Remove β node β ! InstanceData.getValid(node)
        case Empty         β node β ! InstanceData.getValid(node) && InstanceData.getInheritedRelevant(node)
      }
    ) match {
      case Some(e: Element) β
        logInvalidNode(e)
        false
      case Some(a: Attribute) β
        logInvalidNode(a)
        false
      case Some(_) β
        throw new IllegalArgumentException
      case None β
        true
    }
  def logInvalidNode(node: Node)(implicit indentedLogger: IndentedLogger): Unit =
    if (indentedLogger.isDebugEnabled)
      node match {
        case e: Element β
          indentedLogger.logDebug(
            "",
            "found invalid node",
            "element name",
            Dom4jUtils.elementToDebugString(e)
          )
        case a: Attribute β
          indentedLogger.logDebug(
            "",
            "found invalid attribute",
            "attribute name",
            Dom4jUtils.attributeToDebugString(a),
            "parent element",
            Dom4jUtils.elementToDebugString(a.getParent)
          )
        case _ β
          throw new IllegalArgumentException
      }
  // Explicit @serialization wins; otherwise the default derived from the XForms
  // method / HTTP method pair.
  def requestedSerialization(
    xformsSerialization : Option[String],
    xformsMethod        : String,
    httpMethod          : HttpMethod
  ): Option[String] =
    xformsSerialization flatMap (_.trimAllToOpt) orElse defaultSerialization(xformsMethod, httpMethod)
  // Java-friendly variant of `requestedSerialization` (null instead of None).
  def getRequestedSerializationOrNull(
    xformsSerialization : Option[String],
    xformsMethod        : String,
    httpMethod          : HttpMethod
  ): String =
    requestedSerialization(xformsSerialization, xformsMethod, httpMethod).orNull
  private object Private {
    def defaultSerialization(xformsMethod: String, httpMethod: HttpMethod): Option[String] =
      xformsMethod.trimAllToOpt collect {
        case "multipart-post"                                                  β "multipart/related"
        case "form-data-post"                                                  β "multipart/form-data"
        case "urlencoded-post"                                                 β "application/x-www-form-urlencoded"
        case _ if httpMethod == HttpMethod.POST || httpMethod == HttpMethod.PUT ||
                  httpMethod == HttpMethod.LOCK || httpMethod == HttpMethod.UNLOCK β "application/xml"
        case _ if httpMethod == HttpMethod.GET || httpMethod == HttpMethod.DELETE  β "application/x-www-form-urlencoded"
      }
    // Marks non-relevant subtree roots with attQname="false"; relevant elements
    // have any stale marker removed and their children visited.
    def annotateNonRelevantElements(doc: Document, attQname: QName): Unit = {
      def processElem(e: Element): Unit =
        if (! InstanceData.getInheritedRelevant(e))
          e.addAttribute(attQname, "false")
        else {
          e.removeAttribute(attQname)
          e.elements.asScala foreach processElem
        }
      processElem(doc.getRootElement)
    }
    def findFirstNonRelevantElementOrAttribute(startNode: Node): Option[Node] =
      findFirstElementOrAttributeWith(startNode, node β ! InstanceData.getInheritedRelevant(node))
    // Visitor-based search with early exit via Breaks, since dom4j's visitor
    // API offers no way to stop traversal.
    def findFirstElementOrAttributeWith(startNode: Node, check: Node β Boolean): Option[Node] = {
      val breaks = new scala.util.control.Breaks
      import breaks._
      var foundNode: Node = null
      tryBreakable[Option[Node]] {
        startNode.accept(
          new VisitorSupport {
            override def visit(element: Element)     = checkNodeAndBreakIfFail(element)
            override def visit(attribute: Attribute) = checkNodeAndBreakIfFail(attribute)
            def checkNodeAndBreakIfFail(node: Node) =
              if (check(node)) {
                foundNode = node
                break()
              }
          }
        )
        None
      } catchBreak {
        Some(foundNode)
      }
    }
    def addRootElementNamespace(doc: Document) =
      doc.getRootElement.addNamespace(XXFORMS_NAMESPACE_SHORT.prefix, XXFORMS_NAMESPACE_SHORT.uri)
  }
}
package com.actian.spark_vector.loader.parsers
import org.scalatest._
import org.scalatest.funsuite.FixtureAnyFunSuite
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import com.actian.spark_vector.loader.command.CSVRead
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.types.IntegerType
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.StringType
import org.apache.spark.sql.Row
import com.actian.spark_vector.loader.options.UserOptions
import resource._
import collection.JavaConverters._
// NOTE(review): the backslash escapes below look doubled compared to what the
// literals presumably should contain in this copy -- confirm against upstream.
class SpecialCharacterTest extends FixtureAnyFunSuite {
  // Each test receives a local single-threaded SparkSession as its fixture.
  override type FixtureParam = SparkSession
  override protected def withFixture(test: OneArgTest): Outcome = {
    val conf = new SparkConf()
      .setMaster("local[1]")
      .setAppName("special character test")
    // `managed` guarantees the session is closed after the test runs.
    managed(SparkSession.builder.config(conf).getOrCreate()).acquireAndGet { spark =>
      withFixture(test.toNoArgTest(spark))
    }
  }
  test("backslash as escape in CSV files"){ implicit spark =>
    val parser: scopt.OptionParser[UserOptions] = Parser
    // Rows expected after the CSV reader has resolved the escapes.
    val expectedData = List((1,"\\"ten\\""), (2,"\\\\N"))
    val schema = StructType( Seq(
        StructField(name = "c1", dataType = IntegerType, nullable = false),
        StructField(name = "c2", dataType = StringType, nullable = false)))
    // Simulated command line configuring quote char, escape char and header.
    val loadCommand: Array[String] = Array("load", "csv", "-sf", "../testdata/escaping.csv",
        "-vh", "localhost", "-vi", "VW", "-vd", "testdb", "-tt", "sl_m3812", "-pm", "PERMISSIVE",
        "-sc", ",", "-is", "true", "-qc", "\\"", "-ec", "\\\\", "-h","c1 int, c2 string,")
    val userOptions = parser.parse(loadCommand, UserOptions())
    assert(userOptions != None)
    val expectedRDD = spark.sparkContext.parallelize(expectedData)
    val expectedDF = spark.createDataFrame(expectedData.map({case (x,y) => Row(x,y)}).asJava, schema)
    assert(expectedDF.count() == 2)
    // Read via the loader's code path and compare against the expected frame.
    val selectQuery = CSVRead.registerTempTable(userOptions.get, spark.sqlContext)
    val parsedDF = spark.sql(selectQuery)
    assert(parsedDF.count() == 2)
    assert(parsedDF.except(expectedDF).count() == 0)
  }
}
| ActianCorp/spark-vector | loader/src/test/scala/com/actian/spark_vector/loader/parsers/parser/SpecialCharacterTest.scala | Scala | apache-2.0 | 2,245 |
package com.buransky.flickrFolderUploader
import java.io._
import javax.swing.filechooser.FileNameExtensionFilter
import com.flickr4java.flickr.auth.Permission
import com.flickr4java.flickr.photos.SearchParameters
import com.flickr4java.flickr.photosets.PhotosetsInterface
import com.flickr4java.flickr.uploader.{UploadMetaData, Uploader}
import com.flickr4java.flickr.util.FileAuthStore
import com.flickr4java.flickr.{Flickr, REST, RequestContext}
import org.scribe.model.Verifier
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.io.StdIn
/**
 * Command-line tool that mirrors a directory tree to Flickr: every
 * sub-directory of the root becomes a photoset (album), every JPEG inside it
 * a photo. Uploads already recorded in the journal file are skipped, so an
 * interrupted run can be resumed.
 */
object FlickrFolderUploaderApp {
  // Maps an uploaded photo's absolute file path to its Flickr photo id;
  // populated from the journal at startup and extended during the run.
  val fileNameToPhotoId = mutable.HashMap[String, String]()
  val fileNameToPhotoIdPath = "fileNameToPhotoId.txt"
  // Append-only journal writer: opened in main, shared by uploadAlbum.
  var bufferedWriter: BufferedWriter = _

  /**
   * Entry point. Arguments:
   * args(0) = Flickr user id, args(1) = API key, args(2) = API secret,
   * args(3) = root directory whose sub-directories become albums.
   */
  def main(args: Array[String]): Unit = {
    val userId = args(0)
    val apiKey = args(1)
    val apiSecret = args(2)
    val rootDir = new File(args(3))
    println(s"User ID: [$userId]")
    println(s"Flickr API key: [$apiKey]")
    println(s"Flickr API secret: [$apiSecret]")
    println(s"Root dir: $rootDir")
    val flickr = new Flickr(apiKey, apiSecret, new REST())
    val authInterface = flickr.getAuthInterface
    // OAuth *request* token: exchanged for an access token below.
    // (Previously misnamed "accessToken", which inverted the flow's naming.)
    val requestToken = authInterface.getRequestToken
    val authStore = new FileAuthStore(new File("authStore"))
    val auth = authStore.retrieveAll().headOption match {
      case Some(a) => a
      case None =>
        // First run: interactive OAuth dance. Print the authorization URL and
        // read the verifier code the user pastes back on stdin.
        val authUrl = authInterface.getAuthorizationUrl(requestToken, Permission.DELETE)
        println(s"Authorization URL: $authUrl")
        val tokenKey = StdIn.readLine()
        val accessToken = authInterface.getAccessToken(requestToken, new Verifier(tokenKey))
        val a = authInterface.checkToken(accessToken)
        authStore.store(a)
        a
    }
    RequestContext.getRequestContext.setAuth(auth)
    val photosets = flickr.getPhotosetsInterface
    val uploader = flickr.getUploader
    // Existing albums; used to skip directories that already have one.
    val all = photosets.getList(userId).getPhotosets.asScala
    loadFileNameToPhotoIds()
    bufferedWriter = new BufferedWriter(new FileWriter(new File(fileNameToPhotoIdPath), true))
    try {
      // Upload all albums (one per sub-directory, alphabetical order).
      rootDir.listFiles().sortBy(_.getName).foreach { file =>
        if (file.isDirectory && !all.exists(_.getTitle == file.getName)) {
          uploadAlbum(file, uploader, photosets)
        }
      }
    }
    finally {
      bufferedWriter.close()
    }
    // Print all albums:
    // println(photosets.getList(userId).getPhotosets.asScala.map(_.getTitle).mkString(","))
    // Create new album
    // val newAlbum = photosets.create("title", "description", "27549264665")
    // println(newAlbum.getUrl)
  }

  /**
   * Loads the (path, photoId) journal into fileNameToPhotoId. The file stores
   * alternating lines: path on one line, photo id on the next.
   * NOTE(review): assumes the journal always holds complete pairs; a truncated
   * final pair would store a null value — TODO confirm that is acceptable.
   */
  private def loadFileNameToPhotoIds(): Unit = {
    val file = new File(fileNameToPhotoIdPath)
    if (file.exists()) {
      val br = new BufferedReader(new FileReader(file))
      try {
        var counter = 0
        var k = ""
        do {
          k = br.readLine()
          if (k != null) {
            val v = br.readLine()
            fileNameToPhotoId.update(k, v)
            counter += 1
          }
        } while (k != null)
        println(s"$counter photo IDs read from file.")
      }
      finally {
        br.close()
      }
    }
  }

  /**
   * Uploads every JPEG in `directory` (skipping those already journaled) and,
   * if at least one photo id is available, creates an album named after the
   * directory containing them all.
   */
  private def uploadAlbum(directory: File,
                          uploader: Uploader,
                          photosets: PhotosetsInterface): Unit = {
    println(s"Uploading album ${directory.getName} ...")
    val fileFilter = new FilenameFilter() {
      override def accept(dir: File, name: String): Boolean = {
        val lc = name.toLowerCase
        lc.endsWith("jpg") || lc.endsWith("jpeg")
      }
    }
    // Upload all JPEGs
    // TODO: sort by time created
    val photoIds = directory.listFiles(fileFilter).toList.flatMap { photoFile =>
      fileNameToPhotoId.get(photoFile.getAbsolutePath) match {
        case Some(photoId) =>
          println(s"Photo already uploaded. Skipping. [${photoFile.getAbsolutePath}]")
          Some(photoId)
        case None =>
          val metadata = new UploadMetaData()
          metadata.setFilename(photoFile.getName)
          metadata.setFilemimetype("image/jpeg")
          metadata.setTitle(photoFile.getName)
          metadata.setPublicFlag(false)
          metadata.setFriendFlag(false)
          metadata.setFamilyFlag(true)
          metadata.setHidden(false)
          metadata.setSafetyLevel("SAFETYLEVEL_SAFE")
          metadata.setContentType("CONTENTTYPE_PHOTO")
          try {
            // Upload synchronously
            val photoId = uploader.upload(photoFile, metadata)
            println(s"Photo uploaded. [${photoFile.getName}, $photoId]")
            // Journal the pair immediately (and flush) so a crash mid-run
            // does not lose the record of this upload.
            bufferedWriter.write(photoFile.getAbsolutePath)
            bufferedWriter.write("\n")
            bufferedWriter.write(photoId)
            bufferedWriter.write("\n")
            bufferedWriter.flush()
            fileNameToPhotoId.update(photoFile.getAbsolutePath, photoId)
            Some(photoId)
          }
          catch {
            case ex: Exception =>
              // Best-effort: log and keep uploading the remaining photos.
              println(ex)
              None
          }
      }
    }
    if (photoIds.nonEmpty) {
      // Create album; Flickr requires a primary photo, so seed with the head.
      val newAlbum = photosets.create(directory.getName, "", photoIds.head)
      print(s"New album created ${newAlbum.getUrl}. Adding photos ... ")
      photoIds.tail.foreach { photoId =>
        try {
          photosets.addPhoto(newAlbum.getId, photoId)
        }
        catch {
          // Preserve the original exception as the cause (was previously dropped).
          case ex: Exception => throw new AppException(s"Cannot add photo to album! [${newAlbum.getId}, $photoId]", ex)
        }
      }
      println("done.")
    }
    else {
      println(s"No photos uploaded. Album not created.")
    }
  }
}
class AppException(message: String, cause: Throwable = null) extends RuntimeException(message, cause) | RadoBuransky/flickr-folder-uploader | src/main/scala/com/buransky/flickrFolderUploader/FlickrFolderUploaderApp.scala | Scala | apache-2.0 | 5,779 |
package net.categoricaldata.category
/**
 * A functor between two categories: maps objects of `source` to objects of
 * `target` and morphisms to morphisms. The functor laws (preservation of
 * identities and of composition) are not checked here.
 */
trait Functor {
  val source: Category
  val target: Category
  /** Apply the functor to an object of the source category. */
  final def apply(o: source.O): target.O = onObjects(o)
  // the dummy implicit argument is a hack to allow overloading of apply
  /** Apply the functor to a morphism of the source category. */
  final def apply(m: source.M)(implicit d: DummyImplicit): target.M = onMorphisms(m)
  def onObjects(o: source.O): target.O
  def onMorphisms(m: source.M): target.M
}
/**
 * A functor whose source and target category types are tracked statically via
 * the type parameters, refining the plain [[Functor]] members.
 */
trait ParametrizedFunctor[SC <: Category, TC <: Category] extends Functor {
  override val source: SC
  override val target: TC
}
object Functor {
  /**
   * Composes two functors, yielding g ∘ f.
   * The `asInstanceOf` casts are needed because the compiler cannot prove the
   * path-dependent equality f.target == g.source; see the note below on why it
   * is not even required at runtime.
   */
  def compose(f: Functor, g: Functor): ParametrizedFunctor[f.source.type, g.target.type] = new ParametrizedFunctor[f.source.type, g.target.type] {
    // It seems not a good idea to require f.target == g.source. Often we want to allow f.target < g.source, but that's very hard to check.
    // require(f.target == g.source)
    override val source: f.source.type = f.source
    override val target: g.target.type = g.target
    override def onObjects(o: f.source.O): g.target.O = g.onObjects(f.onObjects(o).asInstanceOf[g.source.O])
    override def onMorphisms(m: f.source.M): g.target.M = g.onMorphisms(f.onMorphisms(m).asInstanceOf[g.source.M])
  }
  /** Functor whose source category is finitely generated. */
  trait withFinitelyGeneratedSource extends withLocallyFinitelyGeneratedSource {
    override val source: FinitelyGeneratedCategory
  }
  /** Functor whose target category is finitely generated. */
  trait withFinitelyGeneratedTarget extends withLocallyFinitelyGeneratedTarget {
    override val target: FinitelyGeneratedCategory
  }
  /** Functor from a finitely presented category; can check its relations. */
  trait withFinitelyPresentedSource extends withFinitelyGeneratedSource { functor =>
    override val source: FinitelyPresentedCategory
    // Checks that the functor respects every relation of the presentation:
    // both sides of each relation must map to the same target morphism.
    def verifyRelations = {
      for (relation <- source.allRelations) {
        require(functor.onMorphisms(source.pathAsMorphism(relation._1)) == functor.onMorphisms(source.pathAsMorphism(relation._2)))
      }
    }
  }
  /** Functor into a finitely presented category. */
  trait withFinitelyPresentedTarget extends withFinitelyGeneratedTarget {
    override val target: FinitelyPresentedCategory
  }
  /**
   * Functor out of a locally finitely generated category: it suffices to give
   * the action on generators; onMorphisms is derived by mapping each generator
   * in a path and composing in the target.
   */
  trait withLocallyFinitelyGeneratedSource extends withSmallSource { functor =>
    override val source: LocallyFinitelyGeneratedCategory
    def onGenerators(g: source.G): target.M
    override def onMorphisms(m: source.M) = {
      val start = onObjects(source.source(m))
      val morphisms = for (g <- m.representative.morphisms) yield onGenerators(g)
      target.compose(start, morphisms)
    }
  }
  trait withLocallyFinitelyGeneratedTarget extends withSmallTarget {
    override val target: LocallyFinitelyGeneratedCategory
  }
  trait withSmallSource extends Functor {
    override val source: SmallCategory
  }
  trait withSmallTarget extends Functor {
    override val target: SmallCategory
  }
  /**
   * Stackable memoization: mix in *after* a concrete Functor implementation
   * ("abstract override") to cache onObjects/onMorphisms results.
   */
  trait MemoFunctor extends Functor {
    import net.tqft.toolkit.functions.Memo
    private[this] val memoOnObjects = Memo(super.onObjects _)
    private[this] val memoOnMorphisms = Memo(super.onMorphisms _)
    abstract override def onObjects(o: source.O) = memoOnObjects(o)
    abstract override def onMorphisms(o: source.M) = memoOnMorphisms(o)
  }
}
| JasonGross/categoricaldata | src/main/scala/net/categoricaldata/category/Functor.scala | Scala | mit | 3,036 |
package com.twitter.algebird.spark
import com.twitter.algebird.{MapAlgebra, Monoid, Semigroup}
import org.apache.spark._
import org.apache.spark.rdd._
import org.scalatest._
import scala.reflect.ClassTag
import org.scalatest.funsuite.AnyFunSuite
package test {
  // not needed in the algebird package, just testing the API
  import com.twitter.algebird.spark.ToAlgebird
  object Test {
    // Compile-time check that the `.algebird` enrichment resolves on an RDD
    // from outside the algebird package.
    def sum[T: Monoid: ClassTag](r: RDD[T]): T = r.algebird.sum
  }
}
/**
 * This test almost always times out on travis. Leaving at least a compilation test of using with spark
 *
 * All actual test cases are commented out below; only the helper methods are
 * kept so the Algebird-on-Spark API keeps compiling.
 */
class AlgebirdRDDTest extends AnyFunSuite with BeforeAndAfter {
  // Never initialized: the before{} setup is commented out, so the helpers
  // below would NPE if invoked. Intentional — this suite is compile-only.
  private var sc: SparkContext = _
  before {
    // val conf = new SparkConf()
    //   .setMaster(master)
    //   .setAppName(appName)
    // sc = new SparkContext(conf)
  }
  after {
    // try sc.stop()
    // catch {
    //   case t: Throwable => ()
    // }
  }
  // Why does scala.math.Equiv suck so much.
  // Lifts an Equiv[V] to Equiv[Option[V]]: Somes compare by eq, Nones are equal.
  implicit def optEq[V](implicit eq: Equiv[V]): Equiv[Option[V]] = Equiv.fromFunction[Option[V]] { (o1, o2) =>
    (o1, o2) match {
      case (Some(v1), Some(v2)) => eq.equiv(v1, v2)
      case (None, None)         => true
      case _                    => false
    }
  }
  def equiv[V](a: V, b: V)(implicit eq: Equiv[V]): Boolean = eq.equiv(a, b)
  def assertEq[V: Equiv](a: V, b: V): Unit = assert(equiv(a, b))
  // Checks that aggregating on the RDD agrees with aggregating the plain Seq.
  def aggregate[T: ClassTag, U: ClassTag, V: Equiv](s: Seq[T], agg: AlgebirdAggregator[T, U, V]): Unit =
    assertEq(sc.makeRDD(s).algebird.aggregate(agg), agg(s))
  // Checks per-key aggregation against MapAlgebra.sumByKey on the prepared values.
  def aggregateByKey[K: ClassTag, T: ClassTag, U: ClassTag, V: Equiv](
      s: Seq[(K, T)],
      agg: AlgebirdAggregator[T, U, V]
  ): Unit = {
    val resMap = sc.makeRDD(s).algebird.aggregateByKey[K, T, U, V](agg).collect.toMap
    implicit val sg = agg.semigroup
    val algMap = MapAlgebra.sumByKey(s.map { case (k, t) => k -> agg.prepare(t) }).mapValues(agg.present)
    s.map(_._1).toSet.foreach { k: K => assertEq(resMap.get(k), algMap.get(k)) }
  }
  // Checks sumOption on the RDD against Semigroup.sumOption on the Seq.
  def sumOption[T: ClassTag: Equiv: Semigroup](s: Seq[T]): Unit =
    assertEq(sc.makeRDD(s).algebird.sumOption, Semigroup.sumOption(s))
  // Checks sumByKey on the RDD against MapAlgebra.sumByKey on the Seq.
  def sumByKey[K: ClassTag, V: ClassTag: Semigroup: Equiv](s: Seq[(K, V)]): Unit = {
    val resMap = sc.makeRDD(s).algebird.sumByKey[K, V].collect.toMap
    val algMap = MapAlgebra.sumByKey(s)
    s.map(_._1).toSet.foreach { k: K => assertEq(resMap.get(k), algMap.get(k)) }
  }
  /**
   * These tests almost always timeout on Travis. Leaving the above to at least check compilation
   */
  // test("aggregate") {
  //   aggregate(0 to 1000, AlgebirdAggregator.fromSemigroup[Int])
  //   aggregate(0 to 1000, AlgebirdAggregator.min[Int])
  //   aggregate(0 to 1000, AlgebirdAggregator.sortedTake[Int](3))
  // }
  // test("sumOption") {
  //   sumOption(0 to 1000)
  //   sumOption((0 to 1000).map(Min(_)))
  //   sumOption((0 to 1000).map(x => (x, x % 3)))
  // }
  // test("aggregateByKey") {
  //   aggregateByKey((0 to 1000).map(k => (k % 3, k)), AlgebirdAggregator.fromSemigroup[Int])
  //   aggregateByKey((0 to 1000).map(k => (k % 3, k)), AlgebirdAggregator.min[Int])
  //   aggregateByKey((0 to 1000).map(k => (k % 3, k)), AlgebirdAggregator.sortedTake[Int](3))
  // }
  // test("sumByKey") {
  //   sumByKey((0 to 1000).map(k => (k % 3, k)))
  //   sumByKey((0 to 1000).map(k => (k % 3, Option(k))))
  //   sumByKey((0 to 1000).map(k => (k % 3, Min(k))))
  // }
}
| twitter/algebird | algebird-spark/src/test/scala/com/twitter/algebird/spark/AlgebirdRDDTests.scala | Scala | apache-2.0 | 3,410 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.runtime.java8
// Compiler-generated specialization stub for Java 8 lambda interop:
// a Function2 specialized as (Int, Double) => Double. The generic apply
// unboxes its arguments, delegates to the specialized method, and boxes
// the result. Do not edit by hand beyond comments.
@FunctionalInterface trait JFunction2$mcDID$sp extends Function2[Any, Any, Any] with Serializable {
  def apply$mcDID$sp(v1: Int, v2: Double): Double
  override def apply(v1: Any, v2: Any): Any = scala.runtime.BoxesRunTime.boxToDouble(apply$mcDID$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToDouble(v2)))
}
| lrytz/scala | src/library/scala/runtime/java8/JFunction2$mcDID$sp.scala | Scala | apache-2.0 | 661 |
package com.benjguin.nrt17.sparkjob1
import java.sql.Timestamp
import java.text.{DateFormat, SimpleDateFormat}
import collection.JavaConversions._
import org.apache.log4j.Logger
import org.apache.log4j.Level
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
import com.benjguin.nrt17.sparkjob1.proto.messages.TrackingPacket
import org.apache.spark.sql.Encoders
/** Entry point: quiets noisy framework loggers, then runs the streaming job. */
object Main {
  private val logger = Logger.getLogger(this.getClass)

  def main(args: Array[String]): Unit = {
    // Silence the chattiest frameworks; keep this application's logger at INFO.
    Seq("org", "akka", "kafka").foreach { name =>
      Logger.getLogger(name).setLevel(Level.WARN)
    }
    logger.setLevel(Level.INFO)

    try new SparkJob().runJob()
    catch {
      case ex: Exception => logger.error(ex.getMessage)
    }
  }
}
class SparkJob extends Serializable {
  // @transient + lazy so the enclosing Serializable class can be shipped to
  // executors without serializing the log4j logger.
  @transient lazy val logger = Logger.getLogger(this.getClass)
  logger.setLevel(Level.INFO)
  // NOTE(review): "sparkm1" is passed to master(); a spark://host:port URL is
  // usually expected here — confirm against the cluster configuration.
  val sparkSession =
    SparkSession.builder
      .master("sparkm1")
      .appName("nrt17sparkjob1")
      .getOrCreate()
  /**
   * Structured-streaming pipeline: reads protobuf-encoded TrackingPacket
   * records from Kafka topic "inputtopic", decodes them via a UDF, and writes
   * the decoded rows back to Kafka topic "debugtopic". Blocks until the
   * streaming query terminates.
   */
  def runJob() = {
    logger.info("Execution started with following configuration")
    //implicit statisticsRecordEncoder = Encoders.product[StatisticsRecord]
    // Deserializes the Kafka value bytes into a TrackingPacket protobuf object.
    // NOTE(review): returning a protobuf message from a UDF relies on Spark
    // being able to encode that type — confirm this works at runtime.
    val protobufDeserializerUDF = udf { bytes: Array[Byte] => TrackingPacket.parseFrom(bytes) }
    import sparkSession.implicits._
    val decodedRows = sparkSession.readStream
      .format("kafka")
      .option("subscribe", "inputtopic")
      .option("kafka.bootstrap.servers", "ks1:9092,ks2:9092,ks3:9092")
      .option("startingOffsets", "earliest")
      .load()
      .select(protobufDeserializerUDF($"value") as "decoded_value")
    /* Notes on previous code block:
    ---------------------------------
    .option("startingOffsets", "earliest")
    can be replaced by
    .option("startingOffsets", "latest")
    for instance. cf https://databricks.com/blog/2017/04/26/processing-data-in-apache-kafka-with-structured-streaming-in-apache-spark-2-2.html
    sources of inspiration:
    - http://stackoverflow.com/questions/43934180/how-to-deserialize-records-from-kafka-using-structured-streaming-in-java
    - https://databricks.com/blog/2017/04/26/processing-data-in-apache-kafka-with-structured-streaming-in-apache-spark-2-2.html
    */
    // Write the decoded stream back to Kafka; checkpointing to HDFS makes the
    // query restartable.
    val query = decodedRows
      .writeStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "ks1:9092,ks2:9092,ks3:9092")
      .option("topic", "debugtopic")
      .option("checkpointLocation", "/somehdfsfolder")
      .start()
    query.awaitTermination()
    sparkSession.stop()
  }
} | DXTEDStreamHack2017/nrt17 | containers/spark/master/code/src/main/scala/com/benjguin/nrt17/sparkjob1/Main.scala | Scala | apache-2.0 | 2,727 |
package service
import java.util.Date
import org.eclipse.jgit.api.Git
import util._
import _root_.util.ControlUtil._
import org.eclipse.jgit.treewalk.CanonicalTreeParser
import org.eclipse.jgit.lib._
import org.eclipse.jgit.dircache.DirCache
import org.eclipse.jgit.diff.{DiffEntry, DiffFormatter}
import java.io.ByteArrayInputStream
import org.eclipse.jgit.patch._
import org.eclipse.jgit.api.errors.PatchFormatException
import scala.collection.JavaConverters._
import service.RepositoryService.RepositoryInfo
object WikiService {
  /**
   * The model for wiki page.
   *
   * @param name the page name
   * @param content the page content
   * @param committer the last committer
   * @param time the last modified time
   * @param id the latest commit id
   */
  case class WikiPageInfo(name: String, content: String, committer: String, time: Date, id: String)
  /**
   * The model for wiki page history.
   *
   * @param name the page name
   * @param committer the committer the committer
   * @param message the commit message
   * @param date the commit date
   */
  case class WikiPageHistoryInfo(name: String, committer: String, message: String, date: Date)
  // Rewrites the repository clone URL to its companion wiki repository
  // (…/name.git -> …/name.wiki.git).
  // NOTE(review): the regex escaping below looks doubled by dataset
  // re-escaping; confirm against the original source.
  def httpUrl(repository: RepositoryInfo) = repository.httpUrl.replaceFirst("\\\\.git\\\\Z", ".wiki.git")
  // Same rewrite for the SSH URL, using the configured SSH port (or default).
  def sshUrl(repository: RepositoryInfo, settings: SystemSettingsService.SystemSettings, userName: String) =
    repository.sshUrl(settings.sshPort.getOrElse(SystemSettingsService.DefaultSshPort), userName).replaceFirst("\\\\.git\\\\Z", ".wiki.git")
}
/**
 * Git-backed wiki operations: each wiki is a bare git repository whose files
 * are Markdown pages (`<pageName>.md` on the `master` branch). All mutating
 * operations serialize on a per-wiki lock and commit directly via JGit's
 * in-core DirCache, so no working tree is needed.
 */
trait WikiService {
  import WikiService._

  /**
   * Creates the wiki repository for owner/repository if it does not exist yet
   * and seeds it with a default "Home" page.
   */
  def createWikiRepository(loginAccount: model.Account, owner: String, repository: String): Unit =
    LockUtil.lock(s"${owner}/${repository}/wiki"){
      defining(Directory.getWikiRepositoryDir(owner, repository)){ dir =>
        if(!dir.exists){
          JGitUtil.initRepository(dir)
          saveWikiPage(owner, repository, "Home", "Home", s"Welcome to the ${repository} wiki!!", loginAccount, "Initial Commit", None)
        }
      }
    }

  /**
   * Returns the wiki page.
   */
  def getWikiPage(owner: String, repository: String, pageName: String): Option[WikiPageInfo] = {
    using(Git.open(Directory.getWikiRepositoryDir(owner, repository))){ git =>
      if(!JGitUtil.isEmpty(git)){
        // Pages live as "<pageName>.md" at the root of master.
        JGitUtil.getFileList(git, "master", ".").find(_.name == pageName + ".md").map { file =>
          WikiPageInfo(file.name, StringUtil.convertFromByteArray(git.getRepository.open(file.id).getBytes),
            file.committer, file.time, file.commitId)
        }
      } else None
    }
  }

  /**
   * Returns the content of the specified file.
   */
  def getFileContent(owner: String, repository: String, path: String): Option[Array[Byte]] =
    using(Git.open(Directory.getWikiRepositoryDir(owner, repository))){ git =>
      if(!JGitUtil.isEmpty(git)){
        // Split path into parent directory and file name; "." means the root.
        val index = path.lastIndexOf('/')
        val parentPath = if(index < 0) "." else path.substring(0, index)
        val fileName = if(index < 0) path else path.substring(index + 1)
        JGitUtil.getFileList(git, "master", parentPath).find(_.name == fileName).map { file =>
          git.getRepository.open(file.id).getBytes
        }
      } else None
    }

  /**
   * Returns the list of wiki page names.
   */
  def getWikiPageList(owner: String, repository: String): List[String] = {
    using(Git.open(Directory.getWikiRepositoryDir(owner, repository))){ git =>
      JGitUtil.getFileList(git, "master", ".")
        .filter(_.name.endsWith(".md"))
        .map(_.name.stripSuffix(".md"))
        .sortBy(x => x)
    }
  }

  /**
   * Reverts specified changes.
   *
   * Builds the diff between `from` and `to` (optionally restricted to one
   * page), turns it into a textual patch, applies it to the current page
   * contents, and commits the result. Returns false if anything fails.
   */
  def revertWikiPage(owner: String, repository: String, from: String, to: String,
                     committer: model.Account, pageName: Option[String]): Boolean = {
    // operation is "ADD" (write source as filePath) or "DELETE" (remove filePath).
    case class RevertInfo(operation: String, filePath: String, source: String)
    try {
      LockUtil.lock(s"${owner}/${repository}/wiki"){
        using(Git.open(Directory.getWikiRepositoryDir(owner, repository))){ git =>
          val reader = git.getRepository.newObjectReader
          val oldTreeIter = new CanonicalTreeParser
          oldTreeIter.reset(reader, git.getRepository.resolve(from + "^{tree}"))
          val newTreeIter = new CanonicalTreeParser
          newTreeIter.reset(reader, git.getRepository.resolve(to + "^{tree}"))
          // NOTE(review): the iterators appear swapped on purpose (the `from`
          // tree is passed as the *new* tree), which yields the reverse diff
          // used as the revert patch — confirm intent before changing.
          val diffs = git.diff.setNewTree(oldTreeIter).setOldTree(newTreeIter).call.asScala.filter { diff =>
            pageName match {
              case Some(x) => diff.getNewPath == x + ".md"
              case None    => true
            }
          }
          // Render the diffs as a unified textual patch.
          val patch = using(new java.io.ByteArrayOutputStream()){ out =>
            val formatter = new DiffFormatter(out)
            formatter.setRepository(git.getRepository)
            formatter.format(diffs.asJava)
            new String(out.toByteArray, "UTF-8")
          }
          val p = new Patch()
          p.parse(new ByteArrayInputStream(patch.getBytes("UTF-8")))
          if(!p.getErrors.isEmpty){
            throw new PatchFormatException(p.getErrors())
          }
          // Translate each file header of the patch into ADD/DELETE operations.
          val revertInfo = (p.getFiles.asScala.map { fh =>
            fh.getChangeType match {
              case DiffEntry.ChangeType.MODIFY => {
                val source = getWikiPage(owner, repository, fh.getNewPath.stripSuffix(".md")).map(_.content).getOrElse("")
                val applied = PatchUtil.apply(source, patch, fh)
                if(applied != null){
                  Seq(RevertInfo("ADD", fh.getNewPath, applied))
                } else Nil
              }
              case DiffEntry.ChangeType.ADD => {
                val applied = PatchUtil.apply("", patch, fh)
                if(applied != null){
                  Seq(RevertInfo("ADD", fh.getNewPath, applied))
                } else Nil
              }
              case DiffEntry.ChangeType.DELETE => {
                Seq(RevertInfo("DELETE", fh.getNewPath, ""))
              }
              case DiffEntry.ChangeType.RENAME => {
                val applied = PatchUtil.apply("", patch, fh)
                if(applied != null){
                  Seq(RevertInfo("DELETE", fh.getOldPath, ""), RevertInfo("ADD", fh.getNewPath, applied))
                } else {
                  Seq(RevertInfo("DELETE", fh.getOldPath, ""))
                }
              }
              case _ => Nil
            }
          }).flatten
          if(revertInfo.nonEmpty){
            // Rebuild the tree: keep every path not touched by the revert,
            // then write the ADDed contents, and commit on top of HEAD.
            val builder = DirCache.newInCore.builder()
            val inserter = git.getRepository.newObjectInserter()
            val headId = git.getRepository.resolve(Constants.HEAD + "^{commit}")
            JGitUtil.processTree(git, headId){ (path, tree) =>
              if(revertInfo.find(x => x.filePath == path).isEmpty){
                builder.add(JGitUtil.createDirCacheEntry(path, tree.getEntryFileMode, tree.getEntryObjectId))
              }
            }
            revertInfo.filter(_.operation == "ADD").foreach { x =>
              builder.add(JGitUtil.createDirCacheEntry(x.filePath, FileMode.REGULAR_FILE, inserter.insert(Constants.OBJ_BLOB, x.source.getBytes("UTF-8"))))
            }
            builder.finish()
            JGitUtil.createNewCommit(git, inserter, headId, builder.getDirCache.writeTree(inserter), committer.fullName, committer.mailAddress,
              pageName match {
                case Some(x) => s"Revert ${from} ... ${to} on ${x}"
                case None    => s"Revert ${from} ... ${to}"
              })
          }
        }
      }
      true
    } catch {
      // Best-effort: log the failure and report it via the Boolean result.
      case e: Exception => {
        e.printStackTrace()
        false
      }
    }
  }

  /**
   * Save the wiki page.
   *
   * Handles create, update and rename (currentPageName != newPageName) in one
   * commit; commits only when something actually changed. Returns the new
   * commit id, or None when the content was unchanged.
   */
  def saveWikiPage(owner: String, repository: String, currentPageName: String, newPageName: String,
                   content: String, committer: model.Account, message: String, currentId: Option[String]): Option[String] = {
    LockUtil.lock(s"${owner}/${repository}/wiki"){
      using(Git.open(Directory.getWikiRepositoryDir(owner, repository))){ git =>
        val builder = DirCache.newInCore.builder()
        val inserter = git.getRepository.newObjectInserter()
        val headId = git.getRepository.resolve(Constants.HEAD + "^{commit}")
        var created = true
        var updated = false
        var removed = false
        // headId is null for a freshly-initialized (empty) repository.
        if(headId != null){
          JGitUtil.processTree(git, headId){ (path, tree) =>
            if(path == currentPageName + ".md" && currentPageName != newPageName){
              // Rename: drop the old file.
              removed = true
            } else if(path != newPageName + ".md"){
              // Unrelated file: carry it over unchanged.
              builder.add(JGitUtil.createDirCacheEntry(path, tree.getEntryFileMode, tree.getEntryObjectId))
            } else {
              // Target page already exists: detect whether content changed.
              created = false
              updated = JGitUtil.getContentFromId(git, tree.getEntryObjectId, true).map(new String(_, "UTF-8") != content).getOrElse(false)
            }
          }
        }
        if(created || updated || removed){
          builder.add(JGitUtil.createDirCacheEntry(newPageName + ".md", FileMode.REGULAR_FILE, inserter.insert(Constants.OBJ_BLOB, content.getBytes("UTF-8"))))
          builder.finish()
          // Empty message: synthesize one from the kind of change.
          val newHeadId = JGitUtil.createNewCommit(git, inserter, headId, builder.getDirCache.writeTree(inserter), committer.fullName, committer.mailAddress,
            if(message.trim.length == 0) {
              if(removed){
                s"Rename ${currentPageName} to ${newPageName}"
              } else if(created){
                s"Created ${newPageName}"
              } else {
                s"Updated ${newPageName}"
              }
            } else {
              message
            })
          Some(newHeadId.getName)
        } else None
      }
    }
  }

  /**
   * Delete the wiki page.
   *
   * Rebuilds the tree without "<pageName>.md" and commits; does nothing if
   * the page does not exist.
   */
  def deleteWikiPage(owner: String, repository: String, pageName: String,
                     committer: String, mailAddress: String, message: String): Unit = {
    LockUtil.lock(s"${owner}/${repository}/wiki"){
      using(Git.open(Directory.getWikiRepositoryDir(owner, repository))){ git =>
        val builder = DirCache.newInCore.builder()
        val inserter = git.getRepository.newObjectInserter()
        val headId = git.getRepository.resolve(Constants.HEAD + "^{commit}")
        var removed = false
        JGitUtil.processTree(git, headId){ (path, tree) =>
          if(path != pageName + ".md"){
            builder.add(JGitUtil.createDirCacheEntry(path, tree.getEntryFileMode, tree.getEntryObjectId))
          } else {
            removed = true
          }
        }
        if(removed){
          builder.finish()
          JGitUtil.createNewCommit(git, inserter, headId, builder.getDirCache.writeTree(inserter), committer, mailAddress, message)
        }
      }
    }
  }
}
| campolake/gitbucketV2.1 | src/main/scala/service/WikiService.scala | Scala | apache-2.0 | 10,752 |
package tests
/** Global configuration for the test suite: solver binary and SyGuS fixtures. */
object Global {
  // This works for a solver binary in the path; if you want to point to a
  // specific file on a disk you can provide here a path to it.
  val solverPath: String = "z3"
  val solverType: String = "z3" // Possible values: z3, cvc4, other
  val solverArgs: Option[String] = None
  val moreSolverArgs: String = "" // smt.string_solver=z3str3
  val solverInteractive: String = "true"

  /**
   * Builds the command-line fragment configuring the solver.
   * BUGFIX: the Option was previously interpolated directly
   * (s"... ${Global.solverArgs} ..."), which would have emitted the literal
   * text "Some(...)" instead of the argument value.
   */
  def solverConfig: String = {
    s" --solverPath ${Global.solverPath} " +
    s" --solverType ${Global.solverType} " +
    s" --solverInteractive ${Global.solverInteractive}" +
    Global.solverArgs.map(args => s" --solverArgs $args ").getOrElse("") +
    (if (Global.moreSolverArgs != "") s" --moreSolverArgs ${Global.moreSolverArgs} " else "")
  }

  // SyGuS benchmark: synthesize f(name) returning the first space-separated
  // token of the input (specified against the ithSplit helper).
  val specFirstname: String =
    """(set-logic SLIA)
      |(synth-fun f ((name String)) String ((Start String (name))))
      |
      |;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
      |
      |(define-fun ithSplit ((s String) (delimiter String) (i Int)) String
      | (let ((firstSpacePos Int (str.indexof s delimiter 0)))
      | (let ((SecondSpacePos Int (str.indexof s delimiter (+ firstSpacePos 1))))
      | (ite (= i 0)
      | (ite (= firstSpacePos (- 1))
      | s ; Return the whole string, there was no space
      | (str.substr s 0 firstSpacePos))
      | (ite (= i 1)
      | (ite (= firstSpacePos (- 1))
      | "" ; There was no space, so index 1 is out of bounds
      | (ite (= SecondSpacePos (- 1))
      | (str.substr s (+ firstSpacePos 1) (str.len s)) ; till the end of the String
      | (str.substr s (+ firstSpacePos 1) (- (- SecondSpacePos 1) firstSpacePos)) ; to the next space; second arg of str.substr is shift, not position
      | )
      | )
      | "" ; Unhandled values of i
      | )
      | )
      | )
      | )
      |)
      |
      |;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
      |
      |; (constraint (= (f "Nancy FreeHafer") "Nancy"))
      |(declare-var s String)
      |(constraint (=> (and (distinct (str.indexof s " " 0) (- 1)) (>= (str.len s) 3) (distinct (str.at s 0) " ") (distinct (str.at s (- (str.len s) 1)) " "))
      | (= (f s) (ithSplit s " " 0) )))
      |(check-synth)""".stripMargin
}
| kkrawiec/CDGP | src/test/scala/Global.scala | Scala | mit | 2,517 |
Subsets and Splits
Filtered Scala Code Snippets
This query filters the dataset down to a small sample of Scala code snippets matching specific criteria, giving a quick overview of the dataset's contents.