code
stringlengths
5
1M
repo_name
stringlengths
5
109
path
stringlengths
6
208
language
stringclasses
1 value
license
stringclasses
15 values
size
int64
5
1M
package looty.model

import scala.scalajs.js

//////////////////////////////////////////////////////////////
// Copyright (c) 2013 Ben Jackman, Jeff Gomberg
// All Rights Reserved
// please contact ben@jackman.biz or jeff@cgtanalytics.com
// for licensing inquiries
// Created by bjackman @ 12/14/13 6:22 PM
//////////////////////////////////////////////////////////////

/**
 * Registry of item-valuation rules. Each rule inspects a [[ComputedItem]] and,
 * when its predicates all hold, contributes a [[HighScore]] (a reason plus a
 * numeric score). [[apply]] runs every registered rule against an item and sums
 * the matching scores. Rules are registered eagerly in the `locally` block
 * below during object initialization, before `all` is materialized.
 */
object HighScorer {
  // Mutable registry populated once during initialization; frozen into `all` below.
  private var _all = new js.Array[HighScorer]()

  locally {
    // Registers a single scorer in the mutable registry.
    def add(s: HighScorer) { _all.push(s) }
    // Wraps a raw ComputedItem => Option[HighScore] function as a HighScorer and registers it.
    def scorer(reason: String)(f: ComputedItem => Option[HighScore]) = {
      add {
        new HighScorer {
          def apply(i: ComputedItem) = f(i)
        }
      }
    }
    // Registers a rule that awards `score` when ALL predicates `fs` hold for the item.
    def fscore(reason: String)(fs: (ComputedItem => Boolean)*)(score: Int) {
      scorer(reason) { i =>
        if (fs.forall(f => f(i))) Some(HighScore(List(reason), score)) else None
      }
    }
    // Convenience: a 1-point rule.
    def fscore1(reason: String)(fs: (ComputedItem => Boolean)*) {
      fscore(reason)(fs: _*)(1)
    }
    // Convenience: a 100-point rule (used for "never vendor" signals like 5-links).
    def fscore100(reason: String)(fs: (ComputedItem => Boolean)*) {
      fscore(reason)(fs: _*)(100)
    }

    //Based on https://www.reddit.com/r/pathofexile/comments/56rtmk/the_is_this_item_worth_something_guide/
    //Might also implement some rules from https://www.reddit.com/r/pathofexile/comments/56mbwv/how_to_identify_a_good_weapon_new_player_needs/d8klcy0/

    //Body Armour
    //75+ Life (if armour or evasion base)
    fscore1("75+ max life")(_.slots.isChest, _.plusTo.lifeAndManaWithStrInt.life >= 75, _.properties.energyShield == 0)
    //575+ Total Energy Shield
    fscore1("575+ energy shield")(_.slots.isChest, _.properties.energyShield >= 575)
    //350+ Total Energy Shield AND 70+ Life (if energy shield base or energy shield hybrid)
    fscore1("350+ ES, 70+ max life")(_.slots.isChest, _.properties.energyShield >= 350, _.plusTo.lifeAndManaWithStrInt.life >= 70)
    fscore1("40+ STR armour base")(_.slots.isChest, _.properties.armour > 0, _.properties.evasionRating == 0, _.properties.energyShield == 0, _.plusTo.attribute.strength >= 40)
    fscore1("40+ INT ES base")(_.slots.isChest, _.properties.armour == 0, _.properties.evasionRating == 0, _.properties.energyShield > 0, _.plusTo.attribute.intelligence >= 40)
    fscore1("80+ total res")(_.slots.isChest, _.plusTo.totalResistance >= 80)

    //Helmet
    fscore1("65+ max life")(_.slots.isHelmet, _.plusTo.lifeAndManaWithStrInt.life >= 65, _.properties.energyShield == 0)
    fscore1("300+ energy shield")(_.slots.isHelmet, _.properties.energyShield >= 300)
    // NOTE(review): reason string says "60+ max life" but the code requires >= 65 — confirm intended threshold.
    fscore1("200+ ES, 60+ max life")(_.slots.isHelmet, _.properties.energyShield >= 200, _.plusTo.lifeAndManaWithStrInt.life >= 65)
    //40+ Intelligence on Armour or Evasion base
    fscore1("40+ INT armour or evasion base")(i => i.slots.isHelmet && (i.properties.armour > 0 || i.properties.evasionRating > 0) && i.properties.energyShield == 0 && i.plusTo.attribute.intelligence >= 40)
    //300+ Accuracy
    fscore1("300+ accuracy total")(_.slots.isHelmet, _.plusTo.accuracyRatingWithDex >= 300)
    //80+ total resistances
    fscore1("80+ total res")(_.slots.isHelmet, _.plusTo.totalResistance >= 80)

    //Boots
    //20+ % Movement Speed (Required or they are very hard to sell)
    fscore1("20+ movement speed")(_.slots.isBoots, _.increased.movementSpeed >= 20)
    //65+ Life (if armour or evasion base, if no life it's 90% likely to be vendor trash)
    fscore1("65+ max life")(_.slots.isBoots, _.plusTo.lifeAndManaWithStrInt.life >= 65, _.properties.energyShield == 0)
    //130+ Total Energy Shield
    fscore1("130+ energy shield")(_.slots.isBoots, _.properties.energyShield >= 130)
    //90+ Total Energy Shield AND 65+ Life (if energy shield base or energy shield hybrid)
    fscore1("90+ ES, 65+ max life")(_.slots.isBoots, _.properties.energyShield >= 90, _.plusTo.lifeAndManaWithStrInt.life >= 65)
    //70+% Total Resistance
    fscore1("70+ total res")(_.slots.isBoots, _.plusTo.totalResistance >= 70)
    //40+ Strength
    fscore1("40+ STR")(_.slots.isBoots, _.plusTo.attribute.strength >= 40)
    //40+ Intelligence
    fscore1("40+ INT")(_.slots.isBoots, _.plusTo.attribute.intelligence >= 40)

    //Gloves
    //65+ Life (if armour or evasion base)
    fscore1("65+ max life")(_.slots.isGloves, _.plusTo.lifeAndManaWithStrInt.life >= 65, _.properties.energyShield == 0)
    //150+ Total Energy Shield
    fscore1("150+ energy shield")(_.slots.isGloves, _.properties.energyShield >= 150)
    //100+ Total Energy Shield AND 65+ Life (if energy shield base or energy shield hybrid)
    fscore1("100+ ES, 65+ max life")(_.slots.isGloves, _.properties.energyShield >= 100, _.plusTo.lifeAndManaWithStrInt.life >= 65)
    //80+% Total Resistance
    fscore1("80+ total res")(_.slots.isGloves, _.plusTo.totalResistance >= 80)
    //300+ Accuracy
    fscore1("300+ accuracy total")(_.slots.isGloves, _.plusTo.accuracyRatingWithDex >= 300)
    //10+% Attack speed
    fscore1("10+% attack speed")(_.slots.isGloves, _.increased.attackSpeed >= 10)
    //40+ Dexterity (if Energy Shield or Armour gloves)
    fscore1("40+ dex armour or ES base")(i => i.slots.isGloves && (i.properties.armour > 0 || i.properties.energyShield > 0) && i.plusTo.attribute.dexterity >= 40)

    //Shield
    //80+ Life (if armor or evasion base)
    fscore1("80+ life armour or evasion base")(i => i.slots.isShield && (i.properties.armour > 0 || i.properties.evasionRating > 0) && i.properties.energyShield == 0 && i.plusTo.lifeAndManaWithStrInt.life >= 80)
    //350+ Total Energy Shield
    fscore1("350+ energy shield")(_.slots.isShield, _.properties.energyShield >= 350)
    //280+ Total Energy Shield AND 80+ Life (if energy shield base or energy shield hybrid)
    // NOTE(review): guide comment above says 80+ Life but the code requires >= 65 — confirm intended threshold.
    fscore1("280+ ES, 65+ max life")(_.slots.isShield, _.properties.energyShield >= 280, _.plusTo.lifeAndManaWithStrInt.life >= 65)
    //100+% Total Resistance
    fscore1("100+ total res")(_.slots.isShield, _.plusTo.totalResistance >= 100)
    //35+ Strength (if armour base)
    fscore1("35+ str armour base")(_.slots.isShield, _.properties.armour > 0, _.plusTo.attribute.strength >= 35)
    //35+ Intelligence (if energy shield base)
    fscore1("35+ int ES base")(_.slots.isShield, _.properties.energyShield > 0, _.plusTo.attribute.intelligence >= 35)
    //55+% Spell Damage
    fscore1("55%+ Increased Spell Dmg")(_.slots.isShield, _.increased.spellDamage >= 55)
    //80+% Spell Critical Strike Chance
    fscore1("80%+ Increased Spell Crit chance")(_.slots.isShield, _.increased.criticalStrikeChanceForSpells >= 80)

    /*
    //Sword / Axe / Mace / bow
    if (i.slots.isWeapon && (i.properties.weaponType.isSword || i.properties.weaponType.isAxe || (i.properties.weaponType.isMace && i.increased.elementalDamage == 0) || i.properties.weaponType.isBow)) {
      //170+% Physical Damage
      fscore1("170+ Increased Phys damage")(_.increased.damage.physical >= 170)
      //20+% Attack Speed
      fscore1("Increased Attack Speed")(_.increased.attackSpeed > 20)
      //flat damage
      if (i.slots.is1H) {
        //xx to 70+ Fire/Cold or 120+ Lightning Damage to Attacks (needs at least 2)
        //elemental (any 2 of 3 elements)
        fscore1("elemental damage 1h"){ i =>
          (i.damages.fire.max >= 70 && i.damages.cold.max >= 70) ||
          (i.damages.fire.max >= 70 && i.damages.lightning.max >= 120) ||
          (i.damages.cold.max >= 70 && i.damages.lightning.max >= 120)
        }
        // elemental (1 point more for high all ele)
        fscore1("tri-ele damage 1h")(i => (i.damages.fire.max >= 70 && i.damages.cold.max >= 70 && i.damages.lightning.max >= 120))
        //physical
        //xx-33 Physical Damage
        fscore1("additional phys damage 1h")(_.damages.physical.max >= 33)
      }
      //flat damage
      if (i.slots.is2H) {
        //xx to 100+ Fire/Cold or 190+ Lightning Damage to Attacks (if two-handed/bow, needs at least 2)
        //elemental (any 2 of 3 elements)
        fscore1("elemental damage 2h"){ i =>
          (i.damages.fire.max >= 100 && i.damages.cold.max >= 100) ||
          (i.damages.fire.max >= 100 && i.damages.lightning.max >= 190) ||
          (i.damages.cold.max >= 100 && i.damages.lightning.max >= 190)
        }
        // elemental (1 point more for high all ele)
        fscore1("tri-ele damage 2h")(i => (i.damages.fire.max >= 70 && i.damages.cold.max >= 70 && i.damages.lightning.max >= 120))
        //physical
        //xx-50 Physical Damage
        fscore1("additional phys damage 2h")(_.damages.physical.max >= 50)
      }
      if (i.properties.weaponType.isBow) {
        //30+% Critical Strike Chance (if Bow)
        fscore1("30+% Critical Strike Chance")(_.increased.criticalStrikeChance >= 30)
        //30+% Critical Strike Multiplier (if Bow)
        fscore1("30+% Critical Strike Multi")(_.increased.globalCriticalStrikeMultiplier >= 30)
        //+2 to Total Socketed bow gems
        fscore1("+2 to bow gems")(_.gemLevel.bow >= 2)
      }
    }
    */

    /*
    //Dagger / Wands / Sceptre
    if (i => i.slots.isWeapon && (i.properties.weaponType.isDagger || i.properties.weaponType.isWand || (i.properties.weaponType.isMace && i.increased.elementalDamage > 0) || i.properties.weaponType.isBow)) {
    -- Caster Dagger/Wand/Sceptre
    90%+ Total Elemental Spell Damage
    130%+ Total Spell Critical strike Chance
    xx to 50+ Fire/Cold or 90+ Lightning Damage to Spells
    30+% Critical Strike Multiplier
    -- Attack Dagger/Wand
    170+% Physical Damage
    xx-33+ Physical Damage
    20+% Attack Speed (if Dagger)
    10+% Attack Speed (if Wand)
    30+% Critical Strike Chance
    30+% Critical Strike Multiplier
    xx to 70+ Fire/Cold or 120+ Lightning Damage to Attacks
    }
    */

    // Dagger (Spell)
    //90%+ Total Elemental Spell Damage
    fscore1("90%+ Total Elemental Spell Damage")(_.slots.isWeapon, _.properties.weaponType.isDagger, _.increasedSpell.elemental >= 90)
    //130%+ Total Spell Critical strike Chance
    fscore1("130%+ Increased Spell Crit chance"){ i =>
      i.slots.isWeapon && i.properties.weaponType.isDagger &&
        i.increased.globalCriticalStrikeChance + i.increased.criticalStrikeChanceForSpells >= 130
    }
    //xx to 50+ Fire/Cold or 90+ Lightning Damage to Spells
    fscore1("50+ Fire or Cold dmg to Spells"){ i =>
      i.slots.isWeapon && i.properties.weaponType.isDagger &&
        ( i.addDamagesToSpells.fire.max >= 50 || i.addDamagesToSpells.cold.max >= 50 )}
    fscore1("90+ Lightning dmg to Spells")(_.slots.isWeapon, _.properties.weaponType.isDagger, _.addDamagesToSpells.lightning.max >= 90)
    //30+% Critical Strike Multiplier
    fscore1("30%+ Increased Crit Multi")(_.slots.isWeapon, _.properties.weaponType.isDagger, _.increased.globalCriticalStrikeMultiplier >= 30)

    // Wands (Spell)
    fscore1("90%+ Total Elemental Spell Damage")(_.slots.isWeapon, _.properties.weaponType.isWand, _.increasedSpell.elemental >= 90)
    //130%+ Total Spell Critical strike Chance
    fscore1("130%+ Increased Spell Crit chance"){ i =>
      i.slots.isWeapon && i.properties.weaponType.isWand &&
        i.increased.globalCriticalStrikeChance + i.increased.criticalStrikeChanceForSpells >= 130
    }
    //xx to 50+ Fire/Cold or 90+ Lightning Damage to Spells
    fscore1("50+ Fire or Cold dmg to Spells"){ i =>
      i.slots.isWeapon && i.properties.weaponType.isWand &&
        ( i.addDamagesToSpells.fire.max >= 50 || i.addDamagesToSpells.cold.max >= 50 )}
    fscore1("90+ Lightning dmg to Spells")(_.slots.isWeapon, _.properties.weaponType.isWand, _.addDamagesToSpells.lightning.max >= 90)
    //30+% Critical Strike Multiplier
    fscore1("30%+ Increased Crit Multi")(_.slots.isWeapon, _.properties.weaponType.isWand, _.increased.globalCriticalStrikeMultiplier >= 30)

    // Sceptres (Spell)
    // Sceptres are modeled as maces with implicit increased elemental damage, hence the extra predicate.
    fscore1("90%+ Total Elemental Spell Damage")(_.slots.isWeapon, _.properties.weaponType.isMace, _.increased.elementalDamage > 0, _.increasedSpell.elemental >= 90)
    //130%+ Total Spell Critical strike Chance
    fscore1("130%+ Increased Spell Crit chance"){ i =>
      i.slots.isWeapon && i.properties.weaponType.isMace && i.increased.elementalDamage > 0 &&
        i.increased.globalCriticalStrikeChance + i.increased.criticalStrikeChanceForSpells >= 130
    }
    //xx to 50+ Fire/Cold or 90+ Lightning Damage to Spells
    fscore1("50+ Fire or Cold dmg to Spells"){ i =>
      i.slots.isWeapon && i.properties.weaponType.isMace && i.increased.elementalDamage > 0 &&
        ( i.addDamagesToSpells.fire.max >= 50 || i.addDamagesToSpells.cold.max >= 50 )}
    fscore1("90+ Lightning dmg to Spells")(_.slots.isWeapon, _.properties.weaponType.isMace, _.increased.elementalDamage > 0, _.addDamagesToSpells.lightning.max >= 90)
    //30+% Critical Strike Multiplier
    fscore1("30%+ Increased Crit Multi")(_.slots.isWeapon, _.properties.weaponType.isMace, _.increased.elementalDamage > 0, _.increased.globalCriticalStrikeMultiplier >= 30)

    // Dagger (Attack)
    // Wands (Attack)

    /*
    //Staff
    if (_.slots.isWeapon, _.properties.weaponType.isStaff) {
    +1 to Socketed gems AND +2 to Socketed (ele) gems
    xx to 70+ Fire/Cold or 150+ Lightning Damage to Spells
    160+% Total Elemental Spell Damage
    }
    */

    /*Jewel:
    % Life
    % Energy Shield
    % Cast Speed
    % Critical Strike Multiplier
    2 or more Attack Speed compatible rolls
    2 or more compatible Damage rolls*/

    //Belt
    //70+ Life (if armour or evasion base)
    fscore1("70+ max life")(_.slots.isBelt, _.plusTo.lifeAndManaWithStrInt.life >= 70, _.properties.energyShield == 0)
    //35+ Strength
    fscore1("35+ str on belt")(_.slots.isBelt, _.plusTo.attribute.strength >= 35)
    //280+ Armour
    fscore1("280+ armour belt")(_.slots.isBelt, _.properties.armour >= 280)
    //45+ Energy Shield
    fscore1("45+ energy shield")(_.slots.isBelt, _.properties.energyShield >= 45)
    //30+ Energy Shield, 40+ Life
    fscore1("30 ES 40 life hybrid belt")(_.slots.isBelt, _.properties.energyShield >= 30, _.plusTo.lifeAndManaWithStrInt.life >= 40)
    //70+% Total Resistance
    fscore1("70+ total res")(_.slots.isBelt, _.plusTo.totalResistance >= 70)
    //30+% Weapon Elemental Damage
    fscore1("+Weapon Elemental Damage")(_.slots.isBelt, _.increased.elementalDamageWithWeapons >= 30)
    //% Reduced Flask Charges Used
    fscore1("Reduced Flask Charges Used")(_.slots.isBelt, _.flask.reduced.flaskChargesUsed > 0)
    //% Increased Flask Charges Gained
    fscore1("Increased Flask Charges Gained")(_.slots.isBelt, _.flask.increased.chargesGained > 0)
    //% Flask Effect Duration
    fscore1("Increased Flask Effect Duration")(_.slots.isBelt, _.flask.increased.effectDuration > 0)

    //Ring
    //55+ Life
    fscore1("55+ max life")(_.slots.isRing, _.plusTo.lifeAndManaWithStrInt.life >= 55)
    //50+ Energy Shield (if Moonstone Ring base)
    fscore1("50+ energy shield")(_.slots.isRing, _.properties.energyShield >= 50)
    //xx-11+ Physical Damage to Attacks
    fscore1("xx-11+ Physical Damage to Attacks")(_.slots.isRing, _.damages.physical.max >= 11)
    //30+% Weapon Elemental Damage
    fscore1("+Weapon Elemental Damage")(_.slots.isRing, _.increased.elementalDamageWithWeapons >= 30)
    //40+% Increased Rarity
    fscore1("40+% Increased Rarity")(_.slots.isRing, _.increased.rarityOfItemsFound >= 40)
    //80+% Total Resistance
    fscore1("80+ total res")(_.slots.isRing, _.plusTo.totalResistance >= 80)
    //50+% Mana Regeneration
    fscore1("50+% Mana Regeneration")(_.slots.isRing, _.increased.manaRegenerationRate >= 50)
    //250+ Accuracy Rating
    fscore1("250+ accuracy total")(_.slots.isRing, _.plusTo.accuracyRatingWithDex >= 250)
    //75+ Total Attributes
    // NOTE(review): reason string says 75+ and "Total", but the code checks any single attribute >= 80 — confirm intended rule.
    fscore1("75+ Total Attributes")(_.slots.isRing, _.plusTo.attribute.all.exists(_ >= 80))

    //Amulet
    //55+ Life
    fscore1("55+ max life")(_.slots.isAmulet, _.plusTo.lifeAndManaWithStrInt.life >= 55)
    //xx-11 Physical Damage to Attacks
    fscore1("xx-11+ Physical Damage to Attacks")(_.slots.isAmulet, _.damages.physical.max >= 11)
    //30+% Weapon Elemental Damage
    fscore1("+Weapon Elemental Damage")(_.slots.isAmulet, _.increased.elementalDamageWithWeapons >= 30)
    //40+% Increased Rarity
    fscore1("40+% Increased Rarity")(_.slots.isAmulet, _.increased.rarityOfItemsFound >= 40)
    //90+% Total Resistance
    fscore1("90+ total res")(_.slots.isAmulet, _.plusTo.totalResistance >= 90)
    //65+% Mana Regeneration
    fscore1("65+% Mana Regeneration")(_.slots.isAmulet, _.increased.manaRegenerationRate >= 65)
    //250+ Accuracy Rating
    fscore1("250+ accuracy total")(_.slots.isAmulet, _.plusTo.accuracyRatingWithDex >= 250)
    //70+ Total of any Attribute
    fscore1("70+ any attribute"){i =>
      i.slots.isAmulet &&
        (i.plusTo.attribute.strength >= 70 ||
          i.plusTo.attribute.intelligence >= 70 ||
          i.plusTo.attribute.dexterity >= 70 )}
    //30+% Critical Strike Multiplier
    fscore1("30+% Critical Strike Multi")(_.slots.isAmulet, _.increased.globalCriticalStrikeMultiplier >= 30)
    //30+% Critical Strike Chance
    fscore1("30+% Critical Strike Chance")(_.slots.isAmulet, _.increased.criticalStrikeChance >= 30)
    //30+% Total Elemental Spell Damage
    //15+% Energy Shield
    fscore1("15+% Energy Shield")(_.slots.isAmulet, _.increased.energyShield >= 15)

    /*
    //Quiver
    if (_.slots.isQuiver) {
    75+ Life
    30+% Weapon Elemental Damage
    30+% Critical Strike Multiplier
    30+% Critical Strike Chance
    70+% Total Resistance
    }*/

    //5L+ are an auto +100, implying "never vendor these."
    fscore100("5+ Linked Sockets")(_.maxLinks > 4)
    fscore1("4 Linked Sockets or 5+ sockets")(i => i.maxLinks == 4 || i.item.sockets.toOption.exists(_.size >= 5))

    /*
    fscore1("60+ max Life")(_.plusTo.lifeAndManaWithStrInt.life >= 60)
    fscore1("60+ max Mana")(_.plusTo.lifeAndManaWithStrInt.mana >= 40)
    fscore1("60%+ armour")(_.increased.armour >= 60)
    fscore1("60%+ evasion")(_.increased.evasion >= 60)
    fscore1("60%+ energyShield")(_.increased.energyShield >= 60)
    fscore1("200+ energy shield")(_.properties.energyShield >= 200)
    fscore1("1000+ armour + evasion")(i => i.properties.evasionRating + i.properties.armour >= 1000)
    fscore1("200+ armour belt")(_.slots.isBelt, _.plusTo.armour >= 200)
    fscore1("8%+ Attack speed Non-Weapon")(!_.slots.isWeapon, _.increased.attackSpeed >= 8)
    fscore1("Fire Dmg Non-Weapon")(!_.slots.isWeapon, _.damages.fire.max >= 20)
    fscore1("Cold Dmg Non-Weapon")(!_.slots.isWeapon, _.damages.cold.max >= 16)
    fscore1("Lit Dmg Non-Weapon")(!_.slots.isWeapon, _.damages.lightning.max >= 30)
    fscore1("Phys Dmg Non-Weapon")(!_.slots.isWeapon, _.damages.physical.max >= 15)
    fscore1("Adds Speed")(_.increased.movementSpeed >= 20)
    fscore1("+Weapon Elemental Damage")(!_.slots.isWeapon, _.increased.elementalDamageWithWeapons >= 15)
    fscore1("Good Resists") { i =>
      i.plusTo.resistance.all.exists(_ > 35) ||
        i.plusTo.resistance.all.count(_ > 20) >= 2 ||
        i.plusTo.resistance.all.count(_ > 10) >= 3
    }
    fscore1("Projectile Speed")(_.increased.projectileSpeed >= 20)
    fscore1("+2+ for gems")(_.gemLevel.max >= 2)
    fscore1("20%+ Increased Spell Damage")(!_.slots.isWeapon, !_.slots.isSpiritShield, _.increased.spellDamage >= 20)
    fscore1("30%+ Increased Spell Damage (Spirit Shield)")( !_.slots.isWeapon, _.slots.isSpiritShield, _.increased.spellDamage >= 30)
    fscore1("Good Attributes") { i =>
      i.plusTo.attribute.all.exists(_ >= 30) ||
        i.plusTo.attribute.all.count(_ >= 25) >= 2 ||
        i.plusTo.attribute.all.count(_ >= 20) >= 3
    }
    fscore1("15%+ to IIQ")(_.increased.quantityOfItemsFound >= 15)
    fscore1("15%+ to IIR")(_.increased.rarityOfItemsFound >= 15)
    fscore1("1H Fire")(_.slots.is1H, _.damages.fire.max > 30)
    fscore1("1H Cold")(_.slots.is1H, _.damages.cold.max > 25)
    fscore1("1H Lit")(_.slots.is1H, _.damages.lightning.max > 50)
    fscore1("2H Fire")(_.slots.is2H, _.damages.fire.max > 45)
    fscore1("2H Cold")(_.slots.is2H, _.damages.cold.max > 35)
    fscore1("2H Lit")(_.slots.is2H, _.damages.lightning.max > 75)
    fscore1("Increased Attack Speed")(_.increased.attackSpeed > 15)
    fscore1("1H Increased Phys Dmg")(_.slots.is1H, _.increased.damage.physical >= 75)
    fscore1("2H Increased Phys Dmg")(_.slots.is2H, _.increased.damage.physical >= 140)
    fscore1("60%+ Increased Spell Crit chance")(_.increased.criticalStrikeChanceForSpells >= 60)
    fscore1("60%+ Increased Crit chance")(_.increased.criticalStrikeChance >= 60)
    fscore1("1H 30%+ Increased Spell Dmg")(_.increased.spellDamage >= 30)
    fscore1("2H 50%+ Increased Spell Dmg")(_.increased.spellDamage >= 50)
    fscore1("Life Leech")(_.leech.physical.life > 0)
    fscore1("Mana Leech")(_.leech.physical.mana > 0)
    fscore("1H DPS")(_.slots.is1H, _.total.dps >= 250)(10)
    fscore("2H DPS")(_.slots.is2H, _.total.dps >= 375)(10)
    */
  }

  // Runs every registered rule against the item and sums all matching scores;
  // None when no rule matched.
  def apply(i: ComputedItem): Option[HighScore] = {
    all.map(_(i)).flatten.reduceOption(_ + _)
  }

  // Immutable snapshot of the registry; the `locally` block above has already
  // run by the time this val initializes, so the list is complete.
  val all = _all.toList
}

/** A scoring result: the reasons that matched and their combined score. */
case class HighScore(reason: List[String], score: Double) {
  // Combines two results by concatenating reasons and adding scores.
  def +(that: HighScore) = HighScore(this.reason ::: that.reason, this.score + that.score)
}

/** A single valuation rule: yields a score for an item, or None when it does not apply. */
trait HighScorer {
  def apply(i: ComputedItem): Option[HighScore]
}
mihailim/looty
looty/src/main/scala/looty/model/HighScorer.scala
Scala
gpl-2.0
21,441
package com.sfxcode.sapphire.core.demo.issues.deltaspike

import org.apache.deltaspike.cdise.api.CdiContainerLoader

/**
 * Boots and shuts down the Deltaspike CDI container, guarding against
 * repeated boot/shutdown calls via a simple initialized flag.
 */
object DeltaspikeLauncher {

  // True while the CDI container is booted.
  private var initialized = false

  /** Boots the CDI container unless it is already running. */
  def init(): Unit = {
    if (!initialized) {
      CdiContainerLoader.getCdiContainer.boot()
      initialized = true
    }
  }

  /** Shuts the CDI container down if it is currently running. */
  def shutdown(): Unit = {
    if (initialized) {
      CdiContainerLoader.getCdiContainer.shutdown()
      initialized = false
    }
  }

  /** Whether [[init]] has run and [[shutdown]] has not since. */
  def isInitialized: Boolean = initialized
}
sfxcode/sapphire-core
demos/issues/src/main/scala/com/sfxcode/sapphire/core/demo/issues/deltaspike/DeltaspikeLauncher.scala
Scala
apache-2.0
544
/*
 * Copyright 2017 PayPal
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.squbs.marshallers.json

import org.scalatest.{FlatSpec, Matchers}
import org.squbs.marshallers.json.TestData._

import scala.collection.immutable
import scala.reflect.ClassTag
import scala.reflect.runtime.universe._

/** Exercises ReflectHelper's Java/Scala class detection and TypeTag conversion utilities. */
class ReflectHelperSpec extends FlatSpec with Matchers {

  it should "determine an object is Scala or Java" in {
    // Instances of a Java bean vs. a Scala case class.
    ReflectHelper.isJavaClass(fullTeamWithPrivateMembers) shouldBe true
    ReflectHelper.isJavaClass(fullTeam) shouldBe false
  }

  it should "determine a class is Scala or Java" in {
    ReflectHelper.isJavaClass(classOf[TeamWithPrivateMembers]) shouldBe true
    ReflectHelper.isJavaClass(classOf[Team]) shouldBe false
  }

  it should "convert TypeTag to Manifest for any type" in {
    // Compares the helper's conversion against the compiler-provided Manifest.
    def check[T](implicit typeTag: TypeTag[T], manifest: Manifest[T]) =
      ReflectHelper.toManifest[T] shouldBe manifest

    check[Team]
    check[List[Employee]]
    check[Map[String, Seq[Employee]]]
  }

  it should "find the right class given a type and TypeTag, with erasure" in {
    ReflectHelper.toClass[Team] shouldBe classOf[Team]
    ReflectHelper.toClass[immutable.Seq[Employee]] shouldBe classOf[immutable.Seq[_]]
  }

  it should "convert TypeTag to ClassTag for any type, with erasure" in {
    ReflectHelper.toClassTag[Team] shouldBe ClassTag[Team](classOf[Team])
    ReflectHelper.toClassTag[immutable.Seq[Employee]] shouldBe
      ClassTag[immutable.Seq[_]](classOf[immutable.Seq[_]])
  }
}
SarathChandran/squbs
squbs-ext/src/test/scala/org/squbs/marshallers/json/ReflectHelperSpec.scala
Scala
apache-2.0
2,145
/*
 * Copyright 2014–2020 SlamData Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package quasar.std

import slamdata.Predef._
import quasar._
import quasar.common.data.Data
import quasar.ArgumentError._
import quasar.fp.ski._

import qdata.time.{DateTimeInterval, OffsetDate => QOffsetDate}

import java.time.{
  LocalDate => JLocalDate,
  LocalDateTime => JLocalDateTime,
  LocalTime => JLocalTime,
  OffsetDateTime => JOffsetDateTime,
  OffsetTime => JOffsetTime
}

import scalaz._
import scalaz.syntax.either._

/**
 * Date/time standard-library functions: string parsers for the temporal types
 * plus the `extract`/`set`/conversion function definitions.
 *
 * Each `parse*` helper wraps the underlying `java.time`/`qdata` parser and
 * converts any parse failure into a `temporalFormatError` tagged with the
 * corresponding function value, so error messages name the function that
 * rejected the input.
 */
trait DateLib extends Library with Serializable {

  def parseOffsetDateTime(str: String): ArgumentError \/ Data.OffsetDateTime =
    \/.fromTryCatchNonFatal(JOffsetDateTime.parse(str)).bimap(
      κ(temporalFormatError(OffsetDateTime, str, None)),
      Data.OffsetDateTime.apply)

  def parseOffsetTime(str: String): ArgumentError \/ Data.OffsetTime =
    \/.fromTryCatchNonFatal(JOffsetTime.parse(str)).bimap(
      κ(temporalFormatError(OffsetTime, str, None)),
      Data.OffsetTime.apply)

  def parseOffsetDate(str: String): ArgumentError \/ Data.OffsetDate =
    \/.fromTryCatchNonFatal(QOffsetDate.parse(str)).bimap(
      κ(temporalFormatError(OffsetDate, str, None)),
      Data.OffsetDate.apply)

  def parseLocalDateTime(str: String): ArgumentError \/ Data.LocalDateTime =
    \/.fromTryCatchNonFatal(JLocalDateTime.parse(str)).bimap(
      // Fixed: previously reported `OffsetDate` (copy-paste), mislabeling the
      // failing function in the error; this parser belongs to `LocalDateTime`.
      κ(temporalFormatError(LocalDateTime, str, None)),
      Data.LocalDateTime.apply)

  def parseLocalTime(str: String): ArgumentError \/ Data.LocalTime =
    \/.fromTryCatchNonFatal(JLocalTime.parse(str)).bimap(
      κ(temporalFormatError(LocalTime, str, None)),
      Data.LocalTime.apply)

  def parseLocalDate(str: String): ArgumentError \/ Data.LocalDate =
    \/.fromTryCatchNonFatal(JLocalDate.parse(str)).bimap(
      κ(temporalFormatError(LocalDate, str, None)),
      Data.LocalDate.apply)

  def parseInterval(str: String): ArgumentError \/ Data.Interval =
    DateTimeInterval.parse(str) match {
      case Some(i) => Data.Interval(i).right
      case None => temporalFormatError(Interval, str, Some("expected, e.g. P3DT12H30M15.0S")).left
    }

  // NB: SQL specifies a function called `extract`, but that doesn't have comma-
  //     separated arguments. `date_part` is Postgres’ name for the same thing
  //     with commas.

  // Helper: a simplification-free unary Mapping function with the given help text.
  private def unaryFunc(help: String) =
    UnaryFunc(
      Mapping, help,
      noSimplification)

  // Helper: a simplification-free binary Mapping function with the given help text.
  private def binaryFunc(help: String) =
    BinaryFunc(
      Mapping, help,
      noSimplification)

  val ExtractCentury = unaryFunc(
    "Pulls out the century subfield from a date/time value (currently (year - 1)/100 + 1).")

  val ExtractDayOfMonth = unaryFunc(
    "Pulls out the day of month (`day`) subfield from a date/time value (1-31).")

  val ExtractDecade = unaryFunc(
    "Pulls out the decade subfield from a date/time value (year/10).")

  val ExtractDayOfWeek = unaryFunc(
    "Pulls out the day of week (`dow`) subfield from a date/time value " +
    "(Sunday: 0 to Saturday: 6).")

  val ExtractDayOfYear = unaryFunc(
    "Pulls out the day of year (`doy`) subfield from a date/time value (1-365 or -366).")

  val ExtractEpoch = unaryFunc(
    "Pulls out the epoch subfield from a datetime value with timezone offset. " +
    "This is the number of seconds since midnight, 1970-01-01.")

  val ExtractHour = unaryFunc(
    "Pulls out the hour subfield from a date/time value (0-23).")

  val ExtractIsoDayOfWeek = unaryFunc(
    "Pulls out the ISO day of week (`isodow`) subfield from a date/time value (Monday: 1 to Sunday: 7).")

  val ExtractIsoYear = unaryFunc(
    "Pulls out the ISO year (`isoyear`) subfield from a date/time value (based on the first week (Monday is the first day of the week) containing Jan. 4).")

  val ExtractMicrosecond = unaryFunc(
    "Computes the microseconds of a date/time value (including seconds).")

  val ExtractMillennium = unaryFunc(
    "Pulls out the millennium subfield from a date/time value (currently (year - 1)/1000 + 1).")

  val ExtractMillisecond = unaryFunc(
    "Computes the milliseconds of a date/time value (including seconds).")

  val ExtractMinute = unaryFunc(
    "Pulls out the minute subfield from a date/time value (0-59).")

  val ExtractMonth = unaryFunc(
    "Pulls out the month subfield from a date/time value (1-12).")

  val ExtractQuarter = unaryFunc(
    "Pulls out the quarter subfield from a date/time value (1-4).")

  val ExtractSecond = unaryFunc(
    "Pulls out the second subfield from a date/time value (0-59, with fractional parts).")

  val ExtractTimeZone = unaryFunc(
    "Pulls out the timezone subfield from a date/time value (in seconds east of UTC).")

  val ExtractTimeZoneHour = unaryFunc(
    "Pulls out the hour component of the timezone subfield from a date/time value.")

  val ExtractTimeZoneMinute = unaryFunc(
    "Pulls out the minute component of the timezone subfield from a date/time value.")

  val ExtractWeek = unaryFunc(
    "Pulls out the week subfield from a date/time value (1-53).")

  val ExtractYear = unaryFunc(
    "Pulls out the year subfield from a date/time value.")

  // FIXME `ZoneOffset.ofTotalSeconds` throws an exception if the integer
  // input is not in the range [-64800, 64800]
  val SetTimeZone = binaryFunc(
    "Sets the timezone subfield in a date/time value (in seconds east of UTC).")

  val SetTimeZoneMinute = binaryFunc(
    "Sets the minute component of the timezone subfield in a date/time value.")

  val SetTimeZoneHour = binaryFunc(
    "Sets the hour component of the timezone subfield in a date/time value.")

  val Now = NullaryFunc(
    Mapping,
    "Returns the current datetime in the current time zone – this must always return the same value within the same execution of a query.",
    noSimplification)

  val NowTime = NullaryFunc(
    Mapping,
    "Returns the current time in the current time zone – this must always return the same value within the same execution of a query.",
    noSimplification)

  val NowDate = NullaryFunc(
    Mapping,
    "Returns the current date in the current time zone – this must always return the same value within the same execution of a query.",
    noSimplification)

  val CurrentTimeZone = NullaryFunc(
    Mapping,
    "Returns the current time zone offset in total seconds - this must always return the same value within the same execution of a query.",
    noSimplification)

  val OffsetDateTime = UnaryFunc(
    Mapping,
    "Converts a string in the format (YYYY-MM-DDTHH:MM:SS((+/-)HH[:MM[:SS]])/Z) to a timestamp value with a time zone offset. This is a partial function – arguments that don’t satisify the constraint have undefined results.",
    noSimplification)

  val OffsetTime = UnaryFunc(
    Mapping,
    "Converts a string in the format (HH:MM:SS[.SSS]((+/-)HH:MM:SS)/Z) to a time value with a time zone offset. This is a partial function – arguments that don’t satisify the constraint have undefined results.",
    noSimplification)

  val OffsetDate = UnaryFunc(
    Mapping,
    "Converts a string in the format (YYYY-MM-DD((+/-)HH:MM:SS)/Z) to a date value with a time zone offset. This is a partial function – arguments that don’t satisify the constraint have undefined results.",
    noSimplification)

  val LocalDateTime = UnaryFunc(
    Mapping,
    "Converts a string in the format (YYYY-MM-DDTHH:MM:SS) to a date value paired with a time. This is a partial function – arguments that don’t satisify the constraint have undefined results.",
    noSimplification)

  val LocalTime = UnaryFunc(
    Mapping,
    "Converts a string in the format (HH:MM:SS[.SSS]) to a time value. This is a partial function – arguments that don’t satisify the constraint have undefined results.",
    noSimplification)

  val LocalDate = UnaryFunc(
    Mapping,
    "Converts a string in the format (YYYY-MM-DD) to a date value. This is a partial function – arguments that don’t satisify the constraint have undefined results.",
    noSimplification)

  /**
   * TODO: document behavior change, now that years and months work
   */
  val Interval = UnaryFunc(
    Mapping,
    "Converts a string in the format (ISO 8601, e.g. P3DT12H30M15.0S) to an interval value. This is a partial function – arguments that don’t satisify the constraint have undefined results.",
    noSimplification)

  /**
   * TODO: document behavior change, `StartOfDay` only makes `OffsetDateTime`s out of other `OffsetDateTime`s.
   */
  val StartOfDay = UnaryFunc(
    Mapping,
    "Converts a DateTime or Date to a DateTime at the start of that day.",
    noSimplification)

  val TimeOfDay = UnaryFunc(
    Mapping,
    "Extracts the time of day from a datetime value. Preserves time zone information.",
    noSimplification)

  val ToTimestamp = UnaryFunc(
    Mapping,
    "Converts an integer epoch time value (i.e. milliseconds since 1 Jan. 1970, UTC) to a timestamp constant.",
    noSimplification)

  val ToLocal = BinaryFunc(
    Mapping,
    "Converts an offset date, time, or datetime and offset string to a local date, time, or datetime.",
    noSimplification)

  val ToOffset = BinaryFunc(
    Mapping,
    "Converts a local date, time, or datetime and offset string to an offset date, time, or datetime.",
    noSimplification)
}

object DateLib extends DateLib
slamdata/quasar
frontend/src/main/scala/quasar/std/date.scala
Scala
apache-2.0
9,845
/*
 * SPDX-License-Identifier: Apache-2.0
 *
 * Copyright 2015-2021 Andre White.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.truthencode.ddo.support.matching

/**
 * Matches or manipulates based on the first character being UPPERCASE.
 *
 * Marker trait combining [[UpperCaseStrategy]] with [[FirstCharacter]];
 * it declares no members of its own — behavior comes entirely from the
 * mixed-in parents (defined elsewhere in this package).
 */
trait FirstCharacterUpperCaseMatch extends UpperCaseStrategy with FirstCharacter
adarro/ddo-calc
subprojects/common/ddo-util/src/main/scala/io/truthencode/ddo/support/matching/FirstCharacterUpperCaseMatch.scala
Scala
apache-2.0
849
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.apollo.util import collection.mutable.ArrayBuffer /** * <p>A circular buffer</p> * * @author <a href="http://hiramchirino.com">Hiram Chirino</a> */ class CircularBuffer[T](max:Int) extends ArrayBuffer[T](max) { def max_size = max private var pos = 0 override def +=(elem: T): this.type = { if( size < initialSize ) { super.+=(elem) } else { evicted(this(pos)) this.update(pos, elem) pos += 1 if( pos >= initialSize ) { pos = 0 } } this } /** * Sub classes can override this method to so they can be * notified when an element is being evicted from the circular * buffer. */ protected def evicted(elem:T) = {} }
chirino/activemq-apollo
apollo-util/src/main/scala/org/apache/activemq/apollo/util/CircularBuffer.scala
Scala
apache-2.0
1,542
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.carbondata.mv.plans.modular import org.apache.spark.sql.catalyst.expressions.{Attribute, _} import org.apache.spark.sql.catalyst.plans.JoinType import org.apache.carbondata.mv.plans._ import org.apache.carbondata.mv.plans.modular.Flags._ private[mv] trait Matchable extends ModularPlan { def outputList: Seq[NamedExpression] def predicateList: Seq[Expression] } case class GroupBy( outputList: Seq[NamedExpression], inputList: Seq[Expression], predicateList: Seq[Expression], alias: Option[String], child: ModularPlan, flags: FlagSet, flagSpec: Seq[Seq[Any]], dataMapTableRelation: Option[ModularPlan] = None) extends UnaryNode with Matchable { override def output: Seq[Attribute] = outputList.map(_.toAttribute) override def makeCopy(newArgs: Array[AnyRef]): GroupBy = { val groupBy = super.makeCopy(newArgs).asInstanceOf[GroupBy] if (rewritten) groupBy.setRewritten() groupBy } } case class Select( outputList: Seq[NamedExpression], inputList: Seq[Expression], predicateList: Seq[Expression], aliasMap: Map[Int, String], joinEdges: Seq[JoinEdge], children: Seq[ModularPlan], flags: FlagSet, flagSpec: Seq[Seq[Any]], windowSpec: Seq[Seq[Any]], dataMapTableRelation: Option[ModularPlan] = None) 
extends ModularPlan with Matchable { override def output: Seq[Attribute] = outputList.map(_.toAttribute) override def adjacencyList: scala.collection.immutable.Map[Int, Seq[(Int, JoinType)]] = { joinEdges.groupBy { _.left }.map { case (k, v) => (k, v.map(e => (e.right, e.joinType))) } } override def extractJoinConditions( left: ModularPlan, right: ModularPlan): Seq[Expression] = { predicateList.filter(p => p.references.intersect(left.outputSet).nonEmpty && p.references.intersect(right.outputSet).nonEmpty && p.references.subsetOf(left.outputSet ++ right.outputSet)) } override def extractRightEvaluableConditions( left: ModularPlan, right: ModularPlan): Seq[Expression] = { predicateList.filter(p => p.references.subsetOf(left.outputSet ++ right.outputSet) && p.references.intersect(right.outputSet).nonEmpty) } override def extractEvaluableConditions(plan: ModularPlan): Seq[Expression] = { predicateList.filter(p => canEvaluate(p, plan)) } override def makeCopy(newArgs: Array[AnyRef]): Select = { val select = super.makeCopy(newArgs).asInstanceOf[Select] if (rewritten) select.setRewritten() select } } case class Union(children: Seq[ModularPlan], flags: FlagSet, flagSpec: Seq[Seq[Any]]) extends ModularPlan { override def output: Seq[Attribute] = children.head.output } case object OneRowTable extends LeafNode { override def output: Seq[Attribute] = Nil }
jackylk/incubator-carbondata
mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/basicOperators.scala
Scala
apache-2.0
3,649
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.command.v1 import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.execution.command /** * This base suite contains unified tests for the `ALTER TABLE .. RENAME` command that check V1 * table catalogs. 
The tests that cannot run for all V1 catalogs are located in more * specific test suites: * * - V1 In-Memory catalog: `org.apache.spark.sql.execution.command.v1.AlterTableRenameSuite` * - V1 Hive External catalog: `org.apache.spark.sql.hive.execution.command.AlterTableRenameSuite` */ trait AlterTableRenameSuiteBase extends command.AlterTableRenameSuiteBase { test("destination database is different") { withNamespaceAndTable("dst_ns", "dst_tbl") { dst => withNamespace("src_ns") { sql(s"CREATE NAMESPACE $catalog.src_ns") val src = dst.replace("dst", "src") sql(s"CREATE TABLE $src (c0 INT) $defaultUsing") val errMsg = intercept[AnalysisException] { sql(s"ALTER TABLE $src RENAME TO dst_ns.dst_tbl") }.getMessage assert(errMsg.contains("source and destination databases do not match")) } } } test("preserve table stats") { withNamespaceAndTable("ns", "dst_tbl") { dst => val src = dst.replace("dst", "src") sql(s"CREATE TABLE $src (c0 INT) $defaultUsing") sql(s"INSERT INTO $src SELECT 0") sql(s"ANALYZE TABLE $src COMPUTE STATISTICS") val size = getTableSize(src) assert(size > 0) sql(s"ALTER TABLE $src RENAME TO ns.dst_tbl") assert(size === getTableSize(dst)) } } test("the destination folder exists already") { withNamespaceAndTable("ns", "dst_tbl") { dst => val src = dst.replace("dst", "src") sql(s"CREATE TABLE $src (c0 INT) $defaultUsing") sql(s"INSERT INTO $src SELECT 0") sql(s"CREATE TABLE $dst (c0 INT) $defaultUsing") withTableDir(dst) { (fs, dst_dir) => sql(s"DROP TABLE $dst") fs.mkdirs(dst_dir) val errMsg = intercept[AnalysisException] { sql(s"ALTER TABLE $src RENAME TO ns.dst_tbl") }.getMessage assert(errMsg.matches("Can not rename the managed table(.+). " + "The associated location(.+) already exists.")) } } } } /** * The class contains tests for the `ALTER TABLE .. RENAME` command to check * V1 In-Memory table catalog. */ class AlterTableRenameSuite extends AlterTableRenameSuiteBase with CommandSuiteBase
maropu/spark
sql/core/src/test/scala/org/apache/spark/sql/execution/command/v1/AlterTableRenameSuite.scala
Scala
apache-2.0
3,266
package net.spals.appbuilder.mapstore.dynamodb import javax.validation.constraints.NotNull import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials} import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration import com.amazonaws.regions.Regions import com.amazonaws.services.dynamodbv2.document.DynamoDB import com.amazonaws.services.dynamodbv2.{AmazonDynamoDB, AmazonDynamoDBClientBuilder} import com.google.inject.Provider import com.netflix.governator.annotations.Configuration import com.typesafe.config.ConfigException import net.spals.appbuilder.annotations.service.AutoBindProvider import scala.util.Try /** * A [[Provider]] of the AWS [[DynamoDB]] * Document API object. * * @author tkral */ @AutoBindProvider private[dynamodb] class DynamoDBClientProvider extends Provider[AmazonDynamoDB] { @NotNull @Configuration("mapStore.dynamoDB.awsAccessKeyId") private[dynamodb] var awsAccessKeyId: String = null @NotNull @Configuration("mapStore.dynamoDB.awsSecretKey") private[dynamodb] var awsSecretKey: String = null @NotNull @Configuration("mapStore.dynamoDB.endpoint") private[dynamodb] var endpoint: String = null override def get(): AmazonDynamoDB = { val awsCredentials = new BasicAWSCredentials(awsAccessKeyId, awsSecretKey) val dynamoDBClientBuilder = AmazonDynamoDBClientBuilder.standard() .withCredentials(new AWSStaticCredentialsProvider(awsCredentials)) endpoint match { case httpEndpoint if httpEndpoint.startsWith("http://") => { val endpointConfig = new EndpointConfiguration(httpEndpoint, null) dynamoDBClientBuilder.withEndpointConfiguration(endpointConfig) } case regionEndpoint if Try(Regions.fromName(regionEndpoint)).isSuccess => dynamoDBClientBuilder.withRegion(regionEndpoint) case _ => throw new ConfigException.BadValue("mapStore.dynamoDB.endpoint", s"Unrecognized DynamoDB endpoint. Value is neither an http endpoint nor a known region: $endpoint") } dynamoDBClientBuilder.build() } }
timkral/appbuilder
mapstore-dynamodb/src/main/scala/net/spals/appbuilder/mapstore/dynamodb/DynamoDBClientProvider.scala
Scala
bsd-3-clause
2,065
package org.atnos.site import lib._ object OutOfTheBox extends UserGuidePage { def is = "Out of the box".title ^ s2""" This library comes with the following effects: Name | Description | Link --------- | ------------------------------------------- | ----- `EvalEffect` | an effect for delayed computations | ${"link" ~ EvalEffectPage} `OptionEffect` | an effect for optional computations, stopping when there's no available value | ${"link" ~ OptionEffectPage} `EitherEffect` | an effect for computations with failures, stopping when there is a failure | ${"link" ~ EitherEffectPage} `ValidateEffect` | an effect for computations with failures, allowing to collect failures | ${"link" ~ ValidateEffectPage} `ErrorEffect` | a mix of Eval and Either, catching exceptions and returning them as failures | ${"link" ~ ErrorEffectPage} `ReaderEffect` | an effect for depending on a configuration or an environment | ${"link" ~ ReaderEffectPage} `WriterEffect` | an effect to log messages | ${"link" ~ WriterEffectPage} `StateEffect` | an effect to pass state around | ${"link" ~ StateEffectPage} `ListEffect` | an effect for computations returning several values | ${"link" ~ ListEffectPage} `ChooseEffect` | an effect for modeling non-determinism | ${"link" ~ ChooseEffectPage} `MemoEffect` | an effect for memoizing values | ${"link" ~ MemoEffectPage} `FutureEffect` | an effect for asynchronous computations | ${"link" ~ TimedFutureEffectPage} `TaskEffect` | an effect for asynchronous computations using Monix Tasks | ${"link" ~ TaskEffectPage} `SafeEffect` | an effect for guaranteeing resource safety | ${"link" ~ SafeEffectPage} <small>(from `org.atnos.eff._`)</small> Other modules listed in $Installation provide additional effects (Twitter Future, Scalaz Task, Doobie ConnectionIO,...). <br/> ## What's next? Now you can learn how to ${"create your own effects" ~/ CreateEffects}! """ }
etorreborre/eff-cats
src/test/scala/org/atnos/site/OutOfTheBox.scala
Scala
mit
2,548
/* * Copyright 2001-2013 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalactic import org.scalatest._ import scala.collection.GenSeq import scala.collection.GenMap import scala.collection.GenSet import scala.collection.GenIterable import scala.collection.GenTraversable import scala.collection.GenTraversableOnce class FreshTypeCheckedSeqEqualityConstraintsSpec extends FunSpec with NonImplicitAssertions with CheckedEquality { case class Super(size: Int) class Sub(sz: Int) extends Super(sz) val super1: Super = new Super(1) val sub1: Sub = new Sub(1) val super2: Super = new Super(2) val sub2: Sub = new Sub(2) val nullSuper: Super = null case class Fruit(name: String) class Apple extends Fruit("apple") class Orange extends Fruit("orange") implicit class IntWrapper(val value: Int) { override def equals(o: Any): Boolean = o match { case that: IntWrapper => this.value == that.value case _ => false } override def hashCode: Int = value.hashCode } describe("the SeqEqualityConstraints trait") { it("should allow any Seq to be compared with any other Seq, so long as the element types of the two Seq's have a recursive EqualityConstraint") { assert(Vector(1, 2, 3) === List(1, 2, 3)) assert(Vector(1, 2, 3) === List(1L, 2L, 3L)) // Test for something convertible assertTypeError("Vector(new IntWrapper(1), new IntWrapper(2), new IntWrapper(3)) === List(1, 2, 3)") assertTypeError("Vector(1, 2, 3) === List(new IntWrapper(1), new IntWrapper(2), new IntWrapper(3))") 
assert(Vector(new Apple, new Apple) === List(new Fruit("apple"), new Fruit("apple"))) assert(List(new Fruit("apple"), new Fruit("apple")) === Vector(new Apple, new Apple)) assertTypeError("Vector(new Apple, new Apple) === List(new Orange, new Orange)") assertTypeError("List(new Orange, new Orange) === Vector(new Apple, new Apple)") } it("should allow an Array to be compared with any other Seq, so long as the element types of the two objects have a recursive EqualityConstraint") { assert(Array(1, 2, 3) === List(1, 2, 3)) assert(Array(1, 2, 3) === List(1L, 2L, 3L)) assert(Array(1L, 2L, 3L) === List(1, 2, 3)) // Test for something convertible assertTypeError("Array(new IntWrapper(1), new IntWrapper(2), new IntWrapper(3)) === List(1, 2, 3)") assertTypeError("Array(1, 2, 3) === List(new IntWrapper(1), new IntWrapper(2), new IntWrapper(3))") assert(Array(new Apple, new Apple) === List(new Fruit("apple"), new Fruit("apple"))) assert(Array(new Fruit("apple"), new Fruit("apple")) === Vector(new Apple, new Apple)) assertTypeError("Array(new Apple, new Apple) === List(new Orange, new Orange)") assertTypeError("Array(new Orange, new Orange) === Vector(new Apple, new Apple)") } it("should allow any Seq to be compared with an Array, so long as the element types of the two objects have a recursive EqualityConstraint") { assert(Vector(1, 2, 3) === Array(1, 2, 3)) assert(Vector(1, 2, 3) === Array(1L, 2L, 3L)) assert(Vector(1L, 2L, 3L) === Array(1, 2, 3)) // Test for something convertible assertTypeError("Vector(new IntWrapper(1), new IntWrapper(2), new IntWrapper(3)) === Array(1, 2, 3)") assertTypeError("Vector(1, 2, 3) === Array(new IntWrapper(1), new IntWrapper(2), new IntWrapper(3))") assert(Vector(new Apple, new Apple) === Array(new Fruit("apple"), new Fruit("apple"))) assert(List(new Fruit("apple"), new Fruit("apple")) === Array(new Apple, new Apple)) assertTypeError("Vector(new Apple, new Apple) === Array(new Orange, new Orange)") assertTypeError("List(new Orange, new Orange) 
=== Array(new Apple, new Apple)") } } }
SRGOM/scalatest
scalactic-test/src/test/scala/org/scalactic/FreshTypeCheckedSeqEqualityConstraintsSpec.scala
Scala
apache-2.0
4,331
package com.github.j5ik2o.dddbase trait AggregateLongId extends AggregateId { override type IdType = Long }
j5ik2o/scala-ddd-base-functional
core/src/main/scala/com/github/j5ik2o/dddbase/AggregateLongId.scala
Scala
mit
113
/* * Copyright 2012-2013 Eligotech BV. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.eligosource.eventsourced.journal.common import akka.actor._ import org.eligosource.eventsourced.core.actor /** * Journal configuration object. */ trait JournalProps { /** * Optional journal name. */ def name: Option[String] /** * Optional dispatcher name. */ def dispatcherName: Option[String] /** * Creates and starts a new journal using the settings of this configuration object. */ def createJournal(implicit actorRefFactory: ActorRefFactory): ActorRef = { val journalRef = actor(createJournalActor, name, dispatcherName) if(readOnly) actor(new ReadOnlyFacade(journalRef)) else journalRef } /** * Creates a journal actor instance. */ protected def createJournalActor: Actor /** * Make journal read only (e.g. offline snapshot) */ def readOnly: Boolean }
CoderPaulK/eventsourced
es-journal/es-journal-common/src/main/scala/org/eligosource/eventsourced/journal/common/JournalProps.scala
Scala
apache-2.0
1,452
package model import play.api.libs.json.Json import play.twirl.api.Html case class FuneralSchedule(name: String, content: List[Funeral]) case class Funeral(hour: String, who: String, age: String) object formats{ implicit val funeralFormat = Json.format[Funeral] implicit val tableFormat = Json.format[FuneralSchedule] }
Hajto/Scrapper
app/model/table.scala
Scala
mit
325
package org.bitcoins.marshallers.rpc.bitcoincore.blockchain import org.bitcoins.protocol.rpc.bitcoincore.blockchain.{MemPoolInfo, MemPoolInfoImpl} import spray.json._ /** * Created by Tom on 1/11/2016. */ object MemPoolInfoMarshaller extends DefaultJsonProtocol { val sizeKey = "size" val bytesKey = "bytes" implicit object MemPoolInfoFormatter extends RootJsonFormat[MemPoolInfo] { override def read (value : JsValue) : MemPoolInfo = { val obj = value.asJsObject val size = obj.fields(sizeKey).convertTo[Int] val bytes = obj.fields(bytesKey).convertTo[Int] MemPoolInfoImpl(size, bytes) } override def write (mempool : MemPoolInfo) : JsValue = { val m : Map[String, JsValue] = Map ( sizeKey -> JsNumber(mempool.size), bytesKey -> JsNumber(mempool.bytes) ) JsObject(m) } } }
Christewart/scalacoin
src/main/scala/org/bitcoins/marshallers/rpc/bitcoincore/blockchain/MemPoolInfoMarshaller.scala
Scala
mit
862
class Foo { val x = "hello" val fun1: Int => Int = n => 0 + n + list.size val fun2: Int => Int = n => 1 + n + list.size fun2(5) List(5, 9).map(n => 2 + n + list.size) // error final val list = List(1, 2, 3) // error List(5, 9).map(n => 3 + n + list.size) }
dotty-staging/dotty
tests/init/neg/function1.scala
Scala
apache-2.0
297
/* * Copyright 2014 websudos ltd. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.websudos.reactiveneo.client import java.util.concurrent.TimeUnit import com.typesafe.scalalogging.slf4j.LazyLogging import com.websudos.reactiveneo.dsl.MatchQuery import org.jboss.netty.handler.codec.http.HttpMethod import play.api.libs.json.Reads import scala.concurrent.Future import scala.concurrent.duration.FiniteDuration /** * REST API endpoints definitions. * @param path Server query path. * @param method HTTP method, with POST as default. */ case class RestEndpoint(path: String, method: HttpMethod = HttpMethod.POST) object SingleTransaction extends RestEndpoint("/db/data/transaction/commit") object BeginTransaction extends RestEndpoint("/db/data/transaction") class ContinueInTransaction(transactionId: Int) extends RestEndpoint(s"/db/data/transaction/$transactionId") class CommitTransaction(transactionId: Int) extends RestEndpoint(s"/db/data/transaction/$transactionId/commit") class RollbackTransaction(transactionId: Int) extends RestEndpoint(s"/db/data/transaction/$transactionId", HttpMethod.DELETE) /** * Model of a call to Neo4j server. * @tparam RT Type of result call response. 
*/ class RestCall[RT](endpoint: RestEndpoint, content: Option[String], resultParser: Reads[RT])(implicit client: RestClient) extends ServerCall[Seq[RT]] with LazyLogging { implicit lazy val parser = { val parser = new CypherResultParser[RT]()(resultParser) parser } def execute: Future[Seq[RT]] = { val result = client.makeRequest[Seq[RT]](endpoint.path, endpoint.method, content) result } } object RestCall { def apply[RT](endpoint: RestEndpoint, resultParser: Reads[RT], query: String)(implicit client: RestClient) = { new RestCall[RT](endpoint, Some(query), resultParser) } def apply[RT](endpoint: RestEndpoint, resultParser: Reads[RT])(implicit client: RestClient) = { new RestCall[RT](endpoint, None, resultParser) } } /** * Service that prepares and executes rest call */ class RestConnection(config: ClientConfiguration) { implicit def client: RestClient = new RestClient(config) def neoStatement( cypher: String ) = s"""{ | "statements" : [ { | "statement" : "$cypher" | } ] |}""".stripMargin implicit def makeRequest[RT](matchQuery: MatchQuery[_, _, _, _, _, RT]): RestCall[RT] = { val (query, retType) = matchQuery.finalQuery val requestContent = neoStatement(query) val call = RestCall(SingleTransaction, retType.resultParser, requestContent) call } implicit def makeRequest[RT](cypher: String)(implicit resultParser: Reads[RT]): RestCall[RT] = { val requestContent = neoStatement(cypher) val call = RestCall(SingleTransaction, resultParser, requestContent) call } } object RestConnection { def apply(host: String, port: Int): RestConnection = { val config = ClientConfiguration(host, port, FiniteDuration(10, TimeUnit.SECONDS)) new RestConnection(config) } }
zarthross/reactiveneo
reactiveneo-dsl/src/main/scala/com/websudos/reactiveneo/client/RestCall.scala
Scala
gpl-2.0
3,550
// Copyright (c) 2013-2020 Rob Norris and Contributors // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package doobie.enumerated import doobie.util.invariant._ import java.sql.ResultSetMetaData._ import cats.ApplicativeError import cats.kernel.Eq import cats.kernel.instances.int._ /** @group Types */ sealed abstract class ColumnNullable(val toInt: Int) extends Product with Serializable { def toNullability: Nullability = Nullability.fromColumnNullable(this) } /** @group Modules */ object ColumnNullable { /** @group Values */ case object NoNulls extends ColumnNullable(columnNoNulls) /** @group Values */ case object Nullable extends ColumnNullable(columnNullable) /** @group Values */ case object NullableUnknown extends ColumnNullable(columnNullableUnknown) def fromInt(n:Int): Option[ColumnNullable] = Some(n) collect { case NoNulls.toInt => NoNulls case Nullable.toInt => Nullable case NullableUnknown.toInt => NullableUnknown } def fromNullability(n: Nullability): ColumnNullable = n match { case Nullability.NoNulls => NoNulls case Nullability.Nullable => Nullable case Nullability.NullableUnknown => NullableUnknown } def fromIntF[F[_]](n: Int)(implicit AE: ApplicativeError[F, Throwable]): F[ColumnNullable] = ApplicativeError.liftFromOption(fromInt(n), InvalidOrdinal[ColumnNullable](n)) implicit val EqColumnNullable: Eq[ColumnNullable] = Eq.by(_.toInt) }
tpolecat/doobie
modules/core/src/main/scala/doobie/enumerated/columnnullable.scala
Scala
mit
1,592
import scala.util.FromDigits import scala.quoted._ case class Even(n: Int) object Even { def evenFromDigits(digits: String): Even = { val intValue = FromDigits.intFromDigits(digits) if (intValue % 2 == 0) Even(intValue) else throw FromDigits.MalformedNumber(s"$digits is odd") } class EvenFromDigits extends FromDigits[Even] { def fromDigits(digits: String) = evenFromDigits(digits) } given EvenFromDigits { override inline def fromDigits(digits: String) = ${ EvenFromDigitsImpl('digits) } } }
som-snytt/dotty
tests/neg-with-compiler/GenericNumLits/Even_1.scala
Scala
apache-2.0
541
/* * Artificial Intelligence for Humans * Volume 1: Fundamental Algorithms * Scala Version * http://www.aifh.org * http://www.jeffheaton.com * * Code repository: * https://github.com/jeffheaton/aifh * Copyright 2013 by Jeff Heaton * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * For more information on Heaton Research copyrights, licenses * and trademarks visit: * http://www.heatonresearch.com/copyright */ package com.heatonresearch.aifh.general.fns import java.text.NumberFormat import scala.collection.mutable.ArrayBuffer /** * Provides the basics for an RBF function. RBF functions take their "parameters" from a vector (and starting index). * This allows many RBF's to be "stacked" together in a single vector. RBF parameters are: a single width and a * vector of centers. Therefor the size required to store one RBF is (dimensions + 1). There is no peak parameter, * the peak is assumed to be 1. * * Construct the RBF. Each RBF will require space equal to (dimensions + 1) in the params vector. * * @param dimensions The number of dimensions. * @param params The parameter vector. Holds the RBF width and centers. This vector may hold multiple RBF's. * @param theIndex The index into the params vector. You can store multiple RBF's in a vector. */ abstract class AbstractRBF(dimensions: Int, params: ArrayBuffer[Double], theIndex: Int) extends FnRBF { /** * The index to the widths. */ private val indexWidth = theIndex /** * The index to the centers. 
*/ private val indexCenters: Int = theIndex + 1 final override def getCenter(dimension: Int): Double = { params(indexCenters + dimension) } final override def getDimensions: Int = dimensions final override def getWidth: Double = params(indexWidth) override def setWidth(theWidth: Double) { params(indexWidth) = theWidth } override def toString: String = { val f = NumberFormat.getNumberInstance f.setMinimumFractionDigits(2) val centersStr = (indexCenters until (indexCenters+dimensions)).map(params(_)).map(f.format).mkString(",") s"[${getClass.getSimpleName}:width=${f.format(this.getWidth)},center=$centersStr]" } override def setCenter(dimension: Int, value: Double) { params(indexCenters + dimension) = value } }
PeterLauris/aifh
vol1/scala-examples/src/main/scala/com/heatonresearch/aifh/general/fns/AbstractRBF.scala
Scala
apache-2.0
2,812
package rros /** * Created by namnguyen on 3/18/15. */ object GlobalConfig { //Config 6 seconds for fail and recover fast val PING_TIME_OUT:Long = 6000 val PING_DURATION:Long = 2000 }
namhnguyen/RROS
src/main/scala/rros/GlobalConfig.scala
Scala
apache-2.0
193
package io.getquill.sqlserver import java.sql.{ Connection, ResultSet } import io.getquill.PrepareZioJdbcSpecBase import io.getquill.Prefix import org.scalatest.BeforeAndAfter class PrepareJdbcSpec extends PrepareZioJdbcSpecBase with BeforeAndAfter { override def prefix: Prefix = Prefix("testSqlServerDB") val context = testContext import context._ before { testContext.run(query[Product].delete).runSyncUnsafe() } def productExtractor = (rs: ResultSet, conn: Connection) => materializeQueryMeta[Product].extract(rs, conn) val prepareQuery = prepare(query[Product]) implicit val im = insertMeta[Product](_.id) "single" in { val prepareInsert = prepare(query[Product].insert(lift(productEntries.head))) singleInsert(prepareInsert) mustEqual false extractProducts(prepareQuery) === List(productEntries.head) } "batch" in { val prepareBatchInsert = prepare( liftQuery(withOrderedIds(productEntries)).foreach(p => query[Product].insert(p)) ) batchInsert(prepareBatchInsert).distinct mustEqual List(false) extractProducts(prepareQuery) === withOrderedIds(productEntries) } }
getquill/quill
quill-jdbc-zio/src/test/scala/io/getquill/sqlserver/PrepareJdbcSpec.scala
Scala
apache-2.0
1,141
/* * Copyright 2012 Albert Örwall * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.evactor.model.events import scala.Array import java.net.URLEncoder /** * The event class all other event's should inherit. * * It's identified by the event type (class name) and id. An event * can have clones with different paths. * */ trait Event extends Serializable { val id: String val timestamp: Int }
beni55/pickling
benchmark/Event.scala
Scala
bsd-3-clause
937
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions import java.sql.Timestamp import org.apache.spark.SparkFunSuite import org.apache.spark.metrics.source.CodegenMetrics import org.apache.spark.sql.Row import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.dsl.expressions._ import org.apache.spark.sql.catalyst.expressions.codegen._ import org.apache.spark.sql.catalyst.expressions.objects.{CreateExternalRow, GetExternalRowField, ValidateExternalType} import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, DateTimeUtils, GenericArrayData} import org.apache.spark.sql.types._ import org.apache.spark.unsafe.types.UTF8String import org.apache.spark.util.ThreadUtils /** * Additional tests for code generation. 
*/ class CodeGenerationSuite extends SparkFunSuite with ExpressionEvalHelper { test("multithreaded eval") { import scala.concurrent._ import ExecutionContext.Implicits.global import scala.concurrent.duration._ val futures = (1 to 20).map { _ => Future { GeneratePredicate.generate(EqualTo(Literal(1), Literal(1))) GenerateMutableProjection.generate(EqualTo(Literal(1), Literal(1)) :: Nil) GenerateOrdering.generate(Add(Literal(1), Literal(1)).asc :: Nil) } } futures.foreach(ThreadUtils.awaitResult(_, 10.seconds)) } test("metrics are recorded on compile") { val startCount1 = CodegenMetrics.METRIC_COMPILATION_TIME.getCount() val startCount2 = CodegenMetrics.METRIC_SOURCE_CODE_SIZE.getCount() val startCount3 = CodegenMetrics.METRIC_GENERATED_CLASS_BYTECODE_SIZE.getCount() val startCount4 = CodegenMetrics.METRIC_GENERATED_METHOD_BYTECODE_SIZE.getCount() GenerateOrdering.generate(Add(Literal(123), Literal(1)).asc :: Nil) assert(CodegenMetrics.METRIC_COMPILATION_TIME.getCount() == startCount1 + 1) assert(CodegenMetrics.METRIC_SOURCE_CODE_SIZE.getCount() == startCount2 + 1) assert(CodegenMetrics.METRIC_GENERATED_CLASS_BYTECODE_SIZE.getCount() > startCount3) assert(CodegenMetrics.METRIC_GENERATED_METHOD_BYTECODE_SIZE.getCount() > startCount4) } test("SPARK-8443: split wide projections into blocks due to JVM code size limit") { val length = 5000 val expressions = List.fill(length)(EqualTo(Literal(1), Literal(1))) val plan = GenerateMutableProjection.generate(expressions) val actual = plan(new GenericInternalRow(length)).toSeq(expressions.map(_.dataType)) val expected = Seq.fill(length)(true) if (!checkResult(actual, expected)) { fail(s"Incorrect Evaluation: expressions: $expressions, actual: $actual, expected: $expected") } } test("SPARK-13242: case-when expression with large number of branches (or cases)") { val cases = 50 val clauses = 20 // Generate an individual case def generateCase(n: Int): (Expression, Expression) = { val condition = (1 to clauses) .map(c => 
EqualTo(BoundReference(0, StringType, false), Literal(s"$c:$n"))) .reduceLeft[Expression]((l, r) => Or(l, r)) (condition, Literal(n)) } val expression = CaseWhen((1 to cases).map(generateCase(_))) val plan = GenerateMutableProjection.generate(Seq(expression)) val input = new GenericInternalRow(Array[Any](UTF8String.fromString(s"${clauses}:${cases}"))) val actual = plan(input).toSeq(Seq(expression.dataType)) assert(actual(0) == cases) } test("SPARK-18091: split large if expressions into blocks due to JVM code size limit") { var strExpr: Expression = Literal("abc") for (_ <- 1 to 150) { strExpr = Decode(Encode(strExpr, "utf-8"), "utf-8") } val expressions = Seq(If(EqualTo(strExpr, strExpr), strExpr, strExpr)) val plan = GenerateMutableProjection.generate(expressions) val actual = plan(null).toSeq(expressions.map(_.dataType)) val expected = Seq(UTF8String.fromString("abc")) if (!checkResult(actual, expected)) { fail(s"Incorrect Evaluation: expressions: $expressions, actual: $actual, expected: $expected") } } test("SPARK-14793: split wide array creation into blocks due to JVM code size limit") { val length = 5000 val expressions = Seq(CreateArray(List.fill(length)(EqualTo(Literal(1), Literal(1))))) val plan = GenerateMutableProjection.generate(expressions) val actual = plan(new GenericInternalRow(length)).toSeq(expressions.map(_.dataType)) val expected = Seq(new GenericArrayData(Seq.fill(length)(true))) if (!checkResult(actual, expected)) { fail(s"Incorrect Evaluation: expressions: $expressions, actual: $actual, expected: $expected") } } test("SPARK-14793: split wide map creation into blocks due to JVM code size limit") { val length = 5000 val expressions = Seq(CreateMap( List.fill(length)(EqualTo(Literal(1), Literal(1))).zipWithIndex.flatMap { case (expr, i) => Seq(Literal(i), expr) })) val plan = GenerateMutableProjection.generate(expressions) val actual = plan(new GenericInternalRow(length)).toSeq(expressions.map(_.dataType)).map { case m: ArrayBasedMapData => 
ArrayBasedMapData.toScalaMap(m) } val expected = (0 until length).map((_, true)).toMap :: Nil if (!checkResult(actual, expected)) { fail(s"Incorrect Evaluation: expressions: $expressions, actual: $actual, expected: $expected") } } test("SPARK-14793: split wide struct creation into blocks due to JVM code size limit") { val length = 5000 val expressions = Seq(CreateStruct(List.fill(length)(EqualTo(Literal(1), Literal(1))))) val plan = GenerateMutableProjection.generate(expressions) val actual = plan(new GenericInternalRow(length)).toSeq(expressions.map(_.dataType)) val expected = Seq(InternalRow(Seq.fill(length)(true): _*)) if (!checkResult(actual, expected)) { fail(s"Incorrect Evaluation: expressions: $expressions, actual: $actual, expected: $expected") } } test("SPARK-14793: split wide named struct creation into blocks due to JVM code size limit") { val length = 5000 val expressions = Seq(CreateNamedStruct( List.fill(length)(EqualTo(Literal(1), Literal(1))).flatMap { expr => Seq(Literal(expr.toString), expr) })) val plan = GenerateMutableProjection.generate(expressions) val actual = plan(new GenericInternalRow(length)).toSeq(expressions.map(_.dataType)) val expected = Seq(InternalRow(Seq.fill(length)(true): _*)) if (!checkResult(actual, expected)) { fail(s"Incorrect Evaluation: expressions: $expressions, actual: $actual, expected: $expected") } } test("SPARK-14224: split wide external row creation into blocks due to JVM code size limit") { val length = 5000 val schema = StructType(Seq.fill(length)(StructField("int", IntegerType))) val expressions = Seq(CreateExternalRow(Seq.fill(length)(Literal(1)), schema)) val plan = GenerateMutableProjection.generate(expressions) val actual = plan(new GenericInternalRow(length)).toSeq(expressions.map(_.dataType)) val expected = Seq(Row.fromSeq(Seq.fill(length)(1))) if (!checkResult(actual, expected)) { fail(s"Incorrect Evaluation: expressions: $expressions, actual: $actual, expected: $expected") } } test("SPARK-17702: split wide 
constructor into blocks due to JVM code size limit") { val length = 5000 val expressions = Seq.fill(length) { ToUTCTimestamp( Literal.create(Timestamp.valueOf("2015-07-24 00:00:00"), TimestampType), Literal.create("PST", StringType)) } val plan = GenerateMutableProjection.generate(expressions) val actual = plan(new GenericInternalRow(length)).toSeq(expressions.map(_.dataType)) val expected = Seq.fill(length)( DateTimeUtils.fromJavaTimestamp(Timestamp.valueOf("2015-07-24 07:00:00"))) if (!checkResult(actual, expected)) { fail(s"Incorrect Evaluation: expressions: $expressions, actual: $actual, expected: $expected") } } test("test generated safe and unsafe projection") { val schema = new StructType(Array( StructField("a", StringType, true), StructField("b", IntegerType, true), StructField("c", new StructType(Array( StructField("aa", StringType, true), StructField("bb", IntegerType, true) )), true), StructField("d", new StructType(Array( StructField("a", new StructType(Array( StructField("b", StringType, true), StructField("", IntegerType, true) )), true) )), true) )) val row = Row("a", 1, Row("b", 2), Row(Row("c", 3))) val lit = Literal.create(row, schema) val internalRow = lit.value.asInstanceOf[InternalRow] val unsafeProj = UnsafeProjection.create(schema) val unsafeRow: UnsafeRow = unsafeProj(internalRow) assert(unsafeRow.getUTF8String(0) === UTF8String.fromString("a")) assert(unsafeRow.getInt(1) === 1) assert(unsafeRow.getStruct(2, 2).getUTF8String(0) === UTF8String.fromString("b")) assert(unsafeRow.getStruct(2, 2).getInt(1) === 2) assert(unsafeRow.getStruct(3, 1).getStruct(0, 2).getUTF8String(0) === UTF8String.fromString("c")) assert(unsafeRow.getStruct(3, 1).getStruct(0, 2).getInt(1) === 3) val fromUnsafe = FromUnsafeProjection(schema) val internalRow2 = fromUnsafe(unsafeRow) assert(internalRow === internalRow2) // update unsafeRow should not affect internalRow2 unsafeRow.setInt(1, 10) unsafeRow.getStruct(2, 2).setInt(1, 10) unsafeRow.getStruct(3, 1).getStruct(0, 
2).setInt(1, 4) assert(internalRow === internalRow2) } test("*/ in the data") { // When */ appears in a comment block (i.e. in /**/), code gen will break. // So, in Expression and CodegenFallback, we escape */ to \\*\\/. checkEvaluation( EqualTo(BoundReference(0, StringType, false), Literal.create("*/", StringType)), true, InternalRow(UTF8String.fromString("*/"))) } test("\\\\u in the data") { // When \\ u appears in a comment block (i.e. in /**/), code gen will break. // So, in Expression and CodegenFallback, we escape \\ u to \\\\u. checkEvaluation( EqualTo(BoundReference(0, StringType, false), Literal.create("\\\\u", StringType)), true, InternalRow(UTF8String.fromString("\\\\u"))) } test("check compilation error doesn't occur caused by specific literal") { // The end of comment (*/) should be escaped. GenerateUnsafeProjection.generate( Literal.create("*/Compilation error occurs/*", StringType) :: Nil) // `\\u002A` is `*` and `\\u002F` is `/` // so if the end of comment consists of those characters in queries, we need to escape them. 
GenerateUnsafeProjection.generate( Literal.create("\\\\u002A/Compilation error occurs/*", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("\\\\\\\\u002A/Compilation error occurs/*", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("\\\\u002a/Compilation error occurs/*", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("\\\\\\\\u002a/Compilation error occurs/*", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("*\\\\u002FCompilation error occurs/*", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("*\\\\\\\\u002FCompilation error occurs/*", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("*\\\\002fCompilation error occurs/*", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("*\\\\\\\\002fCompilation error occurs/*", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("\\\\002A\\\\002FCompilation error occurs/*", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("\\\\\\\\002A\\\\002FCompilation error occurs/*", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("\\\\002A\\\\\\\\002FCompilation error occurs/*", StringType) :: Nil) // \\ u002X is an invalid unicode literal so it should be escaped. GenerateUnsafeProjection.generate( Literal.create("\\\\u002X/Compilation error occurs", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("\\\\\\\\u002X/Compilation error occurs", StringType) :: Nil) // \\ u001 is an invalid unicode literal so it should be escaped. 
GenerateUnsafeProjection.generate( Literal.create("\\\\u001/Compilation error occurs", StringType) :: Nil) GenerateUnsafeProjection.generate( Literal.create("\\\\\\\\u001/Compilation error occurs", StringType) :: Nil) } test("SPARK-17160: field names are properly escaped by GetExternalRowField") { val inputObject = BoundReference(0, ObjectType(classOf[Row]), nullable = true) GenerateUnsafeProjection.generate( ValidateExternalType( GetExternalRowField(inputObject, index = 0, fieldName = "\\"quote"), IntegerType) :: Nil) } test("SPARK-17160: field names are properly escaped by AssertTrue") { GenerateUnsafeProjection.generate(AssertTrue(Cast(Literal("\\""), BooleanType)) :: Nil) } }
Panos-Bletsos/spark-cost-model-optimizer
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CodeGenerationSuite.scala
Scala
apache-2.0
13,896
package blended.jms.bridge import blended.container.context.api.ContainerIdentifierService import blended.jms.utils.{JmsDestination, ProviderAware} import blended.util.config.Implicits._ import com.typesafe.config.Config import scala.util.Try case class BridgeProviderConfig( vendor : String, provider : String, internal: Boolean, inbound : JmsDestination, outbound : JmsDestination, errors : JmsDestination, transactions : JmsDestination, cbes : JmsDestination ) extends ProviderAware { override def toString: String = s"${getClass().getSimpleName()}(vendor=$vendor, provider=$provider, internal=$internal, errors=$errors, transactions=$transactions cbe=$cbes)" def osgiBrokerFilter : String = s"(&(vendor=$vendor)(provider=$provider))" } object BridgeProviderConfig { def create(idSvc: ContainerIdentifierService, cfg: Config) : Try[BridgeProviderConfig] = Try { def resolve(value: String) : String = idSvc.resolvePropertyString(value).map(_.toString()).get val vendor = resolve(cfg.getString("vendor")) val provider = resolve(cfg.getString("provider")) val errorDest = resolve(cfg.getString("errors", "blended.error")) val eventDest = resolve(cfg.getString("transactions", "blended.transaction")) val cbeDest = resolve(cfg.getString("cbes", "blended.cbe")) val inbound = s"${cfg.getString("inbound")}" val outbound = s"${cfg.getString("outbound")}" val internal = cfg.getBoolean("internal", false) BridgeProviderConfig( vendor = vendor, provider = provider, internal = internal, inbound = JmsDestination.create(inbound).get, outbound = JmsDestination.create(outbound).get, errors = JmsDestination.create(errorDest).get, transactions = JmsDestination.create(eventDest).get, cbes = JmsDestination.create(cbeDest).get ) } }
lefou/blended
blended.jms.bridge/src/main/scala/blended/jms/bridge/BridgeProviderConfig.scala
Scala
apache-2.0
1,858
package uk.gov.gds.router.controller import com.google.inject.Guice import com.google.inject.servlet.{ServletModule, GuiceServletContextListener} import javax.servlet.Filter import uk.gov.gds.router.util.Logging import uk.gov.gds.router.management.{RouterManagementFilter, RouterRequestLoggingFilter} class RouterModule extends ServletModule with Logging { protected override def configureServlets { serve("/*", classOf[RouterRequestLoggingFilter]) serve("/*", classOf[RouteController]) serve("/*", classOf[RouterApiController]) serve("/*", classOf[RouterManagementFilter]) } private def serve[A <: Filter](path: String, filterClass: Class[A]) = { filter(path).through(filterClass) } } class GuiceServletConfig extends GuiceServletContextListener { protected def getInjector = Guice.createInjector(new RouterModule) }
gds-attic/scala-router
router/router/src/main/scala/uk/gov/gds/router/controller/webappConfiguration.scala
Scala
mit
852
package epic.parser package models import java.io.File import breeze.config.Help import breeze.features.FeatureVector import breeze.linalg._ import breeze.util.Index import epic.constraints.ChartConstraints import epic.dense.{IdentityTransform, AffineTransform, Transform} import epic.features.SurfaceFeaturizer.SingleWordSpanFeaturizer import epic.features._ import epic.framework.Feature import epic.lexicon.Lexicon import epic.parser.projections.GrammarRefinements import epic.trees._ import epic.trees.annotations.TreeAnnotator import epic.util.{LRUCache, Optional} import epic.dense.Transform import epic.dense.TanhTransform import epic.dense.OutputTransform import epic.dense.AffineOutputTransform import epic.dense.OutputEmbeddingTransform import epic.dense.Word2Vec import scala.collection.mutable.HashMap import epic.dense.Word2VecSurfaceFeaturizerIndexed import epic.dense.Word2VecDepFeaturizerIndexed import epic.dense.Word2VecIndexed import epic.dense.FrequencyTagger import epic.dense.CachingLookupTransform import epic.dense.CachingLookupAndAffineTransformDense import epic.dense.EmbeddingsTransform import epic.dense.NonlinearTransform import scala.io.Source import scala.collection.mutable.HashSet import epic.dense.BatchNormalizationTransform /** * Entry point for instantiating a neural CRF parser. Parameters specify neural * net parameters, word vectors, and sparse features to use. * * @author gdurrett **/ /** * Less-used parameters */ case class ExtraPNMParams(@Help(text="Used for ablations with random word embeddings; don't change this. 
Options: normal, random, trivial, normalpos") embeddingType: String = "normal", @Help(text="Use longest frequent suffix (standard representation) for sparse feats") useSparseLfsuf: Boolean = true, @Help(text="Use sparse Brown cluster features") useSparseBrown: Boolean = false, @Help(text="Use expanded set of sparse surface features (doesn't help)") useMostSparseIndicators: Boolean = false, @Help(text="Scaling factor for all input vectors") vectorRescaling: Double = 1.0, @Help(text="Use the output embedding model (Figure 4b in the neural CRF paper)") outputEmbedding: Boolean = false, @Help(text="Dimension of the output embedding model") outputEmbeddingDim: Int = 20, @Help(text="When initializing the output embedding model, initialize based on root symbols") coarsenByRoot: Boolean = false, @Help(text="Use separate neural net parameters for span/unary/binary settings. Doesn't help.") decoupleTransforms: Boolean = false, @Help(text="Extract additional output features based on root label.") useRootLabel: Boolean = false, @Help(text="Set unknown word vectors to be random rather than 0") randomizeUnks: Boolean = false) case class ExtraPNMSparseParams(@Help(text="Use n-gram features in the sparse featurizer (good for sentiment)") useNGrams: Boolean = false, @Help(text="Max order of n-grams to use in these features") maxNGramOrder:Int = 2, @Help(text="Count threshold for firing n-gram features") ngramCountThreshold: Int = 1, @Help(text="Additional span shape features based on tags") useTagSpanShape: Boolean = false) case class PositionalNeuralModelFactory(@Help(text= """The kind of annotation to do on the refined grammar. Default uses just parent annotation. You can also epic.trees.annotations.KMAnnotator to get more or less Klein and Manning 2003. """) annotator: TreeAnnotator[AnnotatedLabel, String, AnnotatedLabel] = GenerativeParser.defaultAnnotator(), @Help(text="For features not seen in gold trees, we bin them into dummyFeats * numGoldFeatures bins using hashing. 
If negative, use absolute value as number of hash features.") dummyFeats: Double = 0.5, @Help(text="Sparse features only fire on suffixes seen at lease this many times. Lower than 100 doesn't seem to do better.") commonWordThreshold: Int = 100, @Help(text="Combine the neural net features with sparse features. The NN does well on its own but sparse helps by >1 F1.") useSparseFeatures: Boolean = true, @Help(text="Nonlinearity to use. Options: tanh, relu, cube") nonLinType: String = "relu", @Help(text="Backpropagate into word embeddings (tune them during training). Doesn't help.") backpropIntoEmbeddings: Boolean = false, @Help(text="Dropout rate; 0.0 won't instantiate any dropout units, higher rates will but it doesn't seem to help.") dropoutRate: Double = 0.0, @Help(text="Width of hidden layer to use.") numHidden: Int = 200, @Help(text="Number of hidden layers to use. More than 1 slows down dramatically and doesn't help.") numHiddenLayers: Int = 1, @Help(text="How much surface context should we use as input to the neural network? Default is +/-2 words around begin/end/split. See Word2VecSurfaceFeaturizer for options") neuralSurfaceWordsToUse: String = "most", @Help(text="Path to word vectors. Can either be .bin like Mikolov et al.'s or .txt like Bansal et al.'s") word2vecPath: String = "", @Help(text="Load additional word vectors into the model rather than just those in the training set. Doesn't help.") vocFile: String = "", @Help(text="Set to true if your word vectors are all lowercase. 
Otherwise true case is used.") lowercasedVectors: Boolean = false, extraPNMParams: ExtraPNMParams = ExtraPNMParams(), extraPNMSparseParams: ExtraPNMSparseParams = ExtraPNMSparseParams()) extends ParserModelFactory[AnnotatedLabel, String] { type MyModel = PositionalNeuralModel[AnnotatedLabel, AnnotatedLabel, String] override def make(trainTrees: IndexedSeq[TreeInstance[AnnotatedLabel, String]], topology: RuleTopology[AnnotatedLabel], lexicon: Lexicon[AnnotatedLabel, String], constrainer: ChartConstraints.Factory[AnnotatedLabel, String]): MyModel = { import extraPNMParams._ import extraPNMSparseParams._ val annTrees: IndexedSeq[TreeInstance[AnnotatedLabel, String]] = trainTrees.map(annotator(_)) println("Here's what the annotation looks like on the first few trees") annTrees.slice(0, Math.min(3, annTrees.size)).foreach(tree => println(tree.render(false))) val (annWords, annBinaries, annUnaries) = this.extractBasicCounts(annTrees) val refGrammar = RuleTopology(AnnotatedLabel.TOP, annBinaries, annUnaries) val xbarGrammar = topology val xbarLexicon = lexicon val indexedRefinements = GrammarRefinements(xbarGrammar, refGrammar, (_: AnnotatedLabel).baseAnnotatedLabel) def labelFeaturizer(l: AnnotatedLabel) = Set(l, l.baseAnnotatedLabel).toSeq def ruleFeaturizer(r: Rule[AnnotatedLabel]) = if (useRootLabel) { Set(r, r.map(_.baseAnnotatedLabel), ParentFeature(r.parent)).toSeq } else { Set(r, r.map(_.baseAnnotatedLabel)).toSeq } val prodFeaturizer = new ProductionFeaturizer[AnnotatedLabel, AnnotatedLabel, String](xbarGrammar, indexedRefinements, lGen=labelFeaturizer, rGen=ruleFeaturizer) /////////////////////// // READ IN WORD VECTORS val tagCountsLexicon = TagSpanShapeGenerator.makeStandardLexicon(annTrees) val freqTagger = new FrequencyTagger(tagCountsLexicon) val voc = new HashSet[String]() // Add words in the training set val summedWordCounts: Counter[String, Double] = sum(annWords, Axis._0) voc ++= summedWordCounts.keySet.toSet[String].map(str => Word2Vec.convertWord(str, 
lowercasedVectors)) // Read in a file of words in the treebank; this allows us to load words that are // in the dev or test sets but not in train voc ++= (if (vocFile != "") Source.fromFile(vocFile).getLines().map(str => Word2Vec.convertWord(str, lowercasedVectors)).toSet else Set[String]()) val word2vec = if (embeddingType == "trivial") { Word2Vec.makeRandomVectorsForVocabulary(voc.toSet, 0, true) } else if (embeddingType == "random") { Word2Vec.makeRandomVectorsForVocabulary(voc.toSet, 50, true) } else { Word2Vec.smartLoadVectorsForVocabulary(word2vecPath.split(":"), voc.toSet, summedWordCounts, if (embeddingType == "trivial") 1 else Int.MaxValue, true, randomizeUnks) } // Convert Array[Float] values to Array[Double] values and rescale them val word2vecDoubleVect = word2vec.map(keyValue => keyValue._1 -> keyValue._2.map(_.toDouble * vectorRescaling)) // val word2vecDoubleVect = word2vec.map(keyValue => (keyValue._1 -> new DenseVector[Double](keyValue._2.map(_.toDouble)))) val word2vecIndexed: Word2VecIndexed[String] = if (embeddingType == "normalpos") { Word2VecIndexed(word2vecDoubleVect, (str: String) => Word2Vec.convertWord(str, lowercasedVectors)).augment(freqTagger.tagTypesIdx.size, freqTagger.convertToFeaturizer) } else { Word2VecIndexed(word2vecDoubleVect, (str: String) => Word2Vec.convertWord(str, lowercasedVectors)) } ////////////////////// val surfaceFeaturizer = new Word2VecSurfaceFeaturizerIndexed(word2vecIndexed, neuralSurfaceWordsToUse) val depFeaturizer = new Word2VecDepFeaturizerIndexed(word2vecIndexed, freqTagger, topology) val transforms = if (decoupleTransforms) { IndexedSeq[AffineOutputTransform[Array[Int]]]() } else { val inputSize = surfaceFeaturizer.splitInputSize val transform = if (outputEmbedding) { val coarsenerForInitialization = if (coarsenByRoot) { Option(PositionalNeuralModelFactory.getRuleToParentMapping(prodFeaturizer.index)) } else { None } PositionalNeuralModelFactory.buildNetOutputEmbedding(word2vecIndexed, inputSize, numHidden, 
numHiddenLayers, prodFeaturizer.index.size, nonLinType, dropoutRate, backpropIntoEmbeddings, outputEmbeddingDim, coarsenerForInitialization) } else { // THIS IS THE STANDARD CODE PATH println(inputSize + " x (" + numHidden + ")^" + numHiddenLayers + " x " + prodFeaturizer.index.size + " neural net") PositionalNeuralModelFactory.buildNet(word2vecIndexed, inputSize, numHidden, numHiddenLayers, prodFeaturizer.index.size, nonLinType, dropoutRate, backpropIntoEmbeddings) } IndexedSeq(transform) } val depTransforms: IndexedSeq[AffineOutputTransform[Array[Int]]] = IndexedSeq() val decoupledTransforms = if (decoupleTransforms) { // Span and unary use the reduced input (no split point features), whereas surface uses the split point features val inputSizes = Seq(surfaceFeaturizer.reducedInputSize, surfaceFeaturizer.reducedInputSize, surfaceFeaturizer.splitInputSize) inputSizes.map(inputSize => PositionalNeuralModelFactory.buildNet(word2vecIndexed, inputSize, numHidden, numHiddenLayers, prodFeaturizer.index.size, nonLinType, dropoutRate, backpropIntoEmbeddings)) } else { IndexedSeq[AffineOutputTransform[Array[Int]]]() } println(transforms.size + " transforms, " + transforms.map(_.index.size).toSeq + " parameters for each") println(depTransforms.size + " dep transforms, " + depTransforms.map(_.index.size).toSeq + " parameters for each") println(decoupledTransforms.size + " decoupled transforms, " + decoupledTransforms.map(_.index.size).toSeq + " parameters for each") val maybeSparseFeaturizer = if (useSparseFeatures) { var wf = SpanModelFactory.defaultPOSFeaturizer(annWords, useBrown = useSparseBrown) var span = SpanModelFactory.goodFeaturizer(annWords, commonWordThreshold, useShape = false, useLfsuf = useSparseLfsuf, useBrown = useSparseBrown, useMostSparseIndicators = useMostSparseIndicators) span += new SingleWordSpanFeaturizer[String](wf) if (useNGrams) { span += new NGramSpanFeaturizer(summedWordCounts, NGramSpanFeaturizer.countBigrams(annTrees), annTrees.map(_.words), 
ngramCountThreshold, maxNGramOrder, useNot = false) } if (useTagSpanShape) { span += new TagSpanShapeFeaturizer(TagSpanShapeGenerator.makeBaseLexicon(trainTrees)) } val indexedWord = IndexedWordFeaturizer.fromData(wf, annTrees.map{_.words}, deduplicateFeatures = false) val indexedSurface = IndexedSplitSpanFeaturizer.fromData(span, annTrees, bloomFilter = false) def sparseLabelFeaturizer(l: AnnotatedLabel) = Set(l, l.baseAnnotatedLabel).toSeq def sparseRuleFeaturizer(r: Rule[AnnotatedLabel]) = Set(r, r.map(_.baseAnnotatedLabel)).toSeq val sparseProdFeaturizer = new ProductionFeaturizer[AnnotatedLabel, AnnotatedLabel, String](xbarGrammar, indexedRefinements, lGen=sparseLabelFeaturizer, rGen=sparseRuleFeaturizer) val indexed = IndexedSpanFeaturizer.extract[AnnotatedLabel, AnnotatedLabel, String](indexedWord, indexedSurface, sparseProdFeaturizer, new ZeroRuleAndSpansFeaturizer(), annotator.latent, indexedRefinements, xbarGrammar, if (dummyFeats < 0) HashFeature.Absolute(-dummyFeats.toInt) else HashFeature.Relative(dummyFeats), filterUnseenFeatures = false, minFeatCount = 1, trainTrees) Option(indexed) } else { None } new PositionalNeuralModel(annotator.latent, constrainer, topology, lexicon, refGrammar, indexedRefinements, prodFeaturizer, surfaceFeaturizer, depFeaturizer, transforms, maybeSparseFeaturizer, depTransforms, decoupledTransforms) } } object PositionalNeuralModelFactory { def buildNetInnerTransforms(word2vecIndexed: Word2VecIndexed[String], inputSize: Int, numHidden: Int, numHiddenLayers: Int, nonLinType: String, dropoutRate: Double, backpropIntoEmbeddings: Boolean): Transform[Array[Int],DenseVector[Double]] = { if (numHiddenLayers == 0) { new CachingLookupTransform(word2vecIndexed) } else { val baseTransformLayer = if (backpropIntoEmbeddings) { new EmbeddingsTransform(numHidden, inputSize, word2vecIndexed) } else { new CachingLookupAndAffineTransformDense(numHidden, inputSize, word2vecIndexed) } var currLayer = addNonlinearity(nonLinType, numHidden, 
dropoutRate, baseTransformLayer) for (i <- 1 until numHiddenLayers) { currLayer = addNonlinearity(nonLinType, numHidden, dropoutRate, new AffineTransform(numHidden, numHidden, currLayer)) } currLayer } } def buildNet(word2vecIndexed: Word2VecIndexed[String], inputSize: Int, numHidden: Int, numHiddenLayers: Int, outputSize: Int, nonLinType: String, dropoutRate: Double, backpropIntoEmbeddings: Boolean): AffineOutputTransform[Array[Int]] = { val innerTransform = buildNetInnerTransforms(word2vecIndexed, inputSize, numHidden, numHiddenLayers, nonLinType, dropoutRate, backpropIntoEmbeddings) new AffineOutputTransform(outputSize, if (numHiddenLayers >= 1) numHidden else inputSize, innerTransform) } def buildNetOutputEmbedding(word2vecIndexed: Word2VecIndexed[String], inputSize: Int, numHidden: Int, numHiddenLayers: Int, outputSize: Int, nonLinType: String, dropoutRate: Double, backpropIntoEmbeddings: Boolean, outputEmbeddingDim: Int, coarsenerForInitialization: Option[Int => Int]): OutputTransform[Array[Int],DenseVector[Double]] = { val innerTransform = buildNetInnerTransforms(word2vecIndexed, inputSize, numHidden, numHiddenLayers, nonLinType, dropoutRate, backpropIntoEmbeddings) val innerTransformLastLayer = new AffineTransform(outputEmbeddingDim, if (numHiddenLayers >= 1) numHidden else inputSize, innerTransform) new OutputEmbeddingTransform(outputSize, outputEmbeddingDim, innerTransformLastLayer, coarsenerForInitialization) } def addNonlinearity(nonLinType: String, numHidden: Int, dropoutRate: Double, currLayer: Transform[Array[Int],DenseVector[Double]]) = { val useDropout = dropoutRate > 1e-8 var tmpLayer = currLayer tmpLayer = new NonlinearTransform(nonLinType, numHidden, tmpLayer) if (useDropout) { tmpLayer = new NonlinearTransform("dropout", numHidden, tmpLayer, dropoutRate) } tmpLayer } def getRuleToParentMapping(index: Index[Feature]): Int => Int = { (i: Int) => { if (index.get(i).isInstanceOf[Rule[AnnotatedLabel]]) { val parentIdx = 
index(index.get(i).asInstanceOf[Rule[AnnotatedLabel]].parent) if (parentIdx == -1) { 0 } else { parentIdx } } else { i } } } } case class ParentFeature(f: Feature) extends Feature case class LeftChildFeature(f: Feature) extends Feature case class RightChildFeature(f: Feature) extends Feature
langkilde/epic
src/main/scala/epic/parser/models/PositionalNeuralModelFactory.scala
Scala
apache-2.0
18,545
/** * Copyright 2012-2014 Jorge Aliss (jaliss at gmail dot com) - twitter: @jaliss * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package service import securesocial.core.RuntimeEnvironment import securesocial.core.services.UserService class MyEnvironment extends RuntimeEnvironment.Default[DemoUser] { override val userService: UserService[DemoUser] = new InMemoryUserService() }
mojo22jojo/securesocial-test
samples/java/demo/app/service/MyEnvironment.scala
Scala
apache-2.0
903
/* * Copyright (c) 2014-2021 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.reactive.compression import java.io.ByteArrayInputStream import java.util.Arrays import java.util.zip.{Deflater, DeflaterInputStream, Inflater, InflaterInputStream} import monix.reactive.Observable import scala.annotation.tailrec trait DeflateTestUtils extends CompressionTestData { val inflateRandomExampleThatFailed: Array[Byte] = Array(100, 96, 2, 14, 108, -122, 110, -37, 35, -11, -10, 14, 47, 30, 43, 111, -80, 44, -34, 35, 35, 37, -103).map( _.toByte ) def deflatedStream(bytes: Array[Byte], chunkSize: Int = 32 * 1024) = deflatedWith(bytes, new Deflater(), chunkSize) def noWrapDeflatedStream(bytes: Array[Byte], chunkSize: Int = 32 * 1024) = deflatedWith(bytes, new Deflater(9, true), chunkSize) def jdkDeflate(bytes: Array[Byte], deflater: Deflater): Array[Byte] = { val bigBuffer = new Array[Byte](1024 * 1024) val dif = new DeflaterInputStream(new ByteArrayInputStream(bytes), deflater) val read = dif.read(bigBuffer, 0, bigBuffer.length) Arrays.copyOf(bigBuffer, read) } def deflatedWith(bytes: Array[Byte], deflater: Deflater, chunkSize: Int = 32 * 1024) = { val arr = jdkDeflate(bytes, deflater) Observable .fromIterable(arr) .bufferTumbling(chunkSize) .map(_.toArray) } def jdkInflate(bytes: Array[Byte], noWrap: Boolean): Array[Byte] = { val bigBuffer = new Array[Byte](1024 * 1024) val inflater = new 
Inflater(noWrap) val iif = new InflaterInputStream( new ByteArrayInputStream(bytes), inflater ) @tailrec def inflate(acc: Array[Byte]): Array[Byte] = { val read = iif.read(bigBuffer, 0, bigBuffer.length) if (read <= 0) acc else inflate(acc ++ bigBuffer.take(read).toList) } inflate(Array.emptyByteArray) } }
monixio/monix
monix-reactive/jvm/src/test/scala/monix/reactive/compression/DeflateTestUtils.scala
Scala
apache-2.0
2,470
package me.michaelgagnon.pets.web.actors import akka.actor.Actor import akka.event.Logging import akka.http.scaladsl.Http import akka.http.scaladsl.model._ import akka.http.scaladsl.model.headers._ import akka.stream.ActorMaterializer import akka.stream.ActorMaterializerSettings import java.util.UUID import play.api.Configuration import play.api.libs.json._ import scala.concurrent._ import scala.concurrent.duration._ import scala.util.{Success, Failure} import me.michaelgagnon.pets.contest.Games import me.michaelgagnon.pets.web.controllers.ContestRequest import me.michaelgagnon.pets.web._ case class Pet( id: String, name: String, strength: Int, speed: Int, intelligence: Int, integrity:Int) object NewContestActor { val database = Actors.databaseActor implicit val petReads = Json.reads[Pet] } class NewContestActor(config: Configuration)(implicit ec: ExecutionContext) extends Actor { import NewContestActor._ val petApiToken = config.getString("pet.api.token").get val petApiHost = config.getString("pet.api.host").get val timeout = config.getString("me.michaelgagnon.pets.reqTimeout").get.toInt.seconds val log = Logging(context.system, this) final implicit val materializer: ActorMaterializer = ActorMaterializer(ActorMaterializerSettings(context.system)) val http = Http(context.system) def receive = { case contestWithId: ContestWithId => handleNewContest(contestWithId) case _ => throw new IllegalArgumentException("NewContestActor received unknown message") } def petFromJson(json: String, contestId: UUID): Either[ContestError, Pet] = { val result: JsResult[Pet] = Json.fromJson[Pet](Json.parse(json)) result match { case error: JsError => Left(ErrorJsonFromPetService(contestId)) case success: JsSuccess[Pet] => Right(success.value) } } def getPet(petId: String, contestId: UUID): Future[Either[ContestError, Pet]] = { val uri = s"$petApiHost/pets/$petId" val httpRequest = HttpRequest(uri = uri) .withHeaders(RawHeader("X-Pets-Token", petApiToken)) val httpResponse: 
Future[HttpResponse]= http.singleRequest(httpRequest) val body: Future[String] = httpResponse .flatMap { response => // Grab the body future from http response response .entity .toStrict(timeout) .map { _.data.utf8String } } body.map { petFromJson(_, contestId) } } def runContest(contestId: UUID, pet1: Pet, pet2: Pet, contestType: String): ContestStatus = Games .get(contestType) .map { game => ContestResultWithId(contestId, game(pet1, pet2)) } .getOrElse(ErrorInvalidGame(contestId)) def handleNewContest(contestWithId: ContestWithId) = { log.info("New contest: " + contestWithId) val ContestWithId(ContestRequest(petId1, petId2, contestType), contestId) = contestWithId database ! PostStatus(InProgress(contestId)) val pet1: Future[Either[ContestError, Pet]] = getPet(petId1, contestId) val pet2: Future[Either[ContestError, Pet]] = getPet(petId2, contestId) // Join the futures val pets: Future[(Either[ContestError, Pet], Either[ContestError, Pet])] = for { p1 <- pet1 p2 <- pet2 } yield (p1, p2) pets.onComplete { case Failure(t) => database ! PostStatus(ErrorAccessPetService(contestId, petApiHost)) case Success((Left(error), _)) => database ! PostStatus(error) case Success((_, Left(error))) => database ! PostStatus(error) case Success((Right(pet1), Right(pet2))) => { database ! PostStatus(runContest(contestId, pet1, pet2, contestType)) } } context.stop(self) } }
mikegagnon/battle-pets-arena
app/web/actors/NewContestActor.scala
Scala
mit
3,710
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.streaming import java.util.UUID import scala.collection.mutable import scala.concurrent.duration._ import org.scalactic.TolerantNumerics import org.scalatest.concurrent.AsyncAssertions.Waiter import org.scalatest.concurrent.Eventually._ import org.scalatest.concurrent.PatienceConfiguration.Timeout import org.scalatest.BeforeAndAfter import org.apache.spark.SparkException import org.apache.spark.scheduler._ import org.apache.spark.sql.{Encoder, SparkSession} import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.streaming.StreamingQueryListener._ import org.apache.spark.util.{JsonProtocol, Utils} class StreamingQueryListenerSuite extends StreamTest with BeforeAndAfter { import testImplicits._ // To make === between double tolerate inexact values implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.01) after { spark.streams.active.foreach(_.stop()) // finalize method removes the StreamingQueryListener registered for structured streaming UI. 
spark.finalize() assert(spark.streams.active.isEmpty) assert(addedListeners().isEmpty) // Make sure we don't leak any events to the next test spark.sparkContext.listenerBus.waitUntilEmpty(10000) } testQuietly("single listener, check trigger events are generated correctly") { val clock = new StreamManualClock val inputData = new MemoryStream[Int](0, sqlContext) val df = inputData.toDS().as[Long].map { 10 / _ } val listener = new EventCollector try { // No events until started spark.streams.addListener(listener) assert(listener.startEvent === null) assert(listener.progressEvents.isEmpty) assert(listener.terminationEvent === null) testStream(df, OutputMode.Append)( // Start event generated when query started StartStream(ProcessingTime(100), triggerClock = clock), AssertOnQuery { query => assert(listener.startEvent !== null) assert(listener.startEvent.id === query.id) assert(listener.startEvent.runId === query.runId) assert(listener.startEvent.name === query.name) assert(listener.progressEvents.isEmpty) assert(listener.terminationEvent === null) true }, // Progress event generated when data processed AddData(inputData, 1, 2), AdvanceManualClock(100), CheckAnswer(10, 5), AssertOnQuery { query => assert(listener.progressEvents.nonEmpty) // SPARK-18868: We can't use query.lastProgress, because in progressEvents, we filter // out non-zero input rows, but the lastProgress may be a zero input row trigger val lastNonZeroProgress = query.recentProgress.filter(_.numInputRows > 0).lastOption .getOrElse(fail("No progress updates received in StreamingQuery!")) assert(listener.progressEvents.last.json === lastNonZeroProgress.json) assert(listener.terminationEvent === null) true }, // Termination event generated when stopped cleanly StopStream, AssertOnQuery { query => eventually(Timeout(streamingTimeout)) { assert(listener.terminationEvent !== null) assert(listener.terminationEvent.id === query.id) assert(listener.terminationEvent.runId === query.runId) 
assert(listener.terminationEvent.exception === None) } listener.checkAsyncErrors() listener.reset() true }, // Termination event generated with exception message when stopped with error StartStream(ProcessingTime(100), triggerClock = clock), AddData(inputData, 0), AdvanceManualClock(100), ExpectFailure[SparkException](), AssertOnQuery { query => eventually(Timeout(streamingTimeout)) { assert(listener.terminationEvent !== null) assert(listener.terminationEvent.id === query.id) assert(listener.terminationEvent.exception.nonEmpty) // Make sure that the exception message reported through listener // contains the actual exception and relevant stack trace assert(!listener.terminationEvent.exception.get.contains("StreamingQueryException")) assert( listener.terminationEvent.exception.get.contains("java.lang.ArithmeticException")) assert(listener.terminationEvent.exception.get.contains("StreamingQueryListenerSuite")) } listener.checkAsyncErrors() true } ) } finally { spark.streams.removeListener(listener) } } test("SPARK-19594: all of listeners should receive QueryTerminatedEvent") { val df = MemoryStream[Int].toDS().as[Long] val listeners = (1 to 5).map(_ => new EventCollector) try { listeners.foreach(listener => spark.streams.addListener(listener)) testStream(df, OutputMode.Append)( StartStream(), StopStream, AssertOnQuery { query => eventually(Timeout(streamingTimeout)) { listeners.foreach(listener => assert(listener.terminationEvent !== null)) listeners.foreach(listener => assert(listener.terminationEvent.id === query.id)) listeners.foreach(listener => assert(listener.terminationEvent.runId === query.runId)) listeners.foreach(listener => assert(listener.terminationEvent.exception === None)) } listeners.foreach(listener => listener.checkAsyncErrors()) listeners.foreach(listener => listener.reset()) true } ) } finally { listeners.foreach(spark.streams.removeListener) } } test("adding and removing listener") { def isListenerActive(listener: EventCollector): Boolean = { 
listener.reset() testStream(MemoryStream[Int].toDS)( StartStream(), StopStream ) listener.startEvent != null } try { val listener1 = new EventCollector val listener2 = new EventCollector spark.streams.addListener(listener1) assert(isListenerActive(listener1) === true) assert(isListenerActive(listener2) === false) spark.streams.addListener(listener2) assert(isListenerActive(listener1) === true) assert(isListenerActive(listener2) === true) spark.streams.removeListener(listener1) assert(isListenerActive(listener1) === false) assert(isListenerActive(listener2) === true) } finally { addedListeners().foreach(spark.streams.removeListener) } } test("event ordering") { val listener = new EventCollector withListenerAdded(listener) { for (i <- 1 to 100) { listener.reset() require(listener.startEvent === null) testStream(MemoryStream[Int].toDS)( StartStream(), Assert(listener.startEvent !== null, "onQueryStarted not called before query returned"), StopStream, Assert { listener.checkAsyncErrors() } ) } } } test("QueryStartedEvent serialization") { def testSerialization(event: QueryStartedEvent): Unit = { val json = JsonProtocol.sparkEventToJson(event) val newEvent = JsonProtocol.sparkEventFromJson(json).asInstanceOf[QueryStartedEvent] assert(newEvent.id === event.id) assert(newEvent.runId === event.runId) assert(newEvent.name === event.name) assert(newEvent.triggerInterval === event.triggerInterval) } testSerialization(new QueryStartedEvent(UUID.randomUUID, UUID.randomUUID, "name", ProcessingTime("1 second").intervalMs)) testSerialization(new QueryStartedEvent(UUID.randomUUID, UUID.randomUUID, null)) } test("QueryProgressEvent serialization") { def testSerialization(event: QueryProgressEvent): Unit = { import scala.collection.JavaConverters._ val json = JsonProtocol.sparkEventToJson(event) val newEvent = JsonProtocol.sparkEventFromJson(json).asInstanceOf[QueryProgressEvent] assert(newEvent.progress.json === event.progress.json) // json as a proxy for equality 
assert(newEvent.progress.durationMs.asScala === event.progress.durationMs.asScala) assert(newEvent.progress.eventTime.asScala === event.progress.eventTime.asScala) } testSerialization(new QueryProgressEvent(StreamingQueryStatusAndProgressSuite.testProgress1)) testSerialization(new QueryProgressEvent(StreamingQueryStatusAndProgressSuite.testProgress2)) } test("QueryTerminatedEvent serialization") { def testSerialization(event: QueryTerminatedEvent): Unit = { val json = JsonProtocol.sparkEventToJson(event) val newEvent = JsonProtocol.sparkEventFromJson(json).asInstanceOf[QueryTerminatedEvent] assert(newEvent.id === event.id) assert(newEvent.runId === event.runId) assert(newEvent.exception === event.exception) } val exception = new RuntimeException("exception") testSerialization( new QueryTerminatedEvent(UUID.randomUUID, UUID.randomUUID, Some(exception.getMessage))) } test("only one progress event per interval when no data") { // This test will start a query but not push any data, and then check if we push too many events withSQLConf(SQLConf.STREAMING_NO_DATA_PROGRESS_EVENT_INTERVAL.key -> "100ms") { @volatile var numProgressEvent = 0 val listener = new StreamingQueryListener { override def onQueryStarted(event: QueryStartedEvent): Unit = {} override def onQueryProgress(event: QueryProgressEvent): Unit = { numProgressEvent += 1 } override def onQueryTerminated(event: QueryTerminatedEvent): Unit = {} } spark.streams.addListener(listener) try { val input = new MemoryStream[Int](0, sqlContext) { @volatile var numTriggers = 0 override def getOffset: Option[Offset] = { numTriggers += 1 super.getOffset } } val clock = new StreamManualClock() val actions = mutable.ArrayBuffer[StreamAction]() actions += StartStream(trigger = ProcessingTime(10), triggerClock = clock) for (_ <- 1 to 100) { actions += AdvanceManualClock(10) } actions += AssertOnQuery { _ => eventually(timeout(streamingTimeout)) { assert(input.numTriggers > 100) // at least 100 triggers have occurred } true } // 
`recentProgress` should not receive too many no data events actions += AssertOnQuery { q => q.recentProgress.size > 1 && q.recentProgress.size <= 11 } testStream(input.toDS)(actions: _*) spark.sparkContext.listenerBus.waitUntilEmpty(10000) // 11 is the max value of the possible numbers of events. assert(numProgressEvent > 1 && numProgressEvent <= 11) } finally { spark.streams.removeListener(listener) } } } test("listener only posts events from queries started in the related sessions") { val session1 = spark.newSession() val session2 = spark.newSession() val collector1 = new EventCollector val collector2 = new EventCollector def runQuery(session: SparkSession): Unit = { collector1.reset() collector2.reset() val mem = MemoryStream[Int](implicitly[Encoder[Int]], session.sqlContext) testStream(mem.toDS)( AddData(mem, 1, 2, 3), CheckAnswer(1, 2, 3) ) session.sparkContext.listenerBus.waitUntilEmpty(5000) } def assertEventsCollected(collector: EventCollector): Unit = { assert(collector.startEvent !== null) assert(collector.progressEvents.nonEmpty) assert(collector.terminationEvent !== null) } def assertEventsNotCollected(collector: EventCollector): Unit = { assert(collector.startEvent === null) assert(collector.progressEvents.isEmpty) assert(collector.terminationEvent === null) } assert(session1.ne(session2)) assert(session1.streams.ne(session2.streams)) withListenerAdded(collector1, session1) { assert(addedListeners(session1).nonEmpty) withListenerAdded(collector2, session2) { assert(addedListeners(session2).nonEmpty) // query on session1 should send events only to collector1 runQuery(session1) assertEventsCollected(collector1) assertEventsNotCollected(collector2) // query on session2 should send events only to collector2 runQuery(session2) assertEventsCollected(collector2) assertEventsNotCollected(collector1) } } } testQuietly("ReplayListenerBus should ignore broken event jsons generated in 2.0.0") { // query-event-logs-version-2.0.0.txt has all types of events 
generated by // Structured Streaming in Spark 2.0.0. // SparkListenerApplicationEnd is the only valid event and it's the last event. We use it // to verify that we can skip broken jsons generated by Structured Streaming. testReplayListenerBusWithBorkenEventJsons("query-event-logs-version-2.0.0.txt") } testQuietly("ReplayListenerBus should ignore broken event jsons generated in 2.0.1") { // query-event-logs-version-2.0.1.txt has all types of events generated by // Structured Streaming in Spark 2.0.1. // SparkListenerApplicationEnd is the only valid event and it's the last event. We use it // to verify that we can skip broken jsons generated by Structured Streaming. testReplayListenerBusWithBorkenEventJsons("query-event-logs-version-2.0.1.txt") } testQuietly("ReplayListenerBus should ignore broken event jsons generated in 2.0.2") { // query-event-logs-version-2.0.2.txt has all types of events generated by // Structured Streaming in Spark 2.0.2. // SparkListenerApplicationEnd is the only valid event and it's the last event. We use it // to verify that we can skip broken jsons generated by Structured Streaming. testReplayListenerBusWithBorkenEventJsons("query-event-logs-version-2.0.2.txt") } private def testReplayListenerBusWithBorkenEventJsons(fileName: String): Unit = { val input = getClass.getResourceAsStream(s"/structured-streaming/$fileName") val events = mutable.ArrayBuffer[SparkListenerEvent]() try { val replayer = new ReplayListenerBus() { // Redirect all parsed events to `events` override def doPostEvent( listener: SparkListenerInterface, event: SparkListenerEvent): Unit = { events += event } } // Add a dummy listener so that "doPostEvent" will be called. 
replayer.addListener(new SparkListener {}) replayer.replay(input, fileName) // SparkListenerApplicationEnd is the only valid event assert(events.size === 1) assert(events(0).isInstanceOf[SparkListenerApplicationEnd]) } finally { input.close() } } private def withListenerAdded( listener: StreamingQueryListener, session: SparkSession = spark)(body: => Unit): Unit = { try { failAfter(streamingTimeout) { session.streams.addListener(listener) body } } finally { session.streams.removeListener(listener) } } private lazy val getListenerBusField = { val clazz = Utils.classForName("org.apache.spark.sql.streaming.StreamingQueryManager") val listenerBus = clazz.getDeclaredField("listenerBus") listenerBus.setAccessible(true) listenerBus } private def addedListeners(session: SparkSession = spark): Array[StreamingQueryListener] = { getListenerBusField.get(session.streams).asInstanceOf[StreamingQueryListenerBus].listeners .toArray.map(_.asInstanceOf[StreamingQueryListener]) } /** Collects events from the StreamingQueryListener for testing */ class EventCollector extends StreamingQueryListener { // to catch errors in the async listener events @volatile private var asyncTestWaiter = new Waiter @volatile var startEvent: QueryStartedEvent = null @volatile var terminationEvent: QueryTerminatedEvent = null private val _progressEvents = new mutable.Queue[StreamingQueryProgress] def progressEvents: Seq[StreamingQueryProgress] = _progressEvents.synchronized { _progressEvents.filter(_.numInputRows > 0) } def reset(): Unit = { startEvent = null terminationEvent = null _progressEvents.clear() asyncTestWaiter = new Waiter } def checkAsyncErrors(): Unit = { asyncTestWaiter.await(timeout(streamingTimeout)) } override def onQueryStarted(queryStarted: QueryStartedEvent): Unit = { asyncTestWaiter { startEvent = queryStarted } } override def onQueryProgress(queryProgress: QueryProgressEvent): Unit = { asyncTestWaiter { assert(startEvent != null, "onQueryProgress called before onQueryStarted") 
_progressEvents.synchronized { _progressEvents += queryProgress.progress } } } override def onQueryTerminated(queryTerminated: QueryTerminatedEvent): Unit = { asyncTestWaiter { assert(startEvent != null, "onQueryTerminated called before onQueryStarted") terminationEvent = queryTerminated } asyncTestWaiter.dismiss() } } }
SnappyDataInc/spark
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala
Scala
apache-2.0
18,226
/* __ *\\ ** ________ ___ / / ___ Scala API ** ** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL ** ** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ ** ** /____/\\___/_/ |_/____/_/ | | ** ** |/ ** \\* */ package scala package collection package immutable import generic._ import parallel.immutable.ParMap /** * A generic template for immutable maps from keys of type `K` * to values of type `V`. * To implement a concrete map, you need to provide implementations of the * following methods (where `This` is the type of the actual map implementation): * * {{{ * def get(key: K): Option[V] * def iterator: Iterator[(K, V)] * def + [V1 >: V](kv: (K, V)): Map[K, V1] * def - (key: K): This * }}} * * If you wish that transformer methods like `take`, `drop`, `filter` return the * same kind of map, you should also override: * * {{{ * def empty: This * }}} * * It is also good idea to override methods `foreach` and * `size` for efficiency. * * @tparam K the type of the keys contained in this collection. * @tparam V the type of the values associated with the keys. * @tparam This The type of the actual map implementation. * * @author Martin Odersky * @version 2.8 * @since 2.8 * @define Coll immutable.Map * @define coll immutable map */ trait MapLike[K, +V, +This <: MapLike[K, V, This] with Map[K, V]] extends scala.collection.MapLike[K, V, This] with Parallelizable[(K, V), ParMap[K, V]] { self => protected[this] override def parCombiner = ParMap.newCombiner[K, V] /** A new immutable map containing updating this map with a given key/value mapping. * @param key the key * @param value the value * @return A new map with the new key/value mapping */ override def updated [V1 >: V](key: K, value: V1): immutable.Map[K, V1] = this + ((key, value)) /** Add a key/value pair to this map, returning a new map. * @param kv the key/value pair. * @return A new map with the new binding added to this map. 
*/ def + [V1 >: V] (kv: (K, V1)): immutable.Map[K, V1] /** Adds two or more elements to this collection and returns * a new collection. * * @param elem1 the first element to add. * @param elem2 the second element to add. * @param elems the remaining elements to add. * @return A new map with the new bindings added to this map. */ override def + [V1 >: V] (elem1: (K, V1), elem2: (K, V1), elems: (K, V1) *): immutable.Map[K, V1] = this + elem1 + elem2 ++ elems /** Adds a number of elements provided by a traversable object * and returns a new collection with the added elements. * * @param xs the traversable object consisting of key-value pairs. * @return a new immutable map with the bindings of this map and those from `xs`. */ override def ++[V1 >: V](xs: GenTraversableOnce[(K, V1)]): immutable.Map[K, V1] = ((repr: immutable.Map[K, V1]) /: xs.seq) (_ + _) /** Filters this map by retaining only keys satisfying a predicate. * @param p the predicate used to test keys * @return an immutable map consisting only of those key value pairs of this map where the key satisfies * the predicate `p`. The resulting map wraps the original map without copying any elements. */ override def filterKeys(p: K => Boolean): Map[K, V] = new FilteredKeys(p) with DefaultMap[K, V] /** Transforms this map by applying a function to every retrieved value. * @param f the function used to transform values of this map. * @return a map view which maps every key of this map * to `f(this(key))`. The resulting map wraps the original map without copying any elements. */ override def mapValues[W](f: V => W): Map[K, W] = new MappedValues(f) with DefaultMap[K, W] /** Collects all keys of this map in a set. * @return a set containing all keys of this map. 
*/ override def keySet: immutable.Set[K] = new ImmutableDefaultKeySet protected class ImmutableDefaultKeySet extends super.DefaultKeySet with immutable.Set[K] { override def + (elem: K): immutable.Set[K] = if (this(elem)) this else immutable.Set[K]() ++ this + elem override def - (elem: K): immutable.Set[K] = if (this(elem)) immutable.Set[K]() ++ this - elem else this // ImmutableDefaultKeySet is only protected, so we won't warn on override. // Someone could override in a way that makes widening not okay // (e.g. by overriding +, though the version in this class is fine) override def toSet[B >: K]: Set[B] = this.asInstanceOf[Set[B]] } /** This function transforms all the values of mappings contained * in this map with function `f`. * * @param f A function over keys and values * @return the updated map */ def transform[W, That](f: (K, V) => W)(implicit bf: CanBuildFrom[This, (K, W), That]): That = { val b = bf(repr) for ((key, value) <- this) b += ((key, f(key, value))) b.result() } }
felixmulder/scala
src/library/scala/collection/immutable/MapLike.scala
Scala
bsd-3-clause
5,345
package akka.http.extensions.security import akka.http.scaladsl.server._ import akka.http.scaladsl.util.FastFuture._ import scala.concurrent.Future import scala.util._ case class LoginMagnet(directive: Directive1[LoginInfo]) object LoginMagnet extends FutureLoginMagnet with TryLoginMagnet sealed trait FutureLoginMagnet { type FutureLogin = (String,String)=>Future[LoginResult] protected def futureLoginDirective(params:(String,String, FutureLogin)): Directive1[LoginInfo] = Directive[Tuple1[LoginInfo]] { inner ⇒ ctx ⇒ import ctx.executionContext val (username,password,futureLogin) = params futureLogin(username,password).fast .flatMap{ case e: UserDoesNotExist => ctx.reject(e) case p: PasswordDoesNotMuch => ctx.reject(p) case e: EmailDoesNotExist => ctx.reject(e) case l: LoggedIn =>inner(Tuple1(l.user))(ctx) } .recoverWith{ case th=> ctx.reject(ReadErrorRejection(s"cannot login $username",th)) } } implicit def futureLoginDefault(tryLogin: FutureLogin):LoginMagnet = LoginMagnet( Directives.parameter("username","password") //todo add email support .tflatMap{ case (username,password)=> futureLoginDirective((username,password,tryLogin)) } ) implicit def futureLoginDefault(logins: (FutureLogin,FutureLogin)):LoginMagnet = { val dir: Directive1[LoginInfo] = Directives.parameter("username","password") //login by username .tflatMap{ case (username,password)=> futureLoginDirective((username,password,logins._1)) } | Directives.parameter("email","password") //login by email .tflatMap{ case (email,password)=> futureLoginDirective((email,password,logins._2)) } LoginMagnet(dir) } implicit def futureLogin(params:(String,String, FutureLogin)):LoginMagnet = LoginMagnet(futureLoginDirective(params) ) } sealed trait TryLoginMagnet { type TryLogin = (String,String)=>Try[LoginResult] protected def tryLoginDirective(params:(String,String, TryLogin)): Directive1[LoginInfo] = { Directive[Tuple1[LoginInfo]]{ inner ⇒ ctx ⇒ val (username,password,login) = params login(username,password) match { 
case Success(e: UserDoesNotExist) => ctx.reject(e) case Success(p: PasswordDoesNotMuch) => ctx.reject(p) case Success(e: EmailDoesNotExist) => ctx.reject(e) case Success(l: LoggedIn) =>inner(Tuple1(l.user))(ctx) case Failure(th) =>ctx.reject(ReadErrorRejection(s"cannot login $username",th)) } } } implicit def tryLogin(params:(String,String, TryLogin)):LoginMagnet = LoginMagnet(tryLoginDirective(params)) implicit def tryLoginDefault(tryLogin: TryLogin):LoginMagnet = { LoginMagnet(Directives.parameter("username","password").tflatMap{ case (username,password)=> tryLoginDirective((username,password,tryLogin)) }) } }
denigma/akka-http-extensions
extensions/src/main/scala/akka/http/extensions/security/LoginMagnet.scala
Scala
mpl-2.0
2,930
/******************************************************************************* * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. * * Copyright (c) 2013,2014 by Peter Pilgrim, Addiscombe, Surrey, XeNoNiQUe UK * * All rights reserved. This program and the accompanying materials * are made available under the terms of the GNU GPL v3.0 * which accompanies this distribution, and is available at: * http://www.gnu.org/licenses/gpl-3.0.txt * * Developers: * Peter Pilgrim -- design, development and implementation * -- Blog: http://www.xenonique.co.uk/blog/ * -- Twitter: @peter_pilgrim * * Contributors: * *******************************************************************************/ package uk.co.xenonique.javacro; import org.scalatest.{Matchers, FlatSpec} import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class AppSpec extends FlatSpec with Matchers { "BasicArithmetic" should "perform competent addition" in { val a = 1 val b = 2 (a + b) should be (3) } }
peterpilgrim/javacro
src/test/scala/uk/co/xenonique/javacro/AppSpec.scala
Scala
gpl-3.0
1,091
package com.jejking.rprng.png

import akka.http.scaladsl.coding.DeflateCompressor
import akka.util.ByteString
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers

import java.nio.ByteBuffer
import java.nio.charset.Charset
import java.util.zip.CRC32

/**
 * Unit tests for the Png chunk-building helpers: chunk type constants, CRC32,
 * IHDR/IDAT/IEND chunk assembly and scanline filtering. Expected values are
 * computed independently with the JDK's CRC32 and Deflater so the production
 * code is checked against a reference implementation.
 *
 * Created by jking on 09/07/2017.
 */
class PngSpec extends AnyFlatSpec with Matchers {

  "Png" should "define the PNG signature" in {
    // 0xFFFFFF89 is the sign-extended literal for the unsigned byte 0x89;
    // ByteString truncates each Int to its low byte.
    val expectedBytes = ByteString(0xFFFFFF89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A)
    Png.PNG_SIGNATURE shouldBe expectedBytes
  }

  it should "define correct bytes for IHDR_CHUNK_TYPE critical chunk type" in {
    // ASCII "IHDR"
    val expectedBytes = ByteString(73, 72, 68, 82)
    Png.IHDR_CHUNK_TYPE shouldBe expectedBytes
  }

  it should "define correct bytes for IDAT_CHUNK_TYPE critical chunk type" in {
    // ASCII "IDAT"
    val expectedBytes = ByteString(73, 68, 65, 84)
    Png.IDAT_CHUNK_TYPE shouldBe expectedBytes
  }

  it should "define correct bytes for IEND_CHUNK_TYPE critical chunk type" in {
    // ASCII "IEND"
    val expectedBytes = ByteString(73, 69, 78, 68)
    Png.IEND_CHUNK_TYPE shouldBe expectedBytes
  }

  "crc32" should "behave like the Java one" in {
    val bytes = ByteString(1, 2, 3, 4, 5, 6, 7, 8)
    Png.crc32(bytes) shouldBe javaCrc(bytes)
  }

  it should "also do so with negative bytes" in {
    val bytes = ByteString(-1, -2, -3, -4, -5, -6, -7, -8)
    Png.crc32(bytes) shouldBe javaCrc(bytes)
  }

  "ihdr" should "convert a signed 32 bit integer to unsigned four byte array" in {
    // Build the expected big-endian 4-byte value from the low half of a long.
    val expectedLong = java.lang.Integer.toUnsignedLong(256)
    val expectedByteString = ByteString.fromArray(ByteBuffer.allocate(8).putLong(expectedLong).array()).drop(4)
    Png.toUnsignedFourByteInt(256) shouldBe expectedByteString
  }

  it should "reject a negative width parameter" in {
    a [IllegalArgumentException] should be thrownBy {
      Png.ihdr(-10, 512)
    }
  }

  it should "reject a zero width parameter" in {
    a [IllegalArgumentException] should be thrownBy {
      Png.ihdr(0, 512)
    }
  }

  it should "reject a negative height parameter" in {
    a [IllegalArgumentException] should be thrownBy {
      Png.ihdr(256, -512)
    }
  }

  it should "reject a zero height parameter" in {
    a [IllegalArgumentException] should be thrownBy {
      Png.ihdr(256, 0)
    }
  }

  it should "define the correct IHDR chunk given a positive width and a positive header" in {
    val width = ByteString.fromArray(ByteBuffer.allocate(4).putInt(256).array())
    val height = ByteString.fromArray(ByteBuffer.allocate(4).putInt(512).array())
    // ByteString(8, 6, 0, 0, 0) = bit depth 8, colour type 6 (RGBA), default
    // compression/filter/interlace. The CRC covers type + data, not the length.
    val crc = javaCrc(Png.IHDR_CHUNK_TYPE ++ width ++ height ++ ByteString(8, 6, 0, 0, 0))
    val expectedBytes = Png.toUnsignedFourByteInt(13) ++
      Png.IHDR_CHUNK_TYPE ++
      width ++
      height ++
      ByteString(8, 6, 0, 0, 0) ++
      crc
    Png.ihdr(256, 512) shouldBe expectedBytes
  }

  "scanline" should "create prepend a filter type 0 to a byte string assumed to represent a scanline" in {
    val width = 3
    val bytesPerPixel = 2
    val inputBytes = ByteString(1, 2, 3, 4, 5, 6)
    // Filter type 0 ("None") is a single zero byte before the raw pixel data.
    val expected = ByteString(0) ++ inputBytes
    val scanline: ByteString => ByteString = Png.scanline(bytesPerPixel, width)
    scanline(inputBytes) shouldBe expected
  }

  it should "fail if the scanline is not the same size as the value given in the width parameter" in {
    assertThrows[IllegalArgumentException] {
      // 3 bytes supplied but width * bytesPerPixel = 5 expected.
      val width = 5
      val bytesPerPixel = 1
      val inputBytes = ByteString(1, 2, 3)
      Png.scanline(bytesPerPixel, width)(inputBytes)
    }
  }

  "idat" should "create an IDAT chunk given a byte string assumed to represent scanlines and finish deflate" in {
    val bytes = ByteString("this is a very nice picture", Charset.forName("UTF-8"))
    val compressedBytes = javaDeflateFinish(bytes)
    val toChecksum = Png.IDAT_CHUNK_TYPE ++ compressedBytes
    val checkSum = javaCrc(toChecksum)
    val expected = Png.toUnsignedFourByteInt(compressedBytes.length) ++ toChecksum ++ checkSum
    val deflateHelper = new DeflateHelper()
    val idat = Png.idat(deflateHelper) _
    // true => final IDAT chunk: deflate stream is finished.
    idat(bytes, true) shouldBe expected
  }

  it should "create an IDAT chunk given a byte string assumed to represent scanlines and flush deflate" in {
    val bytes = ByteString("this is a very nice picture", Charset.forName("UTF-8"))
    val compressedBytes = javaDeflateFlush(bytes)
    val toChecksum = Png.IDAT_CHUNK_TYPE ++ compressedBytes
    val checkSum = javaCrc(toChecksum)
    val expected = Png.toUnsignedFourByteInt(compressedBytes.length) ++ toChecksum ++ checkSum
    val deflateHelper = new DeflateHelper()
    val idat = Png.idat(deflateHelper) _
    // false => intermediate IDAT chunk: deflate stream is sync-flushed, not finished.
    idat(bytes, false) shouldBe expected
  }

  "iend" should "create an IEND chunk" in {
    // IEND has no payload: zero length, the type bytes, then the CRC of the type alone.
    val zeroLength = Png.toUnsignedFourByteInt(0)
    val crc = javaCrc(Png.IEND_CHUNK_TYPE)
    val expectedByteString = zeroLength ++ Png.IEND_CHUNK_TYPE ++ crc
    Png.iend() shouldBe expectedByteString
  }

  // Reference compression: deflate the input and terminate the stream (level 6, zlib wrapper).
  private def javaDeflateFinish(bytes: ByteString): ByteString = {
    import java.util.zip.Deflater
    val deflater = new Deflater(6, false)
    deflater.setInput(bytes.toArray)
    deflater.finish()
    // NOTE(review): assumes the compressed output fits in 1024 bytes — fine for
    // these small fixtures, but not a general-purpose helper.
    val buffer = new Array[Byte](1024)
    val writtenBytes = deflater.deflate(buffer)
    deflater.end()
    ByteString.fromArray(buffer, 0, writtenBytes)
  }

  // Reference compression: deflate the input with a SYNC_FLUSH, leaving the stream open.
  private def javaDeflateFlush(bytes: ByteString): ByteString = {
    import java.util.zip.Deflater
    val deflater = new Deflater(6, false)
    deflater.setInput(bytes.toArray)
    val buffer = new Array[Byte](1024)
    val writtenBytes = deflater.deflate(buffer, 0, buffer.length, Deflater.SYNC_FLUSH)
    ByteString.fromArray(buffer, 0, writtenBytes)
  }

  // Reference CRC: java.util.zip.CRC32 result rendered as 4 big-endian bytes
  // (the long is written into 8 bytes and the high, always-zero half dropped).
  private def javaCrc(byteString: ByteString): ByteString = {
    val crc = new CRC32()
    crc.update(byteString.toArray)
    val crcValue = crc.getValue
    ByteString.fromArray(ByteBuffer.allocate(8).putLong(crcValue).array()).drop(4)
  }
}
jejking/rprng
src/test/scala/com/jejking/rprng/png/PngSpec.scala
Scala
apache-2.0
5,901
package org.http4s.server
package middleware

import cats.data.Kleisli
import cats._
import cats.implicits._
import org.http4s._

/**
 * Middleware that turns raised errors from a service into HTTP responses.
 *
 * Errors covered by the default service error handler are rendered as
 * responses; anything else is re-raised unchanged in `F`.
 */
object ErrorHandling {
  def apply[F[_], G[_], A](k: Kleisli[F, Request[G], Response[G]])(
      implicit F: MonadError[F, Throwable],
      G: Applicative[G]): Kleisli[F, Request[G], Response[G]] =
    Kleisli { req =>
      // Request-scoped handler, defined only for the errors it knows how to render.
      val handler: PartialFunction[Throwable, F[Response[G]]] =
        inDefaultServiceErrorHandler[F, G](F, G)(req)
      k.run(req).handleErrorWith { error =>
        // Render when the handler covers this error; otherwise propagate it as-is.
        handler.lift(error).getOrElse(F.raiseError(error))
      }
    }
}
ChristopherDavenport/http4s
server/src/main/scala/org/http4s/server/middleware/ErrorHandling.scala
Scala
apache-2.0
631
/**
 * Copyright (C) 2013 Orbeon, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it under the terms of the
 * GNU Lesser General Public License as published by the Free Software Foundation; either version
 * 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Lesser General Public License for more details.
 *
 * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
 */
package org.orbeon.oxf.fr

import java.net.URI

import org.orbeon.oxf.externalcontext.URLRewriter
import org.orbeon.oxf.fr.relational.Version._
import org.orbeon.oxf.http.Headers._
import org.orbeon.oxf.util.ScalaUtils._
import org.orbeon.oxf.util._
import org.orbeon.oxf.xforms.action.XFormsAPI._
import org.orbeon.oxf.xforms.analysis.model.ValidationLevels._
import org.orbeon.oxf.xforms.control.controls.XFormsUploadControl
import org.orbeon.oxf.xml.{TransformerUtils, XMLUtils}
import org.orbeon.saxon.om.{DocumentInfo, NodeInfo}
import org.orbeon.scaxon.XML._

import scala.collection.JavaConverters._

/**
 * Form Runner persistence support: path construction for the persistence CRUD
 * API, provider property lookup, reading documents from the persistence layer,
 * and saving form data together with its attachments.
 */
trait FormRunnerPersistence {

  import org.orbeon.oxf.fr.FormRunner._

  val CRUDBasePath                       = "/fr/service/persistence/crud"
  val FormMetadataBasePath               = "/fr/service/persistence/form"
  val PersistencePropertyPrefix          = "oxf.fr.persistence"
  val PersistenceProviderPropertyPrefix  = PersistencePropertyPrefix + ".provider"

  // Provider properties that configure the provider itself and must NOT be
  // forwarded as HTTP headers by getPersistenceURLHeadersFromProvider.
  val StandardProviderProperties = Set("uri", "autosave", "active", "permissions")

  // NOTE: We generate .bin, but sample data can contain other extensions
  private val RecognizedAttachmentExtensions = Set("bin", "jpg", "jpeg", "gif", "png", "pdf")

  // Check whether a value correspond to an uploaded file
  //
  // For this to be true
  // - the protocol must be file:
  // - the URL must have a valid signature
  //
  // This guarantees that the local file was in fact placed there by the upload control, and not tampered with.
  def isUploadedFileURL(value: String): Boolean =
    value.startsWith("file:/") && XFormsUploadControl.verifyMAC(value)

  // Base path for a document's data (or draft), e.g. /fr/service/persistence/crud/app/form/data/doc/
  //@XPathFunction
  def createFormDataBasePath(app: String, form: String, isDraft: Boolean, document: String): String =
    CRUDBasePath :: app :: form :: (if (isDraft) "draft" else "data") :: document :: "" :: Nil mkString "/"

  // Base path for a form definition, e.g. /fr/service/persistence/crud/app/form/form/
  //@XPathFunction
  def createFormDefinitionBasePath(app: String, form: String) =
    CRUDBasePath :: app :: form :: "form" :: "" :: Nil mkString "/"

  def createFormMetadataPath(app: String, form: String) =
    FormMetadataBasePath :: app :: form :: Nil mkString "/"

  // Whether the given path is an attachment path (ignoring an optional query string)
  def isAttachmentURLFor(basePath: String, url: String) =
    url.startsWith(basePath) && (split[List](splitQuery(url)._1, ".").lastOption exists RecognizedAttachmentExtensions)

  // For a given attachment path, return the filename
  def getAttachmentPathFilenameRemoveQuery(pathQuery: String) = splitQuery(pathQuery)._1.split('/').last

  // Look up the configured persistence provider name for app/form/(form|data); None if unset.
  def findProvider(app: String, form: String, formOrData: String) = {
    val providerProperty = PersistenceProviderPropertyPrefix :: app :: form :: formOrData :: Nil mkString "."
    Option(properties.getString(providerProperty))
  }

  def providerPropertyAsURL(provider: String, property: String) =
    properties.getStringOrURIAsString(PersistencePropertyPrefix :: provider :: property :: Nil mkString ".")

  def providerPropertyAsBoolean(provider: String, property: String, default: Boolean) =
    properties.getBoolean(PersistencePropertyPrefix :: provider :: property :: Nil mkString ".", default)

  // NOTE(review): the three functions below call findProvider(...).get and will
  // throw if no provider is configured for the app/form — presumably guaranteed
  // by configuration; confirm before relying on them in new call sites.
  //@XPathFunction
  def autosaveSupported(app: String, form: String) =
    providerPropertyAsBoolean(findProvider(app, form, "data").get, "autosave", default = false)

  //@XPathFunction
  def ownerGroupPermissionsSupported(app: String, form: String) =
    providerPropertyAsBoolean(findProvider(app, form, "data").get, "permissions", default = false)

  //@XPathFunction
  def versioningSupported(app: String, form: String) =
    providerPropertyAsBoolean(findProvider(app, form, "data").get, "versioning", default = false)

  def isActiveProvider(provider: String) =
    providerPropertyAsBoolean(provider, "active", default = true)

  // Return (provider URI, custom headers) for the provider configured for app/form.
  def getPersistenceURLHeaders(app: String, form: String, formOrData: String) = {
    require(augmentString(app).nonEmpty)
    require(augmentString(form).nonEmpty)
    require(Set("form", "data")(formOrData))

    getPersistenceURLHeadersFromProvider(findProvider(app, form, formOrData).get)
  }

  // Return (provider URI, custom headers) for a given provider. Every provider
  // property that is not a standard one becomes an "Orbeon-"-prefixed header.
  def getPersistenceURLHeadersFromProvider(provider: String) = {
    val propertyPrefix = PersistencePropertyPrefix :: provider :: Nil mkString "."
    val propertyPrefixTokenCount = split[List](propertyPrefix, ".").size

    // Build headers map
    val headers = (
      for {
        propertyName ← properties.propertiesStartsWith(propertyPrefix, matchWildcards = false)
        lowerSuffix  ← split[List](propertyName, ".").drop(propertyPrefixTokenCount).headOption
        if ! StandardProviderProperties(lowerSuffix)
        headerName   = "Orbeon-" + capitalizeSplitHeader(lowerSuffix)
        headerValue  = properties.getObject(propertyName).toString
      } yield headerName → headerValue) toMap

    (providerPropertyAsURL(provider, "uri"), headers)
  }

  // Same headers as above but serialized as a small XML document (TinyTree) for
  // consumption from XForms.
  def getPersistenceHeadersAsXML(app: String, form: String, formOrData: String) = {
    val (_, headers) = getPersistenceURLHeaders(app, form, formOrData)

    // Build headers document
    val headersXML =
      <headers>{
        for {
          (name, value) ← headers
        } yield
          <header><name>{XMLUtils.escapeXMLMinimal(name)}</name><value>{XMLUtils.escapeXMLMinimal(value)}</value></header>
      }</headers>.toString

    // Convert to TinyTree
    TransformerUtils.stringToTinyTree(XPath.GlobalConfiguration, headersXML, false, false)
  }

  // Reads a document forwarding headers. The URL is rewritten, and is expected to be like "/fr/…"
  def readDocument(urlString: String)(implicit logger: IndentedLogger): Option[DocumentInfo] = {

    val request = NetUtils.getExternalContext.getRequest

    val rewrittenURLString =
      URLRewriterUtils.rewriteServiceURL(
        request,
        urlString,
        URLRewriter.REWRITE_MODE_ABSOLUTE
      )

    val url = new URI(rewrittenURLString)

    val headers = Connection.buildConnectionHeadersLowerIfNeeded(
      scheme           = url.getScheme,
      hasCredentials   = false,
      customHeaders    = Map(),
      headersToForward = Connection.headersToForwardFromProperty,
      cookiesToForward = Connection.cookiesToForwardFromProperty,
      Connection.getHeaderFromRequest(request)
    )

    val cxr = Connection(
      httpMethodUpper = "GET",
      url             = url,
      credentials     = None,
      content         = None,
      headers         = headers,
      loadState       = true,
      logBody         = false
    ).connect(
      saveState = true
    )

    // Libraries are typically not present. In that case, the persistence layer should return a 404 (thus the test
    // on status code), but the MySQL persistence layer returns a [200 with an empty body][1] (thus a body is
    // required).
    // [1]: https://github.com/orbeon/orbeon-forms/issues/771
    ConnectionResult.tryWithSuccessConnection(cxr, closeOnSuccess = true) { is ⇒
      // do process XInclude, so FB's model gets included
      TransformerUtils.readTinyTree(XPath.GlobalConfiguration, is, rewrittenURLString, true, false)
    } toOption
  }

  // Retrieves a form definition from the persistence layer
  def readPublishedForm(appName: String, formName: String)(implicit logger: IndentedLogger): Option[DocumentInfo] =
    readDocument(createFormDefinitionBasePath(appName, formName) + "form.xhtml")

  // Retrieves the metadata for a form from the persistence layer
  def readFormMetadata(appName: String, formName: String)(implicit logger: IndentedLogger): Option[DocumentInfo] =
    readDocument(createFormMetadataPath(appName, formName))

  // Whether the form data is valid as per the error summary
  // We use instance('fr-error-summary-instance')/valid and not valid() because the instance validity may not be
  // reflected with the use of XBL components.
  def dataValid = errorSummaryInstance.rootElement \ "valid" === "true"

  // Return the number of failed validations captured by the error summary for the given level
  def countValidationsByLevel(level: ValidationLevel) = (errorSummaryInstance.rootElement \ "counts" \@ level.name stringValue).toInt

  // Return all nodes which refer to data attachments
  //@XPathFunction
  def collectDataAttachmentNodesJava(data: NodeInfo, fromBasePath: String) =
    collectAttachments(data.getDocumentRoot, fromBasePath, fromBasePath, forceAttachments = true)._1.asJava

  // Collect attachment references in the data: returns (holders, beforeURLs, afterURLs).
  // A node qualifies when it is a leaf holding either an uploaded file: URL or an
  // attachment URL under fromBasePath (or under toBasePath when forced).
  def collectAttachments(data: DocumentInfo, fromBasePath: String, toBasePath: String, forceAttachments: Boolean) = (
    for {
      holder        ← data \\ Node
      if isAttribute(holder) || isElement(holder) && ! hasChildElement(holder)
      beforeURL     = holder.stringValue.trimAllToEmpty
      isUploaded    = isUploadedFileURL(beforeURL)
      if isUploaded ||
        isAttachmentURLFor(fromBasePath, beforeURL) && ! isAttachmentURLFor(toBasePath, beforeURL) ||
        isAttachmentURLFor(toBasePath, beforeURL) && forceAttachments
    } yield {
      // Here we could decide to use a nicer extension for the file. But since initially the filename comes from
      // the client, it cannot be trusted, nor can its mediatype. A first step would be to do content-sniffing to
      // determine a more trusted mediatype. A second step would be to put in an API for virus scanning. For now,
      // we just use .bin as an extension.
      val filename =
        if (isUploaded)
          SecureUtils.randomHexId + ".bin"
        else
          getAttachmentPathFilenameRemoveQuery(beforeURL)

      val afterURL =
        toBasePath + filename

      (holder, beforeURL, afterURL)
    }
  ).unzip3

  // Save the data document and all of its attachments, then return
  // (beforeURLs, afterURLs, form version as reported by the persistence layer, defaulting to 1).
  def putWithAttachments(
    data              : DocumentInfo,
    toBaseURI         : String,
    fromBasePath      : String,
    toBasePath        : String,
    filename          : String,
    commonQueryString : String,
    forceAttachments  : Boolean,
    username          : Option[String] = None,
    password          : Option[String] = None,
    formVersion       : Option[String] = None
  ) = {

    // Find all instance nodes containing file URLs we need to upload
    val (uploadHolders, beforeURLs, afterURLs) =
      collectAttachments(data, fromBasePath, toBasePath, forceAttachments)

    // Save all attachments
    def saveAttachments(): Unit =
      uploadHolders zip afterURLs foreach { case (holder, resource) ⇒
        sendThrowOnError("fr-create-update-attachment-submission", Map(
          "holder"       → Some(holder),
          "resource"     → Some(appendQueryString(toBaseURI + resource, commonQueryString)),
          "username"     → username,
          "password"     → password,
          "form-version" → formVersion)
        )
      }

    // Update the paths on success
    def updatePaths() =
      uploadHolders zip afterURLs foreach { case (holder, resource) ⇒
        setvalue(holder, resource)
      }

    // Save XML document
    def saveData() =
      sendThrowOnError("fr-create-update-submission", Map(
        "holder"       → Some(data.rootElement),
        "resource"     → Some(appendQueryString(toBaseURI + toBasePath + filename, commonQueryString)),
        "username"     → username,
        "password"     → password,
        "form-version" → formVersion)
      )

    // Do things in order, so we don't update path or save the data if any the upload fails
    saveAttachments()
    updatePaths()

    // Save and try to retrieve returned version
    val versionOpt =
      for {
        done     ← saveData()
        headers  ← done.headers
        versions ← headers collectFirst { case (name, values) if name equalsIgnoreCase OrbeonFormDefinitionVersion ⇒ values }
        version  ← versions.headOption
      } yield version

    (beforeURLs, afterURLs, versionOpt map (_.toInt) getOrElse 1)
  }
}
joansmith/orbeon-forms
src/main/scala/org/orbeon/oxf/fr/FormRunnerPersistence.scala
Scala
lgpl-2.1
12,460
package controllers.breaks_in_care

import app.BreaksInCareGatherOptions
import controllers.mappings.Mappings
import models.domain.Breaks
import org.specs2.mutable._
import utils.pageobjects._
import utils.pageobjects.breaks_in_care.{GBreaksInCareHospitalPage, GBreaksInCareOtherPage, GBreaksInCareRespitePage, GBreaksInCareSummaryPage}
import utils.pageobjects.s_education.GYourCourseDetailsPage
import utils.{LightFakeApplication, WithBrowser}

/**
 * Browser-level integration spec for the "maximum breaks in care" limit, run
 * with the `maximumBreaksInCare` configuration overridden to 3.
 *
 * NOTE(review): all but the first example are disabled inside the block
 * comment below — reason not recorded here; confirm before re-enabling.
 */
class GBreaksInCareMaxBreaksSpec extends Specification {
  // 3 rows in breaks summary table for single break ... 1) header 2) data row 3) hidden delete confirm prompt
  // 6 columns in breaks data row Who/Where/From/To/Change-link/Delete-link
  val ROWSINHEADER = 1
  val ROWSPERBREAK = 2

  section("integration", models.domain.Breaks.id)
  "Breaks in care pages" should {
    "present summary page" in new WithBrowser(app = LightFakeApplication(additionalConfiguration = Map("maximumBreaksInCare" -> 3))) with PageObjects {
      println("GBreaksInCareMaxBreaksSpec max breaks set to:"+Breaks.maximumBreaks)
      val page = GBreaksInCareSummaryPage(context)
      page goToThePage()
      page must beAnInstanceOf[GBreaksInCareSummaryPage]
    }

    /*
    "present error on summary page with max breaks and try to submit Hospital" in new WithBrowser(app = LightFakeApplication(additionalConfiguration = Map("maximumBreaksInCare" -> 3))) with PageObjects {
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      val page = GBreaksInCareSummaryPage(context)
      page goToThePage()
      page must beAnInstanceOf[GBreaksInCareSummaryPage]
      val claim = new TestData
      claim.BreaktypeHospitalCheckbox = "true"
      claim.BreaktypeOtherYesNo = "no"
      page fillPageWith claim
      val nextPage = page submitPage()
      nextPage.source must contain("Maximum breaks (3) is reached.")
    }

    "present error on summary page with max breaks and try to submit CareHome" in new WithBrowser(app = LightFakeApplication(additionalConfiguration = Map("maximumBreaksInCare" -> 3))) with PageObjects {
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      val page = GBreaksInCareSummaryPage(context)
      page goToThePage()
      page must beAnInstanceOf[GBreaksInCareSummaryPage]
      val claim = new TestData
      claim.BreaktypeCareHomeCheckbox = "true"
      claim.BreaktypeOtherYesNo = "no"
      page fillPageWith claim
      val nextPage = page submitPage()
      nextPage.source must contain("Maximum breaks (3) is reached.")
    }

    "present error on summary page with max breaks and try to submit Other=yes" in new WithBrowser(app = LightFakeApplication(additionalConfiguration = Map("maximumBreaksInCare" -> 3))) with PageObjects {
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      val page = GBreaksInCareSummaryPage(context)
      page goToThePage()
      page must beAnInstanceOf[GBreaksInCareSummaryPage]
      val claim = new TestData
      claim.BreaktypeNoneCheckbox = "true"
      claim.BreaktypeOtherYesNo = "yes"
      page fillPageWith claim
      val nextPage = page submitPage()
      nextPage.source must contain("Maximum breaks (3) is reached.")
    }

    "allow submit of summary page with max breaks and None / no selected" in new WithBrowser(app = LightFakeApplication(additionalConfiguration = Map("maximumBreaksInCare" -> 3))) with PageObjects {
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      val page = GBreaksInCareSummaryPage(context)
      page goToThePage()
      page must beAnInstanceOf[GBreaksInCareSummaryPage]
      val claim = new TestData
      claim.BreaktypeNoneCheckbox = "true"
      claim.BreaktypeOtherYesNo = "no"
      page fillPageWith claim
      val nextPage = page submitPage()
      nextPage must beAnInstanceOf[GYourCourseDetailsPage]
    }

    "not add more than max breaks in Hospital page if navigate directly" in new WithBrowser(app = LightFakeApplication(additionalConfiguration = Map("maximumBreaksInCare" -> 3))) with PageObjects {
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      val page = GBreaksInCareHospitalPage(context)
      page goToThePage()
      page must beAnInstanceOf[GBreaksInCareHospitalPage]
      val claim = new TestData
      claim.AboutTheCareYouProvideBreakWhoWasInHospital_1 = BreaksInCareGatherOptions.You
      claim.AboutTheCareYouProvideBreakWhenWereYouAdmitted_1 = "01/01/2016"
      claim.AboutTheCareYouProvideYourStayEnded_1 = Mappings.no
      page fillPageWith claim
      val nextPage = page submitPage()
      nextPage must beAnInstanceOf[GBreaksInCareSummaryPage]
      val summaryTableRows = browser.find("#summary-table tr")
      summaryTableRows.size() mustEqual ROWSINHEADER + ROWSPERBREAK * 3
    }

    "not add more than max breaks in CareHome page if navigate directly" in new WithBrowser(app = LightFakeApplication(additionalConfiguration = Map("maximumBreaksInCare" -> 3))) with PageObjects {
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      val page = GBreaksInCareRespitePage(context)
      page goToThePage()
      page must beAnInstanceOf[GBreaksInCareRespitePage]
      val claim = new TestData
      claim.AboutTheCareYouProvideBreakWhoWasInRespite_1 = BreaksInCareGatherOptions.You
      claim.AboutTheCareYouProvideBreakWhenWereYouAdmitted_1 = "01/10/2015"
      claim.AboutTheCareYouProvideYourStayEnded_1 = Mappings.no
      claim.AboutTheCareYouProvideYourMedicalProfessional_1 = Mappings.no
      page fillPageWith claim
      val nextPage = page submitPage()
      nextPage must beAnInstanceOf[GBreaksInCareSummaryPage]
      val summaryTableRows = browser.find("#summary-table tr")
      summaryTableRows.size() mustEqual ROWSINHEADER + ROWSPERBREAK * 3
    }

    "not add more than max breaks in Other page if navigate directly" in new WithBrowser(app = LightFakeApplication(additionalConfiguration = Map("maximumBreaksInCare" -> 3))) with PageObjects {
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      GBreaksInCareHospitalPage.fillDetails(context, testData => {})
      val page = GBreaksInCareOtherPage(context)
      page goToThePage()
      page must beAnInstanceOf[GBreaksInCareOtherPage]
      val claim = new TestData
      claim.AboutTheCareYouProvideBreakEndDate_1 = "01/10/2015"
      claim.AboutTheCareYouProvideBreakStartAnswer_1 = Mappings.no
      page fillPageWith claim
      val nextPage = page submitPage()
      nextPage must beAnInstanceOf[GBreaksInCareSummaryPage]
      val summaryTableRows = browser.find("#summary-table tr")
      summaryTableRows.size() mustEqual ROWSINHEADER + ROWSPERBREAK * 3
    }
    */
  }
  section("integration", models.domain.Breaks.id)
}
Department-for-Work-and-Pensions/ClaimCapture
c3/test/controllers/breaks_in_care/GBreaksInCareMaxBreaksSpec.scala
Scala
mit
7,658
import scala.language.experimental.macros
import scala.reflect.macros.whitebox

// Compiler regression-test fixture (scala/scala test t10552): a whitebox macro
// whose implementation throws a fatal VM error during expansion, to exercise
// the compiler's handling of fatal errors thrown by macro implementations.
object A {
  // Entry point: expansion is delegated to f_impl below.
  def f: Unit = macro f_impl

  // Deliberately throws a *fatal* error (OutOfMemoryError) rather than a regular
  // exception; NoStackTrace keeps the test output free of a stack trace.
  // NOTE(review): the throw is intentional — do not "fix" it.
  implicit def f_impl(c: whitebox.Context): c.Expr[Unit] =
    throw new OutOfMemoryError("OOM") with scala.util.control.NoStackTrace
}
lrytz/scala
test/files/run/t10552/Macros_1.scala
Scala
apache-2.0
255
// Copyright 2013 Foursquare Labs Inc. All Rights Reserved.

package io.fsq.spindle.codegen.runtime

import io.fsq.spindle.__shaded_for_spindle_bootstrap__.descriptors.{Field, FieldProxy, Requiredness}
import scala.annotation.tailrec

/**
 * Code-generation view of a Thrift `Field`: precomputes the identifiers,
 * requiredness and resolved type information the templates need.
 *
 * @param underlying   the raw Thrift field descriptor being wrapped
 * @param resolver     resolves the field's type id to a `TypeReference`
 * @param isPrimaryKey whether this field is the record's primary key
 * @param isForeignKey whether this field references another record
 */
class ScalaField(
  override val underlying: Field,
  resolver: TypeReferenceResolver,
  val isPrimaryKey: Boolean = false,
  val isForeignKey: Boolean = false
) extends FieldProxy with HasAnnotations {

  // Identifier-safe Scala name (escapes reserved words etc.).
  val escapedName: String = CodegenUtil.escapeScalaFieldName(name)
  // Optional wire-level name override via the "wire_name" annotation.
  val wireNameOpt: Option[String] = annotations.get("wire_name")
  val wireName: String = wireNameOpt.getOrElse(name)
  // Backing variable, field-descriptor constant and is-set tracking names.
  val varName: String = "_" + name
  val tfieldName: String = name.toUpperCase + "_FDESC"
  val isSetName: String = name + "IsSet"
  val isSetVar: String = "_" + isSetName

  // A builder must set this field when requiredness is unset or REQUIRED, when
  // it is the primary key, or when explicitly annotated builder_required=true.
  val builderRequired: Boolean = (this.requirednessIsSet == false
    || this.requirednessOption.exists(_ == Requiredness.REQUIRED)
    || isPrimaryKey
    || annotations.get("builder_required").exists(_ == "true"))

  // Resolve the field's type id, failing code generation with a descriptive
  // error for each distinct resolution failure.
  val typeReference: TypeReference = resolver
    .resolveTypeId(underlying.typeId, annotations)
    .fold(
      missingTypeReference =>
        missingTypeReference match {
          case AliasNotFound(typeAlias) =>
            throw new CodegenException(
              "Unknown type `%s` referenced in field %d: _ %s"
                .format(typeAlias, underlying.identifier, underlying.name)
            )
          case TypeIdNotFound(typeId) =>
            throw new CodegenException(
              "Unresolveable type id `%s` in field %d: _ %s".format(typeId, underlying.identifier, underlying.name)
            )
          case EnhancedTypeFailure =>
            throw new CodegenException(
              "Failure with enhanced types in field %d: _ %s".format(underlying.identifier, underlying.name)
            )
        },
      foundTypeReference => foundTypeReference
    )

  // TODO: implement. must be in terms of custom TypeReference
  val enhancedTypeAnnotations = extractEnhancedType(typeReference)

  val renderType: RenderType = RenderType(typeReference, annotations)
  // Accessor-name suffix: fields with a default (or non-nullable render types)
  // get "OrDefault", nullable fields without a default get "OrNull".
  val nullOrDefault = if (this.defaultValueIsSet || !renderType.isNullable) "OrDefault" else "OrNull"
  val defaultName = this.name + nullOrDefault

  /**
   * Should this field be serialized with an enhanced type?
   *
   * This is used by TBSONObjectProtocol (and possibly other custom protocols)
   * to know whether a given field should get special treatment when being
   * serialized.
   */
  @tailrec
  final def extractEnhancedType(tpe: TypeReference): Option[(String, String)] = tpe match {
    case EnhancedTypeRef(name, _) =>
      val parts = name.split(':')
      // FIX: build the pair explicitly instead of relying on argument
      // auto-tupling (`Some(a, b)`), which is deprecated and an error under
      // -Xlint / Scala 3.
      // NOTE(review): assumes `name` contains a ':' — parts(1) would throw
      // otherwise; confirm the EnhancedTypeRef naming invariant.
      Some((parts(0), parts(1)))
    // We unroll typedefs to determine the underlying serialization
    case TypedefRef(_, ref) => extractEnhancedType(ref)
    case NewtypeRef(_, ref) => extractEnhancedType(ref)
    // NOTE: these are ugly but necessary hacks. The only hook we have
    // to pass on info about enhanced types is the writeFieldBegin call to
    // TProtocol, so if there is an enhanced type anywhere inside, we must pass
    // it in at the nearest field. For collections, this means now.
    case ListRef(ref) => extractEnhancedType(ref)
    case SetRef(ref) => extractEnhancedType(ref)
    case MapRef(_, ref) => extractEnhancedType(ref)
    case _ => None
  }
}
foursquare/fsqio
src/jvm/io/fsq/spindle/codegen/runtime/ScalaField.scala
Scala
apache-2.0
3,449
/*
 * Copyright 2020 Precog Data
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package quasar.api.push

import slamdata.Predef.{Eq => _, _}

import quasar.api.{Column, ColumnType}
import quasar.api.resource.ResourcePath

import cats.{Apply, Eq, Eval, NonEmptyTraverse, Show}
import cats.data.NonEmptyList
import cats.implicits._

import monocle.{Lens, PLens, Prism}

import shims.{equalToCats, functorToScalaz, showToCats}

// Configuration for a result push: where it goes (path), what is pushed
// (query) and the shape of the output (columns). `O` is the offset/id type,
// `Q` the query representation.
sealed trait PushConfig[O, +Q] extends Product with Serializable {
  def path: ResourcePath
  def query: Q
  def columns: PushConfig.Columns
}

object PushConfig {
  type OutputColumn = Column[(ColumnType.Scalar, SelectedType)]
  type Columns = NonEmptyList[OutputColumn]

  // One-shot push: the full result set is written every time.
  final case class Full[O, Q](
      path: ResourcePath,
      query: Q,
      columns: Columns)
      extends PushConfig[O, Q]

  // Resumable push: tracks an offset so subsequent runs only emit new rows.
  final case class Incremental[O, Q](
      path: ResourcePath,
      query: Q,
      outputColumns: List[OutputColumn],
      resumeConfig: ResumeConfig[O],
      initialOffset: Option[OffsetKey.Actual[O]])
      extends PushConfig[O, Q] {

    // The id column used for resumption is prepended to the declared output columns.
    def columns: Columns =
      NonEmptyList(
        resumeConfig.resultIdColumn.map(_.leftMap(IdType.scalarP(_))),
        outputColumns)
  }

  // Prism focusing on the Full case.
  def full[O, Q]: Prism[PushConfig[O, Q], (ResourcePath, Q, Columns)] =
    Prism.partial[PushConfig[O, Q], (ResourcePath, Q, Columns)] {
      case Full(p, q, c) => (p, q, c)
    } ((Full[O, Q] _).tupled)

  // Prism focusing on the Incremental case.
  def incremental[O, Q]: Prism[PushConfig[O, Q], (ResourcePath, Q, List[OutputColumn], ResumeConfig[O], Option[OffsetKey.Actual[O]])] =
    Prism.partial[PushConfig[O, Q], (ResourcePath, Q, List[OutputColumn], ResumeConfig[O], Option[OffsetKey.Actual[O]])] {
      case Incremental(p, q, c, r, o) => (p, q, c, r, o)
    } ((Incremental[O, Q] _).tupled)

  // Polymorphic lens over the query, allowing the query type to change (Q1 => Q2).
  def query[O, Q1, Q2]: PLens[PushConfig[O, Q1], PushConfig[O, Q2], Q1, Q2] =
    PLens[PushConfig[O, Q1], PushConfig[O, Q2], Q1, Q2](_.query)(q2 => {
      case f @ Full(_, _, _) => f.copy(query = q2)
      case i @ Incremental(_, _, _, _, _) => i.copy(query = q2)
    })

  // Lens over the destination path.
  def path[O, Q]: Lens[PushConfig[O, Q], ResourcePath] =
    Lens[PushConfig[O, Q], ResourcePath](_.path)(p2 => {
      case f @ Full(_, _, _) => f.copy(path = p2)
      case i @ Incremental(_, _, _, _, _) => i.copy(path = p2)
    })

  // Equality via the two prisms: configs are equal iff they are the same case
  // with equal components.
  implicit def pushConfigEq[O, Q: Eq]: Eq[PushConfig[O, Q]] =
    Eq.by(p => (full[O, Q].getOption(p), incremental[O, Q].getOption(p)))

  implicit def pushConfigShow[O, Q: Show]: Show[PushConfig[O, Q]] =
    Show show {
      case Full(p, q, c) =>
        s"Full(${p.show}, ${q.show}, ${c.map(_.name).show})"

      case i @ Incremental(p, q, c, r, o) =>
        s"Incremental(${p.show}, ${q.show}, ${c.map(_.name).show}, ${r.show}, ${o.show})"
    }

  // Traversal over the single query value a config contains (exactly one Q per
  // config, hence NonEmptyTraverse with trivial folds/reductions).
  implicit def pushConfigNonEmptyTraverse[O]: NonEmptyTraverse[PushConfig[O, ?]] =
    new NonEmptyTraverse[PushConfig[O, ?]] {
      def nonEmptyTraverse[F[_]: Apply, A, B](fa: PushConfig[O, A])(f: A => F[B]) =
        query[O, A, B].modifyF(f)(fa)

      def foldLeft[A, B](fa: PushConfig[O, A], b: B)(f: (B, A) => B): B =
        f(b, query[O, A, A].get(fa))

      def foldRight[A, B](fa: PushConfig[O, A], lb: Eval[B])(f: (A, Eval[B]) => Eval[B]): Eval[B] =
        f(query[O, A, A].get(fa), lb)

      def reduceLeftTo[A, B](fa: PushConfig[O, A])(f: A => B)(g: (B, A) => B): B =
        f(query[O, A, A].get(fa))

      def reduceRightTo[A, B](fa: PushConfig[O, A])(f: A => B)(g: (A, Eval[B]) => Eval[B]): Eval[B] =
        Eval.now(f(query[O, A, A].get(fa)))
    }
}
djspiewak/quasar
api/src/main/scala/quasar/api/push/PushConfig.scala
Scala
apache-2.0
4,028
/*
 * SPDX-License-Identifier: Apache-2.0
 *
 * Copyright 2015-2021 Andre White.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.truthencode.ddo.model.feats.classes

import io.truthencode.ddo.model.classes.HeroicCharacterClass
import io.truthencode.ddo.model.feats.{ClassFeatDisplayHelper, Feat}
import org.concordion.api.FullOGNL
import org.concordion.integration.junit4.ConcordionRunner
import org.junit.runner.RunWith

/**
 * Concordion specification checking the displayed feat list for the Druid
 * class. All verification logic lives in [[ClassFeatDisplayHelper]]; this
 * class only binds the class under test and the feat enumeration.
 */
@FullOGNL
@RunWith(classOf[ConcordionRunner])
class DruidClassFeatSpec extends ClassFeatDisplayHelper {
  // Character class whose feats are verified by the shared helper.
  override val cClass: HeroicCharacterClass = HeroicCharacterClass.Druid
  // Enumeration the helper uses to look up displayable feats.
  override val displayEnum: E = Feat
}
adarro/ddo-calc
subprojects/common/ddo-core/src/specs/scala/io/truthencode/ddo/model/feats/classes/DruidClassFeatSpec.scala
Scala
apache-2.0
1,164
/* * Copyright 2016-2017 gutefrage.net GmbH * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.gutefrage.scalacheck.money import javax.money.{CurrencyUnit, Monetary, MonetaryAmount} import org.scalacheck.{Arbitrary, Gen} /** * [[org.scalacheck.Arbitrary$ Arbitrary]] instances for * [[https://jcp.org/en/jsr/detail?id=354 JSR 354]] monetary types from * `javax.money`. * * Import as wildcard to bring all instances in scope: * * {{{ * import net.gutefrage.money.arbitrary._ * }}} * * To use arbitrary monetary amounts you also need an instance for a currency * in scope: * * {{{ * import javax.money.MonetaryAmount * import net.gutefrage.scalacheck.money.arbitrary._ * * property("amounts in any currency") { * import currency.any * forAll { amount: MonetaryAmount => * … * } * } * * property("amounts in €") { * import currency.byCode.EUR * forAll { amount: MonetaryAmount => * … * } * } * * }}} */ object arbitrary { /** * [[org.scalacheck.Arbitrary$ Arbitrary]] instance for * `javax.money.MonetaryAmount`. * * This instance requires an [[org.scalacheck.Arbitrary$ Arbitrary]] instance * for [[javax.money.CurrencyUnit]] in implicit scope. The package * [[net.gutefrage.scalacheck.money.arbitrary.currency]] provides a couple of * these instances. 
* * @param currency An [[org.scalacheck.Arbitrary$ Arbitrary]] instance for * the currency to use */ implicit def arbitraryMonetaryAmount( implicit currency: Arbitrary[CurrencyUnit] ): Arbitrary[MonetaryAmount] = Arbitrary(MonetaryGen.monetaryAmount(currency.arbitrary)) /** * Provides different [[org.scalacheck.Arbitrary$ Arbitrary]] instances for * [[javax.money.CurrencyUnit]]. * * It's recommended to import the required instances in local scope! */ object currency { /** * [[org.scalacheck.Arbitrary$ Arbitrary]] instance that generates all * currencies. * * @see [[net.gutefrage.scalacheck.money.MonetaryGen.currency]] */ implicit val any: Arbitrary[CurrencyUnit] = Arbitrary(MonetaryGen.currency) /** * [[org.scalacheck.Arbitrary$ Arbitrary]] instances that only generate * currencies with a particular currency code. */ object byCode { private def arbitraryForCode(code: String): Arbitrary[CurrencyUnit] = Arbitrary(Gen.const(Monetary.getCurrency(code))) // Only specific currencies /** * Chinese yuan (CNY). */ implicit def CNY: Arbitrary[CurrencyUnit] = arbitraryForCode("CNY") /** * Euro (EUR) */ implicit def EUR: Arbitrary[CurrencyUnit] = arbitraryForCode("EUR") /** * British pound (GBP) */ implicit def GBP: Arbitrary[CurrencyUnit] = arbitraryForCode("GBP") /** * Japanese yen (JPY) */ implicit def JPY: Arbitrary[CurrencyUnit] = arbitraryForCode("JPY") /** * US dollar (USD) */ implicit def USD: Arbitrary[CurrencyUnit] = arbitraryForCode("USD") } } }
gutefrage/scalacheck-money
src/main/scala/net/gutefrage/scalacheck/money/arbitrary.scala
Scala
apache-2.0
3,720
package com.tackmobile.scala.slider import android.graphics.Bitmap import android.util.Log import java.util.Random import scala.collection.mutable.Set import scala.collection.mutable.ArrayBuffer class TileServer( original:Bitmap, rows:Int, columns:Int, tileSize:Int ) { val unservedSlices:ArrayBuffer[Bitmap] = new ArrayBuffer[Bitmap] val slices:Set[Bitmap] = Set() val random:Random = new Random sliceOriginal() def reset() = slices.foreach( slice => unservedSlices += slice ) private def sliceOriginal() { val fullWidth = tileSize * rows val fullHeight = tileSize * columns val scaledImage = Bitmap.createScaledBitmap(original, fullWidth, fullHeight, true) var bitmap:Bitmap = null for (rowI <- 0 to 3; colI <- 0 to 3) { slices += Bitmap.createBitmap(scaledImage, rowI * tileSize, colI * tileSize, tileSize, tileSize) } reset() } def serveRandomSlice():Bitmap = { if (unservedSlices.size > 0) { val randomIndex = random.nextInt(unservedSlices.size) val drawable = unservedSlices(randomIndex) unservedSlices -= drawable return drawable } null } }
thillerson/scala-android-slider-puzzle
slider-puzzle-in-scala/src/main/scala/TileServer.scala
Scala
mit
1,125
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.torch import com.intel.analytics.bigdl.nn.{GradientChecker, Linear, MSECriterion} import com.intel.analytics.bigdl.tensor.Tensor import com.intel.analytics.bigdl.utils.RandomGenerator._ import scala.util.Random import com.intel.analytics.bigdl._ @com.intel.analytics.bigdl.tags.Serial class LinearSpec extends TorchSpec { "Linear module" should "converge to correct weight and bias" in { torchCheck() val inputN = 5 val outputN = 2 val linear = new Linear[Double](inputN, outputN) val mse = new MSECriterion[Double] val input = Tensor[Double](inputN) val res = Tensor[Double](outputN) val grad = Tensor[Double](outputN).rand() val seed = 100 input.rand() val code = "torch.manualSeed(" + seed + ")\\n" + "linear:reset()\\n" + "weight = linear.weight\\n" + "bias = linear.bias\\n" + "output1 = linear:forward(input)\\n" + "output2 = linear:backward(input, grad)" val (luaTime, torchResult) = TH.run(code, Map("linear" -> linear, "input" -> input, "grad" -> grad), Array("weight", "bias", "output1", "output2")) val luaOutput1 = torchResult("output1").asInstanceOf[Tensor[Double]] val luaOutput2 = torchResult("output2").asInstanceOf[Tensor[Double]] val luaWeight = torchResult("weight").asInstanceOf[Tensor[Double]] val luaBias = torchResult("bias").asInstanceOf[Tensor[Double]] val start = System.nanoTime() RNG.setSeed(seed) linear.reset() val weight = linear.weight val bias = 
linear.bias val output1 = linear.forward(input) val output2 = linear.backward(input, grad) val end = System.nanoTime() val scalaTime = end - start luaOutput1 should be(output1) luaOutput2 should be(output2) luaWeight should be(weight) luaBias should be(bias) println("Test case : Linear, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s") } "Linear module without bias" should "converate to correct weight and bias" in { torchCheck() val inputN = 5 val outputN = 2 val linear = new Linear[Double](inputN, outputN, withBias = false) val mse = new MSECriterion[Double] val input = Tensor[Double](inputN) val res = Tensor[Double](outputN) val grad = Tensor[Double](outputN).rand() val seed = 100 input.rand() val code = "torch.manualSeed(" + seed + ")\\n" + "linear:reset()\\n" + "weight = linear.weight\\n" + "bias = linear.bias\\n" + "output1 = linear:forward(input)\\n" + "output2 = linear:backward(input, grad)" val (luaTime, torchResult) = TH.run(code, Map("linear" -> linear, "input" -> input, "grad" -> grad), Array("weight", "bias", "output1", "output2")) val luaOutput1 = torchResult("output1").asInstanceOf[Tensor[Double]] val luaOutput2 = torchResult("output2").asInstanceOf[Tensor[Double]] val luaWeight = torchResult("weight").asInstanceOf[Tensor[Double]] val luaBias = torchResult("bias").asInstanceOf[Tensor[Double]] val start = System.nanoTime() RNG.setSeed(seed) linear.reset() val weight = linear.weight val bias = linear.bias val output1 = linear.forward(input) val output2 = linear.backward(input, grad) val end = System.nanoTime() val scalaTime = end - start luaOutput1 should be(output1) luaOutput2 should be(output2) luaWeight should be(weight) luaBias should be(bias) println("Test case : Linear, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s") } "Linear (1024, 1000)" should "converate to correct weight and bias" in { torchCheck() val inputN = 1024 val outputN = 1000 val linear = new Linear[Double](inputN, outputN) val mse = new 
MSECriterion[Double] val input = Tensor[Double](inputN).rand() val grad = Tensor[Double](outputN).rand() val seed = 100 val code = "torch.manualSeed(" + seed + ")\\n" + "linear:reset()\\n" + "weight = linear.weight\\n" + "bias = linear.bias\\n" + "output1 = linear:forward(input)\\n" + "output2 = linear:backward(input, grad)" val (luaTime, torchResult) = TH.run(code, Map("linear" -> linear, "input" -> input, "grad" -> grad), Array("weight", "bias", "output1", "output2")) val luaOutput1 = torchResult("output1").asInstanceOf[Tensor[Double]] val luaOutput2 = torchResult("output2").asInstanceOf[Tensor[Double]] val luaWeight = torchResult("weight").asInstanceOf[Tensor[Double]] val luaBias = torchResult("bias").asInstanceOf[Tensor[Double]] val start = System.nanoTime() RNG.setSeed(seed) linear.reset() val weight = linear.weight val bias = linear.bias val output1 = linear.forward(input) val output2 = linear.backward(input, grad) val end = System.nanoTime() val scalaTime = end - start luaOutput1 should be(output1) luaOutput2 should be(output2) luaWeight should be(weight) luaBias should be(bias) println("Test case : Linear, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s") } "Linear (27, 64)" should "converge to correct weight and bias" in { torchCheck() val inputN = 27 val outputN = 64 val linear = new Linear[Double](inputN, outputN) val input = Tensor[Double](1156, inputN).rand() val grad = Tensor[Double](1156, outputN).rand() val seed = 100 val code = "torch.manualSeed(" + seed + ")\\n" + "linear:reset()\\n" + "weight = linear.weight\\n" + "bias = linear.bias\\n" + "output1 = linear:forward(input)\\n" + "output2 = linear:backward(input, grad)" val (luaTime, torchResult) = TH.run(code, Map("linear" -> linear, "input" -> input, "grad" -> grad), Array("weight", "bias", "output1", "output2")) val luaOutput1 = torchResult("output1").asInstanceOf[Tensor[Double]] val luaOutput2 = torchResult("output2").asInstanceOf[Tensor[Double]] val luaWeight = 
torchResult("weight").asInstanceOf[Tensor[Double]] val luaBias = torchResult("bias").asInstanceOf[Tensor[Double]] val start = System.nanoTime() RNG.setSeed(seed) linear.reset() val weight = linear.weight val bias = linear.bias val output1 = linear.forward(input) val output2 = linear.backward(input, grad) val end = System.nanoTime() val scalaTime = end - start luaOutput1 should be(output1) luaOutput2 should be(output2) luaWeight should be(weight) luaBias should be(bias) println("Test case : Linear, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s") } "Linear module" should "be good in gradient check for input" in { torchCheck() val seed = 100 RNG.setSeed(seed) val linear = new Linear[Double](5, 2) val input = Tensor[Double](3, 5).apply1(e => Random.nextDouble()) val checker = new GradientChecker(1e-4) checker.checkLayer[Double](linear, input, 1e-3) should be(true) } "Linear module" should "be good in gradient check for weight" in { torchCheck() val seed = 100 RNG.setSeed(seed) val linear = new Linear[Double](5, 2) val input = Tensor[Double](3, 5).apply1(e => Random.nextDouble()) val checker = new GradientChecker(1e-4) checker.checkWeight[Double](linear, input, 1e-3) should be(true) } "Linear (27, 64) without bias" should "converate to correct weight and bias" in { torchCheck() val inputN = 27 val outputN = 64 val linear = new Linear[Double](inputN, outputN, withBias = false) val input = Tensor[Double](1156, inputN).rand() val grad = Tensor[Double](1156, outputN).rand() val seed = 100 val code = "torch.manualSeed(" + seed + ")\\n" + "linear:reset()\\n" + "output = linear:forward(input)\\n" + "gradInput = linear:backward(input, grad)" val (luaTime, torchResult) = TH.run(code, Map("linear" -> linear, "input" -> input, "grad" -> grad), Array("linear", "output", "gradInput")) val torchLinear = torchResult("linear").asInstanceOf[Linear[Double]] val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]] val luaOutput2 = 
torchResult("gradInput").asInstanceOf[Tensor[Double]] val luaWeight = torchLinear.weight val luaBias = torchLinear.bias val start = System.nanoTime() RNG.setSeed(seed) linear.reset() val weight = linear.weight val bias = linear.bias val output1 = linear.forward(input) val output2 = linear.backward(input, grad) val end = System.nanoTime() val scalaTime = end - start luaOutput1 should be(output1) luaOutput2 should be(output2) luaWeight should be(weight) luaBias should be(bias) println("Test case : Linear, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s") } }
jenniew/BigDL
spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/LinearSpec.scala
Scala
apache-2.0
9,401
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** @author John Miller * @version 1.3 * @date Sun Sep 16 22:35:14 EDT 2012 * @see LICENSE (MIT style license file). * @see http://en.wikipedia.org/wiki/Gillespie_algorithm */ // U N D E R D E V E L O P M E N T package scalation.dynamics import scalation.linalgebra.{MatrixD, MatrixI, VectorD} import scalation.util.Error //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `SSA` class implements the Gillespie Stochastic Simulation Algorithm 'SSA'. * @param c the matrix giving sub-volume connectivity * @param r the matrix indicating which the reactions that are active in each sub-volume * @param z the matrix giving stoichiometry for all reactions * @param x the matrix giving species population per volume * @param t0 the start time for the simulation */ class SSA (c: MatrixI, r: MatrixI, z: MatrixI, x: MatrixD, t0: Double = 0.0) extends Error { val L = c.dim1 // the number of sub-volumes val R = r.dim2 // the number of possible reactions val S = z.dim2 // the number of species (e.g., types of molecules) if (c.dim2 != L) flaw ("constructor", "wrong dimensions for c matrix") if (r.dim1 != L) flaw ("constructor", "wrong dimensions for x matrix") if (z.dim1 != R) flaw ("constructor", "wrong dimensions for x matrix") if (x.dim1 != L || x.dim2 != S) flaw ("constructor", "wrong dimensions for x matrix") val cut = (.003, 3.0, 100.0) // cut-off values val e = for (l <- 0 until L) yield r(l).sum + c(l).sum // reaction + diffusion events var t = t0 // the simulation clock (current time) println ("e = " + e) val a = Array.ofDim [VectorD] (L) for (l <- 0 until L) { val a_l = new VectorD (e(l)) for (j <- 0 until e(l)) a_l(j) = .1 * x(l, j) // formula is application dependent a(l) = a_l } // for //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** */ def simulate (tf: Double) { } // simulate 
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** */ override def toString = "a = " + a.deep } // SSA class //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: /** The `SSATest` object tests the `SSA` class. */ object SSATest extends App { // Connectivity of (3) sub-volumes (L by L) val c = new MatrixI ((3, 3), 0, 1, 0, // connectivity: 0 <-> 1 <-> 2 1, 0, 1, 0, 1, 0) // Reactions that can occur in each sub-volume out of a total of 4 possible (L by R) val r = new MatrixI ((3, 4), 1, 1, 0, 0, // sub-vol 0: reactions 0, 1 0, 1, 1, 0, // sub-vol 1: reactions 1, 2 0, 0, 1, 1) // sub-vol 2: reactions 2, 3 // Stoichiometry for each of 4 possible reactions (R by S) val z = new MatrixI ((4, 4), -1, -1, 1, 0, // reaction 0: S0 + S1 -> S2 -1, 0, -1, 1, // reaction 1: S0 + S2 -> S3 0, -1, -1, 1, // reaction 2: S1 + S2 -> S3 0, 1, -1, -1) // reaction 4: S2 + S3 -> S1 // initial population for each species (L by S) val x = new MatrixD ((3, 4), 50.0, 50.0, 0.0, 0.0, // initial pop. in sub-vol. 0 0.0, 0.0, 0.0, 0.0, // initial pop. in sub-vol. 1 0.0, 0.0, 0.0, 0.0) // initial pop. in sub-vol. 1 val pathway = new SSA (c, r, z, x) println ("pathway = " + pathway) } // SSATest object
NBKlepp/fda
scalation_1.3/scalation_modeling/src/main/scala/scalation/dynamics/SSA.scala
Scala
mit
3,935
package org.scalacoin.util import java.io.{IOException, ByteArrayOutputStream} import java.util import org.bitcoinj.core.{ECKey, Sha256Hash} import org.bitcoinj.params.TestNet3Params import org.scalacoin.config.TestNet3 import org.scalacoin.crypto.ECPublicKey import org.scalacoin.protocol.script.{UpdateScriptPubKeyAsm, ScriptPubKey} import org.scalacoin.protocol.transaction.{TransactionOutput, Transaction} import org.scalacoin.script.ScriptOperationFactory import org.scalacoin.script.constant.{ScriptConstantImpl, ScriptToken} import org.slf4j.LoggerFactory import scala.collection.JavaConversions._ /** * Created by chris on 2/23/16. */ trait BitcoinjConversions { private def params = TestNet3Params.get private def logger = LoggerFactory.getLogger(this.getClass().toString) /** * Converts a bitcoinj script to a bitcoin-s ScriptPubKey * @param bitcoinjScript * @return */ def toScriptPubKey(bitcoinjScript : org.bitcoinj.script.Script) : ScriptPubKey = { val scriptPubKey = ScriptPubKey(bitcoinjScript.getProgram) require(BitcoinSUtil.encodeHex(bitcoinjScript.getProgram) == scriptPubKey.hex, "ScriptPubKey must be the same as the given bitcoinj script\n" + BitcoinSUtil.encodeHex(bitcoinjScript.getProgram) + "\n" + scriptPubKey.hex) scriptPubKey } /** * Performs the signature serialization that is implemented inside of bitcoinj * @param tx * @param inputIndex * @param connectedScript * @return */ def signatureSerialization(tx : org.bitcoinj.core.Transaction, inputIndex : Int, connectedScript : Seq[Byte], sigHashType : Byte) : String = { val params = TestNet3Params.get try { import org.bitcoinj.core._ import org.bitcoinj.script._ for { i <- 0 until tx.getInputs.size} { //empty script tx.getInput(i).setScriptSig(new ScriptBuilder().build()) } // This step has no purpose beyond being synchronized with Bitcoin Core's bugs. OP_CODESEPARATOR // is a legacy holdover from a previous, broken design of executing scripts that shipped in Bitcoin 0.1. 
// It was seriously flawed and would have let anyone take anyone elses money. Later versions switched to // the design we use today where scripts are executed independently but share a stack. This left the // OP_CODESEPARATOR instruction having no purpose as it was only meant to be used internally, not actually // ever put into scripts. Deleting OP_CODESEPARATOR is a step that should never be required but if we don't // do it, we could split off the main chain. val connectedScript1 : Script = new Script(org.bitcoinj.script.Script.removeAllInstancesOfOp( connectedScript.toArray, ScriptOpCodes.OP_CODESEPARATOR)); // Set the input to the script of its output. Bitcoin Core does this but the step has no obvious purpose as // the signature covers the hash of the prevout transaction which obviously includes the output script // already. Perhaps it felt safer to him in some way, or is another leftover from how the code was written. val input = tx.getInputs.get(inputIndex); input.setScriptSig(connectedScript1); if ((sigHashType & 0x1f) == (org.bitcoinj.core.Transaction.SigHash.NONE.ordinal() + 1)) { // SIGHASH_NONE means no outputs are signed at all - the signature is effectively for a "blank cheque". //tx.outputs = new util.ArrayList[TransactionOutput](0); tx.clearOutputs() // The signature isn't broken by new versions of the transaction issued by other parties. for { i <- 0 until tx.getInputs.size } { if (i != inputIndex) tx.getInputs.get(i).setSequenceNumber(0); } } else if ((sigHashType & 0x1f) == (org.bitcoinj.core.Transaction.SigHash.SINGLE.ordinal() + 1)) { logger.info("Sighash type was SIGHASH_SINGLE") // SIGHASH_SINGLE means only sign the output at the same index as the input (ie, my output). if (inputIndex >= tx.getOutputs.size()) { logger.info("Input index was >= output size") // The input index is beyond the number of outputs, it's a buggy signature made by a broken // Bitcoin implementation. 
Bitcoin Core also contains a bug in handling this case: // any transaction output that is signed in this case will result in both the signed output // and any future outputs to this public key being steal-able by anyone who has // the resulting signature and the public key (both of which are part of the signed tx input). // Bitcoin Core's bug is that SignatureHash was supposed to return a hash and on this codepath it // actually returns the constant "1" to indicate an error, which is never checked for. Oops. return BitcoinSUtil.encodeHex(Sha256Hash.wrap("0100000000000000000000000000000000000000000000000000000000000000").getBytes) } // In SIGHASH_SINGLE the outputs after the matching input index are deleted, and the outputs before // that position are "nulled out". Unintuitively, the value in a "null" transaction is set to -1. //tx.outputs = new util.ArrayList[TransactionOutput](tx.getOutputs.subList(0, inputIndex + 1)) tx.clearOutputs() for { i <- 0 until inputIndex } { tx.addOutput(new org.bitcoinj.core.TransactionOutput(params, tx, Coin.NEGATIVE_SATOSHI, List[Byte]().toArray)) } // The signature isn't broken by new versions of the transaction issued by other parties. for {i <- 0 until tx.getInputs.size } { if (i != inputIndex) tx.getInputs.get(i).setSequenceNumber(0); } logger.info("Tx inputs: " + tx.getInputs) logger.info("Tx outputs: " + tx.getOutputs) } /* if ((sigHashType & SIGHASH_ANYONECANPAY_VALUE) == SIGHASH_ANYONECANPAY_VALUE) { // SIGHASH_ANYONECANPAY means the signature in the input is not broken by changes/additions/removals // of other inputs. For example, this is useful for building assurance contracts. tx.clearInputs() tx.addInput(input); } */ val bos : ByteArrayOutputStream = new UnsafeByteArrayOutputStream(256); tx.bitcoinSerialize(bos); // We also have to write a hash type (sigHashType is actually an unsigned char) Utils.uint32ToByteStreamLE(0x000000ff & sigHashType, bos); // Note that this is NOT reversed to ensure it will be signed correctly. 
If it were to be printed out // however then we would expect that it is IS reversed. val hash : Sha256Hash = Sha256Hash.twiceOf(bos.toByteArray()) val txBytes = bos.toByteArray bos.close(); return BitcoinSUtil.encodeHex(txBytes) } catch { case e : IOException => throw new RuntimeException(e); // Cannot happen. } } /** * Helper function to create bitcoinj ECKey * @param bytes * @return */ def publicKey(bytes : Seq[Byte]) : ECKey = ECKey.fromPublicOnly(bytes.toArray) /** * Helper function to create bitcoinj ECKey * @param key * @return */ def publicKey(key : ECPublicKey) : ECKey = publicKey(key.bytes) /** * Builds a bitcoinj transaction out of a bitcoin-s transaction * @param tx * @return */ def transaction(tx : org.scalacoin.protocol.transaction.Transaction) : org.bitcoinj.core.Transaction = { new org.bitcoinj.core.Transaction(params,BitcoinSUtil.decodeHex(tx.hex).toArray) } } object BitcoinjConversions extends BitcoinjConversions
TomMcCabe/scalacoin
src/test/scala/org/scalacoin/util/BitcoinJConversions.scala
Scala
mit
7,589
package me.heaton.cases object Calc { type Environment = String => Int def eval(t: Tree, env: Environment): Int = t match { case Sum(l, r) => eval(l, env) + eval(r, env) case Var(n) => env(n) case Const(v) => v } def derive(t: Tree, v: String): Tree = t match { case Sum(l, r) => Sum(derive(l, v), derive(r, v)) case Var(n) if(v == n) => Const(1) case _ => Const(0) } def main(args: Array[String]) { val exp: Tree = Sum(Sum(Var("x"), Var("x")), Sum(Const(7), Var("y"))) val env: Environment = { case "x" => 5 case "y" => 7 } println("Expression: " + exp) println("Evaluation with x=5, y=7: " + eval(exp, env)) println("Derivative relative to x:\\n " + derive(exp, "x")) println("Derivative relative to y:\\n " + derive(exp, "y")) } }
heaton/hello-scala
src/main/scala/me/heaton/cases/Calc.scala
Scala
mit
799
package spire.diff /** * Chunk represents a window onto an underlying array. * * It basically exists to do coordinate translations (and truncation). * This allows us to cheaply do a diff on a substring of an array * without either copying, or carrying around a ton of indices * manually. */ case class Chunk[A](data: Array[A], start: Int, limit: Int) { def length: Int = limit - start def apply(i: Int): A = data(i + start) } object Chunk { def apply[A](data: Array[A]): Chunk[A] = Chunk(data, 0, data.length) }
non/spire-diff
src/main/scala/spire/diff/Chunk.scala
Scala
mit
528
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import org.apache.commons.lang3.StringUtils import org.apache.carbondata.streaming.CarbonStreamException import org.apache.carbondata.streaming.CarbonStreamSparkStreaming import org.apache.carbondata.streaming.CarbonStreamSparkStreamingWriter /** * Create [[CarbonStreamSparkStreamingWriter]] for stream table * when integrate with Spark Streaming. * * NOTE: Current integration with Spark Streaming is an alpha feature. */ object CarbonSparkStreamingFactory { def getStreamSparkStreamingWriter(spark: SparkSession, dbNameStr: String, tableName: String): CarbonStreamSparkStreamingWriter = synchronized { val dbName = if (StringUtils.isEmpty(dbNameStr)) "default" else dbNameStr val key = dbName + "." + tableName if (CarbonStreamSparkStreaming.getTableMap.containsKey(key)) { CarbonStreamSparkStreaming.getTableMap.get(key) } else { if (StringUtils.isEmpty(tableName) || tableName.contains(" ")) { throw new CarbonStreamException("Table creation failed. 
" + "Table name must not be blank or " + "cannot contain blank space") } val carbonTable = CarbonEnv.getCarbonTable(Some(dbName), tableName)(spark) if (!carbonTable.isStreamingSink) { throw new CarbonStreamException(s"Table ${carbonTable.getDatabaseName}." + s"${carbonTable.getTableName} is not a streaming table") } val streamWriter = new CarbonStreamSparkStreamingWriter(spark, carbonTable, spark.sessionState.newHadoopConf()) CarbonStreamSparkStreaming.getTableMap.put(key, streamWriter) streamWriter } } }
sgururajshetty/carbondata
integration/spark2/src/main/scala/org/apache/spark/sql/CarbonSparkStreamingFactory.scala
Scala
apache-2.0
2,548
/* * Copyright (c) 2017 Magomed Abdurakhmanov, Hypertino * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. * */ package com.hypertino.facade.filters.annotated import com.hypertino.binders.annotations.fieldName import com.hypertino.binders.value.Value import com.hypertino.facade.filter.chain.SimpleFilterChain import com.hypertino.facade.filter.model._ import com.hypertino.facade.filter.parser.{ExpressionEvaluator, PreparedExpression} import com.hypertino.facade.raml._ import com.hypertino.hyperbus.model.HRL import com.typesafe.config.Config import scaldi.Injectable case class ForwardAnnotation( @fieldName("if") predicate: Option[PreparedExpression], location: PreparedExpression, query: Map[String, PreparedExpression], method: Option[PreparedExpression] ) extends RamlAnnotation { def name: String = "forward" } class ForwardFilterFactory(config: Config, protected val predicateEvaluator: ExpressionEvaluator) extends RamlFilterFactory with Injectable { override def createFilters(target: RamlFilterTarget): SimpleFilterChain = { val (sourceLocation, ramlMethod, destLocation, destQuery, destMethod) = target match { case ResourceTarget(uri, ForwardAnnotation(_, l, q, m)) ⇒ (uri, None, l, q, m) case MethodTarget(uri, method, ForwardAnnotation(_, l, q, m)) ⇒ (uri, Some(Method(method)), l, q, m) case otherTarget ⇒ throw RamlConfigException(s"Annotation 'forward' cannot be assigned to $otherTarget") } val sourceHRL = HRL(sourceLocation) SimpleFilterChain( requestFilters = Seq(new ForwardRequestFilter(sourceHRL, destLocation, destQuery, destMethod, predicateEvaluator)), responseFilters = Seq.empty, eventFilters = Seq.empty ) } override def createRamlAnnotation(name: String, value: Value): RamlAnnotation = { value.to[ForwardAnnotation] } }
hypertino/hyperfacade
src/main/scala/com/hypertino/facade/filters/annotated/ForwardFilterFactory.scala
Scala
mpl-2.0
2,139
package com.sksamuel.elastic4s.http.task import com.sksamuel.elastic4s.JsonFormat import com.sksamuel.elastic4s.http.HttpExecutable import com.sksamuel.elastic4s.task.{CancelTasksDefinition, ListTasksDefinition} import org.elasticsearch.client.{ResponseListener, RestClient} import scala.collection.JavaConverters._ import scala.concurrent.Future import scala.concurrent.duration._ case class ListTaskResponse(nodes: Map[String, Node]) case class Node(name: String, private val transport_address: String, host: String, ip: String, roles: Seq[String], tasks: Map[String, Task]) { def transportAddress: String = transport_address } case class Task(node: String, id: String, `type`: String, action: String, private val start_time_in_millis: Long, private val running_time_in_nanos: Long, cancellable: Boolean) { def startTime: FiniteDuration = start_time_in_millis.millis def runningTime: FiniteDuration = running_time_in_nanos.nanos } trait TaskImplicits { implicit object ListTaskHttpExecutable extends HttpExecutable[ListTasksDefinition, ListTaskResponse] { val method = "GET" val endpoint = s"/_tasks" override def execute(client: RestClient, request: ListTasksDefinition, format: JsonFormat[ListTaskResponse]): Future[ListTaskResponse] = { val params = scala.collection.mutable.Map.empty[String, String] if (request.nodeIds.nonEmpty) params.put("nodes", request.nodeIds.mkString(",")) if (request.actions.nonEmpty) params.put("actions", request.actions.mkString(",")) if (request.detailed.contains(true)) params.put("detailed", "true") if (request.waitForCompletion.contains(true)) params.put("wait_for_completion", "true") request.groupBy.foreach(params.put("group_by", _)) val fn = client.performRequestAsync(method, endpoint, params.asJava, _: ResponseListener) executeAsyncAndMapResponse(fn, format) } } implicit object CancelTaskHttpExecutable extends HttpExecutable[CancelTasksDefinition, Boolean] { val method = "POST" override def execute(client: RestClient, request: 
CancelTasksDefinition, format: JsonFormat[Boolean]): Future[Boolean] = { val endpoint = if (request.nodeIds.isEmpty) s"/_tasks/cancel" else s"/_tasks/task_id:${request.nodeIds.mkString(",")}/_cancel" val params = scala.collection.mutable.Map.empty[String, String] if (request.nodeIds.nonEmpty) params.put("nodes", request.nodeIds.mkString(",")) if (request.actions.nonEmpty) params.put("actions", request.actions.mkString(",")) val fn = client.performRequestAsync(method, endpoint, params.asJava, _: ResponseListener) Future.successful { val code = client.performRequest(method, endpoint, params.asJava).getStatusLine.getStatusCode code >= 200 && code < 300 } } } }
FabienPennequin/elastic4s
elastic4s-http/src/main/scala/com/sksamuel/elastic4s/http/task/TaskImplicits.scala
Scala
apache-2.0
3,139
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.r import scala.collection.mutable import org.apache.hadoop.fs.Path import org.json4s._ import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods._ import org.apache.spark.SparkException import org.apache.spark.ml.{Pipeline, PipelineModel, PipelineStage} import org.apache.spark.ml.clustering.{LDA, LDAModel} import org.apache.spark.ml.feature.{CountVectorizer, CountVectorizerModel, RegexTokenizer, StopWordsRemover} import org.apache.spark.ml.linalg.{Vector, VectorUDT} import org.apache.spark.ml.param.ParamPair import org.apache.spark.ml.util._ import org.apache.spark.sql.{DataFrame, Dataset} import org.apache.spark.sql.functions._ import org.apache.spark.sql.types.StringType private[r] class LDAWrapper private ( val pipeline: PipelineModel, val logLikelihood: Double, val logPerplexity: Double, val vocabulary: Array[String]) extends MLWritable { import LDAWrapper._ private val lda: LDAModel = pipeline.stages.last.asInstanceOf[LDAModel] private val preprocessor: PipelineModel = new PipelineModel(s"${Identifiable.randomUID(pipeline.uid)}", pipeline.stages.dropRight(1)) def transform(data: Dataset[_]): DataFrame = { val vec2ary = udf { vec: Vector => vec.toArray } val 
outputCol = lda.getTopicDistributionCol val tempCol = s"${Identifiable.randomUID(outputCol)}" val preprocessed = preprocessor.transform(data) lda.transform(preprocessed, ParamPair(lda.topicDistributionCol, tempCol)) .withColumn(outputCol, vec2ary(col(tempCol))) .drop(TOKENIZER_COL, STOPWORDS_REMOVER_COL, COUNT_VECTOR_COL, tempCol) } def computeLogPerplexity(data: Dataset[_]): Double = { lda.logPerplexity(preprocessor.transform(data)) } def topics(maxTermsPerTopic: Int): DataFrame = { val topicIndices: DataFrame = lda.describeTopics(maxTermsPerTopic) if (vocabulary.isEmpty || vocabulary.length < vocabSize) { topicIndices } else { val index2term = udf { indices: mutable.WrappedArray[Int] => indices.map(i => vocabulary(i)) } topicIndices .select(col("topic"), index2term(col("termIndices")).as("term"), col("termWeights")) } } lazy val isDistributed: Boolean = lda.isDistributed lazy val vocabSize: Int = lda.vocabSize lazy val docConcentration: Array[Double] = lda.getEffectiveDocConcentration lazy val topicConcentration: Double = lda.getEffectiveTopicConcentration override def write: MLWriter = new LDAWrapper.LDAWrapperWriter(this) } private[r] object LDAWrapper extends MLReadable[LDAWrapper] { val TOKENIZER_COL = s"${Identifiable.randomUID("rawTokens")}" val STOPWORDS_REMOVER_COL = s"${Identifiable.randomUID("tokens")}" val COUNT_VECTOR_COL = s"${Identifiable.randomUID("features")}" private def getPreStages( features: String, customizedStopWords: Array[String], maxVocabSize: Int): Array[PipelineStage] = { val tokenizer = new RegexTokenizer() .setInputCol(features) .setOutputCol(TOKENIZER_COL) val stopWordsRemover = new StopWordsRemover() .setInputCol(TOKENIZER_COL) .setOutputCol(STOPWORDS_REMOVER_COL) stopWordsRemover.setStopWords(stopWordsRemover.getStopWords ++ customizedStopWords) val countVectorizer = new CountVectorizer() .setVocabSize(maxVocabSize) .setInputCol(STOPWORDS_REMOVER_COL) .setOutputCol(COUNT_VECTOR_COL) Array(tokenizer, stopWordsRemover, 
countVectorizer) } def fit( data: DataFrame, features: String, k: Int, maxIter: Int, optimizer: String, subsamplingRate: Double, topicConcentration: Double, docConcentration: Array[Double], customizedStopWords: Array[String], maxVocabSize: Int): LDAWrapper = { val lda = new LDA() .setK(k) .setMaxIter(maxIter) .setSubsamplingRate(subsamplingRate) .setOptimizer(optimizer) val featureSchema = data.schema(features) val stages = featureSchema.dataType match { case d: StringType => getPreStages(features, customizedStopWords, maxVocabSize) ++ Array(lda.setFeaturesCol(COUNT_VECTOR_COL)) case d: VectorUDT => Array(lda.setFeaturesCol(features)) case _ => throw new SparkException( s"Unsupported input features type of ${featureSchema.dataType.typeName}," + s" only String type and Vector type are supported now.") } if (topicConcentration != -1) { lda.setTopicConcentration(topicConcentration) } else { // Auto-set topicConcentration } if (docConcentration.length == 1) { if (docConcentration.head != -1) { lda.setDocConcentration(docConcentration.head) } else { // Auto-set docConcentration } } else { lda.setDocConcentration(docConcentration) } val pipeline = new Pipeline().setStages(stages) val model = pipeline.fit(data) val vocabulary: Array[String] = featureSchema.dataType match { case d: StringType => val countVectorModel = model.stages(2).asInstanceOf[CountVectorizerModel] countVectorModel.vocabulary case _ => Array.empty[String] } val ldaModel: LDAModel = model.stages.last.asInstanceOf[LDAModel] val preprocessor: PipelineModel = new PipelineModel(s"${Identifiable.randomUID(pipeline.uid)}", model.stages.dropRight(1)) val preprocessedData = preprocessor.transform(data) new LDAWrapper( model, ldaModel.logLikelihood(preprocessedData), ldaModel.logPerplexity(preprocessedData), vocabulary) } override def read: MLReader[LDAWrapper] = new LDAWrapperReader override def load(path: String): LDAWrapper = super.load(path) class LDAWrapperWriter(instance: LDAWrapper) extends MLWriter { 
override protected def saveImpl(path: String): Unit = { val rMetadataPath = new Path(path, "rMetadata").toString val pipelinePath = new Path(path, "pipeline").toString val rMetadata = ("class" -> instance.getClass.getName) ~ ("logLikelihood" -> instance.logLikelihood) ~ ("logPerplexity" -> instance.logPerplexity) ~ ("vocabulary" -> instance.vocabulary.toList) val rMetadataJson: String = compact(render(rMetadata)) sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath) instance.pipeline.save(pipelinePath) } } class LDAWrapperReader extends MLReader[LDAWrapper] { override def load(path: String): LDAWrapper = { implicit val format = DefaultFormats val rMetadataPath = new Path(path, "rMetadata").toString val pipelinePath = new Path(path, "pipeline").toString val rMetadataStr = sc.textFile(rMetadataPath, 1).first() val rMetadata = parse(rMetadataStr) val logLikelihood = (rMetadata \\ "logLikelihood").extract[Double] val logPerplexity = (rMetadata \\ "logPerplexity").extract[Double] val vocabulary = (rMetadata \\ "vocabulary").extract[List[String]].toArray val pipeline = PipelineModel.load(pipelinePath) new LDAWrapper(pipeline, logLikelihood, logPerplexity, vocabulary) } } }
spark0001/spark2.1.1
mllib/src/main/scala/org/apache/spark/ml/r/LDAWrapper.scala
Scala
apache-2.0
7,940
package markov import Token._ import markov.Dictionary.DictionaryImpl import org.scalatest.funsuite.AnyFunSuite class TokensTest extends AnyFunSuite { val ed = DictionaryImpl(Set("hello", "world", "end", "with", "dot", "first", "second", "line")) val helloTokens = List(StartToken, WordToken("hello"), EndToken) val helloWorldTokens = List(StartToken, WordToken("hello"), WordToken("world"), SignToken("!"), EndToken) val endWithDot = List( StartToken, WordToken("end"), WordToken("with"), WordToken("dot"), SignToken("."), EndToken ) val twoLinesTokens = List( StartToken, WordToken("first"), WordToken("line"), SignToken("."), WordToken("second"), WordToken("line"), SignToken("."), EndToken ) test("tokenize") { assert(tokenize("hello", ed) === helloTokens) assert(tokenize("Hello world!", ed) === helloWorldTokens) assert(tokenize("End with dot.", ed) === endWithDot) assert(tokenize("First line.Second line.", ed) === twoLinesTokens) assert(tokenize("First line. Second line.", ed) === twoLinesTokens) assert(tokenize("First line.\\n Second line.\\n", ed) === twoLinesTokens) } test("tokensToString") { assert(tokensToString(helloTokens) === "Hello") assert(tokensToString(helloWorldTokens) === "Hello world!") assert(tokensToString(endWithDot) === "End with dot.") assert(tokensToString(twoLinesTokens) === "First line. Second line.") } test("weird spacing") { assert(tokenize("hello ", ed) === helloTokens) assert(tokenize("hello ", ed) === helloTokens) assert(tokenize(" hello", ed) === helloTokens) assert(tokenize(" hello", ed) === helloTokens) assert(tokenize(" hello ", ed) === helloTokens) assert(tokenize(" ", ed) === List(StartToken, EndToken)) assert(tokenize(" ", ed) === List(StartToken, EndToken)) assert(tokenize(" . ", ed) === List(StartToken, SignToken("."), EndToken)) assert(tokenize("first line . second line . 
", ed) === twoLinesTokens) assert(tokenize("first line.second line.", ed) === twoLinesTokens) } test("words that dont occur in the library") { assert( tokenize("first unknown word", ed) === List( StartToken, WordToken("first"), InfrequentWord, InfrequentWord, EndToken ) ) } val plots = List("hello world!", "End, with:dot.", "First line.Second line.") test("build dictionary should skip words that occur only once") { val d = Dictionary.build(plots, 2) assert(d == DictionaryImpl(Set("line"))) } test("build dictionary with all words occurring at least twice") { val d = Dictionary.build(plots ++ plots, 2) assert(d == ed) } }
tammosminia/markovMovieCritic
src/test/scala/markov/TokensTest.scala
Scala
gpl-3.0
2,739
package com.karasiq.nanoboard.frontend.components.post import scala.language.postfixOps import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue import rx._ import rx.async._ import scalatags.JsDom.all._ import com.karasiq.bootstrap.Bootstrap.default._ import com.karasiq.nanoboard.frontend.NanoboardController import com.karasiq.nanoboard.frontend.api.NanoboardApi import com.karasiq.nanoboard.frontend.utils.Mouse private[components] object PostLink { def apply(hash: String)(implicit controller: NanoboardController): PostLink = { new PostLink(hash) } } private[components] final class PostLink(hash: String)(implicit controller: NanoboardController) extends BootstrapHtmlComponent { lazy val post = NanoboardApi.post(hash).toRx(None) .map(_.map(data ⇒ div(Mouse.relative(xOffset = 12), zIndex := 1, NanoboardPost(showParent = false, showAnswers = true, data)).render)) private val hover = Var(false) override def renderTag(md: Modifier*): TagT = { val updateHover: Modifier = Seq( onmouseover := { () ⇒ hover() = true }, onmouseout := { () ⇒ hover() = false } ) span( position.relative, a(updateHover, href := s"#$hash", onclick := Callback.onClick(_ ⇒ controller.showPost(hash)), md), Rx[Frag](if (hover() && post().nonEmpty) post().get else "") ) } }
Karasiq/nanoboard
frontend/src/main/scala/com/karasiq/nanoboard/frontend/components/post/PostLink.scala
Scala
apache-2.0
1,371
package sync import db.{BinariesDao, BinaryVersionsDao, SyncsDao} import io.flow.common.v0.models.UserReference import io.flow.dependency.actors.SearchActor import io.flow.dependency.api.lib.DefaultBinaryVersionProvider import io.flow.dependency.v0.models.Binary import io.flow.log.RollbarLogger import io.flow.postgresql.Pager import javax.inject.Inject class BinarySync @Inject() ( binariesDao: BinariesDao, defaultBinaryVersionProvider: DefaultBinaryVersionProvider, binaryVersionsDao: BinaryVersionsDao, syncsDao: SyncsDao, @javax.inject.Named("search-actor") searchActor: akka.actor.ActorRef, logger: RollbarLogger, ) { def sync(user: UserReference, binaryId: String): Unit = { binariesDao.findById(binaryId).foreach { binary => syncsDao.withStartedAndCompleted("binary", binary.id) { val versions = defaultBinaryVersionProvider.versions(binary.name) logger .fingerprint(s"${getClass.getName}:results") .withKeyValue("binary", binary.toString) .withKeyValue("versions", versions.toString) .info("result") versions.foreach { version => binaryVersionsDao.upsert(user, binary.id, version.value) } } } searchActor ! SearchActor.Messages.SyncBinary(binaryId) } def iterateAll()(f: Binary => Any): Unit = { Pager.create { offset => binariesDao.findAll(offset = offset, limit = 1000) }.foreach { rec => f(rec) } } }
flowcommerce/dependency
api/app/sync/BinarySync.scala
Scala
mit
1,468
/* * Copyright 2015 Webtrends (http://www.webtrends.com) * * See the LICENCE.txt file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.webtrends.harness.component.cluster.communication import com.typesafe.config.{Config, ConfigFactory} import com.webtrends.harness.utils.ConfigUtil /* * @author cuthbertm on 10/22/14 9:22 AM */ class MessagingSettings (config: Config = ConfigFactory.load) { protected val c: Config = ConfigUtil.prepareSubConfig(config, "message-processor") var ShareInterval = c getMilliseconds "share-interval" var TrashInterval = c getMilliseconds "trash-interval" require(ShareInterval > 0, "share-interval must be set") require(TrashInterval > 0, "trash-interval must be set") } object MessagingSettings { implicit def apply(config: Config = ConfigFactory.load()) = new MessagingSettings(config) }
Webtrends/wookiee-cluster
src/main/scala/com/webtrends/harness/component/cluster/communication/MessagingSettings.scala
Scala
apache-2.0
1,482
package com.sksamuel.scapegoat.inspections import com.sksamuel.scapegoat.{ Inspection, InspectionContext, Inspector } /** @author Stephen Samuel */ class NoSuperClone
pwwpche/scalac-scapegoat-plugin
src/main/scala/com/sksamuel/scapegoat/inspections/NoSuperClone.scala
Scala
apache-2.0
168
package form.kanban /** * かんばん参加者変更Form. * @param id かんばんID * @param lockVersion バージョンNo * @param joinedUserIds かんばん参加者IDSeq * @param adminUserIds かんばん管理者IDSeq */ case class JoinedUser( id: Long, lockVersion: Long, joinedUserIds: Seq[Long], adminUserIds: Seq[Long] )
nemuzuka/vss-kanban
src/main/scala/form/kanban/JoinedUser.scala
Scala
mit
346
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import scala.util.Random import org.scalatest.Matchers.the import org.apache.spark.sql.execution.WholeStageCodegenExec import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, ObjectHashAggregateExec, SortAggregateExec} import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec import org.apache.spark.sql.expressions.Window import org.apache.spark.sql.functions._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.SharedSQLContext import org.apache.spark.sql.test.SQLTestData.DecimalData import org.apache.spark.sql.types.DecimalType case class Fact(date: Int, hour: Int, minute: Int, room_name: String, temp: Double) class DataFrameAggregateSuite extends QueryTest with SharedSQLContext { import testImplicits._ val absTol = 1e-8 test("groupBy") { checkAnswer( testData2.groupBy("a").agg(sum($"b")), Seq(Row(1, 3), Row(2, 3), Row(3, 3)) ) checkAnswer( testData2.groupBy("a").agg(sum($"b").as("totB")).agg(sum('totB)), Row(9) ) checkAnswer( testData2.groupBy("a").agg(count("*")), Row(1, 2) :: Row(2, 2) :: Row(3, 2) :: Nil ) checkAnswer( testData2.groupBy("a").agg(Map("*" -> "count")), Row(1, 2) :: Row(2, 2) :: Row(3, 2) :: Nil ) 
checkAnswer( testData2.groupBy("a").agg(Map("b" -> "sum")), Row(1, 3) :: Row(2, 3) :: Row(3, 3) :: Nil ) val df1 = Seq(("a", 1, 0, "b"), ("b", 2, 4, "c"), ("a", 2, 3, "d")) .toDF("key", "value1", "value2", "rest") checkAnswer( df1.groupBy("key").min(), df1.groupBy("key").min("value1", "value2").collect() ) checkAnswer( df1.groupBy("key").min("value2"), Seq(Row("a", 0), Row("b", 4)) ) checkAnswer( decimalData.groupBy("a").agg(sum("b")), Seq(Row(new java.math.BigDecimal(1), new java.math.BigDecimal(3)), Row(new java.math.BigDecimal(2), new java.math.BigDecimal(3)), Row(new java.math.BigDecimal(3), new java.math.BigDecimal(3))) ) val decimalDataWithNulls = spark.sparkContext.parallelize( DecimalData(1, 1) :: DecimalData(1, null) :: DecimalData(2, 1) :: DecimalData(2, null) :: DecimalData(3, 1) :: DecimalData(3, 2) :: DecimalData(null, 2) :: Nil).toDF() checkAnswer( decimalDataWithNulls.groupBy("a").agg(sum("b")), Seq(Row(new java.math.BigDecimal(1), new java.math.BigDecimal(1)), Row(new java.math.BigDecimal(2), new java.math.BigDecimal(1)), Row(new java.math.BigDecimal(3), new java.math.BigDecimal(3)), Row(null, new java.math.BigDecimal(2))) ) } test("SPARK-17124 agg should be ordering preserving") { val df = spark.range(2) val ret = df.groupBy("id").agg("id" -> "sum", "id" -> "count", "id" -> "min") assert(ret.schema.map(_.name) == Seq("id", "sum(id)", "count(id)", "min(id)")) checkAnswer( ret, Row(0, 0, 1, 0) :: Row(1, 1, 1, 1) :: Nil ) } test("SPARK-18952: regexes fail codegen when used as keys due to bad forward-slash escapes") { val df = Seq(("some[thing]", "random-string")).toDF("key", "val") checkAnswer( df.groupBy(regexp_extract('key, "([a-z]+)\\\\[", 1)).count(), Row("some", 1) :: Nil ) } test("rollup") { checkAnswer( courseSales.rollup("course", "year").sum("earnings"), Row("Java", 2012, 20000.0) :: Row("Java", 2013, 30000.0) :: Row("Java", null, 50000.0) :: Row("dotNET", 2012, 15000.0) :: Row("dotNET", 2013, 48000.0) :: Row("dotNET", null, 63000.0) :: 
Row(null, null, 113000.0) :: Nil ) } test("cube") { checkAnswer( courseSales.cube("course", "year").sum("earnings"), Row("Java", 2012, 20000.0) :: Row("Java", 2013, 30000.0) :: Row("Java", null, 50000.0) :: Row("dotNET", 2012, 15000.0) :: Row("dotNET", 2013, 48000.0) :: Row("dotNET", null, 63000.0) :: Row(null, 2012, 35000.0) :: Row(null, 2013, 78000.0) :: Row(null, null, 113000.0) :: Nil ) val df0 = spark.sparkContext.parallelize(Seq( Fact(20151123, 18, 35, "room1", 18.6), Fact(20151123, 18, 35, "room2", 22.4), Fact(20151123, 18, 36, "room1", 17.4), Fact(20151123, 18, 36, "room2", 25.6))).toDF() val cube0 = df0.cube("date", "hour", "minute", "room_name").agg(Map("temp" -> "avg")) assert(cube0.where("date IS NULL").count > 0) } test("grouping and grouping_id") { checkAnswer( courseSales.cube("course", "year") .agg(grouping("course"), grouping("year"), grouping_id("course", "year")), Row("Java", 2012, 0, 0, 0) :: Row("Java", 2013, 0, 0, 0) :: Row("Java", null, 0, 1, 1) :: Row("dotNET", 2012, 0, 0, 0) :: Row("dotNET", 2013, 0, 0, 0) :: Row("dotNET", null, 0, 1, 1) :: Row(null, 2012, 1, 0, 2) :: Row(null, 2013, 1, 0, 2) :: Row(null, null, 1, 1, 3) :: Nil ) intercept[AnalysisException] { courseSales.groupBy().agg(grouping("course")).explain() } intercept[AnalysisException] { courseSales.groupBy().agg(grouping_id("course")).explain() } } test("grouping/grouping_id inside window function") { val w = Window.orderBy(sum("earnings")) checkAnswer( courseSales.cube("course", "year") .agg(sum("earnings"), grouping_id("course", "year"), rank().over(Window.partitionBy(grouping_id("course", "year")).orderBy(sum("earnings")))), Row("Java", 2012, 20000.0, 0, 2) :: Row("Java", 2013, 30000.0, 0, 3) :: Row("Java", null, 50000.0, 1, 1) :: Row("dotNET", 2012, 15000.0, 0, 1) :: Row("dotNET", 2013, 48000.0, 0, 4) :: Row("dotNET", null, 63000.0, 1, 2) :: Row(null, 2012, 35000.0, 2, 1) :: Row(null, 2013, 78000.0, 2, 2) :: Row(null, null, 113000.0, 3, 1) :: Nil ) } test("SPARK-21980: 
References in grouping functions should be indexed with semanticEquals") { checkAnswer( courseSales.cube("course", "year") .agg(grouping("CouRse"), grouping("year")), Row("Java", 2012, 0, 0) :: Row("Java", 2013, 0, 0) :: Row("Java", null, 0, 1) :: Row("dotNET", 2012, 0, 0) :: Row("dotNET", 2013, 0, 0) :: Row("dotNET", null, 0, 1) :: Row(null, 2012, 1, 0) :: Row(null, 2013, 1, 0) :: Row(null, null, 1, 1) :: Nil ) } test("rollup overlapping columns") { checkAnswer( testData2.rollup($"a" + $"b" as "foo", $"b" as "bar").agg(sum($"a" - $"b") as "foo"), Row(2, 1, 0) :: Row(3, 2, -1) :: Row(3, 1, 1) :: Row(4, 2, 0) :: Row(4, 1, 2) :: Row(5, 2, 1) :: Row(2, null, 0) :: Row(3, null, 0) :: Row(4, null, 2) :: Row(5, null, 1) :: Row(null, null, 3) :: Nil ) checkAnswer( testData2.rollup("a", "b").agg(sum("b")), Row(1, 1, 1) :: Row(1, 2, 2) :: Row(2, 1, 1) :: Row(2, 2, 2) :: Row(3, 1, 1) :: Row(3, 2, 2) :: Row(1, null, 3) :: Row(2, null, 3) :: Row(3, null, 3) :: Row(null, null, 9) :: Nil ) } test("cube overlapping columns") { checkAnswer( testData2.cube($"a" + $"b", $"b").agg(sum($"a" - $"b")), Row(2, 1, 0) :: Row(3, 2, -1) :: Row(3, 1, 1) :: Row(4, 2, 0) :: Row(4, 1, 2) :: Row(5, 2, 1) :: Row(2, null, 0) :: Row(3, null, 0) :: Row(4, null, 2) :: Row(5, null, 1) :: Row(null, 1, 3) :: Row(null, 2, 0) :: Row(null, null, 3) :: Nil ) checkAnswer( testData2.cube("a", "b").agg(sum("b")), Row(1, 1, 1) :: Row(1, 2, 2) :: Row(2, 1, 1) :: Row(2, 2, 2) :: Row(3, 1, 1) :: Row(3, 2, 2) :: Row(1, null, 3) :: Row(2, null, 3) :: Row(3, null, 3) :: Row(null, 1, 3) :: Row(null, 2, 6) :: Row(null, null, 9) :: Nil ) } test("spark.sql.retainGroupColumns config") { checkAnswer( testData2.groupBy("a").agg(sum($"b")), Seq(Row(1, 3), Row(2, 3), Row(3, 3)) ) spark.conf.set(SQLConf.DATAFRAME_RETAIN_GROUP_COLUMNS.key, false) checkAnswer( testData2.groupBy("a").agg(sum($"b")), Seq(Row(3), Row(3), Row(3)) ) spark.conf.set(SQLConf.DATAFRAME_RETAIN_GROUP_COLUMNS.key, true) } test("agg without groups") { 
checkAnswer( testData2.agg(sum('b)), Row(9) ) } test("agg without groups and functions") { checkAnswer( testData2.agg(lit(1)), Row(1) ) } test("average") { checkAnswer( testData2.agg(avg('a), mean('a)), Row(2.0, 2.0)) checkAnswer( testData2.agg(avg('a), sumDistinct('a)), // non-partial Row(2.0, 6.0) :: Nil) checkAnswer( decimalData.agg(avg('a)), Row(new java.math.BigDecimal(2))) checkAnswer( decimalData.agg(avg('a), sumDistinct('a)), // non-partial Row(new java.math.BigDecimal(2), new java.math.BigDecimal(6)) :: Nil) checkAnswer( decimalData.agg(avg('a cast DecimalType(10, 2))), Row(new java.math.BigDecimal(2))) // non-partial checkAnswer( decimalData.agg(avg('a cast DecimalType(10, 2)), sumDistinct('a cast DecimalType(10, 2))), Row(new java.math.BigDecimal(2), new java.math.BigDecimal(6)) :: Nil) } test("null average") { checkAnswer( testData3.agg(avg('b)), Row(2.0)) checkAnswer( testData3.agg(avg('b), countDistinct('b)), Row(2.0, 1)) checkAnswer( testData3.agg(avg('b), sumDistinct('b)), // non-partial Row(2.0, 2.0)) } test("zero average") { val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b") checkAnswer( emptyTableData.agg(avg('a)), Row(null)) checkAnswer( emptyTableData.agg(avg('a), sumDistinct('b)), // non-partial Row(null, null)) } test("count") { assert(testData2.count() === testData2.rdd.map(_ => 1).count()) checkAnswer( testData2.agg(count('a), sumDistinct('a)), // non-partial Row(6, 6.0)) } test("null count") { checkAnswer( testData3.groupBy('a).agg(count('b)), Seq(Row(1, 0), Row(2, 1)) ) checkAnswer( testData3.groupBy('a).agg(count('a + 'b)), Seq(Row(1, 0), Row(2, 1)) ) checkAnswer( testData3.agg(count('a), count('b), count(lit(1)), countDistinct('a), countDistinct('b)), Row(2, 1, 2, 2, 1) ) checkAnswer( testData3.agg(count('b), countDistinct('b), sumDistinct('b)), // non-partial Row(1, 1, 2) ) } test("multiple column distinct count") { val df1 = Seq( ("a", "b", "c"), ("a", "b", "c"), ("a", "b", "d"), ("x", "y", "z"), ("x", "q", 
null.asInstanceOf[String])) .toDF("key1", "key2", "key3") checkAnswer( df1.agg(countDistinct('key1, 'key2)), Row(3) ) checkAnswer( df1.agg(countDistinct('key1, 'key2, 'key3)), Row(3) ) checkAnswer( df1.groupBy('key1).agg(countDistinct('key2, 'key3)), Seq(Row("a", 2), Row("x", 1)) ) } test("zero count") { val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b") checkAnswer( emptyTableData.agg(count('a), sumDistinct('a)), // non-partial Row(0, null)) } test("stddev") { val testData2ADev = math.sqrt(4.0 / 5.0) checkAnswer( testData2.agg(stddev('a), stddev_pop('a), stddev_samp('a)), Row(testData2ADev, math.sqrt(4 / 6.0), testData2ADev)) checkAnswer( testData2.agg(stddev("a"), stddev_pop("a"), stddev_samp("a")), Row(testData2ADev, math.sqrt(4 / 6.0), testData2ADev)) } test("zero stddev") { val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b") checkAnswer( emptyTableData.agg(stddev('a), stddev_pop('a), stddev_samp('a)), Row(null, null, null)) } test("zero sum") { val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b") checkAnswer( emptyTableData.agg(sum('a)), Row(null)) } test("zero sum distinct") { val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b") checkAnswer( emptyTableData.agg(sumDistinct('a)), Row(null)) } test("moments") { val sparkVariance = testData2.agg(variance('a)) checkAggregatesWithTol(sparkVariance, Row(4.0 / 5.0), absTol) val sparkVariancePop = testData2.agg(var_pop('a)) checkAggregatesWithTol(sparkVariancePop, Row(4.0 / 6.0), absTol) val sparkVarianceSamp = testData2.agg(var_samp('a)) checkAggregatesWithTol(sparkVarianceSamp, Row(4.0 / 5.0), absTol) val sparkSkewness = testData2.agg(skewness('a)) checkAggregatesWithTol(sparkSkewness, Row(0.0), absTol) val sparkKurtosis = testData2.agg(kurtosis('a)) checkAggregatesWithTol(sparkKurtosis, Row(-1.5), absTol) } test("zero moments") { val input = Seq((1, 2)).toDF("a", "b") checkAnswer( input.agg(stddev('a), stddev_samp('a), stddev_pop('a), variance('a), var_samp('a), var_pop('a), skewness('a), 
kurtosis('a)), Row(Double.NaN, Double.NaN, 0.0, Double.NaN, Double.NaN, 0.0, Double.NaN, Double.NaN)) checkAnswer( input.agg( expr("stddev(a)"), expr("stddev_samp(a)"), expr("stddev_pop(a)"), expr("variance(a)"), expr("var_samp(a)"), expr("var_pop(a)"), expr("skewness(a)"), expr("kurtosis(a)")), Row(Double.NaN, Double.NaN, 0.0, Double.NaN, Double.NaN, 0.0, Double.NaN, Double.NaN)) } test("null moments") { val emptyTableData = Seq.empty[(Int, Int)].toDF("a", "b") checkAnswer( emptyTableData.agg(variance('a), var_samp('a), var_pop('a), skewness('a), kurtosis('a)), Row(null, null, null, null, null)) checkAnswer( emptyTableData.agg( expr("variance(a)"), expr("var_samp(a)"), expr("var_pop(a)"), expr("skewness(a)"), expr("kurtosis(a)")), Row(null, null, null, null, null)) } test("collect functions") { val df = Seq((1, 2), (2, 2), (3, 4)).toDF("a", "b") checkAnswer( df.select(collect_list($"a"), collect_list($"b")), Seq(Row(Seq(1, 2, 3), Seq(2, 2, 4))) ) checkAnswer( df.select(collect_set($"a"), collect_set($"b")), Seq(Row(Seq(1, 2, 3), Seq(2, 4))) ) checkDataset( df.select(collect_set($"a").as("aSet")).as[Set[Int]], Set(1, 2, 3)) checkDataset( df.select(collect_set($"b").as("bSet")).as[Set[Int]], Set(2, 4)) checkDataset( df.select(collect_set($"a"), collect_set($"b")).as[(Set[Int], Set[Int])], Seq(Set(1, 2, 3) -> Set(2, 4)): _*) } test("collect functions structs") { val df = Seq((1, 2, 2), (2, 2, 2), (3, 4, 1)) .toDF("a", "x", "y") .select($"a", struct($"x", $"y").as("b")) checkAnswer( df.select(collect_list($"a"), sort_array(collect_list($"b"))), Seq(Row(Seq(1, 2, 3), Seq(Row(2, 2), Row(2, 2), Row(4, 1)))) ) checkAnswer( df.select(collect_set($"a"), sort_array(collect_set($"b"))), Seq(Row(Seq(1, 2, 3), Seq(Row(2, 2), Row(4, 1)))) ) } test("collect_set functions cannot have maps") { val df = Seq((1, 3, 0), (2, 3, 0), (3, 4, 1)) .toDF("a", "x", "y") .select($"a", map($"x", $"y").as("b")) val error = intercept[AnalysisException] { df.select(collect_set($"a"), 
collect_set($"b")) } assert(error.message.contains("collect_set() cannot have map type data")) } test("SPARK-17641: collect functions should not collect null values") { val df = Seq(("1", 2), (null, 2), ("1", 4)).toDF("a", "b") checkAnswer( df.select(collect_list($"a"), collect_list($"b")), Seq(Row(Seq("1", "1"), Seq(2, 2, 4))) ) checkAnswer( df.select(collect_set($"a"), collect_set($"b")), Seq(Row(Seq("1"), Seq(2, 4))) ) } test("SPARK-14664: Decimal sum/avg over window should work.") { checkAnswer( spark.sql("select sum(a) over () from values 1.0, 2.0, 3.0 T(a)"), Row(6.0) :: Row(6.0) :: Row(6.0) :: Nil) checkAnswer( spark.sql("select avg(a) over () from values 1.0, 2.0, 3.0 T(a)"), Row(2.0) :: Row(2.0) :: Row(2.0) :: Nil) } test("SQL decimal test (used for catching certain decimal handling bugs in aggregates)") { checkAnswer( decimalData.groupBy('a cast DecimalType(10, 2)).agg(avg('b cast DecimalType(10, 2))), Seq(Row(new java.math.BigDecimal(1), new java.math.BigDecimal("1.5")), Row(new java.math.BigDecimal(2), new java.math.BigDecimal("1.5")), Row(new java.math.BigDecimal(3), new java.math.BigDecimal("1.5")))) } test("SPARK-17616: distinct aggregate combined with a non-partial aggregate") { val df = Seq((1, 3, "a"), (1, 2, "b"), (3, 4, "c"), (3, 4, "c"), (3, 5, "d")) .toDF("x", "y", "z") checkAnswer( df.groupBy($"x").agg(countDistinct($"y"), sort_array(collect_list($"z"))), Seq(Row(1, 2, Seq("a", "b")), Row(3, 2, Seq("c", "c", "d")))) } test("SPARK-18004 limit + aggregates") { withSQLConf(SQLConf.LIMIT_FLAT_GLOBAL_LIMIT.key -> "true") { val df = Seq(("a", 1), ("b", 2), ("c", 1), ("d", 5)).toDF("id", "value") val limit2Df = df.limit(2) checkAnswer( limit2Df.groupBy("id").count().select($"id"), limit2Df.select($"id")) } } test("SPARK-17237 remove backticks in a pivot result schema") { val df = Seq((2, 3, 4), (3, 4, 5)).toDF("a", "x", "y") withSQLConf(SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") { checkAnswer( df.groupBy("a").pivot("x").agg(count("y"), 
avg("y")).na.fill(0), Seq(Row(3, 0, 0.0, 1, 5.0), Row(2, 1, 4.0, 0, 0.0)) ) } } test("aggregate function in GROUP BY") { val e = intercept[AnalysisException] { testData.groupBy(sum($"key")).count() } assert(e.message.contains("aggregate functions are not allowed in GROUP BY")) } private def assertNoExceptions(c: Column): Unit = { for ((wholeStage, useObjectHashAgg) <- Seq((true, true), (true, false), (false, true), (false, false))) { withSQLConf( (SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, wholeStage.toString), (SQLConf.USE_OBJECT_HASH_AGG.key, useObjectHashAgg.toString)) { val df = Seq(("1", 1), ("1", 2), ("2", 3), ("2", 4)).toDF("x", "y") // test case for HashAggregate val hashAggDF = df.groupBy("x").agg(c, sum("y")) val hashAggPlan = hashAggDF.queryExecution.executedPlan if (wholeStage) { assert(hashAggPlan.find { case WholeStageCodegenExec(_: HashAggregateExec) => true case _ => false }.isDefined) } else { assert(hashAggPlan.isInstanceOf[HashAggregateExec]) } hashAggDF.collect() // test case for ObjectHashAggregate and SortAggregate val objHashAggOrSortAggDF = df.groupBy("x").agg(c, collect_list("y")) val objHashAggOrSortAggPlan = objHashAggOrSortAggDF.queryExecution.executedPlan if (useObjectHashAgg) { assert(objHashAggOrSortAggPlan.isInstanceOf[ObjectHashAggregateExec]) } else { assert(objHashAggOrSortAggPlan.isInstanceOf[SortAggregateExec]) } objHashAggOrSortAggDF.collect() } } } test("SPARK-19471: AggregationIterator does not initialize the generated result projection" + " before using it") { Seq( monotonically_increasing_id(), spark_partition_id(), rand(Random.nextLong()), randn(Random.nextLong()) ).foreach(assertNoExceptions) } test("SPARK-21580 ints in aggregation expressions are taken as group-by ordinal.") { checkAnswer( testData2.groupBy(lit(3), lit(4)).agg(lit(6), lit(7), sum("b")), Seq(Row(3, 4, 6, 7, 9))) checkAnswer( testData2.groupBy(lit(3), lit(4)).agg(lit(6), 'b, sum("b")), Seq(Row(3, 4, 6, 1, 3), Row(3, 4, 6, 2, 6))) checkAnswer( 
spark.sql("SELECT 3, 4, SUM(b) FROM testData2 GROUP BY 1, 2"), Seq(Row(3, 4, 9))) checkAnswer( spark.sql("SELECT 3 AS c, 4 AS d, SUM(b) FROM testData2 GROUP BY c, d"), Seq(Row(3, 4, 9))) } test("SPARK-22223: ObjectHashAggregate should not introduce unnecessary shuffle") { withSQLConf(SQLConf.USE_OBJECT_HASH_AGG.key -> "true") { val df = Seq(("1", "2", 1), ("1", "2", 2), ("2", "3", 3), ("2", "3", 4)).toDF("a", "b", "c") .repartition(col("a")) val objHashAggDF = df .withColumn("d", expr("(a, b, c)")) .groupBy("a", "b").agg(collect_list("d").as("e")) .withColumn("f", expr("(b, e)")) .groupBy("a").agg(collect_list("f").as("g")) val aggPlan = objHashAggDF.queryExecution.executedPlan val sortAggPlans = aggPlan.collect { case sortAgg: SortAggregateExec => sortAgg } assert(sortAggPlans.isEmpty) val objHashAggPlans = aggPlan.collect { case objHashAgg: ObjectHashAggregateExec => objHashAgg } assert(objHashAggPlans.nonEmpty) val exchangePlans = aggPlan.collect { case shuffle: ShuffleExchangeExec => shuffle } assert(exchangePlans.length == 1) } } Seq(true, false).foreach { codegen => test("SPARK-22951: dropDuplicates on empty dataFrames should produce correct aggregate " + s"results when codegen is enabled: $codegen") { withSQLConf((SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, codegen.toString)) { // explicit global aggregations val emptyAgg = Map.empty[String, String] checkAnswer(spark.emptyDataFrame.agg(emptyAgg), Seq(Row())) checkAnswer(spark.emptyDataFrame.groupBy().agg(emptyAgg), Seq(Row())) checkAnswer(spark.emptyDataFrame.groupBy().agg(count("*")), Seq(Row(0))) checkAnswer(spark.emptyDataFrame.dropDuplicates().agg(emptyAgg), Seq(Row())) checkAnswer(spark.emptyDataFrame.dropDuplicates().groupBy().agg(emptyAgg), Seq(Row())) checkAnswer(spark.emptyDataFrame.dropDuplicates().groupBy().agg(count("*")), Seq(Row(0))) // global aggregation is converted to grouping aggregation: assert(spark.emptyDataFrame.dropDuplicates().count() == 0) } } } test("SPARK-21896: Window functions inside 
aggregate functions") { def checkWindowError(df: => DataFrame): Unit = { val thrownException = the [AnalysisException] thrownBy { df.queryExecution.analyzed } assert(thrownException.message.contains("not allowed to use a window function")) } checkWindowError(testData2.select(min(avg('b).over(Window.partitionBy('a))))) checkWindowError(testData2.agg(sum('b), max(rank().over(Window.orderBy('a))))) checkWindowError(testData2.groupBy('a).agg(sum('b), max(rank().over(Window.orderBy('b))))) checkWindowError(testData2.groupBy('a).agg(max(sum(sum('b)).over(Window.orderBy('a))))) checkWindowError( testData2.groupBy('a).agg(sum('b).as("s"), max(count("*").over())).where('s === 3)) checkAnswer( testData2.groupBy('a).agg(max('b), sum('b).as("s"), count("*").over()).where('s === 3), Row(1, 2, 3, 3) :: Row(2, 2, 3, 3) :: Row(3, 2, 3, 3) :: Nil) checkWindowError(sql("SELECT MIN(AVG(b) OVER(PARTITION BY a)) FROM testData2")) checkWindowError(sql("SELECT SUM(b), MAX(RANK() OVER(ORDER BY a)) FROM testData2")) checkWindowError(sql("SELECT SUM(b), MAX(RANK() OVER(ORDER BY b)) FROM testData2 GROUP BY a")) checkWindowError(sql("SELECT MAX(SUM(SUM(b)) OVER(ORDER BY a)) FROM testData2 GROUP BY a")) checkWindowError( sql("SELECT MAX(RANK() OVER(ORDER BY b)) FROM testData2 GROUP BY a HAVING SUM(b) = 3")) checkAnswer( sql("SELECT a, MAX(b), RANK() OVER(ORDER BY a) FROM testData2 GROUP BY a HAVING SUM(b) = 3"), Row(1, 2, 1) :: Row(2, 2, 2) :: Row(3, 2, 3) :: Nil) } test("SPARK-24788: RelationalGroupedDataset.toString with unresolved exprs should not fail") { // Checks if these raise no exception assert(testData.groupBy('key).toString.contains( "[grouping expressions: [key], value: [key: int, value: string], type: GroupBy]")) assert(testData.groupBy(col("key")).toString.contains( "[grouping expressions: [key], value: [key: int, value: string], type: GroupBy]")) assert(testData.groupBy(current_date()).toString.contains( "grouping expressions: [current_date(None)], value: [key: int, value: 
string], " + "type: GroupBy]")) } }
rikima/spark
sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
Scala
apache-2.0
25,223
package org.jetbrains.plugins.scala
package lang.completion

import com.intellij.patterns.PlatformPatterns
import com.intellij.psi._
import com.intellij.util.ProcessingContext
import com.intellij.patterns.PlatformPatterns.psiElement
import com.intellij.util.Consumer
import com.intellij.psi.PsiClass
import com.intellij.codeInsight.completion._
import lookups.{ScalaLookupItem, LookupElementManager}
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportStmt
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScTemplateDefinition, ScObject, ScTrait, ScClass}
import org.jetbrains.plugins.scala.lang.resolve.{ScalaResolveResult, ResolveUtils}
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScConstructorPattern
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.util.Computable
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.lang.psi.types.{ScAbstractType, ScType}
import org.jetbrains.plugins.scala.lang.completion.ScalaCompletionUtil._
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScReferenceElement, ScStableCodeReferenceElement}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.SyntheticClasses
import org.jetbrains.plugins.scala.lang.completion.ScalaAfterNewCompletionUtil._
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.extensions.{toPsiNamedElementExt, toPsiClassExt}
import org.jetbrains.plugins.scala.lang.psi.light.PsiClassWrapper
import org.jetbrains.plugins.scala.config.ScalaVersionUtil
import scala.collection.mutable
import org.jetbrains.plugins.scala.annotator.intention.ScalaImportTypeFix.{TypeAliasToImport, ClassTypeToImport, TypeToImport}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAlias

/**
 * Registers class-name completion for identifiers that sit under a Scala reference
 * element. The heavy lifting is delegated to the companion object's
 * `completeClassName`; this contributor only gates it behind
 * `shouldRunClassNameCompletion` and then stops further contributors.
 */
class ScalaClassNameCompletionContributor extends CompletionContributor {

  import ScalaClassNameCompletionContributor._

  extend(CompletionType.BASIC, PlatformPatterns.psiElement(ScalaTokenTypes.tIDENTIFIER).
    withParent(classOf[ScReferenceElement]), new CompletionProvider[CompletionParameters] {
    def addCompletions(parameters: CompletionParameters, context: ProcessingContext,
                       result: CompletionResultSet) {
      if (shouldRunClassNameCompletion(parameters, result.getPrefixMatcher)) {
        completeClassName(parameters, context, result)
      }
      // No other contributor should add class-name items after this one ran.
      result.stopHere()
    }
  })
}

object ScalaClassNameCompletionContributor {

  /**
   * Collects class, object and type-alias completion items at the current position.
   *
   * Sources of candidates, in order: synthetic Scala classes, Java classes from the
   * project index, stable type aliases, and elements renamed via import aliases.
   *
   * @return `true` when completion is aborted early (position is not in a Scala
   *         file), `false` after candidates have been offered.
   */
  def completeClassName(parameters: CompletionParameters, context: ProcessingContext,
                        result: CompletionResultSet): Boolean = {
    // Expected types are only relevant right after a `new` keyword; abstract types
    // are replaced by their upper bound.
    val expectedTypesAfterNew: Array[ScType] =
      if (afterNewPattern.accepts(parameters.getPosition, context)) {
        val element = parameters.getPosition
        val newExpr = PsiTreeUtil.getParentOfType(element, classOf[ScNewTemplateDefinition])
        //todo: probably we need to remove all abstracts here according to variance
        newExpr.expectedTypes().map(tp => tp match {
          case ScAbstractType(_, lower, upper) => upper
          case _ => tp
        })
      } else Array.empty
    val insertedElement: PsiElement = parameters.getPosition
    val invocationCount = parameters.getInvocationCount
    if (!insertedElement.getContainingFile.isInstanceOf[ScalaFile]) return true
    // Position context flags that later decide which kinds of symbols are legal here.
    val lookingForAnnotations: Boolean = psiElement.afterLeaf("@").accepts(insertedElement)
    val isInImport = ScalaPsiUtil.getParentOfType(insertedElement, classOf[ScImportStmt]) != null
    val stableRefElement = ScalaPsiUtil.getParentOfType(insertedElement, classOf[ScStableCodeReferenceElement])
    val refElement = ScalaPsiUtil.getParentOfType(insertedElement, classOf[ScReferenceElement])
    val onlyClasses = stableRefElement != null && !stableRefElement.getContext.isInstanceOf[ScConstructorPattern]
    // Maps for `import x.{A => B}` style renames: original name -> (alias, element)
    // and alias -> element.
    val renamesMap = new mutable.HashMap[String, (String, PsiNamedElement)]()
    val reverseRenamesMap = new mutable.HashMap[String, PsiNamedElement]()
    refElement match {
      case ref: PsiReference => ref.getVariants().foreach {
        case s: ScalaLookupItem =>
          s.isRenamed match {
            case Some(name) =>
              renamesMap += ((s.element.name, (name, s.element)))
              reverseRenamesMap += ((name, s.element))
            case None =>
          }
        case _ =>
      }
      case _ =>
    }

    // Filters one candidate through exclusion, accessibility and position rules,
    // then adds a lookup element for it (or a constructor-aware one after `new`).
    def addTypeForCompletion(typeToImport: TypeToImport) {
      val isExcluded: Boolean = ApplicationManager.getApplication.runReadAction(new Computable[Boolean] {
        def compute: Boolean = {
          val clazz = typeToImport match {
            case ClassTypeToImport(clazz) => clazz
            case TypeAliasToImport(alias) =>
              val containingClass = alias.containingClass
              if (containingClass == null) return false
              containingClass
          }
          JavaCompletionUtil.isInExcludedPackage(clazz, true)
        }
      })
      if (isExcluded) return
      // Second completion invocation also shows members that are not accessible.
      val isAccessible = invocationCount >= 2 ||
        ResolveUtils.isAccessible(typeToImport.element, insertedElement, forCompletion = true)
      if (!isAccessible) return
      if (lookingForAnnotations && !typeToImport.isAnnotationType) return
      typeToImport.element match {
        case _: ScClass | _: ScTrait | _: ScTypeAlias if !isInImport && !onlyClasses => return
        case _: ScObject if !isInImport && onlyClasses => return
        case _ =>
      }
      val renamed = renamesMap.get(typeToImport.name).filter(_._2 == typeToImport).map(_._1)
      for {
        el <- LookupElementManager.getLookupElement(
          new ScalaResolveResult(typeToImport.element, nameShadow = renamed),
          isClassName = true, isInImport = isInImport, isInStableCodeReference = stableRefElement != null)
      } {
        if (!afterNewPattern.accepts(parameters.getPosition, context)) result.addElement(el)
        else {
          typeToImport match {
            case ClassTypeToImport(clazz) =>
              result.addElement(getLookupElementFromClass(expectedTypesAfterNew, clazz, renamesMap))
            case _ =>
          }
        }
      }
    }

    val project = insertedElement.getProject
    import org.jetbrains.plugins.scala.config.ScalaVersionUtil._
    // Synthetic (compiler-provided) classes are only offered for old Scala versions.
    val checkSynthetic = ScalaVersionUtil.isGeneric(parameters.getOriginalFile, true, SCALA_2_7, SCALA_2_8)
    for {
      clazz <- SyntheticClasses.get(project).all.valuesIterator
      if checkSynthetic || !ScType.baseTypesQualMap.contains(clazz.qualifiedName)
    } addTypeForCompletion(ClassTypeToImport(clazz))
    val prefixMatcher = result.getPrefixMatcher
    AllClassesGetter.processJavaClasses(
      if (lookingForAnnotations) parameters.withInvocationCount(2) else parameters,
      prefixMatcher, parameters.getInvocationCount <= 1, new Consumer[PsiClass] {
        def consume(psiClass: PsiClass) {
          //todo: filter according to position
          // Wrappers are skipped; companions are offered alongside the class itself.
          if (psiClass.isInstanceOf[PsiClassWrapper]) return
          ScalaPsiUtil.getCompanionModule(psiClass).foreach(clazz => addTypeForCompletion(ClassTypeToImport(clazz)))
          addTypeForCompletion(ClassTypeToImport(psiClass))
        }
      })
    for {
      name <- ScalaPsiManager.instance(project).getStableTypeAliasesNames
      if prefixMatcher.prefixMatches(name)
      alias <- ScalaPsiManager.instance(project).getStableAliasesByName(name, insertedElement.getResolveScope)
    } {
      addTypeForCompletion(TypeAliasToImport(alias))
    }
    // Elements whose alias matches the prefix but whose real name does not.
    for {
      (name, elem: PsiNamedElement) <- reverseRenamesMap
      if prefixMatcher.prefixMatches(name)
      if !prefixMatcher.prefixMatches(elem.name)
    } {
      elem match {
        case clazz: PsiClass => addTypeForCompletion(ClassTypeToImport(clazz))
        case ta: ScTypeAlias => addTypeForCompletion(TypeAliasToImport(ta))
        case _ =>
      }
    }
    false
  }
}
consulo/consulo-scala
src/org/jetbrains/plugins/scala/lang/completion/ScalaClassNameCompletionContributor.scala
Scala
apache-2.0
8,115
package com.swara.learn.genetic

import net.jcip.annotations.ThreadSafe

/**
 * A fitness evaluator. Evolutionary fitness numerically encodes the rules of Darwinian natural
 * selection; individuals with greater fitness are more likely to produce offspring relative to its
 * peers. Fitness must be a non-negative real number.
 *
 * @tparam T Type of genome.
 */
@ThreadSafe
trait Evaluator[T] {

  /**
   * Computes the fitness of the given genome.
   *
   * @param genome Genome to evaluate.
   * @return Non-negative fitness score; larger values indicate fitter individuals.
   */
  def fitness(genome: T): Double

}
ashwin153/swara
swara-learn/src/main/scala/com/swara/learn/genetic/Evaluator.scala
Scala
mit
433
package com.teambytes.inflatable.raft.config

import com.typesafe.config.Config
import java.util.concurrent.TimeUnit
import concurrent.duration._
import akka.actor.Extension

/**
 * Typed view over the `akka.raft` section of a Typesafe [[Config]].
 * All durations are read in milliseconds and exposed as `FiniteDuration`s.
 */
private[inflatable] class RaftConfig (config: Config) extends Extension {

  // Root of every key read below.
  val raftConfig = config.getConfig("akka.raft")

  // Maximum number of entries replicated in one AppendEntries message.
  val defaultAppendEntriesBatchSize = raftConfig.getInt("default-append-entries-batch-size")

  // When true, internal events are published (intended for tests).
  val publishTestingEvents = raftConfig.getBoolean("publish-testing-events")

  // Randomized election timeout is drawn from the [min, max] range.
  val electionTimeoutMin = raftConfig.getDuration("election-timeout.min", TimeUnit.MILLISECONDS).millis
  val electionTimeoutMax = raftConfig.getDuration("election-timeout.max", TimeUnit.MILLISECONDS).millis

  // Interval between leader heartbeats.
  val heartbeatInterval = raftConfig.getDuration("heartbeat-interval", TimeUnit.MILLISECONDS).millis

  // Auto-discovery: how long to wait for Identify replies, and how many retries.
  val clusterAutoDiscoveryIdentifyTimeout = raftConfig.getDuration("cluster.auto-discovery.identify-timeout", TimeUnit.MILLISECONDS).millis
  val clusterAutoDiscoveryRetryCount = raftConfig.getInt("cluster.auto-discovery.retry-count")
}
grahamar/inflatable
src/main/scala/com/teambytes/inflatable/raft/config/RaftConfig.scala
Scala
apache-2.0
1,020
package edu.rice.habanero.benchmarks.radixsort

import java.util.Random

import edu.rice.habanero.actors.HabaneroSelector
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
import edu.rice.hj.Module0._
import edu.rice.hj.api.HjSuspendable

/**
 * Radix-sort benchmark on Habanero selectors: a source actor streams random
 * values through a chain of one-bit sorting actors into a validation actor.
 *
 * @author <a href="http://shams.web.rice.edu/">Shams Imam</a> (shams@rice.edu)
 */
object RadixSortHabaneroSelectorBenchmark {

  def main(args: Array[String]) {
    BenchmarkRunner.runBenchmark(args, new RadixSortHabaneroSelectorBenchmark)
  }

  private final class RadixSortHabaneroSelectorBenchmark extends Benchmark {
    def initialize(args: Array[String]) {
      RadixSortConfig.parseArgs(args)
    }

    def printArgInfo() {
      RadixSortConfig.printArgs()
    }

    def runIteration() {
      finish(new HjSuspendable {
        override def run() = {
          val validationActor = new ValidationActor(RadixSortConfig.N)
          validationActor.start()

          val sourceActor = new IntSourceActor(RadixSortConfig.N, RadixSortConfig.M, RadixSortConfig.S)
          sourceActor.start()

          // Build the sorter chain backwards, from the validation actor towards
          // the source, halving the radix bit each step; the final `nextActor`
          // is the head of the chain and is handed to the source.
          var radix = RadixSortConfig.M / 2
          var nextActor: HabaneroSelector[AnyRef] = validationActor
          while (radix > 0) {
            val sortActor = new SortActor(RadixSortConfig.N, radix, nextActor)
            sortActor.start()

            radix /= 2
            nextActor = sortActor
          }

          sourceActor.send(0, NextActorMessage(nextActor))
        }
      })
    }

    def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
    }
  }

  private case class NextActorMessage(actor: HabaneroSelector[AnyRef])

  private case class ValueMessage(value: Long)

  /**
   * Emits `numValues` pseudo-random candidates to the actor announced via
   * [[NextActorMessage]], then exits.
   */
  private class IntSourceActor(numValues: Int, maxValue: Long, seed: Long)
    extends HabaneroSelector[AnyRef](1) {

    val random = new Random(seed)

    override def process(msg: AnyRef) {
      msg match {
        case nm: NextActorMessage =>
          var i = 0
          while (i < numValues) {
            // NOTE(review): Math.abs(Long.MinValue) is still negative, so
            // `candidate` can be negative for that one input of nextLong().
            val candidate = Math.abs(random.nextLong()) % maxValue
            val message = new ValueMessage(candidate)
            nm.actor.send(0, message)
            i += 1
          }
          exit()
      }
    }
  }

  /**
   * One stage of the radix sort: values with the `radix` bit clear are forwarded
   * immediately; the rest are buffered and flushed in arrival order once all
   * `numValues` messages have been seen.
   */
  private class SortActor(numValues: Int, radix: Long, nextActor: HabaneroSelector[AnyRef])
    extends HabaneroSelector[AnyRef](1) {

    private val orderingArray = Array.ofDim[ValueMessage](numValues)
    private var valuesSoFar = 0
    private var j = 0

    override def process(msg: AnyRef): Unit = {
      msg match {
        case vm: ValueMessage =>
          valuesSoFar += 1

          val current = vm.value
          if ((current & radix) == 0) {
            nextActor.send(0, vm)
          } else {
            orderingArray(j) = vm
            j += 1
          }

          if (valuesSoFar == numValues) {
            // Flush the buffered (bit-set) values after all bit-clear ones.
            var i = 0
            while (i < j) {
              nextActor.send(0, orderingArray(i))
              i += 1
            }
            exit()
          }
      }
    }
  }

  /**
   * Terminal stage: checks that values arrive in non-decreasing order and
   * reports either the first out-of-place value or the sum of all elements.
   */
  private class ValidationActor(numValues: Int) extends HabaneroSelector[AnyRef](1) {

    private var sumSoFar = 0.0
    private var valuesSoFar = 0
    private var prevValue = 0L
    // (-1L, -1) means "no ordering violation seen yet"; only the first
    // violation is recorded.
    private var errorValue = (-1L, -1)

    override def process(msg: AnyRef) {
      msg match {
        case vm: ValueMessage =>
          valuesSoFar += 1

          if (vm.value < prevValue && errorValue._1 < 0) {
            errorValue = (vm.value, valuesSoFar - 1)
          }

          prevValue = vm.value
          sumSoFar += prevValue

          if (valuesSoFar == numValues) {
            if (errorValue._1 >= 0) {
              println("ERROR: Value out of place: " + errorValue._1 + " at index " + errorValue._2)
            } else {
              println("Elements sum: " + sumSoFar)
            }
            exit()
          }
      }
    }
  }

}
smarr/savina
src/main/scala/edu/rice/habanero/benchmarks/radixsort/RadixSortHabaneroSelectorBenchmark.scala
Scala
gpl-2.0
3,861
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.executor import java.net.URL import org.apache.spark.SparkEnv import org.apache.spark.deploy.SparkHadoopUtil import org.apache.spark.internal.Logging import org.apache.spark.rpc.RpcEnv import org.apache.spark.util.YarnContainerInfoHelper /** * Custom implementation of CoarseGrainedExecutorBackend for YARN resource manager. * This class extracts executor log URLs and executor attributes from system environment which * properties are available for container being set via YARN. 
 */
private[spark] class YarnCoarseGrainedExecutorBackend(
    rpcEnv: RpcEnv,
    driverUrl: String,
    executorId: String,
    hostname: String,
    cores: Int,
    userClassPath: Seq[URL],
    env: SparkEnv,
    resourcesFile: Option[String])
  extends CoarseGrainedExecutorBackend(
    rpcEnv,
    driverUrl,
    executorId,
    hostname,
    cores,
    userClassPath,
    env,
    resourcesFile) with Logging {

  // Hadoop configuration derived from the Spark conf; built lazily since it is
  // only needed when log URLs / attributes are requested.
  private lazy val hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(env.conf)

  /** Executor log URLs resolved from the YARN container environment; empty map on failure. */
  override def extractLogUrls: Map[String, String] = {
    YarnContainerInfoHelper.getLogUrls(hadoopConfiguration, container = None)
      .getOrElse(Map())
  }

  /** Executor attributes resolved from the YARN container environment; empty map on failure. */
  override def extractAttributes: Map[String, String] = {
    YarnContainerInfoHelper.getAttributes(hadoopConfiguration, container = None)
      .getOrElse(Map())
  }
}

private[spark] object YarnCoarseGrainedExecutorBackend extends Logging {

  /** Entry point: parses backend arguments and runs the YARN-specific backend. */
  def main(args: Array[String]): Unit = {
    // Factory handed to the generic runner so it constructs the YARN variant.
    val createFn: (RpcEnv, CoarseGrainedExecutorBackend.Arguments, SparkEnv) =>
      CoarseGrainedExecutorBackend = { case (rpcEnv, arguments, env) =>
      new YarnCoarseGrainedExecutorBackend(rpcEnv, arguments.driverUrl, arguments.executorId,
        arguments.hostname, arguments.cores, arguments.userClassPath, env,
        arguments.resourcesFileOpt)
    }
    val backendArgs = CoarseGrainedExecutorBackend.parseArguments(args,
      this.getClass.getCanonicalName.stripSuffix("$"))
    CoarseGrainedExecutorBackend.run(backendArgs, createFn)
    System.exit(0)
  }

}
aosagie/spark
resource-managers/yarn/src/main/scala/org/apache/spark/executor/YarnCoarseGrainedExecutorBackend.scala
Scala
apache-2.0
2,844
/*
 * Copyright 2001-2011 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest

/**
 * A <code>Suite</code> class that takes zero to many <code>Suite</code>s in its constructor,
 * which will be returned from its <code>nestedSuites</code> method.
 *
 * <p>
 * For example, you can define a suite that always executes a list of
 * nested suites like this:
 * </p>
 *
 * <pre class="stHighlight">
 * class StepsSuite extends Suites(
 *   new Step1Suite,
 *   new Step2Suite,
 *   new Step3Suite,
 *   new Step4Suite,
 *   new Step5Suite
 * )
 * </pre>
 *
 * <p>
 * If <code>StepsSuite</code> is executed sequentially, it will execute its
 * nested suites in the passed order: <code>Step1Suite</code>, <code>Step2Suite</code>,
 * <code>Step3Suite</code>, <code>Step4Suite</code>, and <code>Step5Suite</code>.
 * If <code>StepsSuite</code> is executed in parallel, the nested suites will
 * be executed concurrently.
 * </p>
 *
 * @param suitesToNest a sequence of <code>Suite</code>s to nest.
 *
 * @throws NullPointerException if <code>suitesToNest</code>, or any suite
 * it contains, is <code>null</code>.
 *
 * @author Bill Venners
 */
class Suites(suitesToNest: Suite*) extends Suite { thisSuite =>

  // Fail fast at construction time if any nested suite is null, per the
  // documented @throws contract above.
  for (s <- suitesToNest) {
    if (s == null)
      throw new NullPointerException("A passed suite was null")
  }

  /**
   * Returns an immutable <code>IndexedSeq</code> containing the suites passed to the constructor in
   * the order they were passed.
   */
  override val nestedSuites: collection.immutable.IndexedSeq[Suite] = Vector.empty ++ suitesToNest

  /**
   * Returns a user friendly string for this suite, composed of the
   * simple name of the class (possibly simplified further by removing dollar signs if added by the Scala interpeter) and, if this suite
   * contains nested suites, the result of invoking <code>toString</code> on each
   * of the nested suites, separated by commas and surrounded by parentheses.
   *
   * @return a user-friendly string for this suite
   */
  override def toString: String = Suite.suiteToString(None, thisSuite)
}

/**
 * Companion object to class <code>Suites</code> that offers an <code>apply</code> factory method
 * for creating a <code>Suites</code> instance.
 *
 * <p>
 * One use case for this object is to run multiple specification-style suites in the Scala interpreter, like this:
 * </p>
 *
 * <pre class="stREPL">
 * scala&gt; Suites(new MyFirstSuite, new MyNextSuite).execute()
 * </pre>
 */
object Suites {

  /**
   * Factory method for creating a <code>Suites</code> instance.
   */
  def apply(suitesToNest: Suite*): Suites = new Suites(suitesToNest: _*)
}
svn2github/scalatest
src/main/scala/org/scalatest/Suites.scala
Scala
apache-2.0
3,176
package mid2016

/** Runnable entry point; all exam solutions live in the [[Mid2016]] trait so they can be mixed in and tested. */
object Mid2016 extends Mid2016 with App {
}

trait Mid2016 {

  /**
   * scanLeft implemented via foldLeft: the accumulator is the reversed list of
   * partial results, whose head is always the latest value.
   *
   * NOTE: unlike the standard library's `scanLeft`, the seed `z` itself is
   * dropped from the result (the trailing `.tail`), so exactly one result per
   * input element is returned.
   *
   * @param xs input elements
   * @param z  initial accumulator (not included in the output)
   * @param op fold step combining the running value with the next element
   */
  def scanLeft[A, B](xs: List[A])(z: B)(op: (B, A) => B): List[B] =
    xs.foldLeft(List(z))((list: List[B], x: A) => op(list.head, x) :: list).reverse.tail

  /** flatMap implemented via foldRight; `f(x).foldRight(y)(_ :: _)` prepends all of `f(x)` onto the accumulator. */
  def flatMap[A, B](xs: List[A])(f: A => List[B]): List[B] = {
    xs.foldRight(List[B]())((x: A, y: List[B]) => f(x).foldRight(y)(_ :: _))
  }

  /**
   * Set of nodes reachable from `init` in exactly `n` edge steps.
   * For `n == 0` this is `init` itself; each step follows every outgoing edge
   * of every node in the current frontier.
   */
  def reachable(n: Int, init: Set[Node], edges: List[Edge]): Set[Node] = {
    if (n == 0) init
    else reachable(n - 1, init.toList.flatMap(x => edges.filter(_.from == x).map(_.to)).toSet, edges)
  }

  /**
   * Nodes that lie on a cycle of length 3, i.e. nodes that can reach
   * themselves in exactly three edge steps.
   *
   * (Fix: the original computed `reachable(3, nodes, edges)` into an unused
   * local and used `.filter(_ == x).nonEmpty` where `.contains(x)` suffices.)
   */
  def cycles3(nodes: Set[Node], edges: List[Edge]): Set[Node] =
    nodes.filter(x => reachable(3, Set(x), edges).contains(x))
}

case class Node(id: Int)

case class Edge(from: Node, to: Node)
rusucosmin/courses
fp/mid-exam/src/main/scala/mid2016/mid2016.scala
Scala
mit
841
package de.fosd.typechef.linux.featuremodel

import de.fosd.typechef.linux.LinuxSettings
import io.Source

/**
 * full feature model, including the partial configuration, used for analysis and preparation
 */
class LinuxFullModel extends LinuxDimacsModel {

  /** Base DIMACS model conjoined with the constraints from the partial configuration file. */
  override def createFeatureModel = super.createFeatureModel and partialConfiguration

  /**
   * Reads the partial configuration file and turns it into a single feature
   * expression: every `#define NAME 1` contributes `NAME`, every `#undef NAME`
   * contributes `!NAME`, all conjoined.
   *
   * Fix: the directives are materialized into a List before being filtered.
   * Previously `getLines()` produced a single-use Iterator that was lazily
   * filtered twice; consuming one lazy view exhausted the underlying iterator,
   * so the second filter (the `#undef` pass) silently saw no data.
   * The Source is now also closed when done.
   */
  private def partialConfiguration = {
    import de.fosd.typechef.featureexpr.FeatureExprFactory._
    val DEF = "#define"
    val UNDEF = "#undef"

    val source = Source.fromFile(LinuxSettings.partialConfFile)
    try {
      // Materialize so the two filters below each see every directive.
      val directives = source.getLines().filter(_.startsWith("#")).toList

      // The macro name is the second whitespace-separated token of a directive.
      def findMacroName(directive: String) = directive.split(' ')(1)

      val booleanDefs = directives.filter(directive => directive.startsWith(DEF) && directive.endsWith(" 1")).map(findMacroName)
      val undefs = directives.filter(_.startsWith(UNDEF)).map(findMacroName)

      (booleanDefs.map(createDefinedExternal(_)) ++ undefs.map(createDefinedExternal(_).not())).
        foldRight(True)(_ and _)
    } finally source.close()
  }
}
aJanker/TypeChef-Sampling-Linux
src/main/scala/de/fosd/typechef/linux/featuremodel/LinuxFullModel.scala
Scala
gpl-3.0
1,061
package org.scalaide.debug.internal.model

import com.sun.jdi.AbsentInformationException
import com.sun.jdi.InvalidStackFrameException
import com.sun.jdi.Method
import com.sun.jdi.NativeMethodException
import com.sun.jdi.StackFrame
import org.eclipse.debug.core.model.IDropToFrame
import org.eclipse.debug.core.model.IRegisterGroup
import org.eclipse.debug.core.model.IStackFrame
import org.eclipse.debug.core.model.IThread
import org.eclipse.debug.core.model.IVariable
import scala.collection.JavaConverters.asScalaBufferConverter
import scala.reflect.NameTransformer

object ScalaStackFrame {

  /** Factory for stack frames; `index` 0 denotes the top frame of the thread. */
  def apply(
      thread: ScalaThread,
      stackFrame: StackFrame,
      index: Int = /*top frame*/ 0
  ): ScalaStackFrame = {
    new ScalaStackFrame(thread, stackFrame, index)
  }

  // regexp for JNI signature
  // NOTE(review): the doubled backslashes in the two raw (triple-quoted)
  // literals below look like an escaping artifact — in a raw string `\\[`
  // is a literal backslash before an unclosed character class. Verify these
  // patterns against the original repository before relying on them.
  private val typeSignature = """L([^;]*);""".r
  private val arraySignature = """\\[(.*)""".r
  private val argumentsInMethodSignature = """\\(([^\\)]*)\\).*""".r

  /**
   * Converts one JNI type signature into a user-readable simple type name
   * (e.g. primitives map to their Scala names, `L.../Name;` to the decoded
   * `Name`, arrays to `Array[...]`). Unknown signatures cause a MatchError.
   */
  def getSimpleName(signature: String): String = {
    signature match {
      case typeSignature(typeName) =>
        NameTransformer.decode(typeName.split('/').last)
      case arraySignature(elementSignature) =>
        "Array[%s]".format(getSimpleName(elementSignature))
      case "B" =>
        "Byte"
      case "C" =>
        "Char"
      case "D" =>
        "Double"
      case "F" =>
        "Float"
      case "I" =>
        "Int"
      case "J" =>
        "Long"
      case "S" =>
        "Short"
      case "Z" =>
        "Boolean"
    }
  }

  // TODO: need unit tests
  /** Parses a JNI method signature and returns the simple names of its argument types, in order. */
  def getArgumentSimpleNames(methodSignature: String): List[String] = {
    val argumentsInMethodSignature(argString) = methodSignature

    // Recursively peels one argument signature off the front of `args`.
    def parseArguments(args: String): List[String] = {
      if (args.isEmpty) {
        Nil
      } else {
        args.charAt(0) match {
          case 'L' =>
            // Object types run up to (and include) the terminating ';'.
            val typeSignatureLength = args.indexOf(';') + 1
            getSimpleName(args.substring(0, typeSignatureLength)) +: parseArguments(args.substring(typeSignatureLength))
          case '[' =>
            // Array: parse the remainder, then wrap the first parsed element type.
            val parsedArguments = parseArguments(args.tail)
            "Array[%s]".format(parsedArguments.head) +: parsedArguments.tail
          case c =>
            // Single-character primitive signature.
            getSimpleName(c.toString) +: parseArguments(args.tail)
        }
      }
    }

    parseArguments(argString)
  }
}

/**
 * A stack frame in the Scala debug model.
 * This class is NOT thread safe. 'stackFrame' variable can be 're-bound' at any time.
 * Instances have to be created through its companion object.
 */
class ScalaStackFrame private (val thread: ScalaThread, @volatile var stackFrame: StackFrame, val index: Int)
  extends ScalaDebugElement(thread.getDebugTarget) with IStackFrame with IDropToFrame {

  import ScalaStackFrame._

  // Members declared in org.eclipse.debug.core.model.IStackFrame

  // Character offsets are not tracked; -1 signals "unknown" to the platform.
  override def getCharEnd(): Int = -1
  override def getCharStart(): Int = -1

  override def getLineNumber(): Int = {
    (safeStackFrameCalls(-1) or wrapJDIException("Exception while retrieving stack frame's line number")) {
      stackFrame.location.lineNumber // TODO: cache data ?
    }
  }

  override def getName(): String = {
    (safeStackFrameCalls("Error retrieving name") or wrapJDIException("Exception while retrieving stack frame's name")) {
      stackFrame.location.declaringType.name // TODO: cache data ?
    }
  }

  override def getRegisterGroups(): Array[IRegisterGroup] = ???
  override def getThread(): IThread = thread
  override def getVariables(): Array[IVariable] = variables.toArray

  // TODO: need real logic
  override def hasRegisterGroups(): Boolean = ???
  override def hasVariables(): Boolean = ???

  // Members declared in org.eclipse.debug.core.model.IStep
  // Stepping is delegated entirely to the owning thread.

  override def canStepInto(): Boolean = thread.canStepInto()
  override def canStepOver(): Boolean = thread.canStepOver()
  override def canStepReturn(): Boolean = thread.canStepReturn()
  override def isStepping(): Boolean = ???
  override def stepInto(): Unit = thread.stepInto
  override def stepOver(): Unit = thread.stepOver
  override def stepReturn(): Unit = thread.stepReturn

  // Members declared in org.eclipse.debug.core.model.ISuspendResume
  // A frame only exists while its thread is suspended.

  override def canResume(): Boolean = thread.canResume()
  override def canSuspend(): Boolean = false
  override def isSuspended(): Boolean = true
  override def resume(): Unit = thread.resume()
  override def suspend(): Unit = ???

  // Members declared in org.eclipse.debug.core.model.IDropToFrame

  override def canDropToFrame(): Boolean = thread.canDropToFrame(this)
  override def dropToFrame(): Unit = thread.dropToFrame(this)

  // ---

  def isNative = stackFrame.location().method().isNative()
  def isObsolete = stackFrame.location().method().isObsolete()

  import org.scalaide.debug.internal.JDIUtil._
  import scala.util.control.Exception
  import Exception.Catch

  // Visible variables of this frame, with 'this' prepended when available.
  // Computed lazily; falls back to Nil on JDI failures.
  private lazy val variables: Seq[ScalaVariable] = {
    (safeStackFrameCalls(Nil) or wrapJDIException("Exception while retrieving stack frame's visible variables")) {
      import scala.collection.JavaConverters._
      val visibleVariables = {
        (Exception.handling(classOf[AbsentInformationException]) by (_ => Seq.empty)) {
          stackFrame.visibleVariables.asScala.map(new ScalaLocalVariable(_, this))
        }
      }
      val currentMethod = stackFrame.location.method
      if (currentMethod.isNative || currentMethod.isStatic) {
        // 'this' is not available for native and static methods
        visibleVariables
      } else {
        new ScalaThisVariable(stackFrame.thisObject, this) +: visibleVariables
      }
    }
  }

  private def getSourceName(): String = safeStackFrameCalls("Source name not available")(stackFrame.location.sourceName)

  /**
   * Return the source path based on source name and the package.
   * Segments are separated by '/'.
   *
   * @throws DebugException
   */
  def getSourcePath(): String = {
    wrapJDIException("Exception while retrieving source path") {
      // we shoudn't use location#sourcePath, as it is platform dependent
      stackFrame.location.declaringType.name.split('.').init match {
        case Array() => getSourceName
        case packageSegments => packageSegments.mkString("", "/", "/") + getSourceName
      }
    }
  }

  /** User-readable "Type.method(ArgTypes)" label for this frame's method. */
  def getMethodFullName(): String = {
    def getFullName(method: Method): String = {
      // method.signature of obsolete methods is not available. Hence, they have to be processed in a special way.
      if (method.isObsolete())
        s"${getSimpleName(method.declaringType.signature)} <${NameTransformer.decode(method.name)}>"
      else "%s.%s(%s)".format(
        getSimpleName(method.declaringType.signature),
        NameTransformer.decode(method.name),
        getArgumentSimpleNames(method.signature).mkString(", "))
    }
    safeStackFrameCalls("Error retrieving full name") {
      getFullName(stackFrame.location.method)
    }
  }

  /** Set the current stack frame to `newStackFrame`. The `ScalaStackFrame.variables` don't need
   *  to be recomputed because a variable (i.e., a `ScalaLocalVariable`) always uses the latest
   *  stack frame to compute its value, as it can be checked by looking at the implementation of
   *  `ScalaLocalVariable.getValue`
   */
  def rebind(newStackFrame: StackFrame): Unit = {
    stackFrame = newStackFrame
  }

  /** Wrap calls to the underlying VM stack frame to handle exceptions gracefully. */
  private def safeStackFrameCalls[A](defaultValue: A): Catch[A] =
    (safeVmCalls(defaultValue) or Exception.failAsValue(
      classOf[InvalidStackFrameException],
      classOf[AbsentInformationException],
      classOf[NativeMethodException])(defaultValue))
}
aleksi-lukkarinen/scala-ide
org.scala-ide.sdt.debug/src/org/scalaide/debug/internal/model/ScalaStackFrame.scala
Scala
bsd-3-clause
7,705
// scalac: -Werror
//
// NOTE(review): this is a compiler negative test — the "warn" cases are meant to
// trigger the catch-all lint and fail under -Werror, the "okay" cases must not.
// Keep the code exactly as written; the catch patterns ARE the test. If the test
// framework pins expected line numbers in a .check file, adding/removing lines
// here requires regenerating it — confirm before reformatting.
object CatchAll {
  // These swallow every Throwable (no type bound) and must be flagged.
  try { "warn" } catch { case _ => }
  try { "warn" } catch { case x => }
  try { "warn" } catch { case _: RuntimeException => ; case x => }

  // Matching a specific value (stable identifier or extractor) is allowed.
  val t = T
  try { "okay" } catch { case T => }
  try { "okay" } catch { case `t` => }
  try { "okay" } catch { case x @ T => }
  try { "okay" } catch { case x @ `t` => }

  // An explicit type bound — even Throwable — is a deliberate choice, not flagged.
  try { "okay" } catch { case _: Throwable => }
  try { "okay" } catch { case _: Exception => }
  try { "okay" } catch { case okay: Throwable => }
  try { "okay" } catch { case okay: Exception => }

  // A guard makes the pattern non-total, so it is not a catch-all.
  try { "okay" } catch { case _ if "".isEmpty => }

  // Wildcards in ordinary matches are fine; only catch clauses are linted.
  "okay" match { case _ => "" }

  // A PartialFunction handler is accepted; a total Function1 handler is flagged.
  val handler: PartialFunction[Throwable, String] = { case _ => "hello, world" }
  val discarder = (_: Throwable) => "goodbye, cruel world"

  try "okay" catch handler
  try "okay" catch discarder // warn total function
}

object T extends Throwable
scala/scala
test/files/neg/catch-all.scala
Scala
apache-2.0
900
/*
  Copyright 2012 - 2014 Jerome Leleu

  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

  http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
 */
package org.pac4j.play.scala

import scala.concurrent.Future
import play.api._
import play.api.mvc._
import org.pac4j.core.client._
import org.pac4j.core.credentials._
import org.pac4j.core.profile._
import org.pac4j.core.util._
import org.pac4j.play._
import org.slf4j._
// NOTE(review): RequestBodyHandler appears unused in this file — candidate for removal.
import play.core.server.netty.RequestBodyHandler
import org.pac4j.core.exception._

/**
 * This controller is the Scala controller to retrieve the user profile or the redirection url to start the authentication process.
 *
 * @author Jerome Leleu
 * @since 1.0.0
 */
trait ScalaController extends Controller {

  protected val logger = LoggerFactory.getLogger("org.pac4j.play.scala.ScalaController")

  /**
   * Get or create a new sessionId.
   *
   * Returns the request's session, extended with a freshly generated
   * Constants.SESSION_ID entry when none was present.
   *
   * @param request
   * @return the (updated) session
   */
  protected def getOrCreateSessionId(request: RequestHeader): Session = {
    var newSession = request.session
    val optionSessionId = newSession.get(Constants.SESSION_ID)
    logger.debug("getOrCreateSessionId : {}", optionSessionId)
    if (!optionSessionId.isDefined) {
      // Session is immutable; += yields a new Session assigned back to the var.
      newSession += Constants.SESSION_ID -> StorageHelper.generateSessionId()
    }
    newSession
  }

  /**
   * Defines an action with requires authentication : it means that the user is redirected to the provider
   * if he is not authenticated or access directly to the action otherwise.
   *
   * When no profile is stored, the client's redirect action is computed: REDIRECT
   * becomes an HTTP redirect, SUCCESS becomes an inline HTML page (e.g. an
   * auto-submitting form). A RequiresHttpAction with code 401/403 is mapped to the
   * configured error pages; any other code is a hard failure.
   *
   * @param clientName
   * @param targetUrl
   * @param parser
   * @param isAjax
   * @param action
   * @return the current action to process or the redirection to the provider if the user is not authenticated
   */
  protected def RequiresAuthentication[A](clientName: String, targetUrl: String, parser: BodyParser[A], isAjax: Boolean = false)(action: CommonProfile => Action[A]) = Action.async(parser) { request =>
    logger.debug("Entering RequiresAuthentication")
    var newSession = getOrCreateSessionId(request)
    // getOrCreateSessionId guarantees the key exists, so .get is safe here.
    val sessionId = newSession.get(Constants.SESSION_ID).get
    logger.debug("sessionId : {}", sessionId)
    val profile = getUserProfile(request)
    logger.debug("profile : {}", profile)
    // Legacy pac4j API: null signals "not authenticated".
    if (profile == null) {
      try {
        val redirectAction = getRedirectAction(request, newSession, clientName, targetUrl, true, isAjax)
        logger.debug("redirectAction : {}", redirectAction)
        redirectAction.getType() match {
          case RedirectAction.RedirectType.REDIRECT => Future.successful(Redirect(redirectAction.getLocation()).withSession(newSession))
          case RedirectAction.RedirectType.SUCCESS => Future.successful(Ok(redirectAction.getContent()).withSession(newSession).as(HTML))
          case _ => throw new TechnicalException("Unexpected RedirectAction : " + redirectAction.getType)
        }
      } catch {
        case ex: RequiresHttpAction => {
          val code = ex.getCode()
          if (code == 401) {
            Future.successful(Unauthorized(Config.getErrorPage401()).as(HTML))
          } else if (code == 403) {
            Future.successful(Forbidden(Config.getErrorPage403()).as(HTML))
          } else {
            throw new TechnicalException("Unexpected HTTP code : " + code)
          }
        }
      }
    } else {
      // Already authenticated: run the wrapped action with the profile.
      action(profile)(request)
    }
  }

  /** Convenience overload using the default AnyContent body parser. */
  protected def RequiresAuthentication(clientName: String, targetUrl: String = "", isAjax: Boolean = false)(action: CommonProfile => Action[AnyContent]): Action[AnyContent] = {
    RequiresAuthentication(clientName, targetUrl, parse.anyContent, isAjax)(action)
  }

  /**
   * Returns the redirection action to the provider for authentication.
   *
   * NOTE(review): if RequiresHttpAction is thrown despite the comment below,
   * this returns null — callers must tolerate that.
   *
   * @param request
   * @param newSession
   * @param clientName
   * @param targetUrl
   * @return the redirection url to the provider
   */
  protected def getRedirectAction[A](request: Request[A], newSession: Session, clientName: String, targetUrl: String = ""): RedirectAction = {
    var action: RedirectAction = null
    try {
      // redirect to the provider for authentication
      action = getRedirectAction(request, newSession, clientName, targetUrl, false, false)
    } catch {
      case ex: RequiresHttpAction => {
        // should not happen
      }
    }
    logger.debug("redirectAction to : {}", action)
    action
  }

  /**
   * Returns the redirection action to the provider for authentication.
   *
   * @param request
   * @param newSession
   * @param clientName
   * @param targetUrl
   * @param protectedPage
   * @param isAjax
   * @return the redirection url to the provider
   */
  private def getRedirectAction[A](request: Request[A], newSession: Session, clientName: String, targetUrl: String, protectedPage: Boolean, isAjax: Boolean): RedirectAction = {
    val sessionId = newSession.get(Constants.SESSION_ID).get
    logger.debug("sessionId for getRedirectionUrl() : {}", sessionId)
    // save requested url to save
    val requestedUrlToSave = CallbackController.defaultUrl(targetUrl, request.uri)
    logger.debug("requestedUrlToSave : {}", requestedUrlToSave)
    StorageHelper.saveRequestedUrl(sessionId, clientName, requestedUrlToSave);
    // context
    val scalaWebContext = new ScalaWebContext(request, newSession)
    // clients
    val clients = Config.getClients()
    if (clients == null) {
      throw new TechnicalException("No client defined. Use Config.setClients(clients)")
    }
    // NOTE(review): partial match — a client that is not a BaseClient causes a MatchError.
    val client = clients.findClient(clientName) match {
      case c: BaseClient[_, _] => c
    }
    val action = client.getRedirectAction(scalaWebContext, protectedPage, isAjax)
    logger.debug("redirectAction to : {}", action)
    action
  }

  /**
   * Returns the user profile.
   *
   * @param request
   * @return the user profile, or null when no session id exists or no profile is stored
   */
  protected def getUserProfile(request: RequestHeader): CommonProfile = {
    // get the session id
    var profile: CommonProfile = null
    val sessionId = request.session.get(Constants.SESSION_ID)
    logger.debug("sessionId for profile : {}", sessionId)
    if (sessionId.isDefined) {
      // get the user profile
      profile = StorageHelper.getProfile(sessionId.get)
      logger.debug("profile : {}", profile)
    }
    profile
  }
}
milinda/play-pac4j
play-pac4j_scala/src/main/scala/org/pac4j/play/scala/ScalaController.scala
Scala
apache-2.0
6,655
/**
 * Copyright (C) 2007 Orbeon, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it under the terms of the
 * GNU Lesser General Public License as published by the Free Software Foundation; either version
 * 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU Lesser General Public License for more details.
 *
 * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
 */
package org.orbeon.oxf.processor.converter

import org.orbeon.oxf.util.NetUtils
import org.orbeon.oxf.processor.impl.CacheableTransformerOutputImpl
import org.orbeon.oxf.pipeline.api.{XMLReceiver, PipelineContext}
import org.orbeon.oxf.processor.ProcessorImpl
import org.orbeon.oxf.externalcontext.ServletURLRewriter

/**
 * An XHTML rewriter that unconditionally rewrites URLs with a ServletURLRewriter,
 * regardless of the kind of container the request came from.
 */
class XHTMLServletRewrite extends XHTMLRewrite {
  override def createOutput(name: String) =
    addOutput(name, new CacheableTransformerOutputImpl(this, name) {
      def readImpl(pipelineContext: PipelineContext, xmlReceiver: XMLReceiver): Unit = {
        // Build a servlet-based URL rewriter for the current request and wrap the
        // downstream receiver with it, then replay the data input through the chain.
        val currentRequest = NetUtils.getExternalContext.getRequest
        val rewritingReceiver = getRewriteXMLReceiver(new ServletURLRewriter(currentRequest), xmlReceiver, false)
        readInputAsSAX(pipelineContext, ProcessorImpl.INPUT_DATA, rewritingReceiver)
      }
    })
}
evlist/orbeon-forms
src/main/scala/org/orbeon/oxf/processor/converter/XHTMLServletRewrite.scala
Scala
lgpl-2.1
1,605
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package types

import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.Unit
import org.jetbrains.plugins.scala.lang.psi.types.result._

/**
 * PSI implementation of a parenthesised type element, e.g. `(A with B)`.
 *
 * @author Alexander Podkhalyuzin, ilyas
 */
class ScParenthesisedTypeElementImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScParenthesisedTypeElement {
  // The type of `(T)` is the type of T; empty parentheses `()` are typed as Unit.
  protected def innerType: TypeResult =
    innerElement.map(_.`type`()).getOrElse(Right(Unit))

  override def accept(visitor: ScalaElementVisitor) {
    visitor.visitParenthesisedTypeElement(this)
  }

  override def accept(visitor: PsiElementVisitor) {
    visitor match {
      case scalaVisitor: ScalaElementVisitor => scalaVisitor.visitParenthesisedTypeElement(this)
      case _                                 => super.accept(visitor)
    }
  }
}
jastice/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScParenthesisedTypeElementImpl.scala
Scala
apache-2.0
1,024
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// scalastyle:off println
package org.apache.spark.examples.ml

// $example on$
import org.apache.spark.ml.feature.{RegexTokenizer, Tokenizer}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions._
// $example off$

/**
 * Example comparing the whitespace-based Tokenizer with the pattern-based
 * RegexTokenizer on a small sentence DataFrame, printing the tokens and a
 * per-row token count for each.
 */
object TokenizerExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder
      .appName("TokenizerExample")
      .getOrCreate()

    // $example on$
    val sentenceDataFrame = spark.createDataFrame(Seq(
      (0, "Hi I heard about Spark"),
      (1, "I wish Java could use case classes"),
      (2, "Logistic,regression,models,are,neat")
    )).toDF("id", "sentence")

    // Simple tokenizer: lowercases and splits on whitespace.
    val tokenizer = new Tokenizer().setInputCol("sentence").setOutputCol("words")
    // Regex tokenizer: splits on the given pattern (gaps mode by default).
    // NOTE(review): "\\\\W" looks double-escaped for a dump of this source; the
    // upstream example uses "\\W" (non-word characters) — confirm against the
    // original file.
    val regexTokenizer = new RegexTokenizer()
      .setInputCol("sentence")
      .setOutputCol("words")
      .setPattern("\\\\W") // alternatively .setPattern("\\\\w+").setGaps(false)

    // UDF counting the tokens produced for each sentence.
    val countTokens = udf { (words: Seq[String]) => words.length }

    val tokenized = tokenizer.transform(sentenceDataFrame)
    tokenized.select("sentence", "words")
      .withColumn("tokens", countTokens(col("words"))).show(false)

    val regexTokenized = regexTokenizer.transform(sentenceDataFrame)
    regexTokenized.select("sentence", "words")
      .withColumn("tokens", countTokens(col("words"))).show(false)
    // $example off$

    spark.stop()
  }
}
// scalastyle:on println
lhfei/spark-in-action
spark-2.x/src/main/scala/org/apache/spark/examples/ml/TokenizerExample.scala
Scala
apache-2.0
2,275
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.datasources.v2

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.LeafExecNode

/**
 * A physical operator that runs `run()` once and memoizes the produced rows, so a
 * command is never executed more than once no matter which execute* entry point is
 * used. V2 commands that do not need to trigger a Spark job should extend this class.
 */
abstract class V2CommandExec extends LeafExecNode {

  /** Computes the rows produced by this command; implemented by each concrete command. */
  protected def run(): Seq[InternalRow]

  /**
   * Memoized result of `run()`. Being lazy, the command executes on first access
   * and the same rows back every execution path afterwards.
   */
  private lazy val result: Seq[InternalRow] = run()

  protected override def doExecute(): RDD[InternalRow] = {
    // Expose the cached rows as a single-partition RDD for the generic execution path.
    sqlContext.sparkContext.parallelize(result, 1)
  }

  override def executeCollect(): Array[InternalRow] = result.toArray

  override def executeTake(limit: Int): Array[InternalRow] = result.take(limit).toArray

  override def executeToIterator: Iterator[InternalRow] = result.toIterator
}
caneGuy/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2CommandExec.scala
Scala
apache-2.0
2,125
package turkey

import scala.util.Try

import upickle.default.Writer
import upickle.default.Reader

/**
 * API for services that store the data returned by workers across HITs on MTurk.
 * Semantics are append-only; you can save HITs and Assignments and get them later,
 * but cannot delete any data.
 *
 * All operations return Try, so storage failures surface as Failure values rather
 * than thrown exceptions. Prompt/Response payloads are (de)serialized via upickle
 * Writer/Reader context bounds.
 *
 * Not expected to be thread-safe in general.
 * TODO: implement this using the interpreter pattern instead.
 */
trait HITDataService {

  /** Save a HIT that has been uploaded to MTurk.
    * This should happen directly after successful HIT creation.
    */
  def saveHIT[Prompt : Writer](
    hit: HIT[Prompt]
  ): Try[Unit]

  /** Get a stored HIT by its HIT Type ID and HIT ID, which together uniquely identify it.
    * (HIT ID may already be unique; I'm not sure.)
    */
  def getHIT[Prompt : Reader](
    hitTypeId: String,
    hitId: String
  ): Try[HIT[Prompt]]

  /** Save the data of an assignment that has been approved on MTurk.
    * Should happen directly after the assignment is approved.
    */
  def saveApprovedAssignment[Response : Writer](
    assignment: Assignment[Response]
  ): Try[Unit]

  /** Save the data of an assignment that has been rejected on MTurk.
    * Should happen directly after the assignment is rejected.
    */
  def saveRejectedAssignment[Response : Writer](
    assignment: Assignment[Response]
  ): Try[Unit]

  /** Get a saved HIT and all data relevant to that HIT. */
  def getHITInfo[Prompt: Reader, Response : Reader](
    hitTypeId: String,
    hitId: String
  ): Try[HITInfo[Prompt, Response]]

  /** Get all saved HIT data for a given HIT Type. */
  def getAllHITInfo[Prompt: Reader, Response : Reader](
    hitTypeId: String
  ): Try[List[HITInfo[Prompt, Response]]]

  // TODO implement the below in terms of getHITInfo

  /** Get all assignments for a given HIT. */
  def getAssignmentsForHIT[Response : Reader](
    hitTypeId: String,
    hitId: String
  ): Try[List[Assignment[Response]]]
}
julianmichael/turkey
turkey/shared/src/main/scala/turkey/HITDataService.scala
Scala
mit
1,961
/*
 * Copyright 2014-2021 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.atlas.webapi

import com.netflix.atlas.core.model.Datapoint
import com.netflix.atlas.core.model.DatapointTuple
import com.netflix.atlas.core.util.SortedTagMap
import munit.FunSuite

import java.util.UUID
import scala.util.Random

/** Round-trip tests for the publish payload codecs: batch, compact batch, and list. */
class PublishPayloadsSuite extends FunSuite {

  private val timestamp = 1636116180000L

  /**
   * Generates `n` datapoints. The first six values cover IEEE-754 edge cases
   * (NaN, min/max, smallest positive, +/- infinity); any remaining values are
   * random doubles. Tags include a random UUID so entries are distinct.
   */
  private def datapoints(n: Int): List[DatapointTuple] = {
    List.tabulate(n) { i =>
      val value = i match {
        case 0 => Double.NaN
        case 1 => Double.MinValue
        case 2 => Double.MaxValue
        case 3 => Double.MinPositiveValue
        case 4 => Double.NegativeInfinity
        case 5 => Double.PositiveInfinity
        case _ => Random.nextDouble()
      }
      val tags = SortedTagMap(
        "name" -> "test",
        "i"    -> i.toString,
        "u"    -> UUID.randomUUID().toString
      )
      Datapoint(tags, timestamp, value).toTuple
    }
  }

  /**
   * Checks a decoded non-empty payload against its input. NaN != NaN, so the
   * first datapoint (whose value is NaN) is compared field by field.
   */
  private def assertRoundTrip(decoded: List[DatapointTuple], input: List[DatapointTuple]): Unit = {
    assert(decoded.head.value.isNaN)
    assertEquals(decoded.head.tags, input.head.tags)
    assertEquals(decoded.tail, input.tail)
  }

  test("encode and decode empty batch") {
    val input = datapoints(0)
    val decoded = PublishPayloads.decodeBatch(PublishPayloads.encodeBatch(Map.empty, input))
    assertEquals(decoded, input)
  }

  test("encode and decode batch") {
    val input = datapoints(10)
    val decoded = PublishPayloads.decodeBatch(PublishPayloads.encodeBatch(Map.empty, input))
    assertRoundTrip(decoded, input)
  }

  test("encode and decode empty compact batch") {
    val input = datapoints(0)
    val decoded = PublishPayloads.decodeCompactBatch(PublishPayloads.encodeCompactBatch(input))
    assertEquals(decoded, input)
  }

  test("encode and decode compact batch") {
    val input = datapoints(10)
    val decoded = PublishPayloads.decodeCompactBatch(PublishPayloads.encodeCompactBatch(input))
    assertRoundTrip(decoded, input)
  }

  test("encode and decode empty list") {
    val input = datapoints(0)
    val decoded = PublishPayloads.decodeList(PublishPayloads.encodeList(input))
    assertEquals(decoded, input)
  }

  test("encode and decode list") {
    val input = datapoints(10)
    val decoded = PublishPayloads.decodeList(PublishPayloads.encodeList(input))
    assertRoundTrip(decoded, input)
  }
}
brharrington/atlas
atlas-webapi/src/test/scala/com/netflix/atlas/webapi/PublishPayloadsSuite.scala
Scala
apache-2.0
3,184
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.flink.table.planner.plan.stream.table.stringexpr

import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.api.{Session, Slide, Tumble}
import org.apache.flink.table.planner.plan.utils.JavaUserDefinedAggFunctions.WeightedAvg
import org.apache.flink.table.planner.utils.{CountAggFunction, TableTestBase}

import org.junit.Test

/**
 * Verifies that group-window aggregations written with the string-based (Java) API
 * are planned identically to the equivalent Scala expression API, for slide,
 * tumble and session windows on both row time and processing time.
 * Each test builds the same query twice and compares via verifyTableEquals.
 */
class GroupWindowStringExpressionTest extends TableTestBase {

  /** Sliding row-time window, keyed by 'string, with UDAGG calls in the projection. */
  @Test
  def testRowTimeSlide(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Int, Long, String)](
      "T1", 'int, 'long, 'string, 'rowtime.rowtime)

    val myCountFun = new CountAggFunction
    util.addFunction("myCountFun", myCountFun)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)

    // Expression / Scala API
    val resScala = t
      .window(Slide over 4.hours every 2.hours on 'rowtime as 'w)
      .groupBy('w, 'string)
      .select(
        'string,
        myCountFun('string),
        'int.sum,
        weightAvgFun('long, 'int),
        weightAvgFun('int, 'int) * 2,
        'w.start,
        'w.end)

    // String / Java API
    val resJava = t
      .window(Slide.over("4.hours").every("2.hours").on("rowtime").as("w"))
      .groupBy("w, string")
      .select(
        "string, " +
        "myCountFun(string), " +
        "int.sum, " +
        "weightAvgFun(long, int), " +
        "weightAvgFun(int, int) * 2, " +
        "start(w)," +
        "end(w)")

    verifyTableEquals(resJava, resScala)
  }

  /** Tumbling row-time window; note 'rowtime sits in the middle of the schema here. */
  @Test
  def testRowTimeTumble(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Int, Long, Long, String)](
      "T1", 'int, 'long, 'rowtime.rowtime, 'string)

    val myCountFun = new CountAggFunction
    util.addFunction("myCountFun", myCountFun)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)

    // Expression / Scala API
    val resScala = t
      .window(Tumble over 4.hours on 'rowtime as 'w)
      .groupBy('w, 'string)
      .select(
        'string,
        myCountFun('string),
        'int.sum,
        weightAvgFun('long, 'int),
        weightAvgFun('int, 'int) * 2,
        'w.start,
        'w.end)

    // String / Java API
    val resJava = t
      .window(Tumble.over("4.hours").on("rowtime").as("w"))
      .groupBy("w, string")
      .select(
        "string, " +
        "myCountFun(string), " +
        "int.sum, " +
        "weightAvgFun(long, int), " +
        "weightAvgFun(int, int) * 2, " +
        "start(w)," +
        "end(w)")

    verifyTableEquals(resJava, resScala)
  }

  /** Session row-time window; only the window start is projected. */
  @Test
  def testRowTimeSession(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Int, Long, String)](
      "T1", 'int, 'long, 'string, 'rowtime.rowtime)

    val myCountFun = new CountAggFunction
    util.addFunction("myCountFun", myCountFun)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)

    // Expression / Scala API
    val resScala = t
      .window(Session withGap 4.hours on 'rowtime as 'w)
      .groupBy('w, 'string)
      .select(
        'string,
        myCountFun('string),
        'int.sum,
        weightAvgFun('long, 'int),
        weightAvgFun('int, 'int) * 2,
        'w.start)

    // String / Java API
    val resJava = t
      .window(Session.withGap("4.hours").on("rowtime").as("w"))
      .groupBy("w, string")
      .select(
        "string, " +
        "myCountFun(string), " +
        "int.sum, " +
        "weightAvgFun(long, int), " +
        "weightAvgFun(int, int) * 2, " +
        "start(w)"
      )

    verifyTableEquals(resJava, resScala)
  }

  /** Sliding processing-time window, grouped on the window alone (no key column). */
  @Test
  def testProcTimeSlide(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Int, Long, String)](
      "T1", 'int, 'long, 'string, 'proctime.proctime)

    val myCountFun = new CountAggFunction
    util.addFunction("myCountFun", myCountFun)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)

    // Expression / Scala API
    val resScala = t
      .window(Slide over 4.hours every 2.hours on 'proctime as 'w)
      .groupBy('w)
      .select(
        myCountFun('string),
        'int.sum,
        weightAvgFun('long, 'int),
        weightAvgFun('int, 'int) * 2,
        'w.start,
        'w.end)

    // String / Java API
    val resJava = t
      .window(Slide.over("4.hours").every("2.hours").on("proctime").as("w"))
      .groupBy("w")
      .select(
        "myCountFun(string), " +
        "int.sum, " +
        "weightAvgFun(long, int), " +
        "weightAvgFun(int, int) * 2, " +
        "start(w)," +
        "end(w)")

    verifyTableEquals(resJava, resScala)
  }

  /** Tumbling processing-time window, grouped on the window alone. */
  @Test
  def testProcTimeTumble(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Int, Long, String)](
      "T1", 'int, 'long,'string, 'proctime.proctime)

    val myCountFun = new CountAggFunction
    util.addFunction("myCountFun", myCountFun)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)

    // Expression / Scala API
    val resScala = t
      .window(Tumble over 4.hours on 'proctime as 'w)
      .groupBy('w)
      .select(
        myCountFun('string),
        'int.sum,
        weightAvgFun('long, 'int),
        weightAvgFun('int, 'int) * 2,
        'w.start,
        'w.end)

    // String / Java API
    val resJava = t
      .window(Tumble.over("4.hours").on("proctime").as("w"))
      .groupBy("w")
      .select(
        "myCountFun(string), " +
        "int.sum, " +
        "weightAvgFun(long, int), " +
        "weightAvgFun(int, int) * 2, " +
        "start(w)," +
        "end(w)")

    verifyTableEquals(resJava, resScala)
  }

  /** Session processing-time window, grouped on the window alone. */
  @Test
  def testProcTimeSession(): Unit = {
    val util = streamTestUtil()
    val t = util.addDataStream[(Int, Long, String)](
      "T1", 'int, 'long, 'string, 'proctime.proctime)

    val myCountFun = new CountAggFunction
    util.addFunction("myCountFun", myCountFun)
    val weightAvgFun = new WeightedAvg
    util.addFunction("weightAvgFun", weightAvgFun)

    // Expression / Scala API
    val resScala = t
      .window(Session withGap 4.hours on 'proctime as 'w)
      .groupBy('w)
      .select(
        myCountFun('string),
        'int.sum,
        weightAvgFun('long, 'int),
        weightAvgFun('int, 'int) * 2,
        'w.start,
        'w.end)

    // String / Java API
    val resJava = t
      .window(Session.withGap("4.hours").on("proctime").as("w"))
      .groupBy("w")
      .select(
        "myCountFun(string), " +
        "int.sum, " +
        "weightAvgFun(long, int), " +
        "weightAvgFun(int, int) * 2, " +
        "start(w), " +
        "end(w)"
      )

    verifyTableEquals(resJava, resScala)
  }
}
GJL/flink
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/table/stringexpr/GroupWindowStringExpressionTest.scala
Scala
apache-2.0
7,644
/*
 * Copyright 2015 Textocat
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.textocat.textokit.phrrecog.util

import com.textocat.textokit.morph.fs.Wordform
import com.textocat.textokit.phrrecog.cas.{Phrase, VerbPhrase}
import org.apache.uima.cas.text.AnnotationFS
import org.apache.uima.jcas.JCas
import org.apache.uima.jcas.cas.FSArray

import scala.collection.Map

/** Factory producing a [[VPAnnotationStringParser]] for a given CAS and token array. */
class VPAnnotationStringParserFactory extends PhraseStringParsersFactory {
  override def createParser(jCas: JCas, tokens: Array[AnnotationFS]) =
    new VPAnnotationStringParser(jCas, tokens)
}

/**
 * Builds [[VerbPhrase]] annotations from parsed phrase strings. The wordforms
 * registered under the null (no-prefix) key form the phrase: the first one is the
 * head, the rest become dependent words. Any other prefix key is rejected.
 */
class VPAnnotationStringParser(
                                protected val jCas: JCas,
                                protected val tokens: Array[AnnotationFS]) extends PhraseStringParsers {

  protected override def createAnnotation(
                                           prefixedWordformsMap: Map[String, Seq[Wordform]],
                                           depPhrases: Seq[Phrase]): VerbPhrase = {
    // The head and its dependents must be present under the null prefix.
    val noPrefixWfs = prefixedWordformsMap.getOrElse(
      null,
      throw new IllegalStateException("No head in %s".format(prefixedWordformsMap)))
    // Nothing besides the null key is understood by this parser.
    if (prefixedWordformsMap.size > 1)
      throw new IllegalStateException("Unknown prefixes in %s".format(prefixedWordformsMap))

    val headWf = noPrefixWfs.head
    val dependentWfs = noPrefixWfs.tail

    // Copy the dependent wordforms into a UIMA FSArray.
    val dependents = new FSArray(jCas, dependentWfs.size)
    for ((wfAnno, slot) <- dependentWfs.zipWithIndex)
      dependents.set(slot, wfAnno)

    // The phrase annotation spans the head's word.
    val phrase = new VerbPhrase(jCas)
    phrase.setBegin(headWf.getWord.getBegin)
    phrase.setEnd(headWf.getWord.getEnd)
    phrase.setHead(headWf)
    phrase.setDependentWords(dependents)
    phrase
  }
}
Denis220795/Textokit
Textokit.PhraseRecognizer/src/main/scala/com/textocat/textokit/phrrecog/util/VPAnnotationStringParser.scala
Scala
apache-2.0
2,392
package ch.fram.medlineGeo.crunching import ch.fram.medlineGeo.models._ /** * Created by Alexandre Massselot on 11/09/15. */ case class LocalizedAffiliationPubmedIds(affiliationHook: String, pubmedIds: List[Long], citationCount: Int, location: Option[Location], locResolverSolution: Option[String], locResolverTried: List[String]) { /** * builds a new LocalizedAffiliationPubmedIds, setting a given location + resolver name * @param newLocation * @param resolver * @return */ def resolveSuccess(newLocation: Location, resolver: String): LocalizedAffiliationPubmedIds = new LocalizedAffiliationPubmedIds( affiliationHook, pubmedIds, citationCount, Some(newLocation), Some(resolver), locResolverTried ) /** * builds a new LocalizedAffiliationPubmedIds, just add the resolver name to the set of tried resolvers * @param resolver * @return */ def resolveFailure(resolver: String): LocalizedAffiliationPubmedIds = new LocalizedAffiliationPubmedIds( affiliationHook, pubmedIds, citationCount, location, None, (locResolverTried :+ resolver).distinct ) } object LocalizedAffiliationPubmedIds { /** * create a default LocalizedAffiliationPubmedIds. * @param affiliationHook * @param pubmedIds * @return */ def create(affiliationHook: String, pubmedIds: List[Long]): LocalizedAffiliationPubmedIds = LocalizedAffiliationPubmedIds( affiliationHook, pubmedIds, pubmedIds.size, None, None, Nil ) }
alexmasselot/medlineGeoBackend
app/ch/fram/medlineGeo/crunching/LocalizedAffiliationPubmedIds.scala
Scala
mit
1,795
// Copyright (c) 2013, Johns Hopkins University. All rights reserved. // This software is released under the 2-clause BSD license. // See /LICENSE.txt // Travis Wolfe, twolfe18@gmail.com, 30 July 2013 package edu.jhu.hlt.parma.features.generic // TODO return to this // how might you represent more than one features in one? // val allWN = wordNetSyn + wordNetHyp + ... // ... word >>= allWN >>= max // basically i'm trying to get non-determinism out of my features // i am defining a lattice and saying, "find all paths, make a feature for each!" // i can define a type which holds N Pipes // when in binds (to the right) with another holder with M Pipes, // it produces another holder with N*M Pipes! case class SuperPipe[D,T](val name: String, val pipes: Seq[Pipe[D,T]]) { def this(name: String, p: Pipe[D,T]) = this(name, Seq(p)) def simpleBind[R](sp: SuperPipe[T, R]): SuperPipe[D,R] = { val newName = name + "-" + sp.name val newPipes = pipes.flatMap(p => sp.pipes.map(pp => p.bind(pp))) SuperPipe(newName, newPipes) } } object SuperPipeImplicits { implicit def p2sp[D,R](p: Pipe[D,R]): SuperPipe[D,R] = SuperPipe("super:"+p.name, Seq(p)) implicit def |[D,R](p1: Pipe[D,R], p2: Pipe[D,R]) = SuperPipe("or", Seq(p1, p2)) }
hltcoe/parma
src/main/scala/edu/jhu/hlt/parma/features/generic/SuperPipe.scala
Scala
bsd-2-clause
1,249
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.plan.rules.logical import org.apache.flink.api.scala._ import org.apache.flink.table.plan.optimize.program.FlinkStreamProgram import org.apache.flink.table.util.TableTestBase import org.junit.{Before, Test} /** * Test for [[RankNumberColumnRemoveRule]]. 
*/ class RankNumberColumnRemoveRuleTest extends TableTestBase { private val util = streamTestUtil() @Before def setup(): Unit = { util.buildStreamProgram(FlinkStreamProgram.PHYSICAL) util.addDataStream[(Int, String, Long)]("MyTable", 'a, 'b, 'c, 'rowtime) } @Test def testCannotRemoveRankNumberColumn1(): Unit = { val sql = """ |SELECT a, rank_num FROM ( | SELECT *, | RANK() OVER (PARTITION BY a ORDER BY rowtime DESC) as rank_num | FROM MyTable) |WHERE rank_num >= 1 AND rank_num < 2 """.stripMargin util.verifyPlan(sql) } @Test def testCannotRemoveRankNumberColumn2(): Unit = { val sql = """ |SELECT a, rank_num FROM ( | SELECT *, | ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime DESC) as rank_num | FROM MyTable) |WHERE rank_num >= 1 AND rank_num < 3 """.stripMargin util.verifyPlan(sql) } @Test def testCannotRemoveRankNumberColumn3(): Unit = { // the Rank does not output rank number, so this rule will not be matched val sql = """ |SELECT a FROM ( | SELECT *, | ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime DESC) as rank_num | FROM MyTable) |WHERE rank_num >= 1 AND rank_num < 2 """.stripMargin util.verifyPlan(sql) } @Test def testCouldRemoveRankNumberColumn(): Unit = { val sql = """ |SELECT a, rank_num FROM ( | SELECT *, | ROW_NUMBER() OVER (PARTITION BY a ORDER BY rowtime DESC) as rank_num | FROM MyTable) |WHERE rank_num >= 1 AND rank_num < 2 """.stripMargin util.verifyPlan(sql) } }
shaoxuan-wang/flink
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/plan/rules/logical/RankNumberColumnRemoveRuleTest.scala
Scala
apache-2.0
2,829
package com.twitter.server.lint import com.twitter.finagle.Stack import com.twitter.finagle.client.StackClient import com.twitter.finagle.param.{Label, Stats} import com.twitter.finagle.stats.{InMemoryStatsReceiver, NullStatsReceiver} import com.twitter.finagle.util.StackRegistry import org.scalatest.funsuite.AnyFunSuite class NullStatsReceiversRuleTest extends AnyFunSuite { def newRegistry(name: String): StackRegistry = new StackRegistry { def registryName: String = name } test("Empty registry doesn't create issues") { val registry = newRegistry("client") val rule = NullStatsReceiversRule(registry) assert(rule().size == 0) } test("Client with StatsReceiver doesn't create issues") { val registry = newRegistry("client") val params = Stack.Params.empty + Stats(new InMemoryStatsReceiver) registry.register("localhost:1234", StackClient.newStack, params) val rule = NullStatsReceiversRule(registry) assert(rule().size == 0) } test("Client with NullStatsReceiver does create issues") { val registry = newRegistry("client") val params = Stack.Params.empty + Stats(NullStatsReceiver) registry.register("localhost:1234", StackClient.newStack, params) val rule = NullStatsReceiversRule(registry) assert(rule().size == 1) } test("Server with StatsReceiver doesn't create issues") { val registry = newRegistry("server") val params = Stack.Params.empty + Stats(new InMemoryStatsReceiver) registry.register("localhost:8080", StackClient.newStack, params) val rule = NullStatsReceiversRule(registry) assert(rule().size == 0) } test("Server with NullStatsReceiver does create issues") { val registry = newRegistry("server") val params = Stack.Params.empty + Stats(NullStatsReceiver) registry.register("localhost:8080", StackClient.newStack, params) val rule = NullStatsReceiversRule(registry) assert(rule().size == 1) } test("Admin Server doesn't create issues") { val registry = newRegistry("server") val params = Stack.Params.empty + Stats(NullStatsReceiver) + Label("adminhttp") 
registry.register("localhost:8080", StackClient.newStack, params) val rule = NullStatsReceiversRule(registry) assert(rule().size == 0) } }
twitter/twitter-server
server/src/test/scala/com/twitter/server/lint/NullStatsReceiversRuleTest.scala
Scala
apache-2.0
2,275
package scala.models import com.bryzek.apidoc.generator.v0.models.{ File, InvocationForm } import com.bryzek.apidoc.spec.v0.models.Attribute import lib.Text._ import lib.generator.CodeGenerator import scala.generator.{ ScalaEnums, ScalaCaseClasses, ScalaService, ScalaResource, ScalaOperation, ScalaUtil } import generator.ServiceFileNames import play.api.libs.json.JsString object PlayService extends PlayService trait PlayService extends CodeGenerator { import CaseClassUtil._ import KafkaUtil._ override def invoke( form: InvocationForm ): Either[Seq[String], Seq[File]] = { Right(generateCode(form)) } def generateCode( form: InvocationForm, addHeader: Boolean = true ): Seq[File] = { val ssd = ScalaService(form.service) val prefix = underscoreAndDashToInitCap(ssd.name) val enumJson: String = ssd.enums.map { ScalaEnums(ssd, _).buildJson() }.mkString("\\n\\n") val play2Json = Play2JsonExtended(ssd).generate() val header = addHeader match { case false ⇒ "" case true ⇒ ApidocComments(form.service.version, form.userAgent).toJavaString() + "\\n" } val kafkaModels = getKafkaModels(ssd) ssd.resources.map { resource: ScalaResource ⇒ val resourceName = resource.plural val serviceName = resource.plural + "Service" // Find KafkaProducer that contians the model for $resourceName val kafkaProducers = kafkaModels.flatMap(_.model.attributes.map(attr ⇒ { (attr.value \\ "data_type").as[JsString].value })) val resourceBodies = resource.operations.flatMap(_.body.map(_.body.`type`)) val producerMap = kafkaProducers.intersect(resourceBodies).map(t => t -> ScalaUtil.toClassName(t)).toMap val producers = producerMap.values.map(p ⇒ s"val kafka${p}Producer = new Kafka${p}Producer(config)").mkString("\\n") val resourceFunctions = resource.operations.map { operation: ScalaOperation ⇒ val method = operation.method.toString.toLowerCase val parameters = operation.parameters val resultType = operation.resultType val bodyType = operation.body.map(_.name).getOrElse(resultType) val firstParamName = 
parameters.map(_.name).headOption.getOrElse("") val dataArg = operation.body.map(b => s"""data: ${b.name}""") val additionalArgs = Seq(Some("request: Request[T]"), dataArg).flatten val argList = ScalaUtil.fieldsToArgList(additionalArgs ++ (parameters.map(_.definition()))).mkString(", ") val argNameList = (Seq("request.body", "request") ++ operation.parameters.map(_.name)).mkString(", ") val producerName = operation.body.map(_.body.`type`) .map(_.replaceAll("[\\\\[\\\\]]", "")) .map(clazz => s"kafka${ScalaUtil.toClassName(clazz)}Producer") .getOrElse("???") val bodyScala = method.toLowerCase match { case "post" | "put" => s"""${producerName}.send(data, ${firstParamName})""" case "get" => // Create a default Case Class ssd.models.filter(_.qualifiedName == operation.resultType).headOption match { case Some(model) => val caseClass = generateInstance(model, 1, ssd) s"Try { ${caseClass.indent(6)} }" case None => "Try { Unit }" } case _ => "???" } s""" def ${method}[T](${argList}): Future[Try[${bodyType}]] = { Future { ${bodyScala} } }""" }.mkString("\\n") val source = s"""$header package services import javax.inject.Inject import com.typesafe.config.Config import play.api.mvc.Request import scala.concurrent.Future import scala.util.Try class ${serviceName} @Inject() (config: Config) { import ${ssd.namespaces.models}._ import ${ssd.namespaces.base}.kafka._ import play.api.libs.concurrent.Execution.Implicits.defaultContext ${producers} ${resourceFunctions.indent(2)} } """ File(serviceName + ".scala", Some("services"), source) } } }
movio/apidoc-generator
scala-generator/src/main/scala/models/PlayService.scala
Scala
mit
4,032
package org.apache.spark.ml.dsl.utils import java.util import com.tribbloids.spookystuff.utils.CachingUtils import com.tribbloids.spookystuff.utils.CachingUtils.ConcurrentCache import org.apache.spark.ml.dsl.utils.messaging.MessageAPI import org.json4s.Extraction._ import org.json4s.JsonAST.JString import org.json4s._ import org.json4s.reflect.{TypeInfo, _} object XMLWeakDeserializer { case class ExceptionMetadata( jValue: Option[JValue] = None, typeInfo: Option[String] = None, serDe: SerDeMetadata ) extends MessageAPI case class SerDeMetadata( reporting: Option[String] = None, primitives: Seq[String] = Nil, field: Map[String, String] = Map.empty, custom: Seq[String] = Nil ) val cached: ConcurrentCache[Long, ParsingException] = CachingUtils.ConcurrentCache[Long, ParsingException]() trait ExceptionLike extends Throwable with Verbose { // def metadata: ExceptionMetadata override def getMessage: String = detailedStr } class ParsingException( override val shortStr: String, cause: Exception, val metadata: ExceptionMetadata ) extends MappingException(shortStr, cause) with ExceptionLike { { cached.put(System.currentTimeMillis(), this) } override def detail: String = s""" |"METADATA": ${metadata.toJSON()} |""".trim.stripMargin } case class UnrecoverableError( override val shortStr: String, cause: Throwable // override val metadata: ExceptionMetadata ) extends Error with ExceptionLike { override def detail: String = s""" |### [RECENT XML EXCEPTIONS] ### | |${cached.toSeq.sortBy(_._1).map(_._2).mkString("\\n")} |""".stripMargin } } abstract class XMLWeakDeserializer[T: Manifest] extends Serializer[T] { import XMLWeakDeserializer._ // cannot serialize override def serialize(implicit format: Formats): PartialFunction[Any, JValue] = PartialFunction.empty def exceptionMetadata( jValue: JValue, typeInfo: TypeInfo, formats: Formats ): ExceptionMetadata = ExceptionMetadata( Some(jValue), Some(typeInfo.toString), SerDeMetadata( Some(this.getClass.getName), 
formats.primitives.toSeq.map(_.toString), Map(formats.fieldSerializers.map(v => v._1.getName -> v._2.toString): _*), formats.customSerializers.map(_.toString) ) ) def wrapException[A](ti: TypeInfo, jv: JValue, format: Formats)(fn: => A): A = { lazy val metadata = exceptionMetadata(jv, ti, format) try { fn } catch { case e: MappingException => throw new ParsingException( e.getMessage, e, metadata ) case e: Exception => // throw new ParsingException( // e.getMessage, // e, // metadata // ) throw e case e: Throwable => throw UnrecoverableError( e.getClass.getSimpleName, e ) } } override final def deserialize(implicit format: Formats): PartialFunction[(TypeInfo, JValue), T] = { val result: ((TypeInfo, JValue)) => Option[T] = { case (ti, jv) => wrapException(ti, jv, format) { _deserialize(format).lift.apply(ti -> jv) } } Function.unlift(result) } def _deserialize(implicit format: Formats): PartialFunction[(TypeInfo, JValue), T] } // <tag>12</tag> => tag: 12 object StringToNumberDeserializer extends XMLWeakDeserializer[Any] { override def _deserialize(implicit format: Formats): PartialFunction[(TypeInfo, JValue), Any] = Function.unlift { case (ti @ TypeInfo(cc, _), JString(v)) => val parsed = cc match { case java.lang.Byte.TYPE => v.toByte case java.lang.Short.TYPE => v.toShort case java.lang.Character.TYPE => v.toInt.toChar case java.lang.Integer.TYPE => v.toInt case java.lang.Long.TYPE => v.toLong case java.lang.Float.TYPE => v.toFloat case java.lang.Double.TYPE => v.toDouble case java.lang.Boolean.TYPE => v.toBoolean case _ => null //TODO: add boxed type, or use try/errorToNone } Option(parsed) case _ => None } } // <tag/> => tag: {} object EmptyStringToEmptyObjectDeserializer extends XMLWeakDeserializer[Any] { override def _deserialize(implicit format: Formats): PartialFunction[(TypeInfo, JValue), Any] = Function.unlift { case (ti @ TypeInfo(cc, _), jv @ JString(str)) if !cc.isAssignableFrom(classOf[String]) && str.trim.isEmpty => // wrapException(ti, jv, format) { 
Some(extract(JObject(), ti)(format)) // } case _ => None } } // <tag>12</tag> => tag: [12] // <tag>abc</tag> => tag: ["abc"] object ElementToArrayDeserializer extends XMLWeakDeserializer[Any] { val listClass: Class[List[_]] = classOf[List[_]] val seqClass: Class[Seq[_]] = classOf[Seq[_]] val setClass: Class[Set[_]] = classOf[Set[_]] val arrayListClass: Class[util.ArrayList[_]] = classOf[java.util.ArrayList[_]] override def _deserialize(implicit format: Formats): PartialFunction[(TypeInfo, JValue), Any] = { case (ti @ TypeInfo(this.listClass | this.seqClass, _), jv) if !jv.isInstanceOf[JArray] => extractInner(ti, jv, format).toList case (ti @ TypeInfo(this.setClass, _), jv) if !jv.isInstanceOf[JArray] => extractInner(ti, jv, format).toSet case (ti @ TypeInfo(this.arrayListClass, _), jv) if !jv.isInstanceOf[JArray] => import scala.collection.JavaConverters._ new java.util.ArrayList[Any](extractInner(ti, jv, format).toList.asJava) case (ti @ TypeInfo(cc, _), jv) if !jv.isInstanceOf[JArray] && cc.isArray => val a = extractInner(ti, jv, format).toArray mkTypedArray(a, firstTypeArg(ti)) } def mkTypedArray(a: Array[_], typeArg: ScalaType): AnyRef = { import java.lang.reflect.Array.{newInstance => newArray} a.foldLeft((newArray(typeArg.erasure, a.length), 0)) { (tuple, e) => { java.lang.reflect.Array.set(tuple._1, tuple._2, e) (tuple._1, tuple._2 + 1) } } ._1 } def extractInner(ti: TypeInfo, jv: JValue, format: Formats): Option[Any] = { // wrapException(ti, jv, format) { val result = jv match { case JNothing => None case _ => Some(extract(jv, firstTypeArg(ti))(format)) } result // } } def firstTypeArg(ti: TypeInfo): ScalaType = { val tpe: ScalaType = ScalaType.apply(ti) val firstTypeArg = tpe.typeArgs.head firstTypeArg } }
tribbloid/spookystuff
mldsl/src/main/scala/org/apache/spark/ml/dsl/utils/XMLWeakDeserializer.scala
Scala
apache-2.0
6,698
def f[P] { trait P println(/* resolved: false */ P.getClass) println(classOf[/* offset: 19 */ P]) }
ilinum/intellij-scala
testdata/resolve2/element/mix/FunctionTypeParameterAndTrait.scala
Scala
apache-2.0
106
/* Copyright (c) 2015, Robby, Kansas State University All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package org.sireum.option import org.sireum.option.annotation._ import org.sireum.util._ import scala.beans.BeanProperty @Mode( name = "pilar", header = """ |Pilar: Sireum's Intermediate Representation (IR) """, description = "Pilar tooling" ) final case class PilarOption(@BeanProperty var parser: PilarParserOption = PilarParserOption()) { def this() = this(PilarParserOption()) } @Main( name = "parser", header = """ |Pilar Parser |... 
and pretty printer and JSON de/serializer """, description = "Pilar parser", handler = "org.sireum.pilar.parser.Parser" ) final case class PilarParserOption(@BeanProperty @Opt(shortKey = Some("in"), description = "Use standard input stream") var standardInput: Boolean = false, @BeanProperty @Opt(shortKey = Some("f"), description = "Output file\\n(if unspecified, use standard output stream)") var outputFile: OptionBean[String] = none(), @BeanProperty @EnumOpt(shortKey = Some("i"), elements = Seq("auto", "pilar", "json", "scala"), description = "Input mode") var inputMode: PilarParserOption.InputMode.Type = PilarParserOption.InputMode.Auto, @BeanProperty @EnumOpt(shortKey = Some("o"), elements = Seq("pilar", "json", "scala"), description = "Output mode") var outputMode: PilarParserOption.OutputMode.Type = PilarParserOption.InputMode.JSON, @BeanProperty @Opt(shortKey = Some("a"), description = "Use ANTLR4 Pilar parser instead of hand-written one") var antlr4: Boolean = false, @BeanProperty @Opt(shortKey = Some("e"), description = "Maximum number of errors found before parsing stop") var maxErrors: Int = 10, @BeanProperty @Arg(name = "file") var inputs: Array[String] = Array()) { def this() = this(false, none(), PilarParserOption.InputMode.Auto, PilarParserOption.OutputMode.JSON, false, 10, Array()) } object PilarParserOption { object InputMode extends Enum("") { type Type = Value val Auto = Value("auto") val Pilar = Value("pilar") val JSON = Value("json") val Scala = Value("scala") } object OutputMode extends Enum("") { type Type = Value val Pilar = Value("pilar") val JSON = Value("json") val Scala = Value("scala") } }
sireum/v3
cli/jvm/src/main/scala/org/sireum/option/PilarOption.scala
Scala
bsd-2-clause
4,245
import java.io.File import java.util.zip.ZipFile import scala.collection.JavaConversions._ import scala.io.Source import scala.xml.XML class Solver(graph: File, archive: File) { private val guessGdf = { val zip = new ZipFile(archive) val messages = zip.entries.filter(_.getName.endsWith("messages.htm")).map { file => val html = XML.load(zip.getInputStream(file)) val users = html \\\\ "span" filter { _ \\ "@class" exists (_.text == "user") } map(_.text) users.groupBy(u => u).mapValues(_.size) }.next() val total = messages.map(_._2).sum.toFloat val (nodeLines, edgeLines) = Source.fromFile(graph)("UTF-8").getLines().span(line => !line.startsWith("edgedef")) val nodes = nodeLines.toList.tail.map { line => val data = line.split(",") val messageCount = messages.get(data(1)) match { case Some(count) => count case None => 0 } new Node(data(1), data(0), messageCount / total) } val edges = edgeLines.toList.tail.map { line => val data = line.split(",") new Edge(data(0), data(1)) } new GuessGdf(nodes, edges) } def save(file: File) = guessGdf.save(file) }
dragostis/WeightedFacebook
src/main/scala/Solver.scala
Scala
gpl-2.0
1,183
import akka.actor._ object Guess1 { def main(args: Array[String]) { val system = ActorSystem("Guess1") val server = ActorDSL.actor(system)(new GuessServer1) val client = ActorDSL.actor(system)(new GuessClient1(server)) client ! 'startGuessing } } class GuessClient1(server: ActorRef) extends Actor { println("GuessClient1: starting") private[this] var n: Int = 5 def receive = { case 'startGuessing => send(n) case 'correct => println(s"GuessClient1: solution found: $n") context.system.shutdown() case 'tooSmall => println("GuessClient1: too small") n = n + 1 send(n) case 'tooBig => println("GuessClient1: too big") n = n - 1 send(n) } def send(msg: Int) { println(s"GuessClient1: sending $msg") server ! msg } } class GuessServer1 extends Actor { val n = scala.util.Random.nextInt(10) println("GuessServer1: starting") def receive = { case x:Int if x > n => send(sender, 'tooBig) case x:Int if x < n => send(sender, 'tooSmall) case x:Int => send(sender, 'correct) context.stop(self) } def send(sender: ActorRef, msg: Symbol) { println(s"GuessServer1: sending $msg") sender ! msg } }
grzegorzbalcerek/scala-book-examples
examples/Guess1.scala
Scala
mit
1,254
package japgolly.univeq import cats.data.* import cats.kernel.* import UnivEq.* trait UnivEqCats: inline implicit def catsEqFromUnivEq[A](using inline ev: UnivEq[A]): Eq[A] = Eq.fromUniversalEquals inline implicit def univEqCatsIor [A, B](using inline a: UnivEq[A], inline b: UnivEq[B]): UnivEq[A Ior B ] = force inline implicit def univEqCatsChain[A] (using inline a: UnivEq[A]) : UnivEq[Chain[A] ] = force inline implicit def univEqCatsNec [A] (using inline a: UnivEq[A]) : UnivEq[NonEmptyChain[A]] = force inline implicit def univEqCatsNel [A] (using inline a: UnivEq[A]) : UnivEq[NonEmptyList[A] ] = derive // NonEmptyVector doesn't implement inline implicit def univEqCatsOneAnd[F[_], A](using inline fa: UnivEq[F[A]], inline a: UnivEq[A]): UnivEq[OneAnd[F, A]] = derive inline implicit def catsMonoidSet[A](using inline ev: UnivEq[A]): Monoid[Set[A]] = _catsMonoidSet private[univeq] def _catsMonoidSet[A]: Monoid[Set[A]] = new Monoid[Set[A]]: override def empty = Set.empty override def combine(a: Set[A], b: Set[A]) = a | b object UnivEqCats extends UnivEqCats
japgolly/univeq
univeq-cats/shared/src/main/scala-3/japgolly/univeq/UnivEqCats.scala
Scala
apache-2.0
1,204
package chrome.windows.bindings import chrome.events.bindings.Event import chrome.tabs.bindings.Tab import scala.scalajs.js import scala.scalajs.js.annotation.JSGlobal @js.native trait GetOptions extends js.Object { val populate: js.UndefOr[Boolean] = js.native } object GetOptions { def apply(populate: js.UndefOr[Boolean] = js.undefined): GetOptions = { js.Dynamic .literal( populate = populate ) .asInstanceOf[GetOptions] } } @js.native trait CreateOptions extends js.Object { val url: js.UndefOr[js.Any] = js.native val tabId: js.UndefOr[Tab.Id] = js.native val left: js.UndefOr[Int] = js.native val top: js.UndefOr[Int] = js.native val width: js.UndefOr[Int] = js.native val height: js.UndefOr[Int] = js.native val focused: js.UndefOr[Boolean] = js.native val incognito: js.UndefOr[Boolean] = js.native val `type`: js.UndefOr[Window.CreateType] = js.native val state: js.UndefOr[Window.State] = js.native } object CreateOptions { def apply(url: js.Array[String] = js.Array(), tabId: js.UndefOr[Tab.Id] = js.undefined, left: js.UndefOr[Int] = js.undefined, top: js.UndefOr[Int] = js.undefined, width: js.UndefOr[Int] = js.undefined, height: js.UndefOr[Int] = js.undefined, focused: js.UndefOr[Boolean] = js.undefined, incognito: js.UndefOr[Boolean] = js.undefined, `type`: js.UndefOr[Window.CreateType] = js.undefined, state: js.UndefOr[Window.State] = js.undefined): CreateOptions = { js.Dynamic .literal( url = url.length match { case 0 => js.undefined case 1 => url(0) case _ => url }, tabId = tabId, left = left, top = top, width = width, height = height, focused = focused, incognito = incognito, `type` = `type`, state = state ) .asInstanceOf[CreateOptions] } } @js.native trait UpdateOptions extends js.Object { val left: js.UndefOr[Int] = js.native val top: js.UndefOr[Int] = js.native val width: js.UndefOr[Int] = js.native val height: js.UndefOr[Int] = js.native val focused: js.UndefOr[Boolean] = js.native val drawAttention: js.UndefOr[Boolean] = js.native val state: 
js.UndefOr[Window.State] = js.native } object UpdateOptions { def apply(left: js.UndefOr[Int] = js.undefined, top: js.UndefOr[Int] = js.undefined, width: js.UndefOr[Int] = js.undefined, height: js.UndefOr[Int] = js.undefined, focused: js.UndefOr[Boolean] = js.undefined, drawAttention: js.UndefOr[Boolean] = js.undefined, state: js.UndefOr[Window.State] = js.undefined): UpdateOptions = { js.Dynamic .literal( left = left, top = top, width = width, height = height, focused = focused, drawAttention = drawAttention, state = state ) .asInstanceOf[UpdateOptions] } } @js.native @JSGlobal("chrome.windows") object Windows extends js.Object { val WINDOW_ID_NONE: Window.Id = js.native val WINDOW_ID_CURRENT: Window.Id = js.native val onCreated: Event[js.Function1[Window, _]] = js.native val onRemoved: Event[js.Function1[Window.Id, _]] = js.native val onFocusChanged: Event[js.Function1[Window.Id, _]] = js.native def get(windowId: Window.Id, getInfo: js.UndefOr[GetOptions] = js.undefined, callback: js.Function1[Window, _]): Unit = js.native def getCurrent(getInfo: js.UndefOr[GetOptions] = js.undefined, callback: js.Function1[Window, _]): Unit = js.native def getLastFocused(getInfo: js.UndefOr[GetOptions] = js.undefined, callback: js.Function1[Window, _]): Unit = js.native def getAll(getInfo: js.UndefOr[GetOptions] = js.undefined, callback: js.Function1[js.Array[Window], _]): Unit = js.native def create(createData: js.UndefOr[CreateOptions], callback: js.UndefOr[js.Function1[js.UndefOr[Window], _]] = js.undefined): Unit = js.native def update( windowId: Window.Id, updateInfo: UpdateOptions, callback: js.UndefOr[js.Function1[Window, _]] = js.undefined): Unit = js.native def remove(windowId: Window.Id, callback: js.UndefOr[js.Function0[_]] = js.undefined): Unit = js.native }
lucidd/scala-js-chrome
bindings/src/main/scala/chrome/windows/bindings/Windows.scala
Scala
mit
4,469
package org.dele.text.maen.matchers

import org.dele.text.maen.matchers.MatcherManager._
import org.dele.text.maen.matchers.TMatcher.MId
import org.dele.text.maen.{AtomSeqMatch, TMatchResultPool}

import scala.collection.mutable.ListBuffer

/**
 * Created by jiaji on 2016-04-25.
 */
import StoppedByMatcherManager._

/**
 * Tracks "stopped-by" relationships between matchers: a match produced by one
 * matcher is suppressed when a configured stopping matcher also matched —
 * either anywhere in the input (`overlap = false`) or only where the two
 * matches overlap (`overlap = true`).
 *
 * @param stoppedByMapping pairs of (matcher id, its stop configurations);
 *                         repeated ids are merged into a single config list
 * @param contextChecker   additional context gate; defaults to always-pass
 */
class StoppedByMatcherManager(stoppedByMapping: Iterable[(MId, List[StoppedByConfig])],
                              val contextChecker: ContextChecker = AlwaysPassChecker) {

  // Merge entries that share the same matcher id into one config list.
  private val _stoppedByMapping: Map[MId, List[StoppedByConfig]] =
    stoppedByMapping.groupBy(_._1).map(p => p._1 -> p._2.flatMap(_._2).toList)

  // Fixed: getOrElse replaces the previous contains-then-Option.get lookup.
  private def _getStopByLists(mid: MId): List[StoppedByConfig] =
    _stoppedByMapping.getOrElse(mid, EmptyStoppedByList)

  // Reverse index: stopping matcher id -> (overlap flag, sorted ids it stops).
  private val _reverseStopByMap: Map[MId, (Boolean, List[MId])] = {
    import scala.collection.mutable.{Map => MutMap}
    val mm: MutMap[String, (Boolean, ListBuffer[String])] = MutMap()
    _stoppedByMapping.foreach { kvp =>
      val listName: String = kvp._1
      val stoppedByLists = kvp._2
      stoppedByLists.foreach { sl =>
        if (!mm.contains(sl.mid)) mm.put(sl.mid, sl.overlap -> ListBuffer())
        mm(sl.mid)._2 += listName
      }
    }
    mm.map(kvp => (kvp._1, kvp._2._1 -> kvp._2._2.toList.sorted)).toMap
  }

  /** Ids stopped by `mid`, with the overlap flag, if `mid` stops anything. */
  def getListsStopBy(mid: MId): Option[(Boolean, List[MId])] = _reverseStopByMap.get(mid)

  import scala.util.control.Breaks._
  import scala.collection.mutable

  /**
   * Removes from `currMatches` every match suppressed by one of `mid`'s
   * stop configurations, reporting stopped matches back to `resultPool`.
   *
   * For a non-overlap config, any match of the stopping matcher wipes the
   * whole set and short-circuits the scan; for an overlap config only
   * intersecting matches are removed.
   */
  def filter(mid: MId, currMatches: Set[AtomSeqMatch], resultPool: TMatchResultPool): Set[AtomSeqMatch] = {
    val stoppedByConfigs = _getStopByLists(mid)
    if (stoppedByConfigs.isEmpty) currMatches
    else {
      // val, not var: the set itself is mutated but never reassigned.
      val toRemove: mutable.Set[AtomSeqMatch] = mutable.Set()
      breakable {
        stoppedByConfigs.foreach { cfg =>
          val ms = resultPool.query(cfg.mid)
          if (ms.nonEmpty) {
            // !overlap: any stop-by match in the sentence filters everything out.
            if (!cfg.overlap) {
              toRemove ++= currMatches
              resultPool.handleStoppedMatch(cfg.mid, toRemove.toSet)
              break
            } else {
              // overlap: only matches intersecting a stop-by match are filtered.
              val toStop = currMatches.filter(cm => ms.exists(checkMatchIntersect(cm, _)))
              toRemove ++= toStop
              if (toStop.nonEmpty) resultPool.handleStoppedMatch(cfg.mid, toStop.toSet)
            }
          }
        }
      }
      currMatches -- toRemove
    }
  }

  /**
   * Returns the ids of matchers whose results may be invalidated by the new
   * `currMatches` of `mid`. Fixed: pattern match on the Option instead of
   * `isEmpty` followed by `.get`.
   */
  def updateMatches(mid: MId, currMatches: Set[AtomSeqMatch], resultPool: TMatchResultPool): List[MId] =
    getListsStopBy(mid) match {
      case None => List()
      case Some((overlap, lists)) =>
        if (overlap) {
          // Only matchers with at least one result intersecting a new match are affected.
          val matchers2Update = ListBuffer[MId]()
          lists.foreach { slid =>
            val matches = resultPool.query(slid)
            val isAffected = matches.exists { m =>
              currMatches.exists(_.range.intersect(m.range).nonEmpty)
            }
            if (isAffected) matchers2Update += slid
          }
          matchers2Update.toList
        } else {
          // Non-overlap: every matcher with any existing result is affected.
          lists.filter(resultPool.query(_).nonEmpty)
        }
    }
}

object StoppedByMatcherManager {
  val EmptyStoppedByList = List[StoppedByConfig]()
  val EmptyMIdList = EmptyStoppedByList.map(_.mid)

  /**
   * One stop rule: matches of `mid` stop other matchers' matches —
   * only overlapping ones when `overlap` is true, all of them otherwise.
   */
  case class StoppedByConfig(mid: MId, overlap: Boolean)

  /** True when the two matches' ranges share at least one position. */
  def checkMatchIntersect(m1: AtomSeqMatch, m2: AtomSeqMatch): Boolean =
    m1.range.intersect(m2.range).nonEmpty
}
new2scala/text-util
maen/src/main/scala/org/dele/text/maen/matchers/StoppedByMatcherManager.scala
Scala
apache-2.0
3,945
/*
 * Copyright (c) 2014-2015 Christopher Rebert
 * Copyright (c) 2013-2015 Mozilla Foundation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
package com.chrisrebert.lmvtfy.validation

import scala.util.{Try,Success}
import nu.validator.messages._
import nu.validator.servlet.imagereview.ImageCollector
import nu.validator.source.SourceCode
import nu.validator.validation.SimpleDocumentValidator
import nu.validator.xml.SystemErrErrorHandler
import org.xml.sax.{SAXException, InputSource}

// java -Xss512k  (original note — presumably the required JVM stack size; validation can StackOverflow, see `validator` below)
/** Entry point: runs the validator.nu HTML5 checker over an input document. */
object Html5Validator {
  // Schema: HTML5 + RDFa Lite, hosted by validator.nu.
  private val schemaUrl = "http://s.validator.nu/html5-rdfalite.rnc"
  private val errorsOnly = true
  private val showSource = false
  // JVM-global property read by the validator.nu datatype library;
  // set once when this object is first touched.
  System.setProperty("org.whattf.datatype.warn", errorsOnly.toString)

  /**
   * Validates the given document and returns its validation messages,
   * with a curated list of harmless-in-practice errors filtered out.
   * A fresh (single-use) validator instance is created per call.
   */
  def validationErrorsFor(inputSource: InputSource): Try[Seq[ValidationMessage]] = {
    (new Html5Validator(inputSource)).validationErrors
  }
}

/** Single-use wrapper around validator.nu's SimpleDocumentValidator. */
private class Html5Validator(inputSource: InputSource) {
  import Html5Validator.{schemaUrl,errorsOnly,showSource}

  // Collects structured validation messages emitted during checking.
  private val emitter = new StructuredObjectEmitter()

  // Runs validation on first access; the lazy-val chain below
  // (validator -> errorHandler -> sourceCode -> _validator) controls set-up order.
  lazy val validationErrors: Try[Seq[ValidationMessage]] = {
    validator.checkHtmlInputSource(inputSource)
    end().flatMap{ _ =>
      Success(emitter.messages.filter{ msg =>
        msg.parts match {
          // Exempt <img>s without alt attributes as they typically don't cause any problems besides decreasing accessibility, and most live examples lack them due to irrelevance and extra effort
          case Seq(PlainText("An "), CodeText("img"), PlainText(" element must have an "), CodeText("alt"), _*) => false
          // Ditto for <area>s without alt attributes
          case Seq(PlainText("Element "), CodeText("area"), PlainText(" is missing required attribute "), CodeText("alt"), PlainText(".")) => false
          // Exempt missing/empty <title> as it is very common in live examples but typically doesn't cause any problem
          case Seq(PlainText("Element "), CodeText("head"), PlainText(" is missing a required instance of child element "), CodeText("title"), PlainText(".")) => false
          case Seq(PlainText("Element "), CodeText("title"), PlainText(" must not be empty.")) => false
          // Exempt nonstandard <meta> used by jsFiddle
          case Seq(PlainText("Bad value "), CodeText("edit-Type"), PlainText(" for attribute "), CodeText("http-equiv"), PlainText(" on element "), CodeText("meta"), PlainText(".")) => false
          case Seq(PlainText("Attribute "), CodeText("edit"), PlainText(" not allowed on element "), CodeText("meta"), PlainText(" at this point.")) => false
          // Exempt nonstandard usage of autocomplete attribute because of Firefox bug: https://bugzilla.mozilla.org/show_bug.cgi?id=654072
          case Seq(PlainText("Attribute "), CodeText("autocomplete"), PlainText(" is only allowed when the input type is "), _*) => false
          case Seq(PlainText("Attribute "), CodeText("autocomplete"), PlainText(" not allowed on element "), CodeText("button"), PlainText(" at this point.")) => false
          case _ => true
        }
      })
    }
  }

  // Raw validator instance; configured lazily by `validator` below.
  private lazy val _validator: SimpleDocumentValidator = new SimpleDocumentValidator()
  private lazy val sourceCode: SourceCode = _validator.getSourceCode

  /**
   * Fully-configured validator (schema loaded, parsers wired to errorHandler).
   * @throws SAXException, Exception
   * @throws SimpleDocumentValidator.SchemaReadException, StackOverflowError
   */
  private lazy val validator: SimpleDocumentValidator = {
    _validator.setUpMainSchema(schemaUrl, new SystemErrErrorHandler())
    val loadEntities = false
    val noStream = false
    _validator.setUpValidatorAndParsers(errorHandler, noStream, loadEntities)
    _validator
  }

  // Adapts validator.nu's message stream into `emitter`; started immediately.
  private lazy val errorHandler: MessageEmitterAdapter = {
    val lineOffset = 0
    val imageCollector = new ImageCollector(sourceCode)
    val errHandler = new MessageEmitterAdapter(sourceCode, showSource, imageCollector, lineOffset, true, emitter)
    errHandler.setHtml(true)
    errHandler.setErrorsOnly(errorsOnly)
    errHandler.start(null)
    errHandler
  }

  // Finalizes the message stream; failure here surfaces as a failed Try.
  private def end(): Try[Unit] = Try{
    errorHandler.end("Document checking completed. No errors found.", "Document checking completed.")
  }
}
cvrebert/lmvtfy
src/main/scala/com/chrisrebert/lmvtfy/validation/Html5Validator.scala
Scala
mit
5,185
/* * Copyright (c) 2014-2018 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.reactive.observers import java.io.{OutputStream, PrintStream} import minitest.SimpleTestSuite import monix.execution.Ack.Continue import monix.execution.schedulers.TestScheduler import monix.reactive.Observer import monix.execution.exceptions.DummyException import monix.execution.atomic.AtomicInt object DumpObserverSuite extends SimpleTestSuite { def dummyOut(count: AtomicInt = null) = { val out = new OutputStream { def write(b: Int) = () } new PrintStream(out) { override def println(x: String) = { super.println(x) if (count != null) { val c = count.incrementAndGet() if (c == 0) throw new DummyException("dummy") } } } } test("Observer.dump works") { val counter = AtomicInt(0) val out = Observer.dump[Int]("O", dummyOut(counter)) assertEquals(out.onNext(1), Continue) assertEquals(out.onNext(2), Continue) out.onComplete() out.onError(DummyException("dummy")) assertEquals(counter.get, 4) } test("Subscriber.dump works") { implicit val s = TestScheduler() val counter = AtomicInt(0) val out = Subscriber.dump[Int]("O", dummyOut(counter)) assertEquals(out.onNext(1), Continue) assertEquals(out.onNext(2), Continue) out.onComplete() out.onError(DummyException("dummy")) assertEquals(counter.get, 4) } }
Wogan/monix
monix-reactive/shared/src/test/scala/monix/reactive/observers/DumpObserverSuite.scala
Scala
apache-2.0
2,047
/** * Copyright (C) 2009-2011 the original author or authors. * See the notice.md file distributed with this work for additional * information regarding copyright ownership. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.fusesource.scalate.util import _root_.org.fusesource.scalate.FunSuiteSupport import org.fusesource.scalate.util.URIs._ /** * @version $Revision: 1.1 $ */ class URIsTest extends FunSuiteSupport { test("adding query argument to no query string") { assertResult("/foo?x=1") { uri("/foo", "x=1") } } test("adding query argument to query string") { assertResult("/foo?x=1&y=2") { uri("/foo?x=1", "y=2") } } test("adding query argument to existing query") { assertResult("/foo?x=1&y=2") { uriPlus("/foo", null, "x=1&y=2") } assertResult("/foo?x=1&y=2") { uriPlus("/foo", "", "x=1&y=2") } assertResult("/foo?x=1&y=2") { uriPlus("/foo", "x=1", "y=2") } assertResult("/foo?x=1&y=2") { uriPlus("/foo", "x=1", "x=1&y=2") } assertResult("/foo?x=1&y=2") { uriPlus("/foo", "x=1&y=2", "x=1&y=2") } } test("removing query argument to existing query") { assertResult("/foo?x=1&y=2") { uriMinus("/foo", "x=1&y=2", "foo=bar") } assertResult("/foo?x=1&y=2") { uriMinus("/foo", "x=1&y=2&z=3", "z=3") } } }
scalate/scalate
scalate-util/src/test/scala/org/fusesource/scalate/util/URIsTest.scala
Scala
apache-2.0
1,796
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.indexer

import org.ensime.fixture._
import org.ensime.util.EnsimeSpec
import org.ensime.util.file._

import scala.concurrent._
import scala.concurrent.duration._

/**
 * Integration spec for the indexer's SearchService: index refreshing on
 * classfile changes, plus fuzzy class/method search quality over J2SE,
 * dependencies and the shared test project.
 */
class SearchServiceSpec extends EnsimeSpec
    with SharedTestKitFixture
    with SharedSearchServiceFixture
    with SearchServiceTestUtils {

  def original = EnsimeConfigFixture.SimpleTestProject

  "search refreshing" should "parse all files on a pristine structure" in {
    withSearchService { implicit service =>
      val (deleted, indexed) = refresh()
      deleted shouldBe 0
      indexed should be > 0
    }
  }

  it should "not refresh files that have not changed" in {
    withSearchService { implicit service =>
      refresh() shouldBe ((0, 0))
    }
  }

  it should "refresh files that have 'changed'" in {
    withSearchService { (config, service) =>
      implicit val s = service
      val now = System.currentTimeMillis()
      for {
        m <- config.modules.values
        r <- m.targets ++ m.testTargets
        f <- r.tree
      } {
        // simulate a full recompile
        f.setLastModified(now)
      }

      val (deleted, indexed) = refresh()
      deleted should be > 0
      indexed should be > 0
    }
  }

  it should "remove classfiles that have been deleted" in {
    withSearchService { (config, service) =>
      implicit val s = service
      val classfile = config.subprojects.head.targets.head / "org/example/Foo.class"

      classfile shouldBe 'exists

      classfile.delete()
      refresh() shouldBe ((1, 0))
    }
  }

  "class searching" should "return results from J2SE" in withSearchService { implicit service =>
    searchesClasses(
      "java.lang.String",
      "String", "string",
      "j.l.str", "j l str"
    )
  }

  it should "return results from dependencies" in withSearchService { implicit service =>
    searchesClasses(
      "org.scalatest.FunSuite",
      "FunSuite", "funsuite", "funsu",
      "o s Fun"
    )
  }

  it should "return results from the project" in withSearchService { implicit service =>
    searchesClasses(
      "org.example.Bloo",
      "o e bloo"
    )

    searchesClasses(
      "org.example.Blue$",
      "o e blue"
    )

    searchesClasses(
      "org.example.CaseClassWithCamelCaseName",
      "CaseClassWith", "caseclasswith",
      "o e Case", "o.e.caseclasswith",
      "CCWC" // <= CamelCaseAwesomeNess
    )
  }

  it should "return results from package objects" in withSearchService { implicit service =>
    searchClasses(
      "org.example.Blip$",
      "Blip"
    )

    searchClasses(
      "org.example.Blop",
      "Blop"
    )
  }

  "class and method searching" should "return results from classes" in {
    withSearchService { implicit service =>
      searchesClassesAndMethods(
        "java.lang.String",
        "String", "string",
        "j.l.str", "j l str"
      )
    }
  }

  it should "return results from static fields" in withSearchService { implicit service =>
    // Static fields are deliberately not indexed for method search.
    searchesEmpty(
      "CASE_INSENSITIVE", "case_insensitive",
      "case_"
    )
  }

  it should "not return results from instance fields" in withSearchService { implicit service =>
    searchesEmpty(
      "java.awt.Point.x"
    )
  }

  it should "return results from static methods" in withSearchService { implicit service =>
    searchesClassesAndMethods(
      "java.lang.Runtime.addShutdownHook",
      "addShutdownHook"
    )
  }

  it should "return results from instance methods" in withSearchService { implicit service =>
    searchesClassesAndMethods(
      "java.lang.Runtime.availableProcessors",
      "availableProcessors", "availableP"
    )
  }

  it should "not prioritise noisy inner classes" in withSearchService { implicit service =>
    // The outer class must rank above its inner-class noise.
    val hits = service.searchClasses("Baz", 10).map(_.fqn)
    hits should contain theSameElementsAs (Seq(
      "org.example2.Baz",
      "org.example2.Baz$Wibble$baz",
      "org.example2.Baz$Wibble$baz$",
      "org.example2.Baz$Wibble$",
      "org.example2.Baz$",
      "org.example2.Baz$Wibble"
    ))
    hits.head shouldBe "org.example2.Baz"
  }

  it should "return user created classes first" in withSearchService { implicit service =>
    // Project classes rank above JDK/scala-library classes with the same name.
    val hits = service.searchClasses("File", 10).map(_.fqn)
    hits.head should startWith("org.boost.File")
    hits should contain("java.io.File")

    val hits2 = service.searchClasses("Function1", 25).map(_.fqn)
    hits2.head should startWith("org.boost.Function1")
    hits2 should contain("scala.Function1")
  }

  it should "return user methods first" in withSearchService { implicit service =>
    val hits = service.searchClassesMethods("toString" :: Nil, 10).map(_.fqn)
    all(hits) should startWith regex ("org.example|org.boost")
  }

  "exact searches" should "find type aliases" in withSearchService { implicit service =>
    service.findUnique("org.scalatest.fixture.ConfigMapFixture$FixtureParam") shouldBe defined
  }

}

/** Shared helpers for driving SearchService in specs (mixed into the class above). */
trait SearchServiceTestUtils {
  self: EnsimeSpec =>

  /** Blocks until a full refresh completes; returns (deleted, indexed) counts. */
  def refresh()(implicit service: SearchService): (Int, Int) =
    Await.result(service.refresh(), Duration.Inf)

  /** Asserts that a class search for `query` returns `expect` among its hits. */
  def searchClasses(expect: String, query: String)(implicit service: SearchService) = {
    val max = 10
    val info = s"'$query' expected '$expect')"
    val results = service.searchClasses(query, max)

    withClue(s"${results.size} $info")(results.size should be <= max)
    withClue(s"$info but was empty")(results should not be empty)
    // when we improve the search quality, we could
    // make this really look only at #1
    val got = results.map(_.fqn)
    withClue(s"$info got '$got'")(got should contain(expect))

    results
  }

  /** Runs searchClasses for the expected fqn itself plus every alternate query. */
  def searchesClasses(expect: String, queries: String*)(implicit service: SearchService) =
    (expect :: queries.toList).foreach(searchClasses(expect, _))

  /** Asserts that a combined class+method search for `query` returns `expect`. */
  def searchClassesAndMethods(expect: String, query: String)(implicit service: SearchService) = {
    val max = 10
    val info = s"'$query' expected '$expect')"
    val results = service.searchClassesMethods(List(query), max)
    withClue(s"${results.size} $info")(results.size should be <= max)
    withClue(s"$info but was empty")(results should not be empty)
    // when we improve the search quality, we could
    // make this really look only at #1
    val got = results.map(_.fqn)
    withClue(s"$info got '$got'")(got should contain(expect))
    results
  }

  /** Asserts that a class+method search for `query` yields no results at all. */
  def searchExpectEmpty(query: String)(implicit service: SearchService) = {
    val max = 1
    val results = service.searchClassesMethods(List(query), max)
    withClue("expected empty results from %s".format(query))(results shouldBe empty)
    results
  }

  def searchesEmpty(queries: String*)(implicit service: SearchService) =
    queries.toList.foreach(searchExpectEmpty)

  /** Like searchesClasses, but for the combined class+method search. */
  def searchesClassesAndMethods(expect: String, queries: String*)(implicit service: SearchService) =
    (expect :: queries.toList).foreach(searchClassesAndMethods(expect, _))

}
rorygraves/ensime-server
core/src/it/scala/org/ensime/indexer/SearchServiceSpec.scala
Scala
gpl-3.0
7,047
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.catalyst.optimizer

import scala.collection.mutable

import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._

/**
 * Abstract class all optimizers should inherit of, contains the standard batches (extending
 * Optimizers can override this).
 */
abstract class Optimizer(sessionCatalog: SessionCatalog, conf: SQLConf)
  extends RuleExecutor[LogicalPlan] {

  // Fixed-point batches re-run until the plan stops changing, bounded by
  // `conf.optimizerMaxIterations`.
  protected val fixedPoint = FixedPoint(conf.optimizerMaxIterations)

  // NOTE: the order of batches, and of rules within a batch, is significant.
  def batches: Seq[Batch] = {
    Batch("Eliminate Distinct", Once, EliminateDistinct) ::
    // Technically some of the rules in Finish Analysis are not optimizer rules and belong more
    // in the analyzer, because they are needed for correctness (e.g. ComputeCurrentTime).
    // However, because we also use the analyzer to canonicalize queries (for view definition),
    // we do not eliminate subqueries or compute current time in the analyzer.
    Batch("Finish Analysis", Once,
      EliminateSubqueryAliases,
      EliminateView,
      ReplaceExpressions,
      ComputeCurrentTime,
      GetCurrentDatabase(sessionCatalog),
      RewriteDistinctAggregates,
      ReplaceDeduplicateWithAggregate) ::
    //////////////////////////////////////////////////////////////////////////////////////////
    // Optimizer rules start here
    //////////////////////////////////////////////////////////////////////////////////////////
    // - Do the first call of CombineUnions before starting the major Optimizer rules,
    //   since it can reduce the number of iteration and the other rules could add/move
    //   extra operators between two adjacent Union operators.
    // - Call CombineUnions again in Batch("Operator Optimizations"),
    //   since the other rules might make two separate Unions operators adjacent.
    Batch("Union", Once,
      CombineUnions) ::
    Batch("Pullup Correlated Expressions", Once,
      PullupCorrelatedPredicates) ::
    Batch("Subquery", Once,
      OptimizeSubqueries) ::
    Batch("Replace Operators", fixedPoint,
      ReplaceIntersectWithSemiJoin,
      ReplaceExceptWithAntiJoin,
      ReplaceDistinctWithAggregate) ::
    Batch("Aggregate", fixedPoint,
      RemoveLiteralFromGroupExpressions,
      RemoveRepetitionFromGroupExpressions) ::
    Batch("Operator Optimizations", fixedPoint, Seq(
      // Operator push down
      PushProjectionThroughUnion,
      ReorderJoin(conf),
      EliminateOuterJoin,
      PushPredicateThroughJoin,
      PushDownPredicate,
      LimitPushDown(conf),
      ColumnPruning,
      InferFiltersFromConstraints,
      // Operator combine
      CollapseRepartition,
      CollapseProject,
      CollapseWindow,
      CombineFilters,
      CombineLimits,
      CombineUnions,
      // Constant folding and strength reduction
      NullPropagation(conf),
      ConstantPropagation,
      FoldablePropagation,
      OptimizeIn(conf),
      ConstantFolding,
      ReorderAssociativeOperator,
      LikeSimplification,
      BooleanSimplification,
      SimplifyConditionals,
      RemoveDispensableExpressions,
      SimplifyBinaryComparison,
      PruneFilters,
      EliminateSorts,
      SimplifyCasts,
      SimplifyCaseConversionExpressions,
      RewriteCorrelatedScalarSubquery,
      EliminateSerialization,
      RemoveRedundantAliases,
      RemoveRedundantProject,
      SimplifyCreateStructOps,
      SimplifyCreateArrayOps,
      SimplifyCreateMapOps,
      CombineConcats) ++
      extendedOperatorOptimizationRules: _*) ::
    Batch("Check Cartesian Products", Once,
      CheckCartesianProducts(conf)) ::
    Batch("Join Reorder", Once,
      CostBasedJoinReorder(conf)) ::
    Batch("Decimal Optimizations", fixedPoint,
      DecimalAggregates(conf)) ::
    Batch("Object Expressions Optimization", fixedPoint,
      EliminateMapObjects,
      CombineTypedFilters) ::
    Batch("LocalRelation", fixedPoint,
      ConvertToLocalRelation,
      PropagateEmptyRelation) ::
    Batch("OptimizeCodegen", Once,
      OptimizeCodegen(conf)) ::
    Batch("RewriteSubquery", Once,
      RewritePredicateSubquery,
      CollapseProject) :: Nil
  }

  /**
   * Optimize all the subqueries inside expression.
   */
  object OptimizeSubqueries extends Rule[LogicalPlan] {
    def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
      case s: SubqueryExpression =>
        // Run the full optimizer on the subquery's plan; the Subquery wrapper node is
        // pattern-matched away again after optimization.
        val Subquery(newPlan) = Optimizer.this.execute(Subquery(s.plan))
        s.withNewPlan(newPlan)
    }
  }

  /**
   * Override to provide additional rules for the operator optimization batch.
   */
  def extendedOperatorOptimizationRules: Seq[Rule[LogicalPlan]] = Nil
}

/**
 * Remove useless DISTINCT for MAX and MIN.
 * This rule should be applied before RewriteDistinctAggregates.
 */
object EliminateDistinct extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan transformExpressions {
    case ae: AggregateExpression if ae.isDistinct =>
      ae.aggregateFunction match {
        // DISTINCT does not change the result of MAX/MIN, so drop the flag.
        case _: Max | _: Min => ae.copy(isDistinct = false)
        case _ => ae
      }
  }
}

/**
 * An optimizer used in test code.
 * To ensure extendability, we leave the standard rules in the abstract optimizer rules, while
 * specific rules go to the subclasses
 */
object SimpleTestOptimizer extends SimpleTestOptimizer

class SimpleTestOptimizer extends Optimizer(
  new SessionCatalog(
    new InMemoryCatalog,
    EmptyFunctionRegistry,
    new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)),
  new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))

/**
 * Remove redundant aliases from a query plan. A redundant alias is an alias that does not change
 * the name or metadata of a column, and does not deduplicate it.
 */
object RemoveRedundantAliases extends Rule[LogicalPlan] {

  /**
   * Create an attribute mapping from the old to the new attributes. This function will only
   * return the attribute pairs that have changed.
   */
  private def createAttributeMapping(current: LogicalPlan, next: LogicalPlan)
    : Seq[(Attribute, Attribute)] = {
    current.output.zip(next.output).filterNot {
      case (a1, a2) => a1.semanticEquals(a2)
    }
  }

  /**
   * Remove the top-level alias from an expression when it is redundant.
   */
  private def removeRedundantAlias(e: Expression, blacklist: AttributeSet): Expression = e match {
    // Alias with metadata can not be stripped, or the metadata will be lost.
    // If the alias name is different from attribute name, we can't strip it either, or we
    // may accidentally change the output schema name of the root plan.
    case a @ Alias(attr: Attribute, name)
      if a.metadata == Metadata.empty &&
        name == attr.name &&
        !blacklist.contains(attr) &&
        !blacklist.contains(a) =>
      attr
    case a => a
  }

  /**
   * Remove redundant alias expression from a LogicalPlan and its subtree. A blacklist is used to
   * prevent the removal of seemingly redundant aliases used to deduplicate the input for a
   * (self) join or to prevent the removal of top-level subquery attributes.
   */
  private def removeRedundantAliases(plan: LogicalPlan, blacklist: AttributeSet): LogicalPlan = {
    plan match {
      // We want to keep the same output attributes for subqueries. This means we cannot remove
      // the aliases that produce these attributes
      case Subquery(child) =>
        Subquery(removeRedundantAliases(child, blacklist ++ child.outputSet))

      // A join has to be treated differently, because the left and the right side of the join are
      // not allowed to use the same attributes. We use a blacklist to prevent us from creating a
      // situation in which this happens; the rule will only remove an alias if its child
      // attribute is not on the black list.
      case Join(left, right, joinType, condition) =>
        val newLeft = removeRedundantAliases(left, blacklist ++ right.outputSet)
        val newRight = removeRedundantAliases(right, blacklist ++ newLeft.outputSet)
        val mapping = AttributeMap(
          createAttributeMapping(left, newLeft) ++
          createAttributeMapping(right, newRight))
        val newCondition = condition.map(_.transform {
          case a: Attribute => mapping.getOrElse(a, a)
        })
        Join(newLeft, newRight, joinType, newCondition)

      case _ =>
        // Remove redundant aliases in the subtree(s).
        val currentNextAttrPairs = mutable.Buffer.empty[(Attribute, Attribute)]
        val newNode = plan.mapChildren { child =>
          val newChild = removeRedundantAliases(child, blacklist)
          currentNextAttrPairs ++= createAttributeMapping(child, newChild)
          newChild
        }

        // Create the attribute mapping. Note that the currentNextAttrPairs can contain duplicate
        // keys in case of Union (this is caused by the PushProjectionThroughUnion rule); in this
        // case we use the first mapping (which should be provided by the first child).
        val mapping = AttributeMap(currentNextAttrPairs)

        // Create an expression cleaning function for nodes that can actually produce redundant
        // aliases, use identity otherwise.
        val clean: Expression => Expression = plan match {
          case _: Project => removeRedundantAlias(_, blacklist)
          case _: Aggregate => removeRedundantAlias(_, blacklist)
          case _: Window => removeRedundantAlias(_, blacklist)
          case _ => identity[Expression]
        }

        // Transform the expressions.
        newNode.mapExpressions { expr =>
          clean(expr.transform {
            case a: Attribute => mapping.getOrElse(a, a)
          })
        }
    }
  }

  def apply(plan: LogicalPlan): LogicalPlan = removeRedundantAliases(plan, AttributeSet.empty)
}

/**
 * Remove projections from the query plan that do not make any modifications.
 */
object RemoveRedundantProject extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    // A Project whose output equals its child's output is a no-op.
    case p @ Project(_, child) if p.output == child.output => child
  }
}

/**
 * Pushes down [[LocalLimit]] beneath UNION ALL and beneath the streamed inputs of outer joins.
 */
case class LimitPushDown(conf: SQLConf) extends Rule[LogicalPlan] {

  // Strip a top-level GlobalLimit so that only a LocalLimit ends up beneath the operator.
  private def stripGlobalLimitIfPresent(plan: LogicalPlan): LogicalPlan = {
    plan match {
      case GlobalLimit(_, child) => child
      case _ => plan
    }
  }

  // Push a LocalLimit onto `plan` only when it actually tightens the plan's row bound.
  private def maybePushLimit(limitExp: Expression, plan: LogicalPlan): LogicalPlan = {
    (limitExp, plan.maxRows) match {
      case (IntegerLiteral(maxRow), Some(childMaxRows)) if maxRow < childMaxRows =>
        LocalLimit(limitExp, stripGlobalLimitIfPresent(plan))
      case (_, None) =>
        LocalLimit(limitExp, stripGlobalLimitIfPresent(plan))
      case _ => plan
    }
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    // Adding extra Limits below UNION ALL for children which are not Limit or do not have Limit
    // descendants whose maxRow is larger. This heuristic is valid assuming there does not exist
    // any Limit push-down rule that is unable to infer the value of maxRows.
    // Note: right now Union means UNION ALL, which does not de-duplicate rows, so it is safe to
    // pushdown Limit through it. Once we add UNION DISTINCT, however, we will not be able to
    // pushdown Limit.
    case LocalLimit(exp, Union(children)) =>
      LocalLimit(exp, Union(children.map(maybePushLimit(exp, _))))
    // Add extra limits below OUTER JOIN. For LEFT OUTER and RIGHT OUTER JOIN we push limits to
    // the left and right sides, respectively.
    // For FULL OUTER JOIN, we can only push limits to one side because we need to ensure that
    // rows from the limited side still have an opportunity to match against all candidates from
    // the non-limited side. We also need to ensure that this limit pushdown rule will not
    // eventually introduce limits on both sides if it is applied multiple times. Therefore:
    //   - If one side is already limited, stack another limit on top if the new limit is smaller.
    //     The redundant limit will be collapsed by the CombineLimits rule.
    //   - If neither side is limited, limit the side that is estimated to be bigger.
    case LocalLimit(exp, join @ Join(left, right, joinType, _)) =>
      val newJoin = joinType match {
        case RightOuter => join.copy(right = maybePushLimit(exp, right))
        case LeftOuter => join.copy(left = maybePushLimit(exp, left))
        case FullOuter =>
          (left.maxRows, right.maxRows) match {
            case (None, None) =>
              // Neither side is bounded yet: limit the side estimated to be bigger.
              if (left.stats.sizeInBytes >= right.stats.sizeInBytes) {
                join.copy(left = maybePushLimit(exp, left))
              } else {
                join.copy(right = maybePushLimit(exp, right))
              }
            case (Some(_), Some(_)) => join
            case (Some(_), None) => join.copy(left = maybePushLimit(exp, left))
            case (None, Some(_)) => join.copy(right = maybePushLimit(exp, right))
          }
        case _ => join
      }
      LocalLimit(exp, newJoin)
  }
}

/**
 * Pushes Project operator to both sides of a Union operator.
 * Operations that are safe to pushdown are listed as follows.
 * Union:
 * Right now, Union means UNION ALL, which does not de-duplicate rows. So, it is
 * safe to pushdown Filters and Projections through it. Filter pushdown is handled by another
 * rule PushDownPredicate. Once we add UNION DISTINCT, we will not be able to pushdown
 * Projections.
 */
object PushProjectionThroughUnion extends Rule[LogicalPlan] with PredicateHelper {

  /**
   * Maps Attributes from the left side to the corresponding Attribute on the right side.
   */
  private def buildRewrites(left: LogicalPlan, right: LogicalPlan): AttributeMap[Attribute] = {
    assert(left.output.size == right.output.size)
    AttributeMap(left.output.zip(right.output))
  }

  /**
   * Rewrites an expression so that it can be pushed to the right side of a
   * Union or Except operator. This method relies on the fact that the output attributes
   * of a union/intersect/except are always equal to the left child's output.
   */
  private def pushToRight[A <: Expression](e: A, rewrites: AttributeMap[Attribute]) = {
    val result = e transform {
      case a: Attribute => rewrites(a)
    }

    // We must promise the compiler that we did not discard the names in the case of project
    // expressions. This is safe since the only transformation is from Attribute => Attribute.
    result.asInstanceOf[A]
  }

  /**
   * Splits the condition expression into small conditions by `And`, and partition them by
   * deterministic, and finally recombine them by `And`. It returns an expression containing
   * all deterministic expressions (the first field of the returned Tuple2) and an expression
   * containing all non-deterministic expressions (the second field of the returned Tuple2).
   */
  private def partitionByDeterministic(condition: Expression): (Expression, Expression) = {
    val andConditions = splitConjunctivePredicates(condition)
    andConditions.partition(_.deterministic) match {
      case (deterministic, nondeterministic) =>
        deterministic.reduceOption(And).getOrElse(Literal(true)) ->
        nondeterministic.reduceOption(And).getOrElse(Literal(true))
    }
  }

  def apply(plan: LogicalPlan): LogicalPlan = plan transform {

    // Push down deterministic projection through UNION ALL
    case p @ Project(projectList, Union(children)) =>
      assert(children.nonEmpty)
      if (projectList.forall(_.deterministic)) {
        val newFirstChild = Project(projectList, children.head)
        val newOtherChildren = children.tail.map { child =>
          // Rewrite the project list in terms of each child's own attributes.
          val rewrites = buildRewrites(children.head, child)
          Project(projectList.map(pushToRight(_, rewrites)), child)
        }
        Union(newFirstChild +: newOtherChildren)
      } else {
        p
      }
  }
}

/**
 * Attempts to eliminate the reading of unneeded columns from the query plan.
 *
 * Since adding Project before Filter conflicts with PushPredicatesThroughProject, this rule will
 * remove the Project p2 in the following pattern:
 *
 *   p1 @ Project(_, Filter(_, p2 @ Project(_, child))) if p2.outputSet.subsetOf(p2.inputSet)
 *
 * p2 is usually inserted by this rule and useless, p1 could prune the columns anyway.
 */
object ColumnPruning extends Rule[LogicalPlan] {

  // True when the two output lists are the same length and pair-wise semantically equal.
  private def sameOutput(output1: Seq[Attribute], output2: Seq[Attribute]): Boolean =
    output1.size == output2.size &&
      output1.zip(output2).forall(pair => pair._1.semanticEquals(pair._2))

  // NOTE: the order of the cases below matters; more specific plan shapes must be matched
  // before the generic `Project(_, child)` fallback.
  def apply(plan: LogicalPlan): LogicalPlan = removeProjectBeforeFilter(plan transform {
    // Prunes the unused columns from project list of Project/Aggregate/Expand
    case p @ Project(_, p2: Project) if (p2.outputSet -- p.references).nonEmpty =>
      p.copy(child = p2.copy(projectList = p2.projectList.filter(p.references.contains)))
    case p @ Project(_, a: Aggregate) if (a.outputSet -- p.references).nonEmpty =>
      p.copy(
        child = a.copy(aggregateExpressions =
          a.aggregateExpressions.filter(p.references.contains)))
    case a @ Project(_, e @ Expand(_, _, grandChild)) if (e.outputSet -- a.references).nonEmpty =>
      val newOutput = e.output.filter(a.references.contains(_))
      val newProjects = e.projections.map { proj =>
        proj.zip(e.output).filter { case (_, a) =>
          newOutput.contains(a)
        }.unzip._1
      }
      a.copy(child = Expand(newProjects, newOutput, grandChild))

    // Prunes the unused columns from child of `DeserializeToObject`
    case d @ DeserializeToObject(_, _, child) if (child.outputSet -- d.references).nonEmpty =>
      d.copy(child = prunedChild(child, d.references))

    // Prunes the unused columns from child of Aggregate/Expand/Generate
    case a @ Aggregate(_, _, child) if (child.outputSet -- a.references).nonEmpty =>
      a.copy(child = prunedChild(child, a.references))
    case e @ Expand(_, _, child) if (child.outputSet -- e.references).nonEmpty =>
      e.copy(child = prunedChild(child, e.references))
    case g: Generate if !g.join && (g.child.outputSet -- g.references).nonEmpty =>
      g.copy(child = prunedChild(g.child, g.references))

    // Turn off `join` for Generate if no column from it's child is used
    case p @ Project(_, g: Generate) if g.join && p.references.subsetOf(g.generatedSet) =>
      p.copy(child = g.copy(join = false))

    // Eliminate unneeded attributes from right side of a Left Existence Join.
    case j @ Join(_, right, LeftExistence(_), _) =>
      j.copy(right = prunedChild(right, j.references))

    // all the columns will be used to compare, so we can't prune them
    case p @ Project(_, _: SetOperation) => p
    case p @ Project(_, _: Distinct) => p

    // Eliminate unneeded attributes from children of Union.
    case p @ Project(_, u: Union) =>
      if ((u.outputSet -- p.references).nonEmpty) {
        val firstChild = u.children.head
        val newOutput = prunedChild(firstChild, p.references).output
        // pruning the columns of all children based on the pruned first child.
        val newChildren = u.children.map { p =>
          val selected = p.output.zipWithIndex.filter { case (a, i) =>
            newOutput.contains(firstChild.output(i))
          }.map(_._1)
          Project(selected, p)
        }
        p.copy(child = u.withNewChildren(newChildren))
      } else {
        p
      }

    // Prune unnecessary window expressions
    case p @ Project(_, w: Window) if (w.windowOutputSet -- p.references).nonEmpty =>
      p.copy(child = w.copy(
        windowExpressions = w.windowExpressions.filter(p.references.contains)))

    // Eliminate no-op Window
    case w: Window if w.windowExpressions.isEmpty => w.child

    // Eliminate no-op Projects
    case p @ Project(_, child) if sameOutput(child.output, p.output) => child

    // Can't prune the columns on LeafNode
    case p @ Project(_, _: LeafNode) => p

    // for all other logical plans that inherits the output from it's children
    case p @ Project(_, child) =>
      val required = child.references ++ p.references
      if ((child.inputSet -- required).nonEmpty) {
        val newChildren = child.children.map(c => prunedChild(c, required))
        p.copy(child = child.withNewChildren(newChildren))
      } else {
        p
      }
  })

  /** Applies a projection only when the child is producing unnecessary attributes */
  private def prunedChild(c: LogicalPlan, allReferences: AttributeSet) =
    if ((c.outputSet -- allReferences.filter(c.outputSet.contains)).nonEmpty) {
      Project(c.output.filter(allReferences.contains), c)
    } else {
      c
    }

  /**
   * The Project before Filter is not necessary but conflict with PushPredicatesThroughProject,
   * so remove it.
   */
  private def removeProjectBeforeFilter(plan: LogicalPlan): LogicalPlan = plan transform {
    case p1 @ Project(_, f @ Filter(_, p2 @ Project(_, child)))
      if p2.outputSet.subsetOf(child.outputSet) =>
      p1.copy(child = f.copy(child = child))
  }
}

/**
 * Combines two adjacent [[Project]] operators into one and perform alias substitution,
 * merging the expressions into one single expression.
 */
object CollapseProject extends Rule[LogicalPlan] {

  def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case p1 @ Project(_, p2: Project) =>
      if (haveCommonNonDeterministicOutput(p1.projectList, p2.projectList)) {
        p1
      } else {
        p2.copy(projectList = buildCleanedProjectList(p1.projectList, p2.projectList))
      }
    case p @ Project(_, agg: Aggregate) =>
      if (haveCommonNonDeterministicOutput(p.projectList, agg.aggregateExpressions)) {
        p
      } else {
        agg.copy(aggregateExpressions = buildCleanedProjectList(
          p.projectList, agg.aggregateExpressions))
      }
  }

  // Map each alias-produced attribute of the projection back to its defining Alias.
  private def collectAliases(projectList: Seq[NamedExpression]): AttributeMap[Alias] = {
    AttributeMap(projectList.collect {
      case a: Alias => a.toAttribute -> a
    })
  }

  private def haveCommonNonDeterministicOutput(
      upper: Seq[NamedExpression],
      lower: Seq[NamedExpression]): Boolean = {
    // Create a map of Aliases to their values from the lower projection.
    // e.g., 'SELECT ... FROM (SELECT a + b AS c, d ...)' produces Map(c -> Alias(a + b, c)).
    val aliases = collectAliases(lower)

    // Collapse upper and lower Projects if and only if their overlapped expressions are all
    // deterministic.
    upper.exists(_.collect {
      case a: Attribute if aliases.contains(a) => aliases(a).child
    }.exists(!_.deterministic))
  }

  private def buildCleanedProjectList(
      upper: Seq[NamedExpression],
      lower: Seq[NamedExpression]): Seq[NamedExpression] = {
    // Create a map of Aliases to their values from the lower projection.
    // e.g., 'SELECT ... FROM (SELECT a + b AS c, d ...)' produces Map(c -> Alias(a + b, c)).
    val aliases = collectAliases(lower)

    // Substitute any attributes that are produced by the lower projection, so that we safely
    // eliminate it.
    // e.g., 'SELECT c + 1 FROM (SELECT a + b AS C ...' produces 'SELECT a + b + 1 ...'
    // Use transformUp to prevent infinite recursion.
    val rewrittenUpper = upper.map(_.transformUp {
      case a: Attribute => aliases.getOrElse(a, a)
    })
    // collapse upper and lower Projects may introduce unnecessary Aliases, trim them here.
    rewrittenUpper.map { p =>
      CleanupAliases.trimNonTopLevelAliases(p).asInstanceOf[NamedExpression]
    }
  }
}

/**
 * Combines adjacent [[RepartitionOperation]] operators
 */
object CollapseRepartition extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    // Case 1: When a Repartition has a child of Repartition or RepartitionByExpression,
    // 1) When the top node does not enable the shuffle (i.e., coalesce API), but the child
    //    enables the shuffle. Returns the child node if the last numPartitions is bigger;
    //    otherwise, keep unchanged.
    // 2) In the other cases, returns the top node with the child's child
    case r @ Repartition(_, _, child: RepartitionOperation) => (r.shuffle, child.shuffle) match {
      case (false, true) => if (r.numPartitions >= child.numPartitions) child else r
      case _ => r.copy(child = child.child)
    }
    // Case 2: When a RepartitionByExpression has a child of Repartition or RepartitionByExpression
    // we can remove the child.
    case r @ RepartitionByExpression(_, child: RepartitionOperation, _) =>
      r.copy(child = child.child)
  }
}

/**
 * Collapse Adjacent Window Expression.
 * - If the partition specs and order specs are the same and the window expression are
 *   independent, collapse into the parent.
 */
object CollapseWindow extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
    case w1 @ Window(we1, ps1, os1, w2 @ Window(we2, ps2, os2, grandChild))
        if ps1 == ps2 && os1 == os2 && w1.references.intersect(w2.windowOutputSet).isEmpty =>
      w1.copy(windowExpressions = we2 ++ we1, child = grandChild)
  }
}

/**
 * Generate a list of additional filters from an operator's existing constraint but remove those
 * that are either already part of the operator's condition or are part of the operator's child
 * constraints. These filters are currently inserted to the existing conditions in the Filter
 * operators and on either side of Join operators.
 *
 * Note: While this optimization is applicable to all types of join, it primarily benefits
 * Inner and LeftSemi joins.
 */
object InferFiltersFromConstraints extends Rule[LogicalPlan] with PredicateHelper {
  def apply(plan: LogicalPlan): LogicalPlan = {
    // Skip entirely when constraint propagation is disabled in the session conf.
    if (SQLConf.get.constraintPropagationEnabled) {
      inferFilters(plan)
    } else {
      plan
    }
  }

  private def inferFilters(plan: LogicalPlan): LogicalPlan = plan transform {
    case filter @ Filter(condition, child) =>
      val newFilters = filter.constraints --
        (child.constraints ++ splitConjunctivePredicates(condition))
      if (newFilters.nonEmpty) {
        Filter(And(newFilters.reduce(And), condition), child)
      } else {
        filter
      }

    case join @ Join(left, right, joinType, conditionOpt) =>
      // Only consider constraints that can be pushed down completely to either the left or the
      // right child
      val constraints = join.constraints.filter { c =>
        c.references.subsetOf(left.outputSet) || c.references.subsetOf(right.outputSet)
      }
      // Remove those constraints that are already enforced by either the left or the right child
      val additionalConstraints = constraints -- (left.constraints ++ right.constraints)
      val newConditionOpt = conditionOpt match {
        case Some(condition) =>
          val newFilters = additionalConstraints -- splitConjunctivePredicates(condition)
          if (newFilters.nonEmpty) Option(And(newFilters.reduce(And), condition)) else None
        case None =>
          additionalConstraints.reduceOption(And)
      }
      if (newConditionOpt.isDefined) Join(left, right, joinType, newConditionOpt) else join
  }
}

/**
 * Combines all adjacent [[Union]] operators into a single [[Union]].
 */
object CombineUnions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transformDown {
    case u: Union => flattenUnion(u, false)
    case Distinct(u: Union) => Distinct(flattenUnion(u, true))
  }

  // Iteratively flatten nested Unions (and, when `flattenDistinct`, Distinct-wrapped Unions)
  // using an explicit stack; children are pushed in reverse to preserve their order.
  private def flattenUnion(union: Union, flattenDistinct: Boolean): Union = {
    val stack = mutable.Stack[LogicalPlan](union)
    val flattened = mutable.ArrayBuffer.empty[LogicalPlan]
    while (stack.nonEmpty) {
      stack.pop() match {
        case Distinct(Union(children)) if flattenDistinct =>
          stack.pushAll(children.reverse)
        case Union(children) =>
          stack.pushAll(children.reverse)
        case child =>
          flattened += child
      }
    }
    Union(flattened)
  }
}

/**
 * Combines two adjacent [[Filter]] operators into one, merging the non-redundant conditions into
 * one conjunctive predicate.
 */
object CombineFilters extends Rule[LogicalPlan] with PredicateHelper {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case Filter(fc, nf @ Filter(nc, grandChild)) =>
      // Only keep the outer conjuncts that the inner filter does not already imply.
      (ExpressionSet(splitConjunctivePredicates(fc)) --
        ExpressionSet(splitConjunctivePredicates(nc))).reduceOption(And) match {
        case Some(ac) =>
          Filter(And(nc, ac), grandChild)
        case None =>
          nf
      }
  }
}

/**
 * Removes no-op SortOrder from Sort
 */
object EliminateSorts extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    // Foldable sort keys order nothing; drop them, and drop the Sort entirely when empty.
    case s @ Sort(orders, _, child) if orders.isEmpty || orders.exists(_.child.foldable) =>
      val newOrders = orders.filterNot(_.child.foldable)
      if (newOrders.isEmpty) child else s.copy(order = newOrders)
  }
}

/**
 * Removes filters that can be evaluated trivially. This can be done through the following ways:
 * 1) by eliding the filter for cases where it will always evaluate to `true`.
 * 2) by substituting a dummy empty relation when the filter will always evaluate to `false`.
 * 3) by eliminating the always-true conditions given the constraints on the child's output.
 */
object PruneFilters extends Rule[LogicalPlan] with PredicateHelper {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    // If the filter condition always evaluate to true, remove the filter.
    case Filter(Literal(true, BooleanType), child) => child
    // If the filter condition always evaluate to null or false,
    // replace the input with an empty relation.
    case Filter(Literal(null, _), child) => LocalRelation(child.output, data = Seq.empty)
    case Filter(Literal(false, BooleanType), child) => LocalRelation(child.output, data = Seq.empty)
    // If any deterministic condition is guaranteed to be true given the constraints on the
    // child's output, remove the condition
    case f @ Filter(fc, p: LogicalPlan) =>
      val (prunedPredicates, remainingPredicates) =
        splitConjunctivePredicates(fc).partition { cond =>
          cond.deterministic && p.constraints.contains(cond)
        }
      if (prunedPredicates.isEmpty) {
        f
      } else if (remainingPredicates.isEmpty) {
        p
      } else {
        val newCond = remainingPredicates.reduce(And)
        Filter(newCond, p)
      }
  }
}

/**
 * Pushes [[Filter]] operators through many operators iff:
 * 1) the operator is deterministic
 * 2) the predicate is deterministic and the operator will not change any of rows.
 *
 * This heuristic is valid assuming the expression evaluation cost is minimal.
 */
object PushDownPredicate extends Rule[LogicalPlan] with PredicateHelper {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    // SPARK-13473: We can't push the predicate down when the underlying projection output non-
    // deterministic field(s). Non-deterministic expressions are essentially stateful. This
    // implies that, for a given input row, the output are determined by the expression's initial
    // state and all the input rows processed before. In another word, the order of input rows
    // matters for non-deterministic expressions, while pushing down predicates changes the order.
    // This also applies to Aggregate.
    case Filter(condition, project @ Project(fields, grandChild))
      if fields.forall(_.deterministic) && canPushThroughCondition(grandChild, condition) =>
      // Create a map of Aliases to their values from the child projection.
      // e.g., 'SELECT a + b AS c, d ...' produces Map(c -> a + b).
      val aliasMap = AttributeMap(fields.collect {
        case a: Alias => (a.toAttribute, a.child)
      })
      project.copy(child = Filter(replaceAlias(condition, aliasMap), grandChild))

    case filter @ Filter(condition, aggregate: Aggregate)
      if aggregate.aggregateExpressions.forall(_.deterministic) =>
      // Find all the aliased expressions in the aggregate list that don't include any actual
      // AggregateExpression, and create a map from the alias to the expression
      val aliasMap = AttributeMap(aggregate.aggregateExpressions.collect {
        case a: Alias if a.child.find(_.isInstanceOf[AggregateExpression]).isEmpty =>
          (a.toAttribute, a.child)
      })

      // For each filter, expand the alias and check if the filter can be evaluated using
      // attributes produced by the aggregate operator's child operator.
      // `span` (not `partition`) keeps everything after the first non-deterministic conjunct
      // above the aggregate, preserving evaluation order for stateful predicates.
      val (candidates, containingNonDeterministic) =
        splitConjunctivePredicates(condition).span(_.deterministic)

      val (pushDown, rest) = candidates.partition { cond =>
        val replaced = replaceAlias(cond, aliasMap)
        cond.references.nonEmpty && replaced.references.subsetOf(aggregate.child.outputSet)
      }

      val stayUp = rest ++ containingNonDeterministic

      if (pushDown.nonEmpty) {
        val pushDownPredicate = pushDown.reduce(And)
        val replaced = replaceAlias(pushDownPredicate, aliasMap)
        val newAggregate = aggregate.copy(child = Filter(replaced, aggregate.child))
        // If there is no more filter to stay up, just eliminate the filter.
        // Otherwise, create "Filter(stayUp) <- Aggregate <- Filter(pushDownPredicate)".
        if (stayUp.isEmpty) newAggregate else Filter(stayUp.reduce(And), newAggregate)
      } else {
        filter
      }

    // Push [[Filter]] operators through [[Window]] operators. Parts of the predicate that can be
    // pushed beneath must satisfy the following conditions:
    // 1. All the expressions are part of window partitioning key. The expressions can be compound.
    // 2. Deterministic.
    // 3. Placed before any non-deterministic predicates.
    case filter @ Filter(condition, w: Window)
      if w.partitionSpec.forall(_.isInstanceOf[AttributeReference]) =>
      val partitionAttrs = AttributeSet(w.partitionSpec.flatMap(_.references))

      val (candidates, containingNonDeterministic) =
        splitConjunctivePredicates(condition).span(_.deterministic)

      val (pushDown, rest) = candidates.partition { cond =>
        cond.references.subsetOf(partitionAttrs)
      }

      val stayUp = rest ++ containingNonDeterministic

      if (pushDown.nonEmpty) {
        val pushDownPredicate = pushDown.reduce(And)
        val newWindow = w.copy(child = Filter(pushDownPredicate, w.child))
        if (stayUp.isEmpty) newWindow else Filter(stayUp.reduce(And), newWindow)
      } else {
        filter
      }

    case filter @ Filter(condition, union: Union) =>
      // Union could change the rows, so non-deterministic predicate can't be pushed down
      val (pushDown, stayUp) = splitConjunctivePredicates(condition).span(_.deterministic)

      if (pushDown.nonEmpty) {
        val pushDownCond = pushDown.reduceLeft(And)
        val output = union.output
        val newGrandChildren = union.children.map { grandchild =>
          // Rewrite the pushed condition in terms of each child's own attributes.
          val newCond = pushDownCond transform {
            case e if output.exists(_.semanticEquals(e)) =>
              grandchild.output(output.indexWhere(_.semanticEquals(e)))
          }
          assert(newCond.references.subsetOf(grandchild.outputSet))
          Filter(newCond, grandchild)
        }
        val newUnion = union.withNewChildren(newGrandChildren)
        if (stayUp.nonEmpty) {
          Filter(stayUp.reduceLeft(And), newUnion)
        } else {
          newUnion
        }
      } else {
        filter
      }

    case filter @ Filter(_, u: UnaryNode)
        if canPushThrough(u) && u.expressions.forall(_.deterministic) =>
      pushDownPredicate(filter, u.child) { predicate =>
        u.withNewChildren(Seq(Filter(predicate, u.child)))
      }
  }

  // Whitelist of unary operators a deterministic filter may be pushed through.
  private def canPushThrough(p: UnaryNode): Boolean = p match {
    // Note that some operators (e.g. project, aggregate, union) are being handled separately
    // (earlier in this rule).
    case _: AppendColumns => true
    case _: ResolvedHint => true
    case _: Distinct => true
    case _: Generate => true
    case _: Pivot => true
    case _: RepartitionByExpression => true
    case _: Repartition => true
    case _: ScriptTransformation => true
    case _: Sort => true
    case _ => false
  }

  private def pushDownPredicate(
      filter: Filter,
      grandchild: LogicalPlan)(insertFilter: Expression => LogicalPlan): LogicalPlan = {
    // Only push down the predicates that is deterministic and all the referenced attributes
    // come from grandchild.
    // TODO: non-deterministic predicates could be pushed through some operators that do not
    // change the rows.
    val (candidates, containingNonDeterministic) =
      splitConjunctivePredicates(filter.condition).span(_.deterministic)

    val (pushDown, rest) = candidates.partition { cond =>
      cond.references.subsetOf(grandchild.outputSet)
    }

    val stayUp = rest ++ containingNonDeterministic

    if (pushDown.nonEmpty) {
      val newChild = insertFilter(pushDown.reduceLeft(And))
      if (stayUp.nonEmpty) {
        Filter(stayUp.reduceLeft(And), newChild)
      } else {
        newChild
      }
    } else {
      filter
    }
  }

  /**
   * Check if we can safely push a filter through a projection, by making sure that predicate
   * subqueries in the condition do not contain the same attributes as the plan they are moved
   * into. This can happen when the plan and predicate subquery have the same source.
   */
  private def canPushThroughCondition(plan: LogicalPlan, condition: Expression): Boolean = {
    val attributes = plan.outputSet
    val matched = condition.find {
      case s: SubqueryExpression => s.plan.outputSet.intersect(attributes).nonEmpty
      case _ => false
    }
    matched.isEmpty
  }
}

/**
 * Pushes down [[Filter]] operators where the `condition` can be
 * evaluated using only the attributes of the left or right side of a join.
Other * [[Filter]] conditions are moved into the `condition` of the [[Join]]. * * And also pushes down the join filter, where the `condition` can be evaluated using only the * attributes of the left or right side of sub query when applicable. * * Check https://cwiki.apache.org/confluence/display/Hive/OuterJoinBehavior for more details */ object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper { /** * Splits join condition expressions or filter predicates (on a given join's output) into three * categories based on the attributes required to evaluate them. Note that we explicitly exclude * non-deterministic (i.e., stateful) condition expressions in canEvaluateInLeft or * canEvaluateInRight to prevent pushing these predicates on either side of the join. * * @return (canEvaluateInLeft, canEvaluateInRight, haveToEvaluateInBoth) */ private def split(condition: Seq[Expression], left: LogicalPlan, right: LogicalPlan) = { // Note: In order to ensure correctness, it's important to not change the relative ordering of // any deterministic expression that follows a non-deterministic expression. To achieve this, // we only consider pushing down those expressions that precede the first non-deterministic // expression in the condition.
val (pushDownCandidates, containingNonDeterministic) = condition.span(_.deterministic) val (leftEvaluateCondition, rest) = pushDownCandidates.partition(_.references.subsetOf(left.outputSet)) val (rightEvaluateCondition, commonCondition) = rest.partition(expr => expr.references.subsetOf(right.outputSet)) (leftEvaluateCondition, rightEvaluateCondition, commonCondition ++ containingNonDeterministic) } def apply(plan: LogicalPlan): LogicalPlan = plan transform { // push the where condition down into join filter case f @ Filter(filterCondition, Join(left, right, joinType, joinCondition)) => val (leftFilterConditions, rightFilterConditions, commonFilterCondition) = split(splitConjunctivePredicates(filterCondition), left, right) joinType match { case _: InnerLike => // push down the single side `where` condition into respective sides val newLeft = leftFilterConditions. reduceLeftOption(And).map(Filter(_, left)).getOrElse(left) val newRight = rightFilterConditions. reduceLeftOption(And).map(Filter(_, right)).getOrElse(right) val (newJoinConditions, others) = commonFilterCondition.partition(canEvaluateWithinJoin) val newJoinCond = (newJoinConditions ++ joinCondition).reduceLeftOption(And) val join = Join(newLeft, newRight, joinType, newJoinCond) if (others.nonEmpty) { Filter(others.reduceLeft(And), join) } else { join } case RightOuter => // push down the right side only `where` condition val newLeft = left val newRight = rightFilterConditions. reduceLeftOption(And).map(Filter(_, right)).getOrElse(right) val newJoinCond = joinCondition val newJoin = Join(newLeft, newRight, RightOuter, newJoinCond) (leftFilterConditions ++ commonFilterCondition). reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin) case LeftOuter | LeftExistence(_) => // push down the left side only `where` condition val newLeft = leftFilterConditions. 
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left) val newRight = right val newJoinCond = joinCondition val newJoin = Join(newLeft, newRight, joinType, newJoinCond) (rightFilterConditions ++ commonFilterCondition). reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin) case FullOuter => f // DO Nothing for Full Outer Join case NaturalJoin(_) => sys.error("Untransformed NaturalJoin node") case UsingJoin(_, _) => sys.error("Untransformed Using join node") } // push down the join filter into sub query scanning if applicable case j @ Join(left, right, joinType, joinCondition) => val (leftJoinConditions, rightJoinConditions, commonJoinCondition) = split(joinCondition.map(splitConjunctivePredicates).getOrElse(Nil), left, right) joinType match { case _: InnerLike | LeftSemi => // push down the single side only join filter for both sides sub queries val newLeft = leftJoinConditions. reduceLeftOption(And).map(Filter(_, left)).getOrElse(left) val newRight = rightJoinConditions. reduceLeftOption(And).map(Filter(_, right)).getOrElse(right) val newJoinCond = commonJoinCondition.reduceLeftOption(And) Join(newLeft, newRight, joinType, newJoinCond) case RightOuter => // push down the left side only join filter for left side sub query val newLeft = leftJoinConditions. reduceLeftOption(And).map(Filter(_, left)).getOrElse(left) val newRight = right val newJoinCond = (rightJoinConditions ++ commonJoinCondition).reduceLeftOption(And) Join(newLeft, newRight, RightOuter, newJoinCond) case LeftOuter | LeftAnti | ExistenceJoin(_) => // push down the right side only join filter for right sub query val newLeft = left val newRight = rightJoinConditions. 
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
          val newJoinCond = (leftJoinConditions ++ commonJoinCondition).reduceLeftOption(And)
          Join(newLeft, newRight, joinType, newJoinCond)
        // Full outer joins cannot have any filter pushed into either side.
        case FullOuter => j
        // These join types must have been rewritten away by the analyzer already.
        case NaturalJoin(_) => sys.error("Untransformed NaturalJoin node")
        case UsingJoin(_, _) => sys.error("Untransformed Using join node")
      }
  }
}

/**
 * Combines two adjacent [[Limit]] operators into one, merging the
 * expressions into one single expression.
 */
object CombineLimits extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    // Two stacked limits are equivalent to one limit taking the smaller of the two counts,
    // expressed here as Least(inner, outer).
    case GlobalLimit(le, GlobalLimit(ne, grandChild)) =>
      GlobalLimit(Least(Seq(ne, le)), grandChild)
    case LocalLimit(le, LocalLimit(ne, grandChild)) =>
      LocalLimit(Least(Seq(ne, le)), grandChild)
    case Limit(le, Limit(ne, grandChild)) =>
      Limit(Least(Seq(ne, le)), grandChild)
  }
}

/**
 * Check if there are any cartesian products between joins of any type in the optimized plan tree.
 * Throw an error if a cartesian product is found without an explicit cross join specified.
 * This rule is effectively disabled if the CROSS_JOINS_ENABLED flag is true.
 *
 * This rule must be run AFTER the ReorderJoin rule since the join conditions for each join must be
 * collected before checking if it is a cartesian product. If you have
 * SELECT * from R, S where R.r = S.s,
 * the join between R and S is not a cartesian product and therefore should be allowed.
 * The predicate R.r = S.s is not recognized as a join condition until the ReorderJoin rule.
 */
case class CheckCartesianProducts(conf: SQLConf)
  extends Rule[LogicalPlan] with PredicateHelper {
  /**
   * Check if a join is a cartesian product. Returns true if
   * there are no join conditions involving references from both left and right.
 */
  def isCartesianProduct(join: Join): Boolean = {
    val conditions = join.condition.map(splitConjunctivePredicates).getOrElse(Nil)
    // The join is a cartesian product when no single conjunct references columns from BOTH
    // sides; such a condition cannot constrain how rows from the two sides are paired.
    !conditions.map(_.references).exists(refs =>
      refs.exists(join.left.outputSet.contains) &&
        refs.exists(join.right.outputSet.contains))
  }

  def apply(plan: LogicalPlan): LogicalPlan =
    if (conf.crossJoinEnabled) {
      // Cross joins are explicitly allowed by configuration: accept the plan unchanged.
      plan
    } else plan transform {
      case j @ Join(left, right, Inner | LeftOuter | RightOuter | FullOuter, condition)
          if isCartesianProduct(j) =>
        throw new AnalysisException(
          s"""Detected cartesian product for ${j.joinType.sql} join between logical plans
             |${left.treeString(false).trim}
             |and
             |${right.treeString(false).trim}
             |Join condition is missing or trivial.
             |Use the CROSS JOIN syntax to allow cartesian products between these relations."""
            .stripMargin)
    }
}

/**
 * Speeds up aggregates on fixed-precision decimals by executing them on unscaled Long values.
 *
 * This uses the same rules for increasing the precision and scale of the output as
 * [[org.apache.spark.sql.catalyst.analysis.DecimalPrecision]].
 */
case class DecimalAggregates(conf: SQLConf) extends Rule[LogicalPlan] {
  import Decimal.MAX_LONG_DIGITS

  /** Maximum number of decimal digits representable precisely in a Double */
  private val MAX_DOUBLE_DIGITS = 15

  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case q: LogicalPlan => q transformExpressionsDown {
      // Window aggregates: rewrite Sum/Average over sufficiently small decimals to operate on
      // the unscaled Long value, restoring a decimal result afterwards.
      case we @ WindowExpression(ae @ AggregateExpression(af, _, _, _), _) => af match {
        // Sum reserves 10 extra digits of precision, so it is only safe when
        // prec + 10 still fits in a Long.
        case Sum(e @ DecimalType.Expression(prec, scale)) if prec + 10 <= MAX_LONG_DIGITS =>
          MakeDecimal(
            we.copy(windowFunction = ae.copy(aggregateFunction = Sum(UnscaledValue(e)))),
            prec + 10, scale)

        case Average(e @ DecimalType.Expression(prec, scale)) if prec + 4 <= MAX_DOUBLE_DIGITS =>
          val newAggExpr =
            we.copy(windowFunction = ae.copy(aggregateFunction = Average(UnscaledValue(e))))
          // The average of unscaled values is divided by 10^scale to undo the scaling, then
          // cast back to a decimal with 4 extra digits of precision and scale.
          Cast(
            Divide(newAggExpr, Literal.create(math.pow(10.0, scale), DoubleType)),
            DecimalType(prec + 4, scale + 4),
            Option(conf.sessionLocalTimeZone))

        case _ => we
      }
      // The same rewrite for plain (non-window) aggregate expressions.
      case ae @ AggregateExpression(af, _, _, _) => af match {
        case Sum(e @ DecimalType.Expression(prec, scale)) if prec + 10 <= MAX_LONG_DIGITS =>
          MakeDecimal(ae.copy(aggregateFunction = Sum(UnscaledValue(e))), prec + 10, scale)

        case Average(e @ DecimalType.Expression(prec, scale)) if prec + 4 <= MAX_DOUBLE_DIGITS =>
          val newAggExpr = ae.copy(aggregateFunction = Average(UnscaledValue(e)))
          Cast(
            Divide(newAggExpr, Literal.create(math.pow(10.0, scale), DoubleType)),
            DecimalType(prec + 4, scale + 4),
            Option(conf.sessionLocalTimeZone))

        case _ => ae
      }
    }
  }
}

/**
 * Converts local operations (i.e. ones that don't require data exchange) on LocalRelation to
 * another LocalRelation.
 *
 * This is relatively simple as it currently handles only a single case: Project.
*/ object ConvertToLocalRelation extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case Project(projectList, LocalRelation(output, data)) if !projectList.exists(hasUnevaluableExpr) => val projection = new InterpretedProjection(projectList, output) projection.initialize(0) LocalRelation(projectList.map(_.toAttribute), data.map(projection)) } private def hasUnevaluableExpr(expr: Expression): Boolean = { expr.find(e => e.isInstanceOf[Unevaluable] && !e.isInstanceOf[AttributeReference]).isDefined } } /** * Replaces logical [[Distinct]] operator with an [[Aggregate]] operator. * {{{ * SELECT DISTINCT f1, f2 FROM t ==> SELECT f1, f2 FROM t GROUP BY f1, f2 * }}} */ object ReplaceDistinctWithAggregate extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case Distinct(child) => Aggregate(child.output, child.output, child) } } /** * Replaces logical [[Deduplicate]] operator with an [[Aggregate]] operator. */ object ReplaceDeduplicateWithAggregate extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case Deduplicate(keys, child, streaming) if !streaming => val keyExprIds = keys.map(_.exprId) val aggCols = child.output.map { attr => if (keyExprIds.contains(attr.exprId)) { attr } else { Alias(new First(attr).toAggregateExpression(), attr.name)(attr.exprId) } } Aggregate(keys, aggCols, child) } } /** * Replaces logical [[Intersect]] operator with a left-semi [[Join]] operator. * {{{ * SELECT a1, a2 FROM Tab1 INTERSECT SELECT b1, b2 FROM Tab2 * ==> SELECT DISTINCT a1, a2 FROM Tab1 LEFT SEMI JOIN Tab2 ON a1<=>b1 AND a2<=>b2 * }}} * * Note: * 1. This rule is only applicable to INTERSECT DISTINCT. Do not use it for INTERSECT ALL. * 2. This rule has to be done after de-duplicating the attributes; otherwise, the generated * join conditions will be incorrect. 
*/ object ReplaceIntersectWithSemiJoin extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case Intersect(left, right) => assert(left.output.size == right.output.size) val joinCond = left.output.zip(right.output).map { case (l, r) => EqualNullSafe(l, r) } Distinct(Join(left, right, LeftSemi, joinCond.reduceLeftOption(And))) } } /** * Replaces logical [[Except]] operator with a left-anti [[Join]] operator. * {{{ * SELECT a1, a2 FROM Tab1 EXCEPT SELECT b1, b2 FROM Tab2 * ==> SELECT DISTINCT a1, a2 FROM Tab1 LEFT ANTI JOIN Tab2 ON a1<=>b1 AND a2<=>b2 * }}} * * Note: * 1. This rule is only applicable to EXCEPT DISTINCT. Do not use it for EXCEPT ALL. * 2. This rule has to be done after de-duplicating the attributes; otherwise, the generated * join conditions will be incorrect. */ object ReplaceExceptWithAntiJoin extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case Except(left, right) => assert(left.output.size == right.output.size) val joinCond = left.output.zip(right.output).map { case (l, r) => EqualNullSafe(l, r) } Distinct(Join(left, right, LeftAnti, joinCond.reduceLeftOption(And))) } } /** * Removes literals from group expressions in [[Aggregate]], as they have no effect to the result * but only makes the grouping key bigger. */ object RemoveLiteralFromGroupExpressions extends Rule[LogicalPlan] { def apply(plan: LogicalPlan): LogicalPlan = plan transform { case a @ Aggregate(grouping, _, _) if grouping.nonEmpty => val newGrouping = grouping.filter(!_.foldable) if (newGrouping.nonEmpty) { a.copy(groupingExpressions = newGrouping) } else { // All grouping expressions are literals. We should not drop them all, because this can // change the return semantics when the input of the Aggregate is empty (SPARK-17114). We // instead replace this by single, easy to hash/sort, literal expression. 
a.copy(groupingExpressions = Seq(Literal(0, IntegerType)))
      }
  }
}

/**
 * Removes repetition from group expressions in [[Aggregate]], as duplicates have no effect on
 * the result and only make the grouping key bigger.
 */
object RemoveRepetitionFromGroupExpressions extends Rule[LogicalPlan] {
  def apply(plan: LogicalPlan): LogicalPlan = plan transform {
    case a @ Aggregate(grouping, _, _) =>
      // ExpressionSet deduplicates semantically-equal grouping expressions.
      a.copy(groupingExpressions = ExpressionSet(grouping).toSeq)
  }
}
poffuomo/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala
Scala
apache-2.0
54,322
package models.tenant

/**
 * A work shift at a given place.
 *
 * @param id          Presumably the database key; defaults to None for a not-yet-persisted
 *                    shift — TODO confirm against the persistence layer.
 * @param place       Where the shift takes place.
 * @param shiftTypeId Id of the associated shift type (presumably a foreign key — verify
 *                    against the ShiftType model).
 */
case class Shift(id: Option[Long] = None, place: String, shiftTypeId: Long)
thomastoye/speelsysteem
app/models/tenant/Shift.scala
Scala
gpl-2.0
99
package scodec import scala.language.implicitConversions import java.nio.charset.Charset import java.security.cert.{ Certificate, X509Certificate } import java.util.UUID import scalaz.{ \\/, -\\/, \\/- } import scalaz.syntax.std.option._ import scodec.bits.{ BitVector, ByteOrdering, ByteVector } /** * Provides codecs for common types and combinators for building larger codecs. * * === Bits and Bytes Codecs === * * The simplest of the provided codecs are those that encode/decode `BitVector`s and `ByteVectors` directly. * These are provided by [[bits]] and [[bytes]] methods. These codecs encode all of the bits/bytes directly * in to the result and decode *all* of the remaining bits/bytes in to the result value. That is, the result * of `decode` always returns a empty bit vector for the remaining bits. * * Similarly, fixed size alternatives are provided by the `bits(size)` and `bytes(size)` methods, which * encode a fixed number of bits/bytes (or error if not provided the correct size) and decoded a fixed number * of bits/bytes (or error if that many bits/bytes are not available). * * There are more specialized codecs for working with bits, including [[ignore]] and [[constant]]. * * * === Numeric Codecs === * * There are built-in codecs for `Int`, `Long`, `Float`, and `Double`. * * There are a number of predefined integral codecs named using the form: {{{ [u]int${size}[L] }}} * where `u` stands for unsigned, `size` is replaced by one of `8, 16, 24, 32, 64`, and `L` stands for little-endian. * For each codec of that form, the type is `Codec[Int]` or `Codec[Long]` depending on the specified size. * For example, `int32` supports 32-bit big-endian 2s complement signed integers, and uint16L supports 16-bit little-endian * unsigned integers. * Note: `uint64[L]` are not provided because a 64-bit unsigned integer does not fit in to a `Long`. 
* * Additionally, methods of the form `[u]int[L](size: Int)` and `[u]long[L](size: Int)` exist to build arbitrarily * sized codecs, within the limitations of `Int` and `Long`. * * IEEE 754 floating point values are supported by the [[float]], [[floatL]], [[double]], and [[doubleL]] codecs. * * * === Miscellaneous Value Codecs === * * In addition to the numeric codecs, there are built-in codecs for `Boolean`, `String`, and `UUID`. * * Boolean values are supported by the [[bool]] codecs. * * * === Combinators === * * There are a number of methods provided that create codecs out of other codecs. These include simple combinators * such as [[fixedSizeBits]] and [[variableSizeBits]] and advanced combinators such as [[discriminated]], which * provides its own DSL for building a large codec out of many small codecs. For a list of all combinators, * see the Combinators section below. * * * === Tuple Codecs === * * The `~` operator supports combining a `Codec[A]` and a `Codec[B]` in to a `Codec[(A, B)]`. * * For example: {{{ val codec: Codec[Int ~ Int ~ Int] = uint8 ~ uint8 ~ uint8}}} * * Codecs generated with `~` result in left nested tuples. These left nested tuples can * be pulled back apart by pattern matching with `~`. For example: {{{ Codec.decode(uint8 ~ uint8 ~ uint8, bytes) map { case a ~ b ~ c => a + b + c } }}} * * Alternatively, a function of N arguments can be lifted to a function of left-nested tuples. For example: {{{ val add3 = (_: Int) + (_: Int) + (_: Int) Codec.decode(uint8 ~ uint8 ~ uint8, bytes) map add3 }}} * * Similarly, a left nested tuple can be created with the `~` operator. This is useful when creating the tuple structure * to pass to encode. For example: {{{ (uint8 ~ uint8 ~ uint8).encode(1 ~ 2 ~ 3) }}} * * Note: this design is heavily based on Scala's parser combinator library and the syntax it provides. 
* * * === Cryptography Codecs === * * There are codecs that support working with encrypted data ([[encrypted]]), digital signatures and checksums * ([[fixedSizeSignature]] and [[variableSizeSignature]]). Additionally, support for `java.security.cert.Certificate`s * is provided by [[certificate]] and [[x509Certificate]]. * * @groupname bits Bits and Bytes Codecs * @groupprio bits 0 * * @groupname numbers Number Codecs * @groupprio numbers 1 * * @groupname values Miscellaneous Value Codecs * @groupprio values 2 * * @groupname combinators Combinators * @groupprio combinators 3 * * @groupname tuples Tuple Support * @groupprio tuples 3 * * @groupname crypto Cryptography * @groupprio crypto 4 */ package object codecs { /** * Encodes by returning supplied bit vector; decodes by taking all remaining bits in the supplied bit vector. * @group bits */ def bits: Codec[BitVector] = BitVectorCodec.withToString("bits") /** * Encodes by returning the supplied bit vector if its length is `size` bits, otherwise returning error; * decodes by taking `size` bits from the supplied bit vector. * * @param size number of bits to encode/decode * @group bits */ def bits(size: Long): Codec[BitVector] = new Codec[BitVector] { private val codec = fixedSizeBits(size, BitVectorCodec) def encode(b: BitVector) = codec.encode(b) def decode(b: BitVector) = codec.decode(b) override def toString = s"bits($size)" } /** * Encodes by returning supplied byte vector as a bit vector; decodes by taking all remaining bits in supplied bit vector and converting to a byte vector. * @group bits */ def bytes: Codec[ByteVector] = bits.xmap[ByteVector](_.toByteVector, _.toBitVector).withToString("bytes") /** * Encodes by returning the supplied byte vector if its length is `size` bytes, otherwise returning error; * decodes by taking `size * 8` bits from the supplied bit vector and converting to a byte vector. 
* * @param size number of bits to encode/decode * @group bits */ def bytes(size: Int): Codec[ByteVector] = new Codec[ByteVector] { private val codec = fixedSizeBytes(size, BitVectorCodec).xmap[ByteVector](_.toByteVector, _.toBitVector) def encode(b: ByteVector) = codec.encode(b) def decode(b: BitVector) = codec.decode(b) override def toString = s"bytes($size)" } /** * Codec for 8-bit 2s complement bytes. * @group numbers */ val byte: Codec[Byte] = new ByteCodec(8, true) /** * Codec for 8-bit unsigned bytes. * @group numbers */ val ushort8: Codec[Short] = new ShortCodec(8, true, ByteOrdering.BigEndian) /** * Codec for 16-bit 2s complement big-endian shorts. * @group numbers */ val short16: Codec[Short] = new ShortCodec(16, true, ByteOrdering.BigEndian) /** * Codec for 8-bit 2s complement big-endian integers. * @group numbers */ val int8: Codec[Int] = new IntCodec(8, true, ByteOrdering.BigEndian) /** * Codec for 16-bit 2s complement big-endian integers. * @group numbers */ val int16: Codec[Int] = new IntCodec(16, true, ByteOrdering.BigEndian) /** * Codec for 24-bit 2s complement big-endian integers. * @group numbers */ val int24: Codec[Int] = new IntCodec(24, true, ByteOrdering.BigEndian) /** * Codec for 32-bit 2s complement big-endian integers. * @group numbers */ val int32: Codec[Int] = new IntCodec(32, true, ByteOrdering.BigEndian) /** * Codec for 64-bit 2s complement big-endian integers. * @group numbers */ val int64: Codec[Long] = new LongCodec(64, true, ByteOrdering.BigEndian) /** * Codec for 2-bit unsigned big-endian integers. * @group numbers */ val uint2: Codec[Int] = new IntCodec(2, false, ByteOrdering.BigEndian) /** * Codec for 4-bit unsigned big-endian integers. * @group numbers */ val uint4: Codec[Int] = new IntCodec(4, false, ByteOrdering.BigEndian) /** * Codec for 8-bit unsigned big-endian integers. * @group numbers */ val uint8: Codec[Int] = new IntCodec(8, false, ByteOrdering.BigEndian) /** * Codec for 16-bit unsigned big-endian integers. 
* @group numbers */ val uint16: Codec[Int] = new IntCodec(16, false, ByteOrdering.BigEndian) /** * Codec for 24-bit unsigned big-endian integers. * @group numbers */ val uint24: Codec[Int] = new IntCodec(24, false, ByteOrdering.BigEndian) /** * Codec for 32-bit unsigned big-endian integers. * @group numbers */ val uint32: Codec[Long] = new LongCodec(32, false, ByteOrdering.BigEndian) /** * Codec for 16-bit 2s complement little-endian shorts. * @group numbers */ val short16L: Codec[Short] = new ShortCodec(16, true, ByteOrdering.LittleEndian) /** * Codec for 8-bit 2s complement little-endian integers. * @group numbers */ val int8L: Codec[Int] = new IntCodec(8, true, ByteOrdering.LittleEndian) /** * Codec for 16-bit 2s complement little-endian integers. * @group numbers */ val int16L: Codec[Int] = new IntCodec(16, true, ByteOrdering.LittleEndian) /** * Codec for 24-bit 2s complement little-endian integers. * @group numbers */ val int24L: Codec[Int] = new IntCodec(24, true, ByteOrdering.LittleEndian) /** * Codec for 32-bit 2s complement little-endian integers. * @group numbers */ val int32L: Codec[Int] = new IntCodec(32, true, ByteOrdering.LittleEndian) /** * Codec for 64-bit 2s complement little-endian integers. * @group numbers */ val int64L: Codec[Long] = new LongCodec(64, true, ByteOrdering.LittleEndian) /** * Codec for 2-bit unsigned little-endian integers. * @group numbers */ val uint2L: Codec[Int] = new IntCodec(2, false, ByteOrdering.LittleEndian) /** * Codec for 4-bit unsigned little-endian integers. * @group numbers */ val uint4L: Codec[Int] = new IntCodec(4, false, ByteOrdering.LittleEndian) /** * Codec for 8-bit unsigned little-endian integers. * @group numbers */ val uint8L: Codec[Int] = new IntCodec(8, false, ByteOrdering.LittleEndian) /** * Codec for 16-bit unsigned little-endian integers. * @group numbers */ val uint16L: Codec[Int] = new IntCodec(16, false, ByteOrdering.LittleEndian) /** * Codec for 24-bit unsigned little-endian integers. 
* @group numbers */ val uint24L: Codec[Int] = new IntCodec(24, false, ByteOrdering.LittleEndian) /** * Codec for 32-bit unsigned little-endian integers. * @group numbers */ val uint32L: Codec[Long] = new LongCodec(32, false, ByteOrdering.LittleEndian) /** * Codec for n-bit 2s complement bytes. * @param size number of bits (must be 0 < size <= 8) * @group numbers */ def byte(size: Int): Codec[Byte] = new ByteCodec(size, true) /** * Codec for n-bit unsigned bytes. * @param size number of bits (must be 0 < size <= 7) * @group numbers */ def ubyte(size: Int): Codec[Byte] = new ByteCodec(size, false) /** * Codec for n-bit 2s complement big-endian shorts. * @param size number of bits (must be 0 < size <= 16) * @group numbers */ def short(size: Int): Codec[Short] = new ShortCodec(size, true, ByteOrdering.BigEndian) /** * Codec for n-bit unsigned big-endian shorts. * @param size number of bits (must be 0 < size <= 15) * @group numbers */ def ushort(size: Int): Codec[Short] = new ShortCodec(size, false, ByteOrdering.BigEndian) /** * Codec for n-bit 2s complement big-endian integers that are represented with `Int`. * @param size number of bits (must be 0 < size <= 32) * @group numbers */ def int(size: Int): Codec[Int] = new IntCodec(size, true, ByteOrdering.BigEndian) /** * Codec for n-bit unsigned big-endian integers that are represented with `Int`. * @param bits number of bits (must be 0 < size <= 31) * @group numbers */ def uint(bits: Int): Codec[Int] = new IntCodec(bits, false, ByteOrdering.BigEndian) /** * Codec for n-bit 2s complement big-endian integers that are represented with `Long`. * @param bits number of bits (must be 0 < size <= 64) * @group numbers */ def long(bits: Int): Codec[Long] = new LongCodec(bits, true, ByteOrdering.BigEndian) /** * Codec for n-bit unsigned big-endian integers that are represented with `Long`. 
* @param bits number of bits (must be 0 < size <= 63) * @group numbers */ def ulong(bits: Int): Codec[Long] = new LongCodec(bits, false, ByteOrdering.BigEndian) /** * Codec for n-bit 2s complement little-endian shorts. * @param size number of bits (must be 0 < size <= 16) * @group numbers */ def shortL(size: Int): Codec[Short] = new ShortCodec(size, true, ByteOrdering.LittleEndian) /** * Codec for n-bit unsigned little-endian shorts. * @param size number of bits (must be 0 < size <= 15) * @group numbers */ def ushortL(size: Int): Codec[Short] = new ShortCodec(size, false, ByteOrdering.LittleEndian) /** * Codec for n-bit 2s complement little-endian integers that are represented with `Int`. * @param bits number of bits (must be 0 < size <= 32) * @group numbers */ def intL(bits: Int): Codec[Int] = new IntCodec(bits, true, ByteOrdering.LittleEndian) /** * Codec for n-bit unsigned little-endian integers that are represented with `Int`. * @param bits number of bits (must be 0 < size <= 31) * @group numbers */ def uintL(bits: Int): Codec[Int] = new IntCodec(bits, false, ByteOrdering.LittleEndian) /** * Codec for n-bit 2s complement little-endian integers that are represented with `Long`. * @param bits number of bits (must be 0 < size <= 64) * @group numbers */ def longL(bits: Int): Codec[Long] = new LongCodec(bits, true, ByteOrdering.LittleEndian) /** * Codec for n-bit unsigned little-endian integers that are represented with `Long`. * @param bits number of bits (must be 0 < size <= 63) * @group numbers */ def ulongL(bits: Int): Codec[Long] = new LongCodec(bits, false, ByteOrdering.LittleEndian) /** * 32-bit big endian IEEE 754 floating point number. * @group numbers */ val float: Codec[Float] = new FloatCodec(ByteOrdering.BigEndian) /** * 32-bit little endian IEEE 754 floating point number. * @group numbers */ val floatL: Codec[Float] = new FloatCodec(ByteOrdering.LittleEndian) /** * 64-bit big endian IEEE 754 floating point number. 
* @group numbers */ val double: Codec[Double] = new DoubleCodec(ByteOrdering.BigEndian) /** * 64-bit little endian IEEE 754 floating point number. * @group numbers */ val doubleL: Codec[Double] = new DoubleCodec(ByteOrdering.LittleEndian) /** * 1-bit boolean codec, where false corresponds to bit value 0 and true corresponds to bit value 1. * @group values */ val bool: Codec[Boolean] = BooleanCodec /** * n-bit boolean codec, where false corresponds to bit vector of all 0s and true corresponds to all other vectors. * @group values */ def bool(n: Long): Codec[Boolean] = new Codec[Boolean] { private val zeros = BitVector.low(n) private val ones = BitVector.high(n) private val codec = bits(n).xmap[Boolean](bits => !(bits == zeros), b => if (b) ones else zeros) def encode(b: Boolean) = codec.encode(b) def decode(b: BitVector) = codec.decode(b) override def toString = "bool($n)" } /** * String codec that utilizes the implicit `Charset` to perform encoding/decoding. * * This codec does not encode the size of the string in to the output. Hence, decoding * a vector that has additional data after the encoded string will result in * unexpected output. Instead, it is common to use this codec along with either * [[fixedSizeBits]] or [[variableSizeBits]]. For example, a common encoding * is a size field, say 2 bytes, followed by the encoded string. This can be * accomplished with: {{{variableSizeBits(uint8, string)}}} * * @param charset charset to use to convert strings to/from binary * @group values */ def string(implicit charset: Charset): Codec[String] = new StringCodec(charset) /** * String codec that uses the `US-ASCII` charset. See [[string]] for more information on `String` codecs. * @group values */ val ascii = string(Charset.forName("US-ASCII")) /** * String codec that uses the `US-ASCII` charset. See [[string]] for more information on `String` codecs. 
* @group values */ val utf8 = string(Charset.forName("UTF-8")) /** * Encodes/decodes `UUID`s as 2 64-bit big-endian longs, first the high 64-bits then the low 64-bits. * @group values */ val uuid: Codec[UUID] = UuidCodec /** * Codec that always returns an empty vector from `encode` and always returns `(empty, value)` from `decode`. * This is often useful when combined with other codecs (e.g., the [[discriminated]]). * @param value value to return from decode * @group combinators */ def provide[A](value: A): Codec[A] = new ProvideCodec(value) /** * Codec that always encodes `size` 0 bits and always decodes `size` bits and then discards them, returning `()` instead. * @param size number of bits to ignore * @group bits */ def ignore(size: Long): Codec[Unit] = new IgnoreCodec(size) /** * Codec that always encodes the specified bits and always decodes the specified bits, returning `()` if the actual bits match * the specified bits and returning an error otherwise. * @param bits constant bits * @group bits */ def constant(bits: BitVector): Codec[Unit] = new ConstantCodec(bits) /** * Codec that always encodes the specified bytes and always decodes the specified bytes, returning `()` if the actual bytes match * the specified bytes and returning an error otherwise. * @param bytes constant bytes * @group bits */ def constant(bytes: ByteVector): Codec[Unit] = constant(bytes.bits) /** * Codec that always encodes the specified bits and always decodes the specified bits, returning `()` if the actual bits match * the specified bits and returning an error otherwise. * @param bits constant bits * @group bits */ def constant[A: Integral](bits: A*): Codec[Unit] = constant(BitVector(bits: _*)) /** * Codec that always encodes the specified bits and always decodes n bits, returning `()`, where n is the length of the * specified bits. 
* @param bits constant bits * @group bits */ def constantLenient(bits: BitVector): Codec[Unit] = new ConstantCodec(bits, false) /** * Codec that always encodes the specified bytes and always decodes n bytes, returning `()`, where n is the length of the * specified bytes. * @param bytes constant bytes * @group bits */ def constantLenient(bytes: ByteVector): Codec[Unit] = constantLenient(bytes.bits) /** * Codec that always encodes the specified bits and always decodes n bits, returning `()`, where n is the length of the * specified bits. * @param bits constant bits * @group bits */ def constantLenient[A: Integral](bits: A*): Codec[Unit] = constantLenient(BitVector(bits: _*)) /** * Provides implicit conversions from literal types to constant codecs. * * For example, with `literals._` imported, `constant(0x47) ~> uint8` * can be written as `0x47 ~> uint8`. * * Supports literal bytes, ints, `BitVector`s, and `ByteVector`s. * * @group bits */ object literals { implicit def constantIntCodec(a: Int): Codec[Unit] = constant(a) implicit def constantByteVectorCodec(a: ByteVector): Codec[Unit] = constant(a) implicit def constantBitVectorCodec(a: BitVector): Codec[Unit] = constant(a) } /** * Codec that limits the number of bits the specified codec works with. * * When encoding, if encoding with the specified codec * results in less than the specified size, the vector is right padded with 0 bits. If the result is larger than the specified * size, an encoding error is returned. * * When decoding, the specified codec is only given `size` bits. If the specified codec does not consume all the bits it was * given, any remaining bits are discarded. * * @param size number of bits * @param codec codec to limit * @group combinators */ def fixedSizeBits[A](size: Long, codec: Codec[A]): Codec[A] = new FixedSizeCodec(size, codec) /** * Byte equivalent of [[fixedSizeBits]]. 
* @param size number of bytes * @param codec codec to limit * @group combinators */ def fixedSizeBytes[A](size: Long, codec: Codec[A]): Codec[A] = new Codec[A] { private val fcodec = fixedSizeBits(size * 8, codec) def encode(a: A) = fcodec.encode(a) def decode(b: BitVector) = fcodec.decode(b) override def toString = s"fixedSizeBytes($size, $codec)" } /** * Codec that limits the number of bits the specified codec works with. * * If the encoded result is larger than the specified * size, an encoding error is returned. * * If encoding with the specified codec * results in less than the specified size, the vector is right padded by repeatedly encoding with padCodec. * An encoding error is returned if the padCodec result does not precisely fill the remaining space. * * When decoding, the specified codec is only given `size` bits. If the specified codec does not consume all the bits it was * given, all remaining bits are repeatedly decoded by padCodec. A decoding error is returned if any * padCodec decode returns an error. * * @param size number of bits * @param codec codec to limit * @param padCodec codec to pad * @group combinators */ def paddedFixedSizeBits[A](size: Long, codec: Codec[A], padCodec:Codec[Unit]): Codec[A] = new PaddedFixedSizeCodec(size, codec, padCodec) /** * Byte equivalent of [[paddedFixedSizeBits]]. * @param size number of bytes * @param codec codec to limit * @param padCodec codec to fill * @group combinators */ def paddedFixedSizeBytes[A](size: Long, codec: Codec[A], padCodec:Codec[Unit]): Codec[A] = new Codec[A] { private val fcodec = paddedFixedSizeBits(size * 8, codec, padCodec) def encode(a: A) = fcodec.encode(a) def decode(b: BitVector) = fcodec.decode(b) override def toString = s"paddedFixedSizeBytes($size, $codec, $padCodec)" } /** * Codec that supports vectors of the form `size ++ value` where the `size` field decodes to the bit length of the `value` field. 
* * For example, encoding the string `"hello"` with `variableSizeBits(uint8, ascii)` yields a vector of 6 bytes -- the first byte being * 0x05 and the next 5 bytes being the US-ASCII encoding of `"hello"`. * * The `size` field can be any `Int` codec. An optional padding can be applied to the size field. The `sizePadding` is added to * the calculated size before encoding, and subtracted from the decoded size before decoding the value. * * For example, encoding `"hello"` with `variableSizeBits(uint8, ascii, 1)` yields a vector of 6 bytes -- the first byte being * 0x06 and the next 5 bytes being the US-ASCII encoding of `"hello"`. * * @param size codec that encodes/decodes the size in bits * @param value codec the encodes/decodes the value * @param sizePadding number of bits to add to the size before encoding (and subtract from the size before decoding) * @group combinators */ def variableSizeBits[A](size: Codec[Int], value: Codec[A], sizePadding: Int = 0): Codec[A] = new VariableSizeCodec(size, value, sizePadding) /** * Byte equivalent of [[variableSizeBits]]. * @param size codec that encodes/decodes the size in bytes * @param value codec the encodes/decodes the value * @param sizePadding number of bytes to add to the size before encoding (and subtract from the size before decoding) * @group combinators */ def variableSizeBytes[A](size: Codec[Int], value: Codec[A], sizePadding: Int = 0): Codec[A] = new Codec[A] { private val codec = variableSizeBits(size.xmap[Int](_ * 8, _ / 8), value, sizePadding * 8) def encode(a: A) = codec.encode(a) def decode(b: BitVector) = codec.decode(b) override def toString = s"variableSizeBytes($size, $value)" } /** * Codec of `Option[A]` that delegates to a `Codec[A]` when the `included` parameter is true. * * When encoding, if `included` is true and the value to encode is a `Some`, the specified codec is used to encode the inner value. * Otherwise, an empty bit vector is returned. 
* * When decoding, if `included` is true, the specified codec is used and its result is wrapped in a `Some`. Otherwise, a `None` is returned. * * @param included whether this codec is enabled (meaning it delegates to the specified codec) or disabled, in which case it * encodes no bits and returns `None` from decode * @param codec codec to conditionally include * @group combinators */ def conditional[A](included: Boolean, codec: Codec[A]): Codec[Option[A]] = new ConditionalCodec(included, codec) /** * Codec of `Option[A]` that delegates to a `Codec[A]` when the `guard` codec decodes a true. * * When encoding, a `Some` results in `guard` encoding a `true` and `target` encoding the value. * A `None` results in `guard` encoding a false and the `target` not encoding anything. * * @param guard codec that determines whether the target codec is included * @param target codec to conditionally include * @group combinators */ def optional[A](guard: Codec[Boolean], target: Codec[A]): Codec[Option[A]] = either(guard, provide(()), target). xmap[Option[A]](_.toOption, _.toRightDisjunction(())). withToString(s"optional($guard, $target)") /** * Creates a `Codec[A]` from a `Codec[Option[A]]` and a fallback `Codec[A]`. * * When encoding, the `A` is encoded with `opt` (by wrapping it in a `Some`). * When decoding, `opt` is first used to decode the buffer. If it decodes a `Some(a)`, that * value is returned. If it decodes a `None`, `default` is used to decode the buffer. * * @param opt optional codec * @param default fallback codec used during decoding when `opt` decodes a `None` * @group combinators */ def withDefault[A](opt: Codec[Option[A]], default: Codec[A]): Codec[A] = { val paired = opt flatZip { case Some(a) => provide(a) case None => default } paired.xmap[A](_._2, a => (Some(a), a)).withToString(s"withDefault($opt, $default)") } /** * Creates a `Codec[A]` from a `Codec[Option[A]]` and a fallback value `A`. 
* * When encoding, the `A` is encoded with `opt` (by wrapping it in a `Some`). * When decoding, `opt` is first used to decode the buffer. If it decodes a `Some(a)`, that * value is returned. If it decodes a `None`, the `default` value is return. * * @param opt optional codec * @param default fallback value returned from `decode` when `opt` decodes a `None` * @group combinators */ def withDefaultValue[A](opt: Codec[Option[A]], default: A): Codec[A] = withDefault(opt, provide(default)) /** * Creates a codec that decodes true when the target codec decodes successfully and decodes false * when the target codec decodes unsuccessfully. Upon a successful decode of the target codec, the * remaining bits are returned, whereas upon an unsuccessful decode, the original input buffer is * returned. * * When encoding, a true results in the target codec encoding a unit whereas a false results * in encoding of an empty vector. * * @param target codec to recover errors from * @group combinators */ def recover(target: Codec[Unit]): Codec[Boolean] = new RecoverCodec(target, false) /** * Lookahead version of [[recover]] -- i.e., upon successful decoding with the target codec, * the original buffer is returned instead of the remaining buffer. * * @param target codec to recover errors from * @group combinators */ def lookahead(target: Codec[Unit]): Codec[Boolean] = new RecoverCodec(target, true) /** * Codec that encodes/decodes using the specified codecs by trying each codec in succession * and using the first successful result. * * @group combinators */ def choice[A](codecs: Codec[A]*): Codec[A] = Codec( Encoder.choiceEncoder(codecs: _*), Decoder.choiceDecoder(codecs: _*) ).withToString(codecs.mkString("choice(", ", ", ")")) /** * Codec that encodes/decodes an immutable `IndexedSeq[A]` from a `Codec[A]`. * * When encoding, each `A` in the sequence is encoded and all of the resulting vectors are concatenated. 
* * When decoding, `codec.decode` is called repeatedly until there are no more remaining bits and the value result * of each `decode` is returned in the sequence. * * @param codec codec to encode/decode a single element of the sequence * @group combinators */ @deprecated("Use vector codec or list codec instead.", "1.2.1") def repeated[A](codec: Codec[A]): Codec[collection.immutable.IndexedSeq[A]] = new VectorCodec(codec).xmap[collection.immutable.IndexedSeq[A]](identity, _.toVector) /** * Codec that encodes/decodes a `Vector[A]` from a `Codec[A]`. * * When encoding, each `A` in the vector is encoded and all of the resulting vectors are concatenated. * * When decoding, `codec.decode` is called repeatedly until there are no more remaining bits and the value result * of each `decode` is returned in the vector. * * @param codec codec to encode/decode a single element of the sequence * @group combinators */ def vector[A](codec: Codec[A]): Codec[Vector[A]] = new VectorCodec(codec) /** * Codec that encodes/decodes a `Vector[A]` of `N` elements using a `Codec[A]`. * * When encoding, the number of elements in the vector is encoded using `countCodec` * and the values are then each encoded using `valueCodec`. * * When decoding, the number of elements is decoded using `countCodec` and then that number of elements * are decoded using `valueCodec`. Any remaining bits are returned. * * Note: when the count is known statically, use `vectorOfN(provide(count), ...)`. * * @param codec codec to encode/decode a single element of the sequence * @group combinators */ def vectorOfN[A](countCodec: Codec[Int], valueCodec: Codec[A]): Codec[Vector[A]] = countCodec. flatZip { count => new VectorCodec(valueCodec, Some(count)) }. xmap[Vector[A]]({ case (cnt, vec) => vec }, vec => (vec.size, vec)). withToString(s"vectorOfN($countCodec, $valueCodec)") /** * Codec that encodes/decodes a `List[A]` from a `Codec[A]`. 
* * When encoding, each `A` in the list is encoded and all of the resulting vectors are concatenated. * * When decoding, `codec.decode` is called repeatedly until there are no more remaining bits and the value result * of each `decode` is returned in the list. * * @param codec codec to encode/decode a single element of the sequence * @group combinators */ def list[A](codec: Codec[A]): Codec[List[A]] = new ListCodec(codec) /** * Codec that encodes/decodes a `List[A]` of `N` elements using a `Codec[A]`. * * When encoding, the number of elements in the list is encoded using `countCodec` * and the values are then each encoded using `valueCodec`. * * When decoding, the number of elements is decoded using `countCodec` and then that number of elements * are decoded using `valueCodec`. Any remaining bits are returned. * * Note: when the count is known statically, use `listOfN(provide(count), ...)`. * * @param codec codec to encode/decode a single element of the sequence * @group combinators */ def listOfN[A](countCodec: Codec[Int], valueCodec: Codec[A]): Codec[List[A]] = countCodec. flatZip { count => new ListCodec(valueCodec, Some(count)) }. xmap[List[A]]({ case (cnt, xs) => xs }, xs => (xs.size, xs)). withToString(s"listOfN($countCodec, $valueCodec)") /** * Combinator that chooses amongst two codecs based on an implicitly available byte ordering. * @param big codec to use when big endian * @param little codec to use when little endian * @group combinators */ def endiannessDependent[A](big: Codec[A], little: Codec[A])(implicit ordering: ByteOrdering): Codec[A] = ordering match { case ByteOrdering.BigEndian => big case ByteOrdering.LittleEndian => little } /** * Disjunction codec that supports vectors of form `indicator ++ (left or right)` where a * value of `false` for the indicator indicates it is followed by a left value and a value * of `true` indicates it is followed by a right value. 
* @param indicator codec that encodes/decodes false for left and true for right * @param left codec the encodes a left value * @param right codec the encodes a right value * @group combinators */ def either[L, R](indicator: Codec[Boolean], left: Codec[L], right: Codec[R]): Codec[L \\/ R] = discriminated[L \\/ R].by(indicator) .| (false) { case -\\/(l) => l } (\\/.left) (left) .| (true) { case \\/-(r) => r } (\\/.right) (right) /** * Like [[either]], but encodes the standard library `Either` type. * @param indicator codec that encodes/decodes false for left and true for right * @param left codec the encodes a left value * @param right codec the encodes a right value * @group combinators */ def stdEither[L, R](indicator: Codec[Boolean], left: Codec[L], right: Codec[R]): Codec[Either[L,R]] = discriminated[Either[L,R]].by(indicator) .| (false) { case Left(l) => l } (Left.apply) (left) .| (true) { case Right(r) => r } (Right.apply) (right) /** * Provides a `Codec[A]` that delegates to a lazily evaluated `Codec[A]`. * @group combinators */ def lazily[A](codec: => Codec[A]): Codec[A] = new Codec[A] { private lazy val c = codec def encode(a: A) = c.encode(a) def decode(b: BitVector) = c.decode(b) override def toString = s"lazily($c)" } /** * Codec that always fails encoding and decoding with the specified message. * * @group combinators */ def fail[A](err: Err): Codec[A] = fail(err, err) /** * Codec that always fails encoding and decoding with the specified messages. * * @group combinators */ def fail[A](encErr: Err, decErr: Err): Codec[A] = new FailCodec[A](encErr, decErr) /** * Codec that encrypts and decrypts using a `javax.crypto.Cipher`. * * Encoding a value of type `A` is delegated to the specified codec and the resulting bit vector is encrypted * with a cipher provided by the implicit [[CipherFactory]]. * * Decoding first decrypts all of the remaining bits and then decodes the decrypted bits with the * specified codec. 
Successful decoding always returns no remaining bits, even if the specified * codec does not consume all decrypted bits. * * @param codec codec that encodes a value to plaintext bits and decodes plaintext bits to a value * @param cipherFactory factory to use for encryption/decryption * @group crypto */ def encrypted[A](codec: Codec[A])(implicit cipherFactory: CipherFactory): Codec[A] = new CipherCodec(codec)(cipherFactory) /** * Codec that includes a signature of the encoded bits. * * Encoding a value of type `A` is delegated to the specified codec and then a signature of those bits is * appended using the specified [[SignatureFactory]] to perform signing. * * Decoding first decodes using the specified codec and then all of the remaining bits are treated as * the signature of the decoded bits. The signature is verified and if it fails to verify, an error * is returned. * * Note: because decoding is first delegated to the specified code, care must be taken to ensure * that codec does not consume the signature bits. For example, if the target codec is an unbounded * string (e.g., ascii, utf8), decoding an encoded vector will result in the string codec trying to * decode the signature bits as part of the string. * * Use [[SignatureFactory]] or [[ChecksumFactory]] to create a [[SignerFactory]]. * * @param size size in bytes of signature * @param codec codec to use to encode/decode value field * @param signatureFactory factory to use for signing/verifying * @group crypto */ def fixedSizeSignature[A](size: Int)(codec: Codec[A])(implicit signerFactory: SignerFactory): Codec[A] = new SignatureCodec(codec, fixedSizeBytes(size, BitVectorCodec))(signerFactory) /** * Codec that includes a signature of the encoded bits. * * Same functionality as [[fixedSizeSignature]] with one difference -- the size of the signature bytes are * written between the encoded bits and the signature bits. * * Use [[SignatureFactory]] or [[ChecksumFactory]] to create a [[SignerFactory]]. 
* * @param size codec to use to encode/decode size of signature field * @param codec codec to use to encode/decode value field * @param signatureFactory factory to use for signing/verifying * @group crypto */ def variableSizeSignature[A](size: Codec[Int])(codec: Codec[A])(implicit signerFactory: SignerFactory): Codec[A] = new SignatureCodec(codec, variableSizeBytes(size, BitVectorCodec))(signerFactory) /** * Codec that encodes/decodes certificates using their default encoding. * * @param certType certificate type to pass to `java.security.cert.CertificateFactory.getInstance` * @group crypto */ def certificate(certType: String): Codec[Certificate] = new CertificateCodec(certType) /** * Codec that encodes/decodes certificates using their default encoding. * * @group crypto */ val x509Certificate: Codec[X509Certificate] = certificate("X.509"). xmap[X509Certificate](_.asInstanceOf[X509Certificate], identity). withToString("x509certificate") /** * Provides the `|` method on `String` that allows creation of a named codec. * * Usage: {{{val codec = "id" | uint8}}} * * @group combinators */ final implicit class StringEnrichedWithCodecNamingSupport(val name: String) extends AnyVal { /** Names the specified codec, resulting in the name being included in error messages. */ def |[A](codec: Codec[A]): Codec[A] = new NamedCodec(name, codec) } // Tuple codec syntax /** * Type alias for Tuple2 in order to allow left nested tuples to be written as A ~ B ~ C ~ .... * @group tuples */ final type ~[+A, +B] = (A, B) /** * Extractor that allows pattern matching on the tuples created by tupling codecs. * @group tuples */ object ~ { def unapply[A, B](t: (A, B)): Option[(A, B)] = Some(t) } /** * Allows creation of left nested pairs by successive usage of `~` operator. 
* @group tuples */ final implicit class ValueEnrichedWithTuplingSupport[A](val a: A) { def ~[B](b: B): (A, B) = (a, b) } /** * Allows use of a 2-arg function as a single arg function that takes a left-associated stack of pairs with 2 total elements. * @group tuples */ final implicit def liftF2ToNestedTupleF[A, B, X](fn: (A, B) => X): ((A, B)) => X = fn.tupled /** * Allows use of a 3-arg function as a single arg function that takes a left-associated stack of pairs with 3 total elements. * @group tuples */ final implicit def liftF3ToNestedTupleF[A, B, C, X](fn: (A, B, C) => X): (((A, B), C)) => X = { case a ~ b ~ c => fn(a, b, c) } /** * Allows use of a 4-arg function as a single arg function that takes a left-associated stack of pairs with 4 total elements. * @group tuples */ final implicit def liftF4ToNestedTupleF[A, B, C, D, X](fn: (A, B, C, D) => X): ((((A, B), C), D)) => X = { case a ~ b ~ c ~ d => fn(a, b, c, d) } /** * Allows use of a 5-arg function as a single arg function that takes a left-associated stack of pairs with 5 total elements. * @group tuples */ final implicit def liftF5ToNestedTupleF[A, B, C, D, E, X](fn: (A, B, C, D, E) => X): (((((A, B), C), D), E)) => X = { case a ~ b ~ c ~ d ~ e => fn(a, b, c, d, e) } /** * Allows use of a 6-arg function as a single arg function that takes a left-associated stack of pairs with 6 total elements. * @group tuples */ final implicit def liftF6ToNestedTupleF[A, B, C, D, E, F, X](fn: (A, B, C, D, E, F) => X): ((((((A, B), C), D), E), F)) => X = { case a ~ b ~ c ~ d ~ e ~ f => fn(a, b, c, d, e, f) } /** * Allows use of a 7-arg function as a single arg function that takes a left-associated stack of pairs with 7 total elements. 
* @group tuples */ final implicit def liftF7ToNestedTupleF[A, B, C, D, E, F, G, X](fn: (A, B, C, D, E, F, G) => X): (((((((A, B), C), D), E), F), G)) => X = { case a ~ b ~ c ~ d ~ e ~ f ~ g => fn(a, b, c, d, e, f, g) } /** * Allows use of an 8-arg function as a single arg function that takes a left-associated stack of pairs with 8 total elements. * @group tuples */ final implicit def liftF8ToNestedTupleF[A, B, C, D, E, F, G, H, X](fn: (A, B, C, D, E, F, G, H) => X): ((((((((A, B), C), D), E), F), G), H)) => X = { case a ~ b ~ c ~ d ~ e ~ f ~ g ~ h => fn(a, b, c, d, e, f, g, h) } // DiscriminatorCodec syntax /** * Provides syntax for building a [[DiscriminatorCodec]]. * * Usage: {{{ val codecA: Codec[A] = ... val codecB: Codec[B] = ... val codecE: Codec[Either[A,B]] = discriminated[Either[A,B]].by(uint8) .| (0) { case Left(l) => l } (Left.apply) (codecA) .| (1) { case Right(r) => r } (Right.apply) (codecB) .build }}} This encodes an `Either[A,B]` by checking the given patterns in sequence from top to bottom. For the first pattern that matches, it emits the corresponding discriminator value: `0` for `Left` and `1` for `Right`, encoded via the `uint8` codec. It then emits either an encoded `A`, encoded using `codecA`, or an encoded `B`, using `codecB`. Decoding is the mirror of this; the returned `codecE` will first read an `Int`, using the `uint8` codec. If it is a `0`, it then runs `codecA`, and injects the result into `Either` via `Left.apply`. If it is a `1`, it runs `codecB` and injects the result into `Either` via `Right.apply`. There are a few variations on this syntax, depending on whether you have a `PartialFunction` from the base type or an `B => Option[S]` function from the base type to the subcase. If you you already have a codec specific to the case, you can omit the 'injection' function. 
For instance: {{{ val leftCodec: Codec[Left[A,B]] = codecA.pxmap(Left.apply, Left.unapply) val rightCodec: Codec[Right[A,B]] = codecB.pxmap(Left.apply, Left.unapply) val codecE: Codec[Either[A,B]] = discriminated[Either[A,B]].by(uint8) .\\ (0) { case l@Left(_) => l } (leftCodec) // backslash instead of '|' .\\ (1) { case r@Right(_) => r } (rightCodec) }}} The actual formatted bits are identical with either formulation. * @group combinators */ final def discriminated[A]: NeedDiscriminatorCodec[A] = new NeedDiscriminatorCodec[A] { final def by[B](discriminatorCodec: Codec[B]): DiscriminatorCodec[A, B] = new DiscriminatorCodec[A, B](discriminatorCodec, Vector()) } /** * Provides a codec for an enumerated set of values, where each enumerated value is * mapped to a tag. * * @param discriminatorCodec codec used to encode/decode tag value * @param mappings mapping from tag values to/from enum values * @group combinators */ final def mappedEnum[A, B](discriminatorCodec: Codec[B], mappings: (A, B)*): Codec[A] = mappedEnum(discriminatorCodec, mappings.toMap) /** * Provides a codec for an enumerated set of values, where each enumerated value is * mapped to a tag. * * @param discriminatorCodec codec used to encode/decode tag value * @param map mapping from tag values to/from enum values * @group combinators */ final def mappedEnum[A, B](discriminatorCodec: Codec[B], map: Map[A, B]): Codec[A] = { map.foldLeft(discriminated[A].by(discriminatorCodec)) { case (acc, (value, tag)) => acc.subcaseO(tag)(a => if (a == value) Some(a) else None)(provide(value)) } } }
danielwegener/scodec
src/main/scala/scodec/codecs/package.scala
Scala
bsd-3-clause
44,244
package ildl.plugin package transform package bridge import scala.tools.nsc.transform.InfoTransform trait BridgeInfoTransformer extends InfoTransform { self: BridgeComponent => import global._ import helper._ override def transformInfo(sym: Symbol, tpe: Type): Type = { // make sure description objects do not nest if (currentRun.compiles(sym) && sym.isTransfDescriptionObject) { val enclosing = sym.ownerChain.find(s => (s != sym) && (s.isTransfDescriptionObject)) if (enclosing.isDefined) { global.reporter.error(sym.pos, s"The ${sym.name} transformation description object is nested inside the " + s"${enclosing.get.name} transformation description object, a construction which " + s"is illegal (al least for now).") } } if (tpe.finalResultType.hasHighAnnot) { // Match exterior description object val includedDescr = tpe.finalResultType.getAnnotation(ildlHighClass).get.args.headOption val enclosingDescr = sym.ownerChain.find(s => s.isTransfDescriptionObject) (includedDescr, enclosingDescr) match { case (_, None) => global.reporter.error(sym.pos, s"The ${sym} contains the @high annotation despite not being enclosed in a " + s"transformation description object. 
This is an invalid use of the @high " + s"annotation.") tpe.withoutHighAnnot case (Some(descr), _) => transformHighAnnotation(sym, tpe, gen.mkAttributedRef(descr.symbol)) case (_, Some(descr)) => transformHighAnnotation(sym, tpe, gen.mkAttributedRef(descr)) } } else tpe } def transformHighAnnotation(sym: Symbol, tpe: Type, descr: Tree): Type = { tpe match { case PolyType(targs, tpe) => PolyType(targs, transformHighAnnotation(sym, tpe, descr)) case MethodType(args, tpe) => MethodType(args, transformHighAnnotation(sym, tpe, descr)) case NullaryMethodType(tpe) => NullaryMethodType(transformHighAnnotation(sym, tpe, descr)) case _ if tpe.hasHighAnnot => val highTpe = getDescrHighType(descr.getDescrObject, tpe.withoutHighAnnot) if (highTpe != ErrorType) { highTpe.withReprAnnot(descr) } else { global.reporter.error(sym.pos, s"The ${descr.symbol.name} transformation description object contains a " + s"definition error: The @high annotation in $sym's type is applied to " + s"something that does not match the representation type. This is an error " + s"in the transformation description object definition.") tpe.withoutHighAnnot } case _ => tpe } } }
miniboxing/ildl-plugin
components/plugin/src/ildl/plugin/transform/bridge/BridgeInfoTransformer.scala
Scala
bsd-3-clause
2,871