code
stringlengths 5
1M
| repo_name
stringlengths 5
109
| path
stringlengths 6
208
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 5
1M
|
---|---|---|---|---|---|
package controllers
import play.api.data.Form
import play.api.libs.json.Json
import lila.api.Context
import lila.app._
import lila.common.{ Captcha, LilaCookie, HTTPRequest }
import lila.i18n.{ Translation, TransInfo }
import views._
// Controller for internationalization features: language selection,
// translation contribution forms, and the translation-version feed.
object I18n extends LilaController {

  private def env = Env.i18n

  // Switches the UI language. Validates the posted "lang" against the
  // translation pool; if the logged-in user's lang differs, persists it;
  // then (HTML) redirects to the same path on the lang subdomain, or
  // (API) returns the chosen lang as JSON.
  def select = OpenBody { implicit ctx =>
    import play.api.data.Forms._
    import play.api.data._
    implicit val req = ctx.body
    Form(single("lang" -> text.verifying(env.pool contains _))).bindFromRequest.fold(
      _ => notFound,
      lang => {
        // Only write to the repo when the new lang actually differs.
        ctx.me.filterNot(_.lang contains lang) ?? { me =>
          lila.user.UserRepo.setLang(me.id, lang)
        }
      } >> negotiate(
        html = Redirect {
          // Target host is e.g. "<protocol>fr.<domain>"; the path+query is
          // carried over from the referer when it parses as a URL.
          s"${Env.api.Net.Protocol}${lang}.${Env.api.Net.Domain}" + {
            HTTPRequest.referer(ctx.req).fold(routes.Lobby.home.url) { str =>
              try {
                val pageUrl = new java.net.URL(str);
                val path = pageUrl.getPath
                val query = pageUrl.getQuery
                if (query == null) path
                else path + "?" + query
              }
              catch {
                // Unparseable referer: fall back to the lobby.
                case e: java.net.MalformedURLException => routes.Lobby.home.url
              }
            }
          }
        }.fuccess,
        api = _ => Ok(Json.obj("lang" -> lang)).fuccess)
    )
  }

  // Lists translations matching the request's Accept-Language headers
  // ("mines") alongside all available translations.
  def contribute = Open { implicit ctx =>
    val mines = (ctx.req.acceptLanguages map env.transInfos.get).toList.flatten.distinct
    Ok(html.i18n.contribute(env.transInfos.all, mines)).fuccess
  }

  // Renders the translation form (with captcha) for the given language.
  def translationForm(lang: String) = Auth { implicit ctx =>
    me =>
      OptionFuOk(infoAndContext(lang)) {
        case (info, context) => env.forms.translationWithCaptcha map {
          case (form, captcha) => renderTranslationForm(form, info, captcha, context = context)
        }
      }
  }

  // Handles a submitted translation: re-renders the form on validation
  // failure, otherwise processes the submission and redirects with a
  // success flash plus a cookie hiding further contribution calls.
  def translationPost(lang: String) = AuthBody { implicit ctx =>
    me =>
      OptionFuResult(infoAndContext(lang)) {
        case (info, context) =>
          implicit val req = ctx.body
          val data = env.forms.decodeTranslationBody
          FormFuResult(env.forms.translation) { form =>
            env.forms.anyCaptcha map { captcha =>
              renderTranslationForm(form, info, captcha, data = data, context = context)
            }
          } { metadata =>
            env.forms.process(lang, metadata, data, me.username) inject {
              Redirect(routes.I18n.contribute).flashing("success" -> "1") withCookies
                LilaCookie.cookie(env.hideCallsCookieName, "1", maxAge = Some(60 * 24))
            }
          }
      }
  }

  // Looks up the TransInfo for `lang` (None for unknown langs) and pairs
  // it with the current translation context.
  private def infoAndContext(lang: String) = env.transInfos.get(lang) ?? { i =>
    env.context.get map (i -> _) map (_.some)
  }

  // Shared renderer for the translation form views.
  private def renderTranslationForm(
    form: Form[_],
    info: TransInfo,
    captcha: Captcha,
    context: Map[String, String],
    data: Map[String, String] = Map.empty)(implicit ctx: Context) =
    html.i18n.translationForm(
      info,
      form,
      env.keys,
      env.pool.default,
      env.translator.rawTranslation(info.lang) _,
      captcha,
      data = data,
      context = context)

  // Serves translation updates newer than the given version, as JSON.
  def fetch(from: Int) = Open { implicit ctx =>
    JsonOk(env jsonFromVersion from)
  }

  // Sets the hide-calls cookie and sends the user back to the lobby.
  def hideCalls = Open { implicit ctx =>
    implicit val req = ctx.req
    val cookie = LilaCookie.cookie(
      env.hideCallsCookieName,
      "1",
      maxAge = env.hideCallsCookieMaxAge.some)
    fuccess(Redirect(routes.Lobby.home()) withCookies cookie)
  }
}
| JimmyMow/lila | app/controllers/I18n.scala | Scala | mit | 3,523 |
package net.ruippeixotog.scalafbp.component.ppl.distrib
import spray.json._
import thinkbayes.Pmf
import net.ruippeixotog.scalafbp.component._
import net.ruippeixotog.scalafbp.thinkbayes.Implicits._
// Spec for the Uniform component: each element set received on `elemsPort`
// should produce a uniform probability distribution on `varPort`.
class UniformSpec extends ComponentSpec with AutoTerminateSpec {
  val component = Uniform

  "A Uniform component" should {

    "Emit a uniform distribution for each element set it receives" in new ComponentInstance {
      // Four distinct elements -> each with probability 1/4.
      Uniform.elemsPort.send(Set("a".toJson, "b".toJson, "c".toJson, "d".toJson))
      Uniform.varPort must emit(
        Pmf("a".toJson -> 0.25, "b".toJson -> 0.25, "c".toJson -> 0.25, "d".toJson -> 0.25).toPVar)

      // Singleton set -> a point mass with probability 1.
      Uniform.elemsPort.send(Set(5.toJson))
      Uniform.varPort must emit(Pmf(5.toJson -> 1.0).toPVar)

      // Empty set -> the empty distribution.
      Uniform.elemsPort.send(Set.empty)
      Uniform.varPort must emit(Pmf.empty[JsValue].toPVar)
    }

    terminateItselfWhenAllInPortsAreClosed
    terminateItselfWhenAllOutPortsAreClosed
  }
}
| ruippeixotog/scalafbp | components/ppl/src/test/scala/net/ruippeixotog/scalafbp/component/ppl/distrib/UniformSpec.scala | Scala | mit | 943 |
package net.cucumbersome.rpgRoller.warhammer.combat.domain
import cats.data._
import net.cucumbersome.rpgRoller.warhammer.combat.domain.initiative.Initiative
import net.cucumbersome.rpgRoller.warhammer.player.CombatActor
/** Immutable combat state: the ordered list of actors currently taking part. */
private[combat] case class Combat(combatActors: List[InCombatActor])

private[domain] object Combat {

  /** Appends the given actors to the combat, keeping the existing order. */
  def addActor(players: List[InCombatActor]): State[Combat, Unit] = State[Combat, Unit] {
    // `()` is the unit value; the previously used `Unit` companion object
    // only type-checks via value discarding and is deprecated as a value.
    case Combat(currentPlayers) => (Combat(currentPlayers ++ players), ())
  }

  /** Rolls initiative with the supplied die and re-sorts the actors accordingly. */
  def sortByInitiative(roll: () => Int): State[Combat, Unit] = State[Combat, Unit] {
    case Combat(players) => (Combat(Initiative.generateInitiativeAndSort(roll)(players)), ())
  }

  /**
   * Removes the actors whose ids appear in `ids`.
   * The state result is the list of actors that were actually removed.
   */
  def removeActors(ids: List[InCombatActor.Id]): State[Combat, List[InCombatActor]] = State[Combat, List[InCombatActor]] {
    case Combat(players) =>
      val remaining = players.filterNot(p => ids.contains(p.id))
      (Combat(remaining), players.diff(remaining))
  }

  /**
   * Sets `newHealth` on the given actor, then drops every actor whose
   * current health is zero or below. The state result is the list of
   * actors dropped by this update.
   */
  def updateHealth(actor: InCombatActor, newHealth: CombatActor.Health): State[Combat, List[InCombatActor]] = State[Combat, List[InCombatActor]] {
    case Combat(players) =>
      val updated = players.map(ca => if (ca == actor) actor.copy(currentHealth = newHealth) else ca)
      val defeated = updated.filter(ca => ca.currentHealth.data <= 0)
      (
        Combat(updated.filterNot(defeated.contains(_))),
        defeated
      )
  }

  /** A combat with no participants. */
  def empty: Combat = Combat(Nil)
}
| CucumisSativus/rpgRollerBackend | src/main/scala/net/cucumbersome/rpgRoller/warhammer/combat/domain/Combat.scala | Scala | mit | 1,476 |
package com.xhachi.gae4s.datastore
import com.google.appengine.tools.development.testing.LocalDatastoreServiceTestConfig
import com.xhachi.gae4s.tests.AppEngineTestSuite
import org.scalatest.{FunSuite, Matchers}
// Exercises UserStore / Datastore against the local App Engine datastore stub:
// key allocation, CRUD, optimistic-locking versions, timestamps, and queries
// (both datastore-backed via UserStore and in-memory via SeqStore).
class UserTest extends FunSuite with AppEngineTestSuite with Matchers {

  override def getConfig = new LocalDatastoreServiceTestConfig :: super.getConfig

  implicit val context = NoAncestorEntityStoreContext
  implicit val meta = UserStore.meta

  test("allocateしたKeyが取得できること") {
    val key = UserStore.allocateKey
    assert(key.nameOption.isEmpty)
    assert(key.idOption.isDefined)
    assert(key.idOption.get > 0)
  }

  test("allocateしたKeyが取得できidが異なること") {
    val key1 = UserStore.allocateKey
    val key2 = UserStore.allocateKey
    assert(key1.idOption.get != key2.idOption.get)
  }

  test("IDを指定したKeyが取得できること") {
    val key = UserStore.createKeyWithId(1)
    assert(key.nameOption.isEmpty)
    assert(key.idOption.isDefined)
    assert(key.idOption.get == 1)
  }

  test("Nameを指定したKeyが取得できること") {
    val key = UserStore.createKeyWithName("key_name")
    assert(key.idOption.isEmpty)
    assert(key.nameOption.isDefined)
    assert(key.nameOption.get == "key_name")
  }

  test("getOptionできること") {
    val key = UserStore.createKeyWithName("key_name")
    val created = Datastore.getOption(key)
    assert(created.isEmpty)
  }

  test("putできること") {
    val s = new User(UserStore.createKeyWithName("key_name"), "Hoge")
    Datastore.put(s)
  }

  test("countできること") {
    val count = UserStore.query.count
    assert(count == 0)
  }

  test("putしてcountが増えること") {
    val count1 = UserStore.query.count
    assert(count1 == 0)
    val s = new User(UserStore.createKeyWithName("key_name"), "Hoge")
    Datastore.put(s)
    val count2 = UserStore.query.count
    assert(count2 == 1)
  }

  test("putしてcountとasSeqの件数が等しいこと") {
    val s = new User(UserStore.createKeyWithName("key_name"), "Hoge")
    Datastore.put(s)
    val count = UserStore.query.count
    val seq = UserStore.query.asSeq
    assert(count == seq.size)
  }

  test("putしてgetして等しいこと") {
    val key: Key[User] = UserStore.createKeyWithName("key_name")
    val expected = new User(key, "Hoge")
    Datastore.put(expected)
    val actual = Datastore.get(key)
    assert(actual.key == expected.key)
    assert(actual.name == expected.name)
    assert(actual.mobilePhone == expected.mobilePhone)
    assert(actual.name == expected.name)
    assert(actual.height == expected.height)
    assert(actual.deleted == expected.deleted)
    assert(actual.createdAt != null)
  }

  test("2つputしてgetで一度に取得できること") {
    val key1: Key[User] = UserStore.createKeyWithName("key_name1")
    val expected1 = new User(key1, "Hoge1")
    Datastore.put(expected1)
    val key2: Key[User] = UserStore.createKeyWithName("key_name2")
    val expected2 = new User(key2, "Hoge2")
    Datastore.put(expected2)
    val actual = Datastore.get(key1 :: key2 :: Nil)
    assert(actual.size == 2)
    assert(actual(key1).name == expected1.name)
    assert(actual(key1).height == expected1.height)
    assert(actual(key1).deleted == expected1.deleted)
    assert(actual(key1).createdAt != null)
    assert(actual(key2).name == expected2.name)
    assert(actual(key2).height == expected2.height)
    assert(actual(key2).deleted == expected2.deleted)
    assert(actual(key2).createdAt != null)
  }

  // Shared fixture: creates three users (Taro/Jiro/Saburo) with distinct
  // heights/weights; Jiro and Saburo are flagged deleted.
  def createTaroJiroSaburo() = {
    // (sic: local is named "tato" — presumably meant "taro")
    val tato = new User(UserStore.createKeyWithName("key_name_1"))
    tato.name = "Taro"
    tato.height = 190
    tato.weight = 90
    tato.mobilePhone = Some("090-xxxx-xxxx")

    val jiro = new User(UserStore.createKeyWithName("key_name_2"))
    jiro.name = "Jiro"
    jiro.height = 200
    jiro.weight = 90
    jiro.deleted = true

    val saburo = new User(UserStore.createKeyWithName("key_name_3"))
    saburo.name = "Saburo"
    saburo.height = 150
    saburo.weight = 120
    saburo.mobilePhone = Some("080-yyyy-yyyy")
    saburo.deleted = true

    UserStore.create(tato)
    UserStore.create(jiro)
    UserStore.create(saburo)
    assert(UserStore.query.count == 3)
  }

  test("putしてcountとasSeqとasKeySeqの件数がすべて1であること") {
    val s = new User(UserStore.createKeyWithName("key_name"), "Hoge")
    UserStore.create(s)
    val count = UserStore.query.count
    val seq = UserStore.query.asSeq
    val keySeq = UserStore.query.asKeySeq
    assert(count == 1)
    assert(seq.size == 1)
    assert(keySeq.size == 1)
  }

  test("createしてgetしてversionが1、updateして2であること") {
    val key: Key[User] = UserStore.createKeyWithName("key_name")
    val u1 = new User(key, "Hoge")
    assert(u1.version == 0L)
    UserStore.create(u1)
    val u2 = UserStore.get(key)
    assert(u2.version == 1L)
    UserStore.update(u2)
    val u3 = UserStore.get(key)
    assert(u3.version == 2L)
  }

  test("createしてgetして2回UPDATEしてVersionチェックエラーになること") {
    val key: Key[User] = UserStore.createKeyWithName("key_name")
    val u1 = new User(key, "Hoge")
    assert(u1.version == 0L)
    UserStore.create(u1)
    val u21 = UserStore.get(key)
    val u22 = UserStore.get(key)
    assert(u21.version == 1L)
    UserStore.update(u21)
    val u31 = UserStore.get(key)
    assert(u31.version == 2L)
    // Updating the stale copy must fail the optimistic version check.
    intercept[Exception] {
      UserStore.update(u22)
    }
  }

  test("createしてgetして2回SeqでUPDATEしてVersionチェックエラーになること") {
    val key: Key[User] = UserStore.createKeyWithName("key_name")
    val u1 = new User(key, "Hoge")
    assert(u1.version == 0L)
    UserStore.create(u1)
    val u21 = UserStore.get(key)
    val u22 = UserStore.get(key)
    assert(u21.version == 1L)
    UserStore.update(Seq(u21))
    val u31 = UserStore.get(key)
    assert(u31.version == 2L)
    intercept[Exception] {
      UserStore.update(Seq(u21))
    }
  }

  test("createしてgetしてcreatedAtと設定され、updateしてcreatedAtが変更されないこと") {
    val key: Key[User] = UserStore.createKeyWithName("key_name")
    val u1 = new User(key, "Hoge")
    u1.createdAt should be(null)
    UserStore.create(u1)
    val u2 = UserStore.get(key)
    assert(u2.createdAt != null)
    UserStore.update(u2)
    val u3 = UserStore.get(key)
    assert(u3.createdAt != null)
    assert(u3.createdAt == u2.createdAt)
  }

  test("createしてgetしてupdatedAtと設定され、updateしてupdatedAtが変更されること") {
    val key: Key[User] = UserStore.createKeyWithName("key_name")
    val u1 = new User(key, "Hoge")
    u1.updatedAt should be(null)
    UserStore.create(u1)
    val u2 = UserStore.get(key)
    assert(u2.updatedAt != null)
    // Sleep 1 ms so a fresh update gets a strictly later timestamp.
    Thread.sleep(1)
    UserStore.update(u2)
    val u3 = UserStore.get(key)
    assert(u3.updatedAt != null)
    assert(u3.updatedAt != u2.updatedAt)
  }

  test("queryを試す") {
    val key: Key[User] = UserStore.createKeyWithName("key_name")
    val expected = new User(key, "Hoge")
    UserStore.create(expected)
    assert(UserStore.query.count == 1)
    val seq = UserStore.query.asSeq
    assert(seq.size == 1)
    assert(seq.head.key == expected.key)
    val single = UserStore.query.asSingle
    assert(single.key == expected.key)
  }

  test("filterを試す") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val query = Query[User].filter(_.name == "Taro")
    assert(UserStore.asSeq(query).size == 1)
    assert(SeqStore(all).asSeq(query).size == 1)
  }

  test("filterByMetaを試す") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val filter = Query[User].filterByMeta(_.property("name").get.asInstanceOf[IndexedProperty[String]].isEqual("Taro"))
    assert(UserStore.asSeq(filter).size == 1)
    assert(SeqStore(all).asSeq(filter).size == 1)
  }

  test("filterでasSingleを試す") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val filter = Query[User].filter(m => m.name == "Jiro")
    assert(UserStore.asSingle(filter).name == "Jiro")
    assert(SeqStore(all).asSingle(filter).name == "Jiro")
  }

  test("asSingleでヒットしない場合") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val filter = Query[User].filter(m => m.name == "hogehoge")
    intercept[IllegalArgumentException] {
      SeqStore(all).asSingle(filter)
    }
    intercept[IllegalArgumentException] {
      UserStore.asSingle(filter)
    }
  }

  test("asSingleOptionで見つかった場合") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val filter = Query[User].filter(m => m.name == "Jiro")
    {
      val single = UserStore.asSingleOption(filter)
      assert(single.isDefined)
      assert(single.get.name == "Jiro")
    }
    {
      val single = SeqStore(all).asSingleOption(filter)
      assert(single.isDefined)
      assert(single.get.name == "Jiro")
    }
  }

  test("asSingleOptionで見つからない場合") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val filter = Query[User].filter(m => m.name == "hogehoge")
    assert(UserStore.asSingleOption(filter).isEmpty)
    assert(SeqStore(all).asSingleOption(filter).isEmpty)
  }

  test("filterでandを試す") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val filter1 = Query[User].filter(m => (m.name == "Jiro") && (m.deleted == false))
    assert(UserStore.asSeq(filter1).size == 0)
    assert(SeqStore(all).asSeq(filter1).size == 0)

    val filter2 = Query[User].filter(m => (m.name == "Jiro") && (m.deleted == true))
    assert(UserStore.asSeq(filter2).size == 1)
    assert(SeqStore(all).asSeq(filter2).size == 1)

    val filter3 = Query[User].filter(m => (m.name == "Jiro") && (m.deleted == true))
    assert(UserStore.asSeq(filter2).head.key == UserStore.asSingle(filter3).key)
    assert(SeqStore(all).asSeq(filter2).head.key == SeqStore(all).asSeq(filter3).head.key)
  }

  test("filterでorを試す") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val filter1 = Query[User].filter(m => (m.name == "Jiro") || (m.name == "Taro"))
    assert(UserStore.asSeq(filter1).size == 2)
    assert(SeqStore(all).asSeq(filter1).size == 2)

    val filter2 = Query[User].filter(m => (m.name == "Jiro") || (m.name == "Goro"))
    assert(UserStore.asSeq(filter2).size == 1)
    assert(SeqStore(all).asSeq(filter2).size == 1)
  }

  ignore("filterでinを試す") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    //    val filter1 = Query[User].filter(_.name in("Taro", "Jiro", "Saburo"))
    //    assert(filter1.asSeq.size == 3)
    //    assert(filter1.asSeq(all).size == 3)
    //
    //    val filter2 = Query[User].filter(_.name in("Jiro", "Taro"))
    //    assert(filter2.asSeq.size == 2)
    //    assert(filter2.asSeq(all).size == 2)
    //
    //    val filter3 = Query[User].filter(_.name in("Jiro", "Goro"))
    //    assert(filter3.asSeq.size == 1)
    //    assert(filter3.asSeq(all).size == 1)
  }

  test("filterで大小比較を試す") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val filter1 = Query[User].filter(_.height < 190)
    assert(UserStore.asSeq(filter1).size == 1)
    assert(SeqStore(all).asSeq(filter1).size == 1)

    val filter2 = Query[User].filter(_.height <= 190)
    assert(UserStore.asSeq(filter2).size == 2)
    assert(SeqStore(all).asSeq(filter2).size == 2)

    val filter3 = Query[User].filter(_.height > 190)
    assert(UserStore.asSeq(filter3).size == 1)
    assert(SeqStore(all).asSeq(filter3).size == 1)

    val filter4 = Query[User].filter(_.height >= 190)
    assert(UserStore.asSeq(filter4).size == 2)
    assert(SeqStore(all).asSeq(filter4).size == 2)
  }

  test("filterで大小比較を試す(from-to)") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val filter1 = Query[User].filter(m => m.height > 140 && m.height <= 190)
    assert(UserStore.asSeq(filter1).size == 2)
    assert(SeqStore(all).asSeq(filter1).size == 2)

    val filter2 = Query[User].filter(m => m.height > 190 && m.height <= 220)
    assert(UserStore.asSeq(filter2).size == 1)
    assert(SeqStore(all).asSeq(filter2).size == 1)
    //
    //    val filter3 = Query[User].filter(_.height > 190)
    //    assert(UserStore.asSeq(filter3).size == 1)
    //    assert(SeqStore(all).asSeq(filter3).size == 1)
    //
    //    val filter4 = Query[User].filter(_.height >= 190)
    //    assert(UserStore.asSeq(filter4).size == 2)
    //    assert(SeqStore(all).asSeq(filter4).size == 2)
  }

  test("filterでnullでの検索を試す") {
    createTaroJiroSaburo()
    val all = UserStore.query.asSeq

    val filter1 = Query[User].filter(_.mobilePhone == None)
    assert(UserStore.asSeq(filter1).size == 1)
    // FIXME: Option-valued properties cannot be filtered in-memory yet.
    //    assert(filter1.asSeq(all).size == 1)

    val filter2 = Query[User].filter(_.mobilePhone != None)
    assert(UserStore.asSeq(filter2).size == 2)
    // FIXME: Option-valued properties cannot be filtered in-memory yet.
    //    assert(filter2.asSeq(all).size == 2)

    val filter3 = Query[User].filter(_.mobilePhone == null)
    assert(UserStore.asSeq(filter3).size == 1)
    // FIXME: Option-valued properties cannot be filtered in-memory yet.
    //    assert(filter3.asSeq(all).size == 1)

    val filter4 = Query[User].filter(_.mobilePhone != null)
    assert(UserStore.asSeq(filter4).size == 2)
    // FIXME: Option-valued properties cannot be filtered in-memory yet.
    //    assert(filter4.asSeq(all).size == 2)
  }
  //
  //  test("sortを試す") {
  //    createTaroJiroSaburo()
  //    val all = UserStore.query.asSeq
  //
  //
  //    val sort1 = UserStore.query.sort(_.height)
  //    val seq11 = sort1.asSeq
  //    assert(seq11.size == 3)
  //    assert(seq11(0).name == "Saburo")
  //    assert(seq11(1).name == "Taro")
  //    assert(seq11(2).name == "Jiro")
  //    val seq12 = sort1.asSeq(all)
  //    assert(seq12.size == 3)
  //    assert(seq12(0).name == "Saburo")
  //    assert(seq12(1).name == "Taro")
  //    assert(seq12(2).name == "Jiro")
  //
  //    val seq2 = UserStore.query.sort(_.height.desc, _.weight.desc).asSeq
  //    assert(seq2.size == 3)
  //    assert(seq2(0).name == "Jiro")
  //    assert(seq2(1).name == "Taro")
  //    assert(seq2(2).name == "Saburo")
  //
  //    val seq3 = UserStore.query.sort(_.weight.asc, _.height.desc).asSeq
  //    assert(seq3.size == 3)
  //    assert(seq3(0).name == "Jiro")
  //    assert(seq3(1).name == "Taro")
  //    assert(seq3(2).name == "Saburo")
  //
  //  }
  //
  //  test("offsetを試す") {
  //    createTaroJiroSaburo()
  //
  //    // Base order is Saburo, Taro, Jiro (ascending height).
  //    val all = UserStore.query.sort(_.height.asc).asSeq
  //    assert(all.size == 3)
  //    assert(all(0).name == "Saburo")
  //    assert(all(1).name == "Taro")
  //    assert(all(2).name == "Jiro")
  //
  //
  //    val sort1 = UserStore.query.sort(_.height.asc).offset(1)
  //    val seq11 = sort1.asSeq
  //    assert(seq11.size == 2)
  //    assert(seq11(0).name == "Taro")
  //    assert(seq11(1).name == "Jiro")
  //  }
  //
  //  test("limitを試す") {
  //    createTaroJiroSaburo()
  //    val all = UserStore.query.asSeq
  //
  //    val sort1 = UserStore.query.sort(_.height.asc).limit(2)
  //    val seq11 = sort1.asSeq
  //    assert(seq11.size == 2)
  //    assert(seq11(0).name == "Saburo")
  //    assert(seq11(1).name == "Taro")
  //  }
  //
  //  test("offsetとlimitを試す") {
  //    createTaroJiroSaburo()
  //    val all = UserStore.query.asSeq
  //
  //    val sort1 = UserStore.query.sort(_.height.asc).offset(1).limit(1)
  //    val seq11 = sort1.asSeq
  //    assert(seq11.size == 1)
  //    assert(seq11(0).name == "Taro")
  //  }
}
// Store for User entities, assembled from the gae4s store mixins.
// NOTE: the mixin order is part of the linearization; keep it as-is.
class UserStore
  extends EntityStore[User]
  with NamedStore
  with IdentifiableKeyStore
  with AllocatableKeyStore
  with CreatableStore
  with UpdatableStore
  with QueryableStore {

  // Entity metadata derived from the User class.
  val meta = EntityMeta.createMeta[User]
}

// Singleton instance used by the tests above.
object UserStore extends UserStore
| thachi/gae4s | store-extension/src/test/scala/com/xhachi/gae4s/datastore/UserTest.scala | Scala | apache-2.0 | 16,156 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools
import org.scalatest._
import scala.collection.mutable
import java.io.File
import java.util.regex.Pattern
import SuiteDiscoveryHelper.discoverTests
/**
 * Test "friend" that exposes the private members of the
 * `SuiteDiscoveryHelper` singleton via reflection, so the spec below can
 * exercise them directly.
 */
class SuiteDiscoveryHelperFriend(sdt: SuiteDiscoveryHelper.type) {

  // Class object of the SuiteDiscoveryHelper singleton (the "$" module class).
  private val helperClass = Class.forName("org.scalatest.tools.SuiteDiscoveryHelper$")

  // Looks up the named (possibly private) method on the singleton, makes it
  // accessible, and invokes it with the given arguments. Centralizes the
  // reflection boilerplate previously duplicated in every wrapper below.
  private def invokeHelper(methodName: String, paramTypes: Class[_]*)(args: AnyRef*): AnyRef = {
    val method = helperClass.getDeclaredMethod(methodName, paramTypes: _*)
    method.setAccessible(true)
    method.invoke(sdt, args: _*)
  }

  // Boxes a Char for reflective invocation. Character.valueOf replaces the
  // deprecated `new java.lang.Character(...)` constructor.
  private def boxed(c: Char): java.lang.Character = java.lang.Character.valueOf(c)

  def transformToClassName(fileName: String, fileSeparator: Char): Option[String] =
    invokeHelper("org$scalatest$tools$SuiteDiscoveryHelper$$transformToClassName",
      classOf[String], classOf[Char])(fileName, boxed(fileSeparator)).asInstanceOf[Option[String]]

  def extractClassNames(fileNames: Iterator[String], fileSeparator: Char): Iterator[String] =
    invokeHelper("extractClassNames",
      classOf[Iterator[String]], classOf[Char])(fileNames, boxed(fileSeparator)).asInstanceOf[Iterator[String]]

  def isAccessibleSuite(clazz: java.lang.Class[_]): Boolean =
    invokeHelper("isAccessibleSuite", classOf[Class[_]])(clazz).asInstanceOf[Boolean]

  def isRunnable(clazz: java.lang.Class[_]): Boolean =
    invokeHelper("isRunnable", classOf[Class[_]])(clazz).asInstanceOf[Boolean]

  def processFileNames(fileNames: Iterator[String], fileSeparator: Char, loader: ClassLoader, suffixes: Option[Pattern]):
      Set[String] =
    invokeHelper("org$scalatest$tools$SuiteDiscoveryHelper$$processFileNames",
      classOf[Iterator[String]], classOf[Char], classOf[ClassLoader], classOf[Option[Pattern]])(
      fileNames, boxed(fileSeparator), loader, suffixes).asInstanceOf[Set[String]]

  def getFileNamesSetFromFile(file: File, fileSeparator: Char): Set[String] =
    invokeHelper("org$scalatest$tools$SuiteDiscoveryHelper$$getFileNamesSetFromFile",
      classOf[File], classOf[Char])(file, boxed(fileSeparator)).asInstanceOf[Set[String]]

  def isDiscoverableSuite(clazz: java.lang.Class[_]): Boolean =
    invokeHelper("isDiscoverableSuite", classOf[Class[_]])(clazz).asInstanceOf[Boolean]
}
class SuiteDiscoveryHelperSpec extends FunSpec {
val sdtf = new SuiteDiscoveryHelperFriend(SuiteDiscoveryHelper)
val loader = getClass.getClassLoader
val accessibleSuites =
Set(
"org.scalatest.tools.RunnerSpec",
"org.scalatest.tools.SuiteDiscoveryHelperSpec",
"org.scalatest.tools.SuiteDiscoveryHelperSpec2")
//
// Given this Suite's name and one of its test names,
// discoverTests should return a SuiteParam object for this
// Suite and the specified test.
//
def `test discover tests 1` = {
val testSpecs = List(TestSpec("test discover tests 1", false))
val suiteParams = discoverTests(testSpecs, accessibleSuites, loader)
assert(suiteParams.length === 1)
val suiteParam = suiteParams(0)
assert(suiteParam.className === "org.scalatest.tools.SuiteDiscoveryHelperSpec")
assert(suiteParam.testNames.length === 1)
assert(suiteParam.testNames(0) === "test discover tests 1")
assert(suiteParam.wildcardTestNames.length === 0)
assert(suiteParam.nestedSuites.length === 0)
}
//
// Given two test names, where only one is found, discoverTests should
// return a SuiteParam with just the one test name.
//
def `test discover tests 2` = {
val testSpecs =
List(
TestSpec("test discover tests 2", false),
TestSpec("test discover tests X", false)
)
val suiteParams =
discoverTests(testSpecs, accessibleSuites, loader)
assert(suiteParams.length === 1)
val suiteParam = suiteParams(0)
assert(suiteParam.className === "org.scalatest.tools.SuiteDiscoveryHelperSpec")
assert(suiteParam.testNames.length === 1)
assert(suiteParam.testNames(0) === "test discover tests 2")
assert(suiteParam.wildcardTestNames.length === 0)
assert(suiteParam.nestedSuites.length === 0)
}
//
// Given two test names, where both are found, discoverTests should
// return a SuiteParam with both test names.
//
def `test discover tests 3` = {
val testSpecs =
List(
TestSpec("test discover tests 2", false),
TestSpec("test discover tests 1", false)
)
val suiteParams =
discoverTests(testSpecs, accessibleSuites, loader)
assert(suiteParams.length === 1)
val suiteParam = suiteParams(0)
assert(suiteParam.className === "org.scalatest.tools.SuiteDiscoveryHelperSpec")
assert(suiteParam.testNames.length === 2)
assert(suiteParam.testNames(0) === "test discover tests 1")
assert(suiteParam.testNames(1) === "test discover tests 2")
assert(suiteParam.wildcardTestNames.length === 0)
assert(suiteParam.nestedSuites.length === 0)
}
//
// Two test names, where both are in one Suite and one is in
// two Suites.
//
def `test discover tests 4` = {
val testSpecs =
List(
TestSpec("test discover tests 4", false),
TestSpec("test discover tests 1", false)
)
val suiteParams =
discoverTests(testSpecs, accessibleSuites, loader)
assert(suiteParams.length === 2)
val suiteParam0 = suiteParams(0)
assert(suiteParam0.className === "org.scalatest.tools.SuiteDiscoveryHelperSpec")
assert(suiteParam0.testNames.length === 2)
assert(suiteParam0.testNames(0) === "test discover tests 1")
assert(suiteParam0.testNames(1) === "test discover tests 4")
assert(suiteParam0.wildcardTestNames.length === 0)
assert(suiteParam0.nestedSuites.length === 0)
val suiteParam1 = suiteParams(1)
assert(suiteParam1.className === "org.scalatest.tools.SuiteDiscoveryHelperSpec2")
assert(suiteParam1.testNames.length === 1)
assert(suiteParam1.testNames(0) === "test discover tests 4")
assert(suiteParam1.wildcardTestNames.length === 0)
assert(suiteParam1.nestedSuites.length === 0)
}
//
// Discover tests using a substring. This should discover tests in
// two Suites.
//
def `test discover tests A1` = {
val testSpecs =
List(
TestSpec("test discover tests A", true)
)
val suiteParams =
discoverTests(testSpecs, accessibleSuites, loader)
assert(suiteParams.length === 2)
val suiteParam0 = suiteParams(0)
assert(suiteParam0.className ===
"org.scalatest.tools.SuiteDiscoveryHelperSpec")
assert(suiteParam0.testNames.length === 0)
assert(suiteParam0.wildcardTestNames.length === 1)
assert(suiteParam0.wildcardTestNames(0) === "test discover tests A")
assert(suiteParam0.nestedSuites.length === 0)
val suiteParam1 = suiteParams(1)
assert(suiteParam1.className ===
"org.scalatest.tools.SuiteDiscoveryHelperSpec2")
assert(suiteParam1.testNames.length === 0)
assert(suiteParam1.wildcardTestNames.length === 1)
assert(suiteParam1.wildcardTestNames(0) === "test discover tests A")
assert(suiteParam1.nestedSuites.length === 0)
}
def `test transform to class name` = {
assert(sdtf.transformToClassName("bob.class", '/') === Some("bob"))
assert(sdtf.transformToClassName("a.b.c.bob.class", '/') === Some("a.b.c.bob"))
assert(sdtf.transformToClassName("a.b.c.bob", '/') === None)
assert(sdtf.transformToClassName("", '/') === None)
assert(sdtf.transformToClassName("notdotclass", '/') === None)
assert(sdtf.transformToClassName(".class", '/') === None)
assert(sdtf.transformToClassName("a/b/c/bob.class", '/') === Some("a.b.c.bob"))
assert(sdtf.transformToClassName("a/b/c/bob", '/') === None)
assert(sdtf.transformToClassName("/.class", '/') === None)
assert(sdtf.transformToClassName("..class", '/') === Some("."))
assert(sdtf.transformToClassName("a\\\\b\\\\c\\\\bob.class", '\\\\') === Some("a.b.c.bob"))
assert(sdtf.transformToClassName("a\\\\b\\\\c\\\\bob", '\\\\') === None)
assert(sdtf.transformToClassName("\\\\.class", '\\\\') === None)
}
def `test is accessible suite` = {
assert(sdtf.isAccessibleSuite(classOf[SuiteDiscoveryHelperSpec]))
assert(!sdtf.isAccessibleSuite(classOf[PackageAccessSuite]))
assert(!sdtf.isAccessibleSuite(classOf[PackageAccessConstructorSuite]))
assert(!sdtf.isAccessibleSuite(classOf[Suite]))
assert(!sdtf.isAccessibleSuite(classOf[Object]))
}
def `test extract class names` = {
assert(sdtf.extractClassNames(List("bob.class").iterator, '/').toList === List("bob"))
assert(sdtf.extractClassNames(List("bob.class", "manifest.txt", "a/b/c/bob.class").iterator, '/').toList === List("bob", "a.b.c.bob"))
assert(sdtf.extractClassNames(List("bob.class", "manifest.txt", "a\\\\b\\\\c\\\\bob.class").iterator, '\\\\').toList === List("bob", "a.b.c.bob"))
assert(sdtf.extractClassNames(List("bob.class", "manifest.txt", "/a/b/c/bob.class").iterator, '/').toList === List("bob", "a.b.c.bob"))
}
def `test process file names` = {
val loader = getClass.getClassLoader
val discoveredSet1 = sdtf.processFileNames(List("doesNotExist.txt", "noSuchfile.class").iterator, '/', loader, None)
assert(discoveredSet1.isEmpty)
val discoveredSet2 = sdtf.processFileNames(List("org/scalatest/EasySuite.class", "noSuchfile.class", "org/scalatest/FastAsLight.class").iterator, '/', loader, None)
assert(discoveredSet2 === Set("org.scalatest.EasySuite"))
val fileNames3 =
List(
"org/scalatest/EasySuite.class",
"org/scalatest/RunnerSuite.class",
"org/scalatest/SlowAsMolasses.class",
"org/scalatest/SuiteSuite.class",
"noSuchfile.class",
"org/scalatest/FastAsLight.class"
)
val classNames3 =
Set(
"org.scalatest.EasySuite",
// "org.scalatest.RunnerSuite", dropped this when moved RunnerSuite to tools
"org.scalatest.SuiteSuite"
)
val discoveredSet3 = sdtf.processFileNames(fileNames3.iterator, '/', loader, None)
assert(discoveredSet3 === classNames3)
// Test with backslashes
val fileNames4 =
List(
"org\\\\scalatest\\\\EasySuite.class",
"org\\\\scalatest\\\\RunnerSuite.class",
"org\\\\scalatest\\\\SlowAsMolasses.class",
"org\\\\scalatest\\\\SuiteSuite.class",
"noSuchfile.class",
"org\\\\scalatest\\\\FastAsLight.class"
)
val discoveredSet4 = sdtf.processFileNames(fileNames4.iterator, '\\\\', loader, None)
assert(discoveredSet4 === classNames3)
// Test with leading slashes
val fileNames5 =
List(
"/org/scalatest/EasySuite.class",
"/org/scalatest/RunnerSuite.class",
"/org/scalatest/SlowAsMolasses.class",
"/org/scalatest/SuiteSuite.class",
"/noSuchfile.class",
"/org/scalatest/FastAsLight.class"
)
val discoveredSet5 = sdtf.processFileNames(fileNames5.iterator, '/', loader, None)
assert(discoveredSet5 === classNames3)
// Test for specified suffixes only
val fileNames6 =
List(
"/org/scalatest/EasySuite.class",
"/org/scalatest/RunnerSuite.class",
"/org/scalatest/SlowAsMolasses.class",
"/org/scalatest/SuiteSuite.class",
"/org/scalatest/FilterSpec.class",
"/noSuchfile.class",
"/org/scalatest/FastAsLight.class"
)
val classNames4 =
Set(
"org.scalatest.EasySuite",
"org.scalatest.SuiteSuite",
"org.scalatest.FilterSpec"
)
val discoveredSet6 = sdtf.processFileNames(fileNames6.iterator, '/', loader, Some(Pattern.compile(".*(Suite)$")))
assert(discoveredSet6 === classNames3)
val discoveredSet7 = sdtf.processFileNames(fileNames6.iterator, '/', loader, Some(Pattern.compile(".*(Spec|Suite)$")))
assert(discoveredSet7 === classNames4)
}
def `test get file names set from file` = {
assert(sdtf.getFileNamesSetFromFile(new File("harness/fnIteratorTest/empty.txt"), '/') === Set("empty.txt"))
/*
This one doesn't work now that I've checked the harness into subversion, because it finds the svn files.
So I need to first copy just the files I want somewhere, then run this.
assert(sdtf.getFileNamesSetFromFile(new File("harness/fnIteratorTest"), '/') === Set("subDir2/inSubDir2.class",
"subDir2/subSubDir/inSubSubDir.class", "empty.txt", "empty.class", "subDir1/inSubDir1.class"))
*/
}
  // isDiscoverableSuite should accept an ordinary suite class and reject a
  // class carrying the @DoNotDiscover annotation.
  def `test is discoverable suite` = {
    assert(sdtf.isDiscoverableSuite(classOf[SuiteDiscoveryHelperSpec]))
    // Locally declared class annotated to be excluded from discovery.
    @DoNotDiscover class NotDiscoverable {}
    assert(!sdtf.isDiscoverableSuite(classOf[NotDiscoverable]))
  }
  // Exercises isRunnable against classes with and without a @WrapWith
  // annotation. From the assertions below: a plain class, a plain Suite, a
  // class wrapped with a no-useful-constructor suite, and a class wrapped
  // with a wrong-constructor suite are all not runnable; only the
  // SomeApiClass/SomeApiSubClass fixtures (declared elsewhere) are runnable.
  def `test is runnable` = {
    class NormalClass {}
    class SuiteClass extends Suite
    @WrapWith(classOf[SuiteClass])
    class AnnotateDefaultConstructor
    class WrongSuiteClass(testValue: String) extends Suite
    @WrapWith(classOf[WrongSuiteClass])
    class AnnotateWrongConstructor
    assert(!sdtf.isRunnable(classOf[NormalClass]))
    assert(!sdtf.isRunnable(classOf[SuiteClass]))
    assert(!sdtf.isRunnable(classOf[AnnotateDefaultConstructor]))
    assert(!sdtf.isRunnable(classOf[AnnotateWrongConstructor]))
    assert(sdtf.isRunnable(classOf[SomeApiClass]))
    assert(sdtf.isRunnable(classOf[SomeApiSubClass]))
  }
}
//
// This class is just used by tests in SuiteDiscoveryHelperSpec
// for testing Suite discovery by test name.
//
class SuiteDiscoveryHelperSpec2 extends FunSpec {
  // Deliberately empty test methods: only the method names matter, since the
  // discovery-by-test-name checks in SuiteDiscoveryHelperSpec look them up.
  def `test discover tests 4`: Unit = ()
  def `test discover tests A2`: Unit = ()
  def `test discover tests A3`: Unit = ()
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/tools/SuiteDiscoveryHelperSuite.scala | Scala | apache-2.0 | 15,014 |
import sbt._
import Keys._
import com.jsuereth.sbtpgp.SbtPgp
import com.typesafe.sbt.packager.universal.{ UniversalPlugin, UniversalDeployPlugin }
import com.typesafe.sbt.packager.debian.{ DebianPlugin, DebianDeployPlugin }
import com.typesafe.sbt.packager.rpm.{ RpmPlugin, RpmDeployPlugin }
import com.jsuereth.sbtpgp.gpgExtension
object PackageSignerPlugin extends sbt.AutoPlugin {
  // Activate automatically on every project that also enables the required plugins.
  override def trigger = allRequirements
  override def requires = SbtPgp && UniversalDeployPlugin && DebianDeployPlugin && RpmDeployPlugin
  import com.jsuereth.sbtpgp.PgpKeys._
  import UniversalPlugin.autoImport._
  import DebianPlugin.autoImport._
  import RpmPlugin.autoImport._
  // Install the signing/publishing settings in each packaging configuration.
  override def projectSettings: Seq[Setting[_]] =
    inConfig(Universal)(packageSignerSettings) ++
    inConfig(Debian)(packageSignerSettings) ++
    inConfig(Rpm)(packageSignerSettings)
  /** Returns a copy of `art` with its file extension replaced by `ext`. */
  def subExtension(art: Artifact, ext: String): Artifact =
    art.copy(extension = ext)
  def packageSignerSettings: Seq[Setting[_]] = Seq(
    signedArtifacts := {
      val artifacts = packagedArtifacts.value
      val r = pgpSigner.value
      val skipZ = (skip in pgpSigner).value
      val s = streams.value
      if (!skipZ) {
        // Pair every packaged artifact with a detached signature artifact:
        // same artifact, extension suffixed with the gpg extension.
        artifacts flatMap { case (art, f) =>
          Seq(art -> f,
            subExtension(art, art.extension + gpgExtension) ->
              r.sign(f, file(f.getAbsolutePath + gpgExtension), s))
        }
      }
      else artifacts // signing skipped: publish only the unsigned artifacts
    },
    publishSignedConfiguration := Classpaths.publishConfig(
      signedArtifacts.value,
      None,
      resolverName = Classpaths.getPublishTo(publishTo.value).name,
      checksums = (checksums in publish).value,
      logging = ivyLoggingLevel.value,
      overwrite = isSnapshot.value),
    publishLocalSignedConfiguration := Classpaths.publishConfig(
      signedArtifacts.value,
      None,
      resolverName = "local",
      checksums = (checksums in publish).value,
      logging = ivyLoggingLevel.value,
      overwrite = isSnapshot.value),
    publishSigned := Classpaths.publishTask(publishSignedConfiguration, deliver).value,
    publishLocalSigned := Classpaths.publishTask(publishLocalSignedConfiguration, deliver).value
  )
}
| xuwei-k/xsbt | launcher-package/project/PackageSignerPlugin.scala | Scala | apache-2.0 | 2,216 |
package org.etl.process.onethread
// NOTE(review): empty class — presumably a stub for a "zip" step of the
// one-thread ETL process; confirm intended implementation.
class ZipAction {
} | jpvelsamy/sparrow | sparrow-server/src/main/scala/org/etl/process/onethread/ZipAction.scala | Scala | apache-2.0 | 57 |
/*
* Copyright 2009-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.norbert.network.common
import org.specs.SpecificationWithJUnit
import java.lang.NullPointerException
import org.specs.mock.Mockito
import com.linkedin.norbert.network.InvalidMessageException
import com.google.protobuf.Message
import com.linkedin.norbert.protos.NorbertExampleProtos
// Specification for MessageRegistry: registration, lookup, and validation of
// protobuf request/response message pairs.
class MessageRegistrySpec extends SpecificationWithJUnit with Mockito {
  val messageRegistry = new MessageRegistry
  // Shared request-message fixture used by every example below.
  val proto = NorbertExampleProtos.Ping.newBuilder.setTimestamp(System.currentTimeMillis).build

  "MessageRegistry" should {
    "throw a NullPointerException if requestMessage is null" in {
      messageRegistry.registerMessage(null, null) must throwA[NullPointerException]
    }

    // Fixed typo in the example name: "InvalidMessageExceptio" -> "InvalidMessageException".
    "throw an InvalidMessageException if the requestMessage isn't registered" in {
      messageRegistry.hasResponse(proto) must throwA[InvalidMessageException]
      messageRegistry.responseMessageDefaultInstanceFor(proto) must throwA[InvalidMessageException]
    }

    "contains returns true if the specified request message has been registered" in {
      // (Removed an unused mock[Message] local that played no part in this example.)
      messageRegistry.contains(proto) must beFalse
      messageRegistry.registerMessage(proto, proto)
      messageRegistry.contains(proto) must beTrue
    }

    "return true for hasResponse if the responseMessage is not null" in {
      messageRegistry.registerMessage(proto, null)
      messageRegistry.hasResponse(proto) must beFalse
      messageRegistry.registerMessage(proto, proto)
      messageRegistry.hasResponse(proto) must beTrue
    }

    "return true if the response message is of the correct type" in {
      val name = "norbert.PingResponse"
      messageRegistry.registerMessage(proto, null)
      messageRegistry.validResponseFor(proto, name) must beFalse
      messageRegistry.registerMessage(proto, proto)
      // A Ping request registered with a Ping response: "PingResponse" is not valid,
      // but the registered response's own descriptor name is.
      messageRegistry.validResponseFor(proto, name) must beFalse
      messageRegistry.validResponseFor(proto, proto.getDescriptorForType.getFullName) must beTrue
    }
  }
}
| rhavyn/norbert | network/src/test/scala/com/linkedin/norbert/network/common/MessageRegistrySpec.scala | Scala | apache-2.0 | 2,599 |
package com.twitter.app
import com.twitter.util._
import scala.collection.mutable.{ArrayBuffer, HashMap}
import scala.collection.immutable.TreeSet
import java.net.InetSocketAddress
/**
* A typeclass providing evidence for parsing type `T`
* as a flag.
*/
trait Flaggable[T] {
  /** Parses the raw command-line string into a `T`; expected to throw on malformed input. */
  def parse(s: String): T
  /** Renders `t` back into a string form; by default the value's toString. */
  def show(t: T): String = t.toString
  /** When defined, the flag may be given with no value and takes this default (e.g. booleans). */
  def default: Option[T] = None
}
/**
* Default `Flaggable` implementations.
*/
object Flaggable {
  /** Builds a Flaggable whose flag must always be supplied an explicit value. */
  def mandatory[T](f: String => T) = new Flaggable[T] {
    def parse(s: String) = f(s)
  }

  implicit val ofBoolean = new Flaggable[Boolean] {
    // Booleans may be passed as a bare "-flag", which defaults to true.
    override def default = Some(true)
    def parse(s: String) = s.toBoolean
  }

  implicit val ofString = mandatory(identity)
  implicit val ofInt = mandatory(_.toInt)
  implicit val ofLong = mandatory(_.toLong)
  implicit val ofFloat = mandatory(_.toFloat)
  implicit val ofDouble = mandatory(_.toDouble)
  implicit val ofDuration = mandatory(Duration.parse(_))
  implicit val ofStorageUnit = mandatory(StorageUnit.parse(_))

  private val defaultTimeFormat = new TimeFormat("yyyy-MM-dd HH:mm:ss Z")
  implicit val ofTime = mandatory(defaultTimeFormat.parse(_))

  implicit object ofInetSocketAddress extends Flaggable[InetSocketAddress] {
    // Accepts "host:port" or ":port" (wildcard host).
    def parse(v: String) = v.split(":") match {
      case Array("", p) =>
        new InetSocketAddress(p.toInt)
      case Array(h, p) =>
        new InetSocketAddress(h, p.toInt)
      case _ =>
        throw new IllegalArgumentException
    }

    // Wildcard addresses render back with an empty host, mirroring parse.
    override def show(addr: InetSocketAddress) =
      "%s:%d".format(
        Option(addr.getAddress) match {
          case Some(a) if a.isAnyLocalAddress => ""
          case _ => addr.getHostName
        },
        addr.getPort)
  }

  // "t,u" pairs. Element Flaggables must not define defaults, since a missing
  // component cannot be distinguished in the comma-separated encoding.
  implicit def ofTuple[T: Flaggable, U: Flaggable] = new Flaggable[(T, U)] {
    private val tflag = implicitly[Flaggable[T]]
    private val uflag = implicitly[Flaggable[U]]

    assert(!tflag.default.isDefined)
    assert(!uflag.default.isDefined)

    def parse(v: String) = v.split(",") match {
      case Array(t, u) => (tflag.parse(t), uflag.parse(u))
      case _ => throw new IllegalArgumentException("not a 't,u'")
    }

    override def show(tup: (T, U)) = {
      val (t, u) = tup
      tflag.show(t)+","+uflag.show(u)
    }
  }

  // Comma-separated sequences, e.g. "a,b,c".
  implicit def ofSeq[T: Flaggable] = new Flaggable[Seq[T]] {
    private val flag = implicitly[Flaggable[T]]
    assert(!flag.default.isDefined)
    def parse(v: String): Seq[T] = v.split(",") map flag.parse
    override def show(seq: Seq[T]) = seq map flag.show mkString ","
  }

  // Comma-separated "k=v" maps; values themselves may contain commas.
  implicit def ofMap[K: Flaggable, V: Flaggable] = new Flaggable[Map[K, V]] {
    private val kflag = implicitly[Flaggable[K]]
    private val vflag = implicitly[Flaggable[V]]

    assert(!kflag.default.isDefined)
    assert(!vflag.default.isDefined)

    def parse(in: String): Map[K, V] = {
      val tuples = in.split(',').foldLeft(Seq.empty[String]) {
        case (acc, s) if !s.contains('=') =>
          // In order to support comma-separated values, we concatenate
          // consecutive tokens that don't contain equals signs.
          // NOTE(review): assumes the first token contains '=' — acc.init/acc.last
          // would throw on input like "a,b=1"; presumably such input is invalid.
          acc.init :+ (acc.last + ',' + s)
        case (acc, s) => acc :+ s
      }

      tuples map { tup =>
        tup.split("=") match {
          case Array(k, v) => (kflag.parse(k), vflag.parse(v))
          case _ => throw new IllegalArgumentException("not a 'k=v'")
        }
      } toMap
    }

    override def show(out: Map[K, V]) = {
      out.toSeq map { case (k, v) => k.toString + "=" + v.toString } mkString(",")
    }
  }
}
/** Raised when parsing flag `which` fails; `cause` carries the underlying error. */
case class FlagParseException(which: String, cause: Throwable)
  extends Exception(cause)
/** Raised to signal that the usage message should be shown (e.g. -help was passed). */
case class FlagUsageError(usage: String) extends Exception
/** Raised when a flag that requires a value is supplied none. */
class FlagValueRequiredException extends Exception("flag value is required")
/** Raised when an argument names a flag that has not been defined. */
class FlagUndefinedException extends Exception("flag undefined")
/**
* A single flag, instantiated by a [[com.twitter.app.Flags]] instance.
* Its current value is extracted with `apply()`.
*
* @see [[com.twitter.app.Flags]]
*/
class Flag[T: Flaggable] private[app](val name: String, val help: String, defaultOrUsage: Either[() => T, String]) {
  // Left(thunk)  -> flag with a lazily evaluated default value.
  // Right(usage) -> mandatory flag; the string is its usage/type description.
  private[app] def this(name: String, help: String, default: => T) = this(name, help, Left(() => default))
  private[app] def this(name: String, help: String, usage: String) = this(name, help, Right(usage))

  protected val flaggable = implicitly[Flaggable[T]]

  // The parsed value, if any; @volatile so readers see writes from other threads.
  @volatile private[this] var value: Option[T] = None
  protected def getValue: Option[T] = value

  // Re-evaluates the by-name default on every call.
  private def default: Option[T] = defaultOrUsage.left.toOption map { d => d() }

  private def valueOrDefault: Option[T] = getValue orElse default

  /**
   * Return this flag's current value. The default value is returned
   * when the flag has not otherwise been set.
   */
  def apply(): T = valueOrDefault getOrElse { throw new IllegalArgumentException }

  /** Reset this flag's value */
  def reset() { value = None }

  /** True if the flag has been set */
  def isDefined = getValue.isDefined

  /** Get the value if it has been set */
  def get: Option[T] = getValue

  /** String representation of this flag's default value */
  // NOTE(review): throws IllegalArgumentException for mandatory flags (no default).
  def defaultString = flaggable.show(default getOrElse { throw new IllegalArgumentException })

  // One line of the usage message: "-name='default': help" or "-name=<type>: help".
  def usageString =
    defaultOrUsage match {
      case Left(_) => " -%s='%s': %s".format(name, defaultString, help)
      case Right(usage) => " -%s=<%s>: %s".format(name, usage, help)
    }

  /**
   * String representation of this flag in -foo='bar' format,
   * suitable for being used on the command line.
   */
  override def toString = {
    valueOrDefault match {
      case None => "-" + name + "=<unset>"
      case Some(v) => "-" + name + "='" + flaggable.show(v).replaceAll("'", "'\\"'\\"'") + "'"
    }
  }

  /** Parse value `raw` into this flag. */
  def parse(raw: String) {
    value = Some(flaggable.parse(raw))
  }

  /** Parse this flag with no argument. */
  def parse() {
    value = flaggable.default
  }

  // True when the flag may appear without a value (its Flaggable supplies one).
  def noArgumentOk = flaggable.default.isDefined
}
/**
* A simple flags implementation. We support only two formats:
*
* for flags with optional values (booleans):
* -flag, -flag=value
* for flags with required values:
* -flag[= ]value
*
* That's it. These can be parsed without ambiguity.
*
* There is no support for mandatory arguments: That is not what
* flags are for.
*
* Flags' `apply` adds a new flag to to the flag set, so it is idiomatic
* to assign instances of `Flags` to a singular `flag`:
*
* {{{
* val flag = new Flags("myapp")
* val i = flag("i", 123, "iteration count")
* }}}
*
* Global flags, detached from a particular `Flags` instance, but
* accessible to all, are defined by [[com.twitter.app.GlobalFlag]].
*/
class Flags(argv0: String, includeGlobal: Boolean) {
  def this(argv0: String) = this(argv0, false)

  // All locally registered flags, keyed by name. Access is guarded by
  // synchronizing on this instance.
  private[this] val flags = new HashMap[String, Flag[_]]

  @volatile private[this] var cmdUsage = ""

  // Add a help flag by default
  private[this] val helpFlag = this("help", false, "Show this help")

  /** Registers `f`, warning on stderr when a flag of that name already exists. */
  def add(f: Flag[_]) = synchronized {
    if (flags contains f.name)
      System.err.printf("Flag %s already defined!\\n", f.name)
    flags(f.name) = f
  }

  /** Resets every registered flag to its unparsed state. */
  def reset() = synchronized {
    flags foreach { case (_, f) => f.reset() }
  }

  // Global flags are consulted only when this instance was built with includeGlobal.
  private[this] def resolveGlobalFlag(f: String) =
    if (includeGlobal) GlobalFlag.get(f) else None

  // Locally registered flags take precedence over global ones.
  private[this] def resolveFlag(f: String): Option[Flag[_]] =
    synchronized { flags.get(f) orElse resolveGlobalFlag(f) }

  private[this] def hasFlag(f: String) = resolveFlag(f).isDefined
  private[this] def flag(f: String) = resolveFlag(f).get

  /**
   * Parses `args`, filling in the values of matching flags.
   *
   * @param args the command-line arguments
   * @param undefOk when true, arguments naming unknown flags are returned in
   *                the residue instead of raising FlagParseException
   * @return the arguments not consumed by flag parsing
   * @throws FlagParseException on undefined flags (unless undefOk) or bad values
   * @throws FlagUsageError when the -help flag was set
   */
  def parse(
    args: Array[String],
    undefOk: Boolean = false
  ): Seq[String] = synchronized {
    reset()
    val remaining = new ArrayBuffer[String]
    var i = 0
    while (i < args.size) {
      val a = args(i)
      i += 1
      if (a == "--") {
        // "--" terminates flag parsing; everything after it is residue.
        remaining ++= args.slice(i, args.size)
        i = args.size
      } else if (a startsWith "-") {
        a drop 1 split("=", 2) match {
          // There seems to be a bug Scala's pattern matching
          // optimizer that leaves `v' dangling in the last case if
          // we make this a wildcard (Array(k, _@_*))
          case Array(k) if !hasFlag(k) =>
            if (undefOk)
              remaining += a
            else
              throw FlagParseException(k, new FlagUndefinedException)

          // Flag isn't defined
          case Array(k, _) if !hasFlag(k) =>
            if (undefOk)
              remaining += a
            else
              throw FlagParseException(k, new FlagUndefinedException)

          // Optional argument without a value
          case Array(k) if flag(k).noArgumentOk =>
            // Bug fix: use flag(k), which also resolves global flags. The
            // previous flags(k) consulted only the local HashMap and threw
            // NoSuchElementException for a global flag (e.g. a global boolean)
            // passed without a value.
            flag(k).parse()

          // Mandatory argument without a value and with no more arguments.
          case Array(k) if i == args.size =>
            throw FlagParseException(k, new FlagValueRequiredException)

          // Mandatory argument with another argument
          case Array(k) =>
            i += 1
            try flag(k).parse(args(i-1)) catch {
              case NonFatal(e) => throw FlagParseException(k, e)
            }

          // Mandatory k=v
          case Array(k, v) =>
            try flag(k).parse(v) catch {
              case e: Throwable => throw FlagParseException(k, e)
            }
        }
      } else {
        remaining += a
      }
    }

    if (helpFlag())
      throw FlagUsageError(usage)

    remaining
  }

  /**
   * Like parse, but prints the usage message or parse error to stderr and
   * exits the process with status 1 instead of throwing.
   */
  def parseOrExit1(args: Array[String], undefOk: Boolean = true): Seq[String] =
    try parse(args, undefOk) catch {
      case FlagUsageError(usage) =>
        System.err.println(usage)
        System.exit(1)
        throw new IllegalStateException
      case FlagParseException(k, cause) =>
        System.err.println("Error parsing flag %s: %s".format(k, cause.getMessage))
        System.err.println(usage)
        System.exit(1)
        throw new IllegalStateException
      case e =>
        // NOTE: this arm matches any Throwable; kept for compatibility with the
        // original behavior of reporting and exiting on unexpected errors.
        System.err.println("Error parsing flags: %s".format(e.getMessage))
        System.err.println(usage)
        System.exit(1)
        throw new IllegalStateException
    }

  /** Defines and registers a flag with a (by-name) default value. */
  def apply[T: Flaggable](name: String, default: => T, help: String) = {
    val f = new Flag[T](name, help, default)
    add(f)
    f
  }

  /** Defines and registers a mandatory flag; its type name appears in the usage string. */
  def apply[T](name: String, help: String)(implicit _f: Flaggable[T], m: Manifest[T]) = {
    val f = new Flag[T](name, help, m.toString)
    add(f)
    f
  }

  /**
   * Set the flags' command usage; this is a message printed
   * before the flag definitions in the usage string.
   */
  def setCmdUsage(u: String) {
    cmdUsage = u
  }

  /** Renders the usage message: command usage, local flag lines, then global flag lines. */
  def usage: String = synchronized {
    val lines =
      for (k <- flags.keys.toArray.sorted)
      yield flags(k).usageString
    val globalLines = if (!includeGlobal) Seq.empty else {
      GlobalFlag.getAll(getClass.getClassLoader).map(_.usageString).sorted
    }

    val cmd = if (cmdUsage.nonEmpty) cmdUsage+"\\n" else ""

    cmd+argv0+"\\n"+(lines mkString "\\n")+(
      if (globalLines.isEmpty) "" else "\\nglobal flags:\\n"+(globalLines mkString "\\n")
    )
  }

  /**
   * Get all the flags known to this Flags instance
   *
   * @param includeGlobal defaults to the includeGlobal settings of this instance
   * @param classLoader needed to find global flags, defaults to this instance's class loader
   * @return all the flags known to this Flags instance, sorted by name
   */
  def getAll(includeGlobal: Boolean = this.includeGlobal,
             classLoader: ClassLoader = this.getClass.getClassLoader): Iterable[Flag[_]] = synchronized {
    var flags = TreeSet[Flag[_]]()(Ordering.by(_.name)) ++ this.flags.valuesIterator

    if (includeGlobal) {
      flags ++= GlobalFlag.getAll(classLoader).iterator
    }

    flags
  }

  /**
   * Formats all the values of all flags known to this instance into a format suitable for logging
   *
   * @param includeGlobal see getAll above
   * @param classLoader see getAll above
   * @return all the flag values in alphabetical order, grouped into (set, unset)
   */
  def formattedFlagValues(includeGlobal: Boolean = this.includeGlobal,
                          classLoader: ClassLoader = this.getClass.getClassLoader):
    (Iterable[String], Iterable[String]) = {
    val (set, unset) = getAll(includeGlobal, classLoader).partition { _.get.isDefined }

    (set.map { _ + " \\\\" }, unset.map { _ + " \\\\" })
  }

  /**
   * Creates a string containing all the values of all flags known to this instance in a format suitable for logging
   *
   * @param includeGlobal see getAll above
   * @param classLoader see getAll above
   * @return A string suitable for logging
   */
  def formattedFlagValuesString(includeGlobal: Boolean = this.includeGlobal,
                                classLoader: ClassLoader = this.getClass.getClassLoader): String = {
    val (set, unset) = formattedFlagValues(includeGlobal, classLoader)
    val lines = Seq("Set flags:") ++
      set ++
      Seq("Unset flags:") ++
      unset

    lines.mkString("\\n")
  }
}
/**
* Declare a global flag by extending this class with an object.
*
* {{{
* object MyFlag extends GlobalFlag("my", "default value", "my global flag")
* }}}
*
* All such global flag declarations in a given classpath are
* visible, and are used by, [[com.twitter.app.App]].
*
* The name of the flag is the fully-qualified classname, for
* example, the flag
*
* {{{
* package com.twitter.server
*
* object port extends GlobalFlag(8080, "the TCP port to which we bind")
* }}}
*
* is accessed by the name `com.twitter.server.port`.
*
* Global flags may also be set by Java system properties with keys
* named in the same way, however values supplied by flags override
* those supplied by system properties.
*
*/
@GlobalFlagVisible
class GlobalFlag[T] private[app](defaultOrUsage: Either[() => T, String], help: String)(implicit _f: Flaggable[T])
    extends Flag[T](null, help, defaultOrUsage) {
  def this(default: T, help: String)(implicit _f: Flaggable[T]) = this(Left(() => default), help)
  def this(help: String)(implicit _f: Flaggable[T], m: Manifest[T]) = this(Right(m.toString), help)

  // Unfortunately, `getClass` in the the extends... above
  // doesn't give the right answer.
  override val name = getClass.getName.stripSuffix("$")

  // An explicitly parsed value wins; otherwise fall back to the identically
  // named Java system property, if it parses. Parse failures are logged and
  // treated as absent.
  protected override def getValue = super.getValue orElse {
    Option(System.getProperty(name)) flatMap { p =>
      try Some(flaggable.parse(p)) catch {
        case NonFatal(exc) =>
          java.util.logging.Logger.getLogger("").log(
            java.util.logging.Level.SEVERE,
            "Failed to parse system property "+name+" as flag", exc)
          None
      }
    }
  }

  // Reflective hook used by GlobalFlag.get to reach the singleton instance.
  def getGlobalFlag: Flag[_] = this
}
private object GlobalFlag {
  /** Looks up the global flag object with fully-qualified name `f`, if one exists. */
  def get(f: String): Option[Flag[_]] = try {
    val cls = Class.forName(f)
    val m = cls.getMethod("getGlobalFlag")
    Some(m.invoke(null).asInstanceOf[Flag[_]])
  } catch {
    case _: ClassNotFoundException
      | _: NoSuchMethodException
      | _: IllegalArgumentException => None
  }

  /** Scans the classpath for all @GlobalFlagVisible singletons and collects their flags. */
  def getAll(loader: ClassLoader) = {
    val markerClass = classOf[GlobalFlagVisible]
    val flags = new ArrayBuffer[Flag[_]]

    // Scala objects compile to classes whose names end in "$"; drop the suffix
    // before resolving via get().
    for (info <- ClassPath.browse(loader)) try {
      val cls = info.load()
      if (cls.isAnnotationPresent(markerClass) && (info.name endsWith "$")) {
        get(info.name.dropRight(1)) match {
          case Some(f) => flags += f
          case None => println("failed for "+info.name) // NOTE(review): stdout debug output; consider proper logging
        }
      }
    } catch {
      case _: IllegalStateException
        | _: NoClassDefFoundError
        | _: ClassNotFoundException =>
    }

    flags
  }
}
| mosesn/util | util-app/src/main/scala/com/twitter/app/Flag.scala | Scala | apache-2.0 | 15,649 |
package org.jetbrains.plugins.scala
package lang
package refactoring
package namesSuggester
import com.intellij.openapi.util.text.StringUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScLiteral, ScReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.{ScDesignatorType, ScProjectionType}
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
import org.jetbrains.plugins.scala.lang.refactoring.ScalaNamesValidator.isIdentifier
import org.jetbrains.plugins.scala.lang.refactoring.namesSuggester.genericTypes.GenericTypeNamesProvider
import org.jetbrains.plugins.scala.lang.refactoring.util.{ScalaTypeValidator, ScalaValidator, ScalaVariableValidator}
/**
* @author Alexander Podkhalyuzin
* @since 26.06.2008
*/
object NameSuggester {
  /** Suggests variable names for `expression`, derived from its type(s) and its syntactic shape. */
  def suggestNames(expression: ScExpression)
                  (implicit validator: ScalaVariableValidator = ScalaVariableValidator.empty): Seq[String] = {
    val names = collectTypes(expression).reverse
      .flatMap(namesByType(_)) ++
      namesByExpression(expression)

    collectNames(names, validator)
  }

  // Gathers candidate types of the expression. Unit-typed entries sort first
  // here; combined with the .reverse in suggestNames this de-prioritizes them.
  private[this] def collectTypes(expression: ScExpression): Seq[ScType] = {
    val types = expression.getType().toOption ++
      expression.getTypeWithoutImplicits().toOption ++
      expression.getTypeIgnoreBaseType.toOption

    types.toSeq.sortWith {
      case (_, t) if t.isUnit => true
      case _ => false
    }
  }

  // De-duplicates while preserving order, maps the keyword "class" to "clazz",
  // drops non-identifiers, falls back to "value" when nothing remains, and
  // finally validates each name (e.g. against names already in scope).
  private[this] def collectNames(names: Seq[String], validator: ScalaValidator): Seq[String] = {
    import scala.collection.mutable

    val filteredNames = mutable.LinkedHashSet(names: _*).map {
      case "class" => "clazz"
      case name => name
    }.filter(isIdentifier)

    val collected = filteredNames.toSeq match {
      case Seq() => Seq("value")
      case seq => seq.reverse
    }

    mutable.LinkedHashSet(collected: _*)
      .map(validator.validateName)
      .toSeq
  }

  /** Suggests names from a type alone (no expression available). */
  def suggestNamesByType(`type`: ScType)
                        (implicit validator: ScalaTypeValidator = ScalaTypeValidator.empty): Seq[String] =
    collectNames(namesByType(`type`), validator)

  private[namesSuggester] def namesByType(`type`: ScType, withPlurals: Boolean = true, shortVersion: Boolean = true): Seq[String] = {
    // Lower-cases `name`, abbreviating to its first `length` chars when shortVersion is set.
    def toLowerCase(name: String, length: Int): String = {
      val lowerCased = name.toLowerCase
      if (shortVersion) lowerCased.substring(0, length) else lowerCased
    }

    def byName(name: String): Seq[String] = name match {
      case "String" => Seq(toLowerCase(name, 3)) // "str" (or "string" when not abbreviating)
      case _ => camelCaseNames(name)
    }

    val stdTypes = `type`.projectContext.stdTypes
    import stdTypes._

    // Conventional abbreviations for primitives, e.g. Int -> "i", Boolean -> "bool".
    def valTypeName(`type`: ValType): String = {
      val typeName = `type`.name

      val length = `type` match {
        case Char | Byte | Int | Long | Double => 1
        case Short | Float => 2
        case Boolean => 4
        case _ => typeName.length
      }

      toLowerCase(typeName, length)
    }

    `type` match {
      case valType: ValType => Seq(valTypeName(valType))
      case ScDesignatorType(e) => byName(e.name)
      case parameterType: TypeParameterType => byName(parameterType.name)
      case ScProjectionType(_, e, _) => byName(e.name)
      case ScCompoundType(Seq(head, _*), _, _) => namesByType(head, withPlurals)
      case _ => GenericTypeNamesProvider.providers.flatMap(_.names(`type`))
    }
  }

  // Derives names from the expression's syntactic form: this/super references,
  // named references, constructor and method calls, string literals, or the
  // assignment/argument context the expression appears in.
  private[this] def namesByExpression: ScExpression => Seq[String] = {
    case _: ScThisReference => Seq("thisInstance")
    case _: ScSuperReference => Seq("superInstance")
    case reference: ScReferenceElement if reference.refName != null => camelCaseNames(reference.refName)
    case definition: ScNewTemplateDefinition =>
      val namesByClass = definition.getType().toOption.toSeq
        .flatMap(namesByType(_))
      val parameters = definition.constructor.toSeq
        .flatMap(_.matchedParameters)

      enhancedNames(namesByClass, parameters)
    case call@ScMethodCall(invoked, _) =>
      enhancedNames(namesByExpression(invoked), call.matchedParameters)
    case literal: ScLiteral if literal.isString =>
      // A string literal that is itself a valid identifier contributes a name.
      Option(literal.getValue).collect {
        case string: String if isIdentifier(string.toLowerCase) => string
      }.flatMap(string => camelCaseNames(string).headOption).toSeq
    case expression =>
      val maybeName = expression.getContext match {
        case x: ScAssignStmt => x.assignName
        case x: ScArgumentExprList => x.matchedParameters.collectFirst {
          case (matchedExpression, parameter) if matchedExpression == expression => parameter
        }.map(_.name)
        case _ => None
      }

      maybeName.toSeq
  }

  // When the call binds an argument to a parameter literally named "name", that
  // argument's derived names are combined with and appended to `names`.
  private[this] def enhancedNames(names: Seq[String], parameters: Seq[(ScExpression, Parameter)]): Seq[String] = {
    val namesByParameters = parameters.collect {
      case (expression, parameter) if parameter.name == "name" => expression
    }.flatMap(namesByExpression)

    names ++ compoundNames(namesByParameters, names) ++ namesByParameters
  }

  // Cartesian concatenation: first + separator + Capitalized(last), e.g. "user" + "Id" -> "userId".
  private[namesSuggester] def compoundNames(firstNames: Seq[String],
                                            lastNames: Seq[String],
                                            separator: String = ""): Seq[String] =
    for {
      firstName <- firstNames
      lastName <- lastNames
    } yield s"$firstName$separator${lastName.capitalize}"

  // Splits a camelCase identifier into candidate suffixes (one per word start),
  // e.g. "listOfNames" -> "listOfNames", "ofNames", "names". The get/set/is
  // accessor prefixes are stripped first; all-caps names are lower-cased whole.
  private[this] def camelCaseNames(name: String): Seq[String] = {
    val actualName = name match {
      case _ if StringUtil.isEmpty(name) =>
        return Seq.empty
      case _ if name.toUpperCase == name =>
        return Seq(name.toLowerCase)
          .map(_.replaceAll(isNotLetter, ""))
      case _ =>
        val beginIndex = name match {
          case _ if name.startsWith("get") => 3
          case _ if name.startsWith("set") => 3
          case _ if name.startsWith("is") => 2
          case _ => 0
        }
        name.substring(beginIndex)
    }

    val names = actualName.zipWithIndex.collect {
      case (char, index) if index == 0 || char.isLetter && char.isUpper =>
        Character.toLowerCase(char) + actualName.substring(index + 1)
    }

    // Trim a single trailing non-letter character, if any.
    names.map(_.replaceFirst(isNotLetter + "$", ""))
  }

  private[this] val isNotLetter = "[^\\\\p{IsAlphabetic}]"
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/refactoring/namesSuggester/NameSuggester.scala | Scala | apache-2.0 | 6,471 |
package com.twitter.finagle.httpx.filter
import com.twitter.finagle.{Service, SimpleFilter}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.httpx.{Request, Response, Status}
import com.twitter.util.{Duration, Future, Return, Stopwatch, Throw}
/**
* Statistic filter.
*
* Add counters:
* status.[code]
* status.[class]
* response_size (deprecated?)
* And metrics:
* time.[code]
* time.[class]
*/
class StatsFilter[REQUEST <: Request](stats: StatsReceiver)
  extends SimpleFilter[REQUEST, Response] {

  private[this] val statusReceiver = stats.scope("status")
  private[this] val timeReceiver = stats.scope("time")
  private[this] val responseSizeStat = stats.stat("response_size")

  def apply(request: REQUEST, service: Service[REQUEST, Response]): Future[Response] = {
    val elapsed = Stopwatch.start()
    val result = service(request)
    result respond { outcome =>
      // A failed future is recorded as if it were an empty 500 response.
      val response = outcome match {
        case Return(rep) => rep
        case Throw(_) => Response(request.version, Status.InternalServerError)
      }
      count(elapsed(), response)
    }
    result
  }

  // Records per-status-code and per-status-class counters/latency, plus response size.
  protected def count(duration: Duration, response: Response) {
    val code = response.statusCode.toString
    val codeClass = (response.statusCode / 100).toString + "XX"
    // TODO: Memoize on status code/class.
    statusReceiver.counter(code).incr()
    statusReceiver.counter(codeClass).incr()
    val millis = duration.inMilliseconds
    timeReceiver.stat(code).add(millis)
    timeReceiver.stat(codeClass).add(millis)
    responseSizeStat.add(response.length)
  }
}
| nkhuyu/finagle | finagle-httpx/src/main/scala/com/twitter/finagle/httpx/filter/StatsFilter.scala | Scala | apache-2.0 | 1,681 |
package willrogers
package controllers
import java.text.SimpleDateFormat
import scala.collection.Seq
import org.joda.time.DateTime
import play.api.data.Form
import play.api.data.Forms.text
import play.api.data.Forms.tuple
import play.api.mvc.Action
import play.api.mvc.Results.Ok
import play.mvc.Controller
import satisfaction.TrackDescriptor
import willrogers.Global
object TrackHistoryPage extends Controller {

  lazy val trackHistory = Global.trackHistory

  // Format accepted for the optional start/end timestamps submitted by the filter form.
  private val TimestampFormat = "yyyy-MM-dd HH:mm:ss"

  /**
   * Parses an optional form-supplied timestamp. An empty string means "no
   * bound" and yields None; a non-empty string is parsed with TimestampFormat
   * (throwing ParseException on malformed input, as the previous inline code
   * did). A fresh SimpleDateFormat is created per call because
   * SimpleDateFormat is not thread-safe.
   */
  private def parseTimestamp(raw: String): Option[DateTime] =
    if (raw.length() > 0) Some(new DateTime(new SimpleDateFormat(TimestampFormat).parse(raw)))
    else None

  /**
   * default loader: shows only recent track history
   */
  def loadHistoryPageAction() = Action {
    val grList = trackHistory.getRecentHistory // by default - only grab recent tracks
    Ok(views.html.trackhistory(grList))
  }

  /**
   * filter based on the desired Track/Goal, as well as start/end time
   */
  val filterHistoryForm = Form(
    tuple(
      "trackName" -> text,
      "forUser" -> text,
      "version" -> text,
      "variant" -> text,
      "goalName"-> text,
      "startTime" -> text,
      "endTime" -> text
    ))

  /** Filters goal runs by track, optional goal name, and optional time window. */
  def filterJobHistory = Action { implicit request =>
    val (trackName, forUser, version, variant, goalName, startTime, endTime) =
      filterHistoryForm.bindFromRequest.get
    // NOTE(review): forUser/version/variant are bound but not used when building
    // the descriptor; presumably reserved for future filtering.
    val trackDesc: TrackDescriptor = TrackDescriptor(trackName)
    val sTime: Option[DateTime] = parseTimestamp(startTime)
    val eTime: Option[DateTime] = parseTimestamp(endTime)
    // A non-empty goal name narrows the query to that goal.
    val grList = goalName match {
      case name if name.length() > 0 => trackHistory.goalRunsForGoal(trackDesc, goalName, sTime, eTime)
      case _ => trackHistory.goalRunsForTrack(trackDesc, sTime, eTime)
    }
    Ok(views.html.trackhistory(grList))
  }

  /**
   * look up all instances of a goal run
   * Following this tutorial: http://stackoverflow.com/questions/16857687/forms-in-scala-play-framework
   */
  val lookupGoalHistoryForm = Form(
    tuple(
      "trackName" -> text,
      "forUser" -> text,
      "version" -> text,
      "variant" -> text,
      "goalName"-> text,
      "witness" -> text
    ))

  /** Looks up all runs of a goal within a track. */
  def lookupJobHistoryGoal = Action { implicit request =>
    val (trackName, forUser, version, variant, goalName, witness) = lookupGoalHistoryForm.bindFromRequest.get
    // FixME: the witness string is not converted yet, so null is passed through
    // (String -> Witness conversion still to be done), matching prior behavior.
    val trackDesc = TrackDescriptor(trackName)
    val grList = trackHistory.lookupGoalRun(trackDesc, goalName, null)
    Ok(views.html.trackhistory(grList))
  }

  /**
   * look up a specific goal run by ID
   */
  val lookupGoalHistoryIDForm = Form(
    "runId" -> text
  )

  /** Looks up a single goal run by its run ID; renders an empty table when absent. */
  def lookupGoalHistoryID = Action { implicit request =>
    val runId = lookupGoalHistoryIDForm.bindFromRequest.get
    val gr = trackHistory.lookupGoalRun(runId)
    gr match {
      case Some(goal) => Ok(views.html.trackhistory(Seq(goal)))
      case None => Ok(views.html.trackhistory(Seq()))
    }
  }

  /**
   * If you want to see everything that ever ran
   */
  def getAllHistoryRuns() = Action {
    val grList = trackHistory.getAllHistory
    Ok(views.html.trackhistory(grList))
  }
}
| jeromebanks/satisfaction | apps/willrogers/app/controllers/TrackHistoryPage.scala | Scala | apache-2.0 | 3,527 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType}
class GroupedIteratorSuite extends SparkFunSuite {

  test("basic") {
    val schema = new StructType().add("i", IntegerType).add("s", StringType)
    val encoder = RowEncoder(schema).resolveAndBind()
    val rows = Seq(Row(1, "a"), Row(1, "b"), Row(2, "c"))
    val groups = GroupedIterator(rows.iterator.map(encoder.toRow),
      Seq('i.int.at(0)), schema.toAttributes)

    // Materialize every group, checking that each key carries exactly one field.
    val observed = groups.map { case (key, values) =>
      assert(key.numFields == 1)
      key.getInt(0) -> values.map(encoder.fromRow).toSeq
    }.toSeq

    val expected = Seq(
      1 -> Seq(rows(0), rows(1)),
      2 -> Seq(rows(2)))
    assert(observed == expected)
  }

  test("group by 2 columns") {
    val schema = new StructType().add("i", IntegerType).add("l", LongType).add("s", StringType)
    val encoder = RowEncoder(schema).resolveAndBind()
    val rows = Seq(
      Row(1, 2L, "a"),
      Row(1, 2L, "b"),
      Row(1, 3L, "c"),
      Row(2, 1L, "d"),
      Row(3, 2L, "e"))
    val groups = GroupedIterator(rows.iterator.map(encoder.toRow),
      Seq('i.int.at(0), 'l.long.at(1)), schema.toAttributes)

    // Keys are (Int, Long) pairs here; values are decoded back to external rows.
    val observed = groups.map { case (key, values) =>
      assert(key.numFields == 2)
      (key.getInt(0), key.getLong(1), values.map(encoder.fromRow).toSeq)
    }.toSeq

    val expected = Seq(
      (1, 2L, Seq(rows(0), rows(1))),
      (1, 3L, Seq(rows(2))),
      (2, 1L, Seq(rows(3))),
      (3, 2L, Seq(rows(4))))
    assert(observed == expected)
  }

  test("do nothing to the value iterator") {
    val schema = new StructType().add("i", IntegerType).add("s", StringType)
    val encoder = RowEncoder(schema).resolveAndBind()
    val rows = Seq(Row(1, "a"), Row(1, "b"), Row(2, "c"))
    val groups = GroupedIterator(rows.iterator.map(encoder.toRow),
      Seq('i.int.at(0)), schema.toAttributes)
    // Count groups without ever consuming the per-group value iterators.
    assert(groups.length == 2)
  }
}
| javalovelinux/SparkGroovyScript | sql/core/src/test/scala/org/apache/spark/sql/execution/GroupedIteratorSuite.scala | Scala | apache-2.0 | 2,928 |
package systems.adaptix.bling.data
import org.specs2.mutable.Specification
import scalikejdbc.SQLSyntax
/**
* Created by nkashyap on 6/4/15.
*/
class FieldInfoSpecification extends Specification {
  "The FieldInfo trait enables the generation of chunks of SQL, as strings, pertaining to the field being represented. These SQL chunks, fieldName and sqlTypeDeclaration, can be used to dynamically generate SQL queries. FieldInfo is in fact a union of the following types:" >> {
    "PlainFieldInfo, which represents a vanilla SQL column." in {
      val field = PlainFieldInfo("name", "type")
      field.fieldName mustEqual "name"
      field.sqlTypeDeclaration mustEqual "type"
    }

    "NotNullFieldInfo, which represents an SQL column which is guaranteed to contain a value." in {
      val field = NotNullFieldInfo("name", "type")
      field.fieldName mustEqual "name"
      field.sqlTypeDeclaration mustEqual "type NOT NULL"
    }

    "PrimaryFieldInfo, which represents an SQL column holding a primary key." in {
      val field = PrimaryFieldInfo("name", "type")
      field.fieldName mustEqual "name"
      field.sqlTypeDeclaration mustEqual "type NOT NULL PRIMARY KEY"
    }

    "AutoIdFieldInfo, which represents an automatically incrementing primary key field." in {
      val field = AutoIdFieldInfo("name")
      field.fieldName mustEqual "name"
      // SERIAL is fixed for auto-id fields: no type argument is taken.
      field.sqlTypeDeclaration mustEqual "SERIAL NOT NULL PRIMARY KEY"
    }
  }
}
| nkashy1/bling | src/test/scala/systems/adaptix/bling/data/FieldInfoSpecification.scala | Scala | mit | 1,526 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.tree.impurity
import java.util.Locale
import org.apache.spark.annotation.{DeveloperApi, Since}
/**
 * Trait for calculating information gain.
 * This trait is used for
 * (a) setting the impurity parameter in [[org.apache.spark.mllib.tree.configuration.Strategy]]
 * (b) calculating impurity values from sufficient statistics.
 *
 * The two `calculate` overloads serve classification (per-label counts) and
 * regression (count / sum / sum-of-squares moments), respectively.
 */
@Since("1.0.0")
trait Impurity extends Serializable {
  /**
   * :: DeveloperApi ::
   * information calculation for multiclass classification
   * @param counts Array[Double] with counts for each label
   * @param totalCount sum of counts for all labels
   * @return information value, or 0 if totalCount = 0
   */
  @Since("1.1.0")
  @DeveloperApi
  def calculate(counts: Array[Double], totalCount: Double): Double
  /**
   * :: DeveloperApi ::
   * information calculation for regression
   * @param count number of instances
   * @param sum sum of labels
   * @param sumSquares summation of squares of the labels
   * @return information value, or 0 if count = 0
   */
  @Since("1.0.0")
  @DeveloperApi
  def calculate(count: Double, sum: Double, sumSquares: Double): Double
}
/**
 * Interface for updating views of a vector of sufficient statistics,
 * in order to compute impurity from a sample.
 *
 * Note: instances of this class do not own the data; they operate on slices of
 * a flat stats array supplied by the caller.
 *
 * @param statsSize Number of Double entries kept per (node, feature, bin).
 */
private[spark] abstract class ImpurityAggregator(val statsSize: Int) extends Serializable {

  /**
   * Merge the stats of one bin into another, in place.
   *
   * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous.
   * @param offset Start index of the destination bin's stats (modified by the merge).
   * @param otherOffset Start index of the source bin's stats (left untouched).
   */
  def merge(allStats: Array[Double], offset: Int, otherOffset: Int): Unit = {
    var idx = 0
    while (idx < statsSize) {
      allStats(offset + idx) += allStats(otherOffset + idx)
      idx += 1
    }
  }

  /**
   * Update stats for one (node, feature, bin) with the given label.
   *
   * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous.
   * @param offset Start index of stats for this (node, feature, bin).
   */
  def update(allStats: Array[Double], offset: Int, label: Double, instanceWeight: Double): Unit

  /**
   * Get an [[ImpurityCalculator]] for a (node, feature, bin).
   *
   * @param allStats Flat stats array, with stats for this (node, feature, bin) contiguous.
   * @param offset Start index of stats for this (node, feature, bin).
   */
  def getCalculator(allStats: Array[Double], offset: Int): ImpurityCalculator
}
/**
 * Stores statistics for one (node, feature, bin) for calculating impurity.
 * Unlike [[ImpurityAggregator]], this class owns its stats array and is bound
 * to a specific (node, feature, bin).
 *
 * @param stats Array of sufficient statistics for a (node, feature, bin).
 */
private[spark] abstract class ImpurityCalculator(val stats: Array[Double]) extends Serializable {

  /** Make a deep copy of this [[ImpurityCalculator]]. */
  def copy: ImpurityCalculator

  /** Calculate the impurity from the stored sufficient statistics. */
  def calculate(): Double

  /**
   * Add the stats from another calculator into this one, in place.
   *
   * @return this calculator, for chaining
   */
  def add(other: ImpurityCalculator): ImpurityCalculator = {
    require(stats.length == other.stats.length,
      s"Two ImpurityCalculator instances cannot be added with different counts sizes." +
      s" Sizes are ${stats.length} and ${other.stats.length}.")
    var idx = 0
    while (idx < other.stats.length) {
      stats(idx) += other.stats(idx)
      idx += 1
    }
    this
  }

  /**
   * Subtract the stats of another calculator from this one, in place.
   *
   * @return this calculator, for chaining
   */
  def subtract(other: ImpurityCalculator): ImpurityCalculator = {
    require(stats.length == other.stats.length,
      s"Two ImpurityCalculator instances cannot be subtracted with different counts sizes." +
      s" Sizes are ${stats.length} and ${other.stats.length}.")
    var idx = 0
    while (idx < other.stats.length) {
      stats(idx) -= other.stats(idx)
      idx += 1
    }
    this
  }

  /** Number of data points accounted for in the sufficient statistics. */
  def count: Long

  /** Prediction which should be made based on the sufficient statistics. */
  def predict: Double

  /** Probability of the label given by [[predict]], or -1 if no probability is available. */
  def prob(label: Double): Double = -1

  /**
   * Return the index of the largest array element (first occurrence wins).
   * Fails if the array is empty.
   */
  protected def indexOfLargestArrayElement(array: Array[Double]): Int = {
    // Strict '>' against a Double.MinValue floor mirrors the original fold:
    // only values strictly above Double.MinValue can ever be selected.
    var bestIndex = -1
    var bestValue = Double.MinValue
    var j = 0
    while (j < array.length) {
      if (array(j) > bestValue) {
        bestIndex = j
        bestValue = array(j)
      }
      j += 1
    }
    if (bestIndex < 0) {
      throw new RuntimeException("ImpurityCalculator internal error:" +
        " indexOfLargestArrayElement failed")
    }
    bestIndex
  }
}
private[spark] object ImpurityCalculator {

  /**
   * Factory for [[ImpurityCalculator]]s: builds the calculator matching the
   * (case-insensitive) impurity name, wrapping the given sufficient statistics.
   *
   * @throws IllegalArgumentException for an unrecognized impurity name
   */
  def getCalculator(impurity: String, stats: Array[Double]): ImpurityCalculator = {
    // Locale.ROOT keeps the lowercasing stable across JVM default locales.
    val normalized = impurity.toLowerCase(Locale.ROOT)
    if (normalized == "gini") {
      new GiniCalculator(stats)
    } else if (normalized == "entropy") {
      new EntropyCalculator(stats)
    } else if (normalized == "variance") {
      new VarianceCalculator(stats)
    } else {
      throw new IllegalArgumentException(
        s"ImpurityCalculator builder did not recognize impurity type: $impurity")
    }
  }
}
| bravo-zhang/spark | mllib/src/main/scala/org/apache/spark/mllib/tree/impurity/Impurity.scala | Scala | apache-2.0 | 6,777 |
package com.monovore.coast.samza.safe
import java.io.File
import java.util
import com.monovore.coast.wire.{Protocol, Serializer}
import org.apache.samza.container.SamzaContainerContext
import org.apache.samza.metrics.MetricsRegistry
import org.apache.samza.serializers.Serde
import org.apache.samza.storage.kv.{BaseKeyValueStorageEngineFactory, KeyValueStore, SerializedKeyValueStore}
import org.apache.samza.storage.{StorageEngine, StorageEngineFactory}
import org.apache.samza.system.{IncomingMessageEnvelope, OutgoingMessageEnvelope, SystemStreamPartition}
import org.apache.samza.task.MessageCollector
import org.apache.samza.util.Logging
import scala.collection.JavaConverters._
/**
 * Per-key state paired with an upstream/downstream offset watermark.
 *
 * `update` only applies work for upstream offsets at or beyond the recorded
 * watermark — offsets below it are skipped (presumably replayed input; the
 * backing implementation defines how pushes are persisted).
 */
trait CoastState[K, V] {

  /** Offset of the next unprocessed upstream message. */
  def upstream: Long

  /** Current downstream offset. */
  def downstream: Long

  /** Current state for `key`. */
  def state(key: K): V

  /** Record new state for `key` together with the offsets that produced it. */
  def push(key: K, value: V, upstream: Long, downstream: Long): Unit

  /**
   * Apply `block` to the current (downstream offset, state) for `key`, unless
   * `upstreamOffset` precedes the watermark (already processed). Always returns
   * the offset following `upstreamOffset`.
   */
  def update(key: K, upstreamOffset: Long)(block: (Long, V) => (Long, V)): Long = {
    val advanced = upstreamOffset + 1
    val alreadyProcessed = upstreamOffset < upstream
    if (!alreadyProcessed) {
      val (newDownstream, newState) = block(downstream, state(key))
      push(key, newState, advanced, newDownstream)
    }
    advanced
  }
}
/**
 * A Samza [[StorageEngine]] that journals every write to a changelog partition
 * together with the (upstream, downstream) offsets in force at write time, so
 * that both the per-key state and the offset watermarks can be restored.
 */
class CoastStorageEngine[K, V](
  underlying: KeyValueStore[K, V],
  keySerde: Serde[K],
  valueSerde: Serde[V],
  collector: MessageCollector,
  ssp: SystemStreamPartition,
  keyFormat: Serializer[Array[Byte]],
  valueFormat: Serializer[(Long, Long, Array[Byte])]
) extends StorageEngine with Logging { store =>
  val partitionID: Int = ssp.getPartition.getPartitionId
  // Offset watermarks: recovered by restore() and advanced by push() below.
  var nextOffset: Long = 0L
  var downstreamOffset: Long = 0L
  // Replays the changelog. Each entry carries (upstream, downstream, value),
  // so the last entry replayed leaves the watermarks at their latest values.
  override def restore(messages: util.Iterator[IncomingMessageEnvelope]): Unit = {
    for (message <- messages.asScala) {
      val keyBytes = keyFormat.fromArray(message.getKey.asInstanceOf[Array[Byte]])
      val (up, down, valueBytes) = valueFormat.fromArray(message.getMessage.asInstanceOf[Array[Byte]])
      nextOffset = up
      downstreamOffset = down
      underlying.put(keySerde.fromBytes(keyBytes), valueSerde.fromBytes(valueBytes))
    }
    info(s"Restored offsets for $ssp: [upstream: $nextOffset, downstream: $downstreamOffset]")
  }
  // Adapts this engine to the CoastState interface; `default` is returned for
  // keys with no stored value. push() writes through to the local store and
  // then sends the (offsets, value) record to the changelog partition.
  def withDefault(default: V): CoastState[K, V] = new CoastState[K, V] {
    override def upstream: Long = store.nextOffset
    override def downstream: Long = store.downstreamOffset
    override def state(key: K): V = Option(store.underlying.get(key)).getOrElse(default)
    override def push(key: K, value: V, upstream: Long, downstream: Long): Unit = {
      store.nextOffset = upstream
      store.downstreamOffset = downstream
      store.underlying.put(key, value)
      val keyBytes = keyFormat.toArray(keySerde.toBytes(key))
      val valueBytes = valueFormat.toArray(upstream, downstream, valueSerde.toBytes(value))
      collector.send(new OutgoingMessageEnvelope(ssp, store.partitionID, keyBytes, valueBytes))
    }
  }
  override def flush(): Unit = {
    underlying.flush()
  }
  override def stop(): Unit = {
    underlying.close()
  }
}
/**
 * Samza [[StorageEngineFactory]] that looks up a backing key-value store
 * factory from `stores.<name>.subfactory` and wraps the resulting store in a
 * [[CoastStorageEngine]].
 */
class CoastStoreFactory[A, B] extends StorageEngineFactory[A, B] {
  override def getStorageEngine(
    storeName: String,
    storeDir: File,
    keySerde: Serde[A],
    msgSerde: Serde[B],
    collector: MessageCollector,
    registry: MetricsRegistry,
    changeLogSystemStreamPartition: SystemStreamPartition,
    containerContext: SamzaContainerContext
  ): StorageEngine = {
    // Delegate raw byte storage to the sub-factory configured for this store.
    val subFactory = containerContext.config
      .getNewInstance[BaseKeyValueStorageEngineFactory[_, _]](s"stores.$storeName.subfactory")
    val rawStore =
      subFactory.getKVStore(storeName, storeDir, registry, changeLogSystemStreamPartition, containerContext)
    // Layer typed serdes on top of the raw store.
    val typedStore = new SerializedKeyValueStore[A, B](rawStore, keySerde, msgSerde)
    import Protocol.common._
    val keyFormat = implicitly[Serializer[Array[Byte]]]
    val valueFormat = implicitly[Serializer[(Long, Long, Array[Byte])]]
    new CoastStorageEngine[A, B](
      typedStore, keySerde, msgSerde, collector, changeLogSystemStreamPartition, keyFormat, valueFormat)
  }
}
| bkirwi/coast | samza/src/main/scala/com/monovore/coast/samza/safe/CoastStorageEngine.scala | Scala | apache-2.0 | 4,174 |
package com.twitter.finagle
import com.twitter.finagle.util.LoadService
import com.twitter.util.{Closable, Future, Time}
import java.net.InetSocketAddress
import java.util.logging.Logger
import scala.collection.mutable
import scala.util.control.NoStackTrace
/**
 * Indicates that no [[com.twitter.finagle.Announcer]] is registered for the
 * given `scheme`.
 *
 * Announcers are discovered via Finagle's [[com.twitter.finagle.util.LoadService]]
 * mechanism, so this typically means no library on the classpath defines an
 * Announcer for the scheme.
 */
class AnnouncerNotFoundException(scheme: String)
  extends Exception(s"""Announcer not found for scheme "$scheme"""")
/**
 * Indicates that multiple [[com.twitter.finagle.Announcer Announcers]] were
 * discovered for a given `scheme`.
 *
 * Announcers are discovered via Finagle's [[com.twitter.finagle.util.LoadService]]
 * mechanism, so this typically means multiple libraries on the classpath carry
 * conflicting scheme definitions.
 */
class MultipleAnnouncersPerSchemeException(announcers: Map[String, Seq[Announcer]])
  extends Exception with NoStackTrace
{
  override def getMessage = {
    // One "scheme=(impl, impl, ...)" entry per conflicting scheme.
    val conflicts = announcers
      .map { case (scheme, impls) =>
        "%s=(%s)".format(scheme, impls.map(_.getClass.getName).mkString(", "))
      }
      .mkString(" ")
    "Multiple announcers defined: %s".format(conflicts)
  }
}
/**
 * Indicates that a forum string passed to an [[com.twitter.finagle.Announcer]]
 * was invalid according to the forum grammar [1].
 *
 * [1] https://twitter.github.io/finagle/guide/Names.html
 */
class AnnouncerForumInvalid(forum: String)
  extends Exception(s"""Announcer forum "$forum" is not valid""")
/**
 * A handle to an announcement made via [[Announcer.announce]]. Closing the
 * handle revokes the announcement: `close` simply delegates to [[unannounce]].
 */
trait Announcement extends Closable {
  def close(deadline: Time) = unannounce()
  def unannounce(): Future[Unit]
}
/**
 * An [[Announcement]] that wraps another announcement (via [[Proxy]]) and
 * records the forum strings under which the address has been announced.
 */
trait ProxyAnnouncement extends Announcement with Proxy {
  // Forum strings accumulated across successive announce() calls.
  val forums: List[String]
}
/**
 * Announces a server address under a scheme-specific naming system.
 * Implementations are discovered at runtime via
 * [[com.twitter.finagle.util.LoadService]] and selected by `scheme`
 * (see [[Announcer.announce]]).
 */
trait Announcer {
  val scheme: String
  def announce(addr: InetSocketAddress, name: String): Future[Announcement]
}
object Announcer {
  // Loads all Announcer implementations on first use. Fails fast when two
  // implementations claim the same scheme, and logs each announcer discovered.
  private[this] lazy val announcers = {
    val announcers = LoadService[Announcer]()
    val log = Logger.getLogger(getClass.getName)
    val dups = announcers groupBy(_.scheme) filter { case (_, rs) => rs.size > 1 }
    if (dups.size > 0) throw new MultipleAnnouncersPerSchemeException(dups)
    for (r <- announcers)
      log.info("Announcer[%s] = %s(%s)".format(r.scheme, r.getClass.getName, r))
    announcers
  }
  // NOTE(review): this matches announcers whose class is a *supertype* of
  // `clazz` (`a.getClass isAssignableFrom clazz`); subclasses of `clazz` will
  // NOT match. Confirm this direction is intended.
  def get[T <: Announcer](clazz: Class[T]): Option[T] =
    announcers find { _.getClass isAssignableFrom clazz } map { _.asInstanceOf[T] }
  // Registry of (address, forums) pairs for everything announced through this
  // object; all access is guarded by `synchronized` on this object.
  private[this] val _announcements = mutable.Set[(InetSocketAddress, List[String])]()
  def announcements = synchronized { _announcements.toSet }
  /**
   * Announce `addr` under `forum`, where a forum has the form "scheme!name":
   * the scheme selects the Announcer and the name is passed to it verbatim.
   */
  def announce(addr: InetSocketAddress, forum: String): Future[Announcement] = {
    val announcement = forum.split("!", 2) match {
      case Array(scheme, name) =>
        announcers.find(_.scheme == scheme) match {
          case Some(announcer) => announcer.announce(addr, name)
          case None => Future.exception(new AnnouncerNotFoundException(scheme))
        }
      case _ =>
        Future.exception(new AnnouncerForumInvalid(forum))
    }
    announcement map { ann =>
      // Accumulate this forum onto any forums the announcement already
      // carries, then swap the registry entry for this address accordingly.
      val lastForums = ann match {
        case a: ProxyAnnouncement => a.forums
        case _ => Nil
      }
      val proxyAnnouncement = new ProxyAnnouncement {
        val self = ann
        def unannounce() = ann.unannounce()
        val forums = forum :: lastForums
      }
      synchronized {
        _announcements -= ((addr, lastForums))
        _announcements += ((addr, proxyAnnouncement.forums))
      }
      proxyAnnouncement
    }
  }
}
| koshelev/finagle | finagle-core/src/main/scala/com/twitter/finagle/Announcer.scala | Scala | apache-2.0 | 3,739 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.NotSerializableException
import java.util.Properties
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicInteger
import scala.annotation.tailrec
import scala.collection.Map
import scala.collection.mutable.{HashMap, HashSet, Stack}
import scala.concurrent.duration._
import scala.language.existentials
import scala.language.postfixOps
import scala.util.control.NonFatal
import org.apache.commons.lang3.SerializationUtils
import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.config
import org.apache.spark.internal.Logging
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.partial.{ApproximateActionListener, ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd.RDD
import org.apache.spark.rpc.RpcTimeout
import org.apache.spark.storage._
import org.apache.spark.storage.BlockManagerMessages.BlockManagerHeartbeat
import org.apache.spark.util._
/**
* The high-level scheduling layer that implements stage-oriented scheduling. It computes a DAG of
* stages for each job, keeps track of which RDDs and stage outputs are materialized, and finds a
* minimal schedule to run the job. It then submits stages as TaskSets to an underlying
* TaskScheduler implementation that runs them on the cluster. A TaskSet contains fully independent
* tasks that can run right away based on the data that's already on the cluster (e.g. map output
* files from previous stages), though it may fail if this data becomes unavailable.
*
* Spark stages are created by breaking the RDD graph at shuffle boundaries. RDD operations with
* "narrow" dependencies, like map() and filter(), are pipelined together into one set of tasks
* in each stage, but operations with shuffle dependencies require multiple stages (one to write a
* set of map output files, and another to read those files after a barrier). In the end, every
* stage will have only shuffle dependencies on other stages, and may compute multiple operations
* inside it. The actual pipelining of these operations happens in the RDD.compute() functions of
* various RDDs
*
* In addition to coming up with a DAG of stages, the DAGScheduler also determines the preferred
* locations to run each task on, based on the current cache status, and passes these to the
* low-level TaskScheduler. Furthermore, it handles failures due to shuffle output files being
* lost, in which case old stages may need to be resubmitted. Failures *within* a stage that are
* not caused by shuffle file loss are handled by the TaskScheduler, which will retry each task
* a small number of times before cancelling the whole stage.
*
* When looking through this code, there are several key concepts:
*
* - Jobs (represented by [[ActiveJob]]) are the top-level work items submitted to the scheduler.
* For example, when the user calls an action, like count(), a job will be submitted through
* submitJob. Each Job may require the execution of multiple stages to build intermediate data.
*
* - Stages ([[Stage]]) are sets of tasks that compute intermediate results in jobs, where each
* task computes the same function on partitions of the same RDD. Stages are separated at shuffle
* boundaries, which introduce a barrier (where we must wait for the previous stage to finish to
* fetch outputs). There are two types of stages: [[ResultStage]], for the final stage that
* executes an action, and [[ShuffleMapStage]], which writes map output files for a shuffle.
* Stages are often shared across multiple jobs, if these jobs reuse the same RDDs.
*
* - Tasks are individual units of work, each sent to one machine.
*
* - Cache tracking: the DAGScheduler figures out which RDDs are cached to avoid recomputing them
* and likewise remembers which shuffle map stages have already produced output files to avoid
* redoing the map side of a shuffle.
*
* - Preferred locations: the DAGScheduler also computes where to run each task in a stage based
* on the preferred locations of its underlying RDDs, or the location of cached or shuffle data.
*
* - Cleanup: all data structures are cleared when the running jobs that depend on them finish,
* to prevent memory leaks in a long-running application.
*
* To recover from failures, the same stage might need to run multiple times, which are called
* "attempts". If the TaskScheduler reports that a task failed because a map output file from a
* previous stage was lost, the DAGScheduler resubmits that lost stage. This is detected through a
* CompletionEvent with FetchFailed, or an ExecutorLost event. The DAGScheduler will wait a small
* amount of time to see whether other nodes or tasks fail, then resubmit TaskSets for any lost
* stage(s) that compute the missing tasks. As part of this process, we might also have to create
* Stage objects for old (finished) stages where we previously cleaned up the Stage object. Since
* tasks from the old attempt of a stage could still be running, care must be taken to map any
* events received in the correct Stage object.
*
* Here's a checklist to use when making or reviewing changes to this class:
*
* - All data structures should be cleared when the jobs involving them end to avoid indefinite
* accumulation of state in long-running programs.
*
* - When adding a new data structure, update `DAGSchedulerSuite.assertDataStructuresEmpty` to
* include the new structure. This will help to catch memory leaks.
*/
private[spark]
class DAGScheduler(
private[scheduler] val sc: SparkContext,
private[scheduler] val taskScheduler: TaskScheduler,
listenerBus: LiveListenerBus,
mapOutputTracker: MapOutputTrackerMaster,
blockManagerMaster: BlockManagerMaster,
env: SparkEnv,
clock: Clock = new SystemClock())
extends Logging {
  // Convenience constructor wiring the scheduler to the SparkContext's
  // listener bus, map-output tracker, block-manager master, and environment.
  def this(sc: SparkContext, taskScheduler: TaskScheduler) = {
    this(
      sc,
      taskScheduler,
      sc.listenerBus,
      sc.env.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster],
      sc.env.blockManager.master,
      sc.env)
  }
  // Convenience constructor using the SparkContext's own TaskScheduler.
  def this(sc: SparkContext) = this(sc, sc.taskScheduler)
  private[spark] val metricsSource: DAGSchedulerSource = new DAGSchedulerSource(this)
  // Monotonically increasing ID generators; atomic because jobs may be
  // submitted from multiple user threads.
  private[scheduler] val nextJobId = new AtomicInteger(0)
  private[scheduler] def numTotalJobs: Int = nextJobId.get()
  private val nextStageId = new AtomicInteger(0)
  // Bookkeeping: which stages belong to each job, and Stage lookup by ID.
  private[scheduler] val jobIdToStageIds = new HashMap[Int, HashSet[Int]]
  private[scheduler] val stageIdToStage = new HashMap[Int, Stage]
  /**
   * Mapping from shuffle dependency ID to the ShuffleMapStage that will generate the data for
   * that dependency. Only includes stages that are part of currently running job (when the job(s)
   * that require the shuffle stage complete, the mapping will be removed, and the only record of
   * the shuffle data will be in the MapOutputTracker).
   */
  private[scheduler] val shuffleIdToMapStage = new HashMap[Int, ShuffleMapStage]
  private[scheduler] val jobIdToActiveJob = new HashMap[Int, ActiveJob]
  // Stages we need to run whose parents aren't done
  private[scheduler] val waitingStages = new HashSet[Stage]
  // Stages we are running right now
  private[scheduler] val runningStages = new HashSet[Stage]
  // Stages that must be resubmitted due to fetch failures
  private[scheduler] val failedStages = new HashSet[Stage]
  private[scheduler] val activeJobs = new HashSet[ActiveJob]
  /**
   * Contains the locations that each RDD's partitions are cached on. This map's keys are RDD ids
   * and its values are arrays indexed by partition numbers. Each array value is the set of
   * locations where that RDD partition is cached.
   *
   * All accesses to this map should be guarded by synchronizing on it (see SPARK-4454).
   */
  private val cacheLocs = new HashMap[Int, IndexedSeq[Seq[TaskLocation]]]
  // For tracking failed nodes, we use the MapOutputTracker's epoch number, which is sent with
  // every task. When we detect a node failing, we note the current epoch number and failed
  // executor, increment it for new tasks, and use this to ignore stray ShuffleMapTask results.
  //
  // TODO: Garbage collect information about failure epochs when we know there are no more
  // stray messages to detect.
  private val failedEpoch = new HashMap[String, Long]
  private [scheduler] val outputCommitCoordinator = env.outputCommitCoordinator
  // A closure serializer that we reuse.
  // This is only safe because DAGScheduler runs in a single thread.
  private val closureSerializer = SparkEnv.get.closureSerializer.newInstance()
  /** If enabled, FetchFailed will not cause stage retry, in order to surface the problem. */
  private val disallowStageRetryForTest = sc.getConf.getBoolean("spark.test.noStageRetry", false)
  /**
   * Whether to unregister all the outputs on the host in condition that we receive a FetchFailure,
   * this is set default to false, which means, we only unregister the outputs related to the exact
   * executor(instead of the host) on a FetchFailure.
   */
  private[scheduler] val unRegisterOutputOnHostOnFetchFailure =
    sc.getConf.get(config.UNREGISTER_OUTPUT_ON_HOST_ON_FETCH_FAILURE)
  /**
   * Number of consecutive stage attempts allowed before a stage is aborted.
   */
  private[scheduler] val maxConsecutiveStageAttempts =
    sc.getConf.getInt("spark.stage.maxConsecutiveAttempts",
      DAGScheduler.DEFAULT_MAX_CONSECUTIVE_STAGE_ATTEMPTS)
  // Daemon single-thread executor for time-delayed scheduler messages.
  private val messageScheduler =
    ThreadUtils.newDaemonSingleThreadScheduledExecutor("dag-scheduler-message")
  private[scheduler] val eventProcessLoop = new DAGSchedulerEventProcessLoop(this)
  // Register this DAGScheduler with the task scheduler (side effect at construction).
  taskScheduler.setDAGScheduler(this)
  /**
   * Called by the TaskSetManager to report task's starting.
   * Enqueues a BeginEvent on the scheduler's event-processing loop.
   */
  def taskStarted(task: Task[_], taskInfo: TaskInfo) {
    eventProcessLoop.post(BeginEvent(task, taskInfo))
  }
  /**
   * Called by the TaskSetManager to report that a task has completed
   * and results are being fetched remotely.
   * Enqueues a GettingResultEvent on the scheduler's event-processing loop.
   */
  def taskGettingResult(taskInfo: TaskInfo) {
    eventProcessLoop.post(GettingResultEvent(taskInfo))
  }
  /**
   * Called by the TaskSetManager to report task completions or failures.
   * Both outcomes are funneled through a single CompletionEvent on the
   * event-processing loop; `reason` distinguishes success from failure.
   */
  def taskEnded(
      task: Task[_],
      reason: TaskEndReason,
      result: Any,
      accumUpdates: Seq[AccumulatorV2[_, _]],
      taskInfo: TaskInfo): Unit = {
    eventProcessLoop.post(
      CompletionEvent(task, reason, result, accumUpdates, taskInfo))
  }
  /**
   * Update metrics for in-progress tasks and let the master know that the BlockManager is still
   * alive. Return true if the driver knows about the given block manager. Otherwise, return false,
   * indicating that the block manager should re-register.
   */
  def executorHeartbeatReceived(
      execId: String,
      // (taskId, stageId, stageAttemptId, accumUpdates)
      accumUpdates: Array[(Long, Int, Int, Seq[AccumulableInfo])],
      blockManagerId: BlockManagerId): Boolean = {
    // Publish the metric updates to listeners before pinging the master.
    listenerBus.post(SparkListenerExecutorMetricsUpdate(execId, accumUpdates))
    // Synchronous ask (10-minute timeout); the Boolean reply is whether the
    // master recognizes this block manager.
    blockManagerMaster.driverEndpoint.askSync[Boolean](
      BlockManagerHeartbeat(blockManagerId), new RpcTimeout(600 seconds, "BlockManagerHeartbeat"))
  }
  /**
   * Called by TaskScheduler implementation when an executor fails.
   * Enqueues an ExecutorLost event carrying the loss reason.
   */
  def executorLost(execId: String, reason: ExecutorLossReason): Unit = {
    eventProcessLoop.post(ExecutorLost(execId, reason))
  }
  /**
   * Called by TaskScheduler implementation when a host is added.
   * Enqueues an ExecutorAdded event on the event-processing loop.
   */
  def executorAdded(execId: String, host: String): Unit = {
    eventProcessLoop.post(ExecutorAdded(execId, host))
  }
  /**
   * Called by the TaskSetManager to cancel an entire TaskSet due to either repeated failures or
   * cancellation of the job itself.
   * Enqueues a TaskSetFailed event with the human-readable reason and optional cause.
   */
  def taskSetFailed(taskSet: TaskSet, reason: String, exception: Option[Throwable]): Unit = {
    eventProcessLoop.post(TaskSetFailed(taskSet, reason, exception))
  }
  /**
   * Returns (computing and caching on first use) the cached block locations for
   * each partition of `rdd`, indexed by partition number. Guarded by the
   * cacheLocs lock (see SPARK-4454).
   */
  private[scheduler]
  def getCacheLocs(rdd: RDD[_]): IndexedSeq[Seq[TaskLocation]] = cacheLocs.synchronized {
    // Note: this doesn't use `getOrElse()` because this method is called O(num tasks) times
    if (!cacheLocs.contains(rdd.id)) {
      // Note: if the storage level is NONE, we don't need to get locations from block manager.
      val locs: IndexedSeq[Seq[TaskLocation]] = if (rdd.getStorageLevel == StorageLevel.NONE) {
        IndexedSeq.fill(rdd.partitions.length)(Nil)
      } else {
        // One BlockId per partition; the master resolves each to the block
        // managers currently holding that block.
        val blockIds =
          rdd.partitions.indices.map(index => RDDBlockId(rdd.id, index)).toArray[BlockId]
        blockManagerMaster.getLocations(blockIds).map { bms =>
          bms.map(bm => TaskLocation(bm.host, bm.executorId))
        }
      }
      cacheLocs(rdd.id) = locs
    }
    cacheLocs(rdd.id)
  }
  // Drop all cached partition-location info; guarded by the cacheLocs lock.
  private def clearCacheLocs(): Unit = cacheLocs.synchronized {
    cacheLocs.clear()
  }
  /**
   * Gets a shuffle map stage if one exists in shuffleIdToMapStage. Otherwise, if the
   * shuffle map stage doesn't already exist, this method will create the shuffle map stage in
   * addition to any missing ancestor shuffle map stages. Ancestors are created
   * first, so the returned stage's parent stages are already registered.
   */
  private def getOrCreateShuffleMapStage(
      shuffleDep: ShuffleDependency[_, _, _],
      firstJobId: Int): ShuffleMapStage = {
    shuffleIdToMapStage.get(shuffleDep.shuffleId) match {
      case Some(stage) =>
        stage
      case None =>
        // Create stages for all missing ancestor shuffle dependencies.
        getMissingAncestorShuffleDependencies(shuffleDep.rdd).foreach { dep =>
          // Even though getMissingAncestorShuffleDependencies only returns shuffle dependencies
          // that were not already in shuffleIdToMapStage, it's possible that by the time we
          // get to a particular dependency in the foreach loop, it's been added to
          // shuffleIdToMapStage by the stage creation process for an earlier dependency. See
          // SPARK-13902 for more information.
          if (!shuffleIdToMapStage.contains(dep.shuffleId)) {
            createShuffleMapStage(dep, firstJobId)
          }
        }
        // Finally, create a stage for the given shuffle dependency.
        createShuffleMapStage(shuffleDep, firstJobId)
    }
  }
  /**
   * Creates a ShuffleMapStage that generates the given shuffle dependency's partitions. If a
   * previously run stage generated the same shuffle data, this function will copy the output
   * locations that are still available from the previous shuffle to avoid unnecessarily
   * regenerating data.
   *
   * Side effects: registers the new stage in stageIdToStage and
   * shuffleIdToMapStage, links it to `jobId`, and (if the shuffle is new)
   * registers the shuffle with the MapOutputTracker.
   */
  def createShuffleMapStage(shuffleDep: ShuffleDependency[_, _, _], jobId: Int): ShuffleMapStage = {
    val rdd = shuffleDep.rdd
    val numTasks = rdd.partitions.length
    // Parent stages must exist before the new stage is constructed.
    val parents = getOrCreateParentStages(rdd, jobId)
    val id = nextStageId.getAndIncrement()
    val stage = new ShuffleMapStage(
      id, rdd, numTasks, parents, jobId, rdd.creationSite, shuffleDep, mapOutputTracker)
    stageIdToStage(id) = stage
    shuffleIdToMapStage(shuffleDep.shuffleId) = stage
    updateJobIdStageIdMaps(jobId, stage)
    if (!mapOutputTracker.containsShuffle(shuffleDep.shuffleId)) {
      // Kind of ugly: need to register RDDs with the cache and map output tracker here
      // since we can't do it in the RDD constructor because # of partitions is unknown
      logInfo("Registering RDD " + rdd.id + " (" + rdd.getCreationSite + ")")
      mapOutputTracker.registerShuffle(shuffleDep.shuffleId, rdd.partitions.length)
    }
    stage
  }
/**
* Create a ResultStage associated with the provided jobId.
*/
private def createResultStage(
rdd: RDD[_],
func: (TaskContext, Iterator[_]) => _,
partitions: Array[Int],
jobId: Int,
callSite: CallSite): ResultStage = {
val parents = getOrCreateParentStages(rdd, jobId)
val id = nextStageId.getAndIncrement()
val stage = new ResultStage(id, rdd, func, partitions, parents, jobId, callSite)
stageIdToStage(id) = stage
updateJobIdStageIdMaps(jobId, stage)
stage
}
/**
* Get or create the list of parent stages for a given RDD. The new Stages will be created with
* the provided firstJobId.
*/
private def getOrCreateParentStages(rdd: RDD[_], firstJobId: Int): List[Stage] = {
getShuffleDependencies(rdd).map { shuffleDep =>
getOrCreateShuffleMapStage(shuffleDep, firstJobId)
}.toList
}
  /** Find ancestor shuffle dependencies that are not registered in shuffleToMapStage yet */
  private def getMissingAncestorShuffleDependencies(
      rdd: RDD[_]): Stack[ShuffleDependency[_, _, _]] = {
    val ancestors = new Stack[ShuffleDependency[_, _, _]]
    val visited = new HashSet[RDD[_]]
    // We are manually maintaining a stack here to prevent StackOverflowError
    // caused by recursively visiting
    val waitingForVisit = new Stack[RDD[_]]
    waitingForVisit.push(rdd)
    while (waitingForVisit.nonEmpty) {
      val toVisit = waitingForVisit.pop()
      if (!visited(toVisit)) {
        visited += toVisit
        // Collect only unregistered shuffle dependencies, and keep traversing past each one
        // so that its own unregistered ancestors are found as well.
        getShuffleDependencies(toVisit).foreach { shuffleDep =>
          if (!shuffleIdToMapStage.contains(shuffleDep.shuffleId)) {
            ancestors.push(shuffleDep)
            waitingForVisit.push(shuffleDep.rdd)
          } // Otherwise, the dependency and its ancestors have already been registered.
        }
      }
    }
    ancestors
  }
/**
* Returns shuffle dependencies that are immediate parents of the given RDD.
*
* This function will not return more distant ancestors. For example, if C has a shuffle
* dependency on B which has a shuffle dependency on A:
*
* A <-- B <-- C
*
* calling this function with rdd C will only return the B <-- C dependency.
*
* This function is scheduler-visible for the purpose of unit testing.
*/
private[scheduler] def getShuffleDependencies(
rdd: RDD[_]): HashSet[ShuffleDependency[_, _, _]] = {
val parents = new HashSet[ShuffleDependency[_, _, _]]
val visited = new HashSet[RDD[_]]
val waitingForVisit = new Stack[RDD[_]]
waitingForVisit.push(rdd)
while (waitingForVisit.nonEmpty) {
val toVisit = waitingForVisit.pop()
if (!visited(toVisit)) {
visited += toVisit
toVisit.dependencies.foreach {
case shuffleDep: ShuffleDependency[_, _, _] =>
parents += shuffleDep
case dependency =>
waitingForVisit.push(dependency.rdd)
}
}
}
parents
}
  /**
   * Returns the parent map stages of the given stage whose shuffle output is not yet
   * available, traversing the RDD graph but not descending past fully cached RDDs.
   */
  private def getMissingParentStages(stage: Stage): List[Stage] = {
    val missing = new HashSet[Stage]
    val visited = new HashSet[RDD[_]]
    // We are manually maintaining a stack here to prevent StackOverflowError
    // caused by recursively visiting
    val waitingForVisit = new Stack[RDD[_]]
    def visit(rdd: RDD[_]) {
      if (!visited(rdd)) {
        visited += rdd
        // If every partition of this RDD is cached, none of its ancestors need to run.
        val rddHasUncachedPartitions = getCacheLocs(rdd).contains(Nil)
        if (rddHasUncachedPartitions) {
          for (dep <- rdd.dependencies) {
            dep match {
              case shufDep: ShuffleDependency[_, _, _] =>
                // A shuffle boundary: the parent map stage is missing unless its output
                // is fully available.
                val mapStage = getOrCreateShuffleMapStage(shufDep, stage.firstJobId)
                if (!mapStage.isAvailable) {
                  missing += mapStage
                }
              case narrowDep: NarrowDependency[_] =>
                waitingForVisit.push(narrowDep.rdd)
            }
          }
        }
      }
    }
    waitingForVisit.push(stage.rdd)
    while (waitingForVisit.nonEmpty) {
      visit(waitingForVisit.pop())
    }
    missing.toList
  }
/**
* Registers the given jobId among the jobs that need the given stage and
* all of that stage's ancestors.
*/
private def updateJobIdStageIdMaps(jobId: Int, stage: Stage): Unit = {
@tailrec
def updateJobIdStageIdMapsList(stages: List[Stage]) {
if (stages.nonEmpty) {
val s = stages.head
s.jobIds += jobId
jobIdToStageIds.getOrElseUpdate(jobId, new HashSet[Int]()) += s.id
val parentsWithoutThisJobId = s.parents.filter { ! _.jobIds.contains(jobId) }
updateJobIdStageIdMapsList(parentsWithoutThisJobId ++ stages.tail)
}
}
updateJobIdStageIdMapsList(List(stage))
}
  /**
   * Removes state for job and any stages that are not needed by any other job. Does not
   * handle cancelling tasks or notifying the SparkListener about finished jobs/stages/tasks.
   *
   * @param job The job whose state to cleanup.
   */
  private def cleanupStateForJobAndIndependentStages(job: ActiveJob): Unit = {
    val registeredStages = jobIdToStageIds.get(job.jobId)
    if (registeredStages.isEmpty || registeredStages.get.isEmpty) {
      logError("No stages registered for job " + job.jobId)
    } else {
      stageIdToStage.filterKeys(stageId => registeredStages.get.contains(stageId)).foreach {
        case (stageId, stage) =>
          val jobSet = stage.jobIds
          if (!jobSet.contains(job.jobId)) {
            logError(
              "Job %d not registered for stage %d even though that stage was registered for the job"
                .format(job.jobId, stageId))
          } else {
            // Removes the stage from every scheduler data structure that references it.
            def removeStage(stageId: Int) {
              // data structures based on Stage
              for (stage <- stageIdToStage.get(stageId)) {
                if (runningStages.contains(stage)) {
                  logDebug("Removing running stage %d".format(stageId))
                  runningStages -= stage
                }
                for ((k, v) <- shuffleIdToMapStage.find(_._2 == stage)) {
                  shuffleIdToMapStage.remove(k)
                }
                if (waitingStages.contains(stage)) {
                  logDebug("Removing stage %d from waiting set.".format(stageId))
                  waitingStages -= stage
                }
                if (failedStages.contains(stage)) {
                  logDebug("Removing stage %d from failed set.".format(stageId))
                  failedStages -= stage
                }
              }
              // data structures based on StageId
              stageIdToStage -= stageId
              logDebug("After removal of stage %d, remaining stages = %d"
                .format(stageId, stageIdToStage.size))
            }
            jobSet -= job.jobId
            if (jobSet.isEmpty) { // no other job needs this stage
              removeStage(stageId)
            }
          }
      }
    }
    jobIdToStageIds -= job.jobId
    jobIdToActiveJob -= job.jobId
    activeJobs -= job
    // A ResultStage tracks at most one active job; a ShuffleMapStage may serve several
    // map-stage jobs, so the job must be passed explicitly for removal.
    job.finalStage match {
      case r: ResultStage => r.removeActiveJob()
      case m: ShuffleMapStage => m.removeActiveJob(job)
    }
  }
/**
* Submit an action job to the scheduler.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like first()
* @param callSite where in the user program this job was called
* @param resultHandler callback to pass each result to
* @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
*
* @return a JobWaiter object that can be used to block until the job finishes executing
* or can be used to cancel the job.
*
* @throws IllegalArgumentException when partitions ids are illegal
*/
def submitJob[T, U](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
callSite: CallSite,
resultHandler: (Int, U) => Unit,
properties: Properties): JobWaiter[U] = {
// Check to make sure we are not launching a task on a partition that does not exist.
val maxPartitions = rdd.partitions.length
partitions.find(p => p >= maxPartitions || p < 0).foreach { p =>
throw new IllegalArgumentException(
"Attempting to access a non-existent partition: " + p + ". " +
"Total number of partitions: " + maxPartitions)
}
val jobId = nextJobId.getAndIncrement()
if (partitions.size == 0) {
// Return immediately if the job is running 0 tasks
return new JobWaiter[U](this, jobId, 0, resultHandler)
}
assert(partitions.size > 0)
val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _]
val waiter = new JobWaiter(this, jobId, partitions.size, resultHandler)
eventProcessLoop.post(JobSubmitted(
jobId, rdd, func2, partitions.toArray, callSite, waiter,
SerializationUtils.clone(properties)))
waiter
}
  /**
   * Run an action job on the given RDD and pass all the results to the resultHandler function as
   * they arrive.
   *
   * @param rdd target RDD to run tasks on
   * @param func a function to run on each partition of the RDD
   * @param partitions set of partitions to run on; some jobs may not want to compute on all
   *   partitions of the target RDD, e.g. for operations like first()
   * @param callSite where in the user program this job was called
   * @param resultHandler callback to pass each result to
   * @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
   *
   * @note Throws `Exception` when the job fails
   */
  def runJob[T, U](
      rdd: RDD[T],
      func: (TaskContext, Iterator[T]) => U,
      partitions: Seq[Int],
      callSite: CallSite,
      resultHandler: (Int, U) => Unit,
      properties: Properties): Unit = {
    val start = System.nanoTime
    val waiter = submitJob(rdd, func, partitions, callSite, resultHandler, properties)
    // Block the calling thread until the job completes, successfully or not.
    ThreadUtils.awaitReady(waiter.completionFuture, Duration.Inf)
    waiter.completionFuture.value.get match {
      case scala.util.Success(_) =>
        logInfo("Job %d finished: %s, took %f s".format
          (waiter.jobId, callSite.shortForm, (System.nanoTime - start) / 1e9))
      case scala.util.Failure(exception) =>
        logInfo("Job %d failed: %s, took %f s".format
          (waiter.jobId, callSite.shortForm, (System.nanoTime - start) / 1e9))
        // SPARK-8644: Include user stack trace in exceptions coming from DAGScheduler.
        val callerStackTrace = Thread.currentThread().getStackTrace.tail
        exception.setStackTrace(exception.getStackTrace ++ callerStackTrace)
        throw exception
    }
  }
/**
* Run an approximate job on the given RDD and pass all the results to an ApproximateEvaluator
* as they arrive. Returns a partial result object from the evaluator.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param evaluator `ApproximateEvaluator` to receive the partial results
* @param callSite where in the user program this job was called
* @param timeout maximum time to wait for the job, in milliseconds
* @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
*/
def runApproximateJob[T, U, R](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
evaluator: ApproximateEvaluator[U, R],
callSite: CallSite,
timeout: Long,
properties: Properties): PartialResult[R] = {
val listener = new ApproximateActionListener(rdd, func, evaluator, timeout)
val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _]
val partitions = (0 until rdd.partitions.length).toArray
val jobId = nextJobId.getAndIncrement()
eventProcessLoop.post(JobSubmitted(
jobId, rdd, func2, partitions, callSite, listener, SerializationUtils.clone(properties)))
listener.awaitResult() // Will throw an exception if the job fails
}
/**
* Submit a shuffle map stage to run independently and get a JobWaiter object back. The waiter
* can be used to block until the job finishes executing or can be used to cancel the job.
* This method is used for adaptive query planning, to run map stages and look at statistics
* about their outputs before submitting downstream stages.
*
* @param dependency the ShuffleDependency to run a map stage for
* @param callback function called with the result of the job, which in this case will be a
* single MapOutputStatistics object showing how much data was produced for each partition
* @param callSite where in the user program this job was submitted
* @param properties scheduler properties to attach to this job, e.g. fair scheduler pool name
*/
def submitMapStage[K, V, C](
dependency: ShuffleDependency[K, V, C],
callback: MapOutputStatistics => Unit,
callSite: CallSite,
properties: Properties): JobWaiter[MapOutputStatistics] = {
val rdd = dependency.rdd
val jobId = nextJobId.getAndIncrement()
if (rdd.partitions.length == 0) {
throw new SparkException("Can't run submitMapStage on RDD with 0 partitions")
}
// We create a JobWaiter with only one "task", which will be marked as complete when the whole
// map stage has completed, and will be passed the MapOutputStatistics for that stage.
// This makes it easier to avoid race conditions between the user code and the map output
// tracker that might result if we told the user the stage had finished, but then they queries
// the map output tracker and some node failures had caused the output statistics to be lost.
val waiter = new JobWaiter(this, jobId, 1, (i: Int, r: MapOutputStatistics) => callback(r))
eventProcessLoop.post(MapStageSubmitted(
jobId, dependency, callSite, waiter, SerializationUtils.clone(properties)))
waiter
}
/**
* Cancel a job that is running or waiting in the queue.
*/
def cancelJob(jobId: Int, reason: Option[String]): Unit = {
logInfo("Asked to cancel job " + jobId)
eventProcessLoop.post(JobCancelled(jobId, reason))
}
/**
* Cancel all jobs in the given job group ID.
*/
def cancelJobGroup(groupId: String): Unit = {
logInfo("Asked to cancel job group " + groupId)
eventProcessLoop.post(JobGroupCancelled(groupId))
}
/**
* Cancel all jobs that are running or waiting in the queue.
*/
def cancelAllJobs(): Unit = {
eventProcessLoop.post(AllJobsCancelled)
}
private[scheduler] def doCancelAllJobs() {
// Cancel all running jobs.
runningStages.map(_.firstJobId).foreach(handleJobCancellation(_,
Option("as part of cancellation of all jobs")))
activeJobs.clear() // These should already be empty by this point,
jobIdToActiveJob.clear() // but just in case we lost track of some jobs...
}
/**
* Cancel all jobs associated with a running or scheduled stage.
*/
def cancelStage(stageId: Int, reason: Option[String]) {
eventProcessLoop.post(StageCancelled(stageId, reason))
}
/**
* Kill a given task. It will be retried.
*
* @return Whether the task was successfully killed.
*/
def killTaskAttempt(taskId: Long, interruptThread: Boolean, reason: String): Boolean = {
taskScheduler.killTaskAttempt(taskId, interruptThread, reason)
}
/**
* Resubmit any failed stages. Ordinarily called after a small amount of time has passed since
* the last fetch failure.
*/
private[scheduler] def resubmitFailedStages() {
if (failedStages.size > 0) {
// Failed stages may be removed by job cancellation, so failed might be empty even if
// the ResubmitFailedStages event has been scheduled.
logInfo("Resubmitting failed stages")
clearCacheLocs()
val failedStagesCopy = failedStages.toArray
failedStages.clear()
for (stage <- failedStagesCopy.sortBy(_.firstJobId)) {
submitStage(stage)
}
}
}
/**
* Check for waiting stages which are now eligible for resubmission.
* Submits stages that depend on the given parent stage. Called when the parent stage completes
* successfully.
*/
private def submitWaitingChildStages(parent: Stage) {
logTrace(s"Checking if any dependencies of $parent are now runnable")
logTrace("running: " + runningStages)
logTrace("waiting: " + waitingStages)
logTrace("failed: " + failedStages)
val childStages = waitingStages.filter(_.parents.contains(parent)).toArray
waitingStages --= childStages
for (stage <- childStages.sortBy(_.firstJobId)) {
submitStage(stage)
}
}
/** Finds the earliest-created active job that needs the stage */
// TODO: Probably should actually find among the active jobs that need this
// stage the one with the highest priority (highest-priority pool, earliest created).
// That should take care of at least part of the priority inversion problem with
// cross-job dependencies.
private def activeJobForStage(stage: Stage): Option[Int] = {
val jobsThatUseStage: Array[Int] = stage.jobIds.toArray.sorted
jobsThatUseStage.find(jobIdToActiveJob.contains)
}
private[scheduler] def handleJobGroupCancelled(groupId: String) {
// Cancel all jobs belonging to this job group.
// First finds all active jobs with this group id, and then kill stages for them.
val activeInGroup = activeJobs.filter { activeJob =>
Option(activeJob.properties).exists {
_.getProperty(SparkContext.SPARK_JOB_GROUP_ID) == groupId
}
}
val jobIds = activeInGroup.map(_.jobId)
jobIds.foreach(handleJobCancellation(_,
Option("part of cancelled job group %s".format(groupId))))
}
private[scheduler] def handleBeginEvent(task: Task[_], taskInfo: TaskInfo) {
// Note that there is a chance that this task is launched after the stage is cancelled.
// In that case, we wouldn't have the stage anymore in stageIdToStage.
val stageAttemptId = stageIdToStage.get(task.stageId).map(_.latestInfo.attemptId).getOrElse(-1)
listenerBus.post(SparkListenerTaskStart(task.stageId, stageAttemptId, taskInfo))
}
private[scheduler] def handleTaskSetFailed(
taskSet: TaskSet,
reason: String,
exception: Option[Throwable]): Unit = {
stageIdToStage.get(taskSet.stageId).foreach { abortStage(_, reason, exception) }
}
  /**
   * Fails every active job and marks all running stages as finished. Called when the
   * scheduler itself is shutting down, so stages are not individually cancelled.
   */
  private[scheduler] def cleanUpAfterSchedulerStop() {
    for (job <- activeJobs) {
      val error =
        new SparkException(s"Job ${job.jobId} cancelled because SparkContext was shut down")
      job.listener.jobFailed(error)
      // Tell the listeners that all of the running stages have ended. Don't bother
      // cancelling the stages because if the DAG scheduler is stopped, the entire application
      // is in the process of getting stopped.
      val stageFailedMessage = "Stage cancelled because SparkContext was shut down"
      // The `toArray` here is necessary so that we don't iterate over `runningStages` while
      // mutating it.
      runningStages.toArray.foreach { stage =>
        markStageAsFinished(stage, Some(stageFailedMessage))
      }
      listenerBus.post(SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobFailed(error)))
    }
  }
private[scheduler] def handleGetTaskResult(taskInfo: TaskInfo) {
listenerBus.post(SparkListenerTaskGettingResult(taskInfo))
}
  /**
   * Handles a JobSubmitted event: builds the result stage (and, transitively, any missing
   * parent stages), registers the new ActiveJob, notifies listeners, and submits the stage.
   */
  private[scheduler] def handleJobSubmitted(jobId: Int,
      finalRDD: RDD[_],
      func: (TaskContext, Iterator[_]) => _,
      partitions: Array[Int],
      callSite: CallSite,
      listener: JobListener,
      properties: Properties) {
    var finalStage: ResultStage = null
    try {
      // New stage creation may throw an exception if, for example, jobs are run on a
      // HadoopRDD whose underlying HDFS files have been deleted.
      finalStage = createResultStage(finalRDD, func, partitions, jobId, callSite)
    } catch {
      case e: Exception =>
        logWarning("Creating new stage failed due to exception - job: " + jobId, e)
        listener.jobFailed(e)
        return
    }
    val job = new ActiveJob(jobId, finalStage, callSite, listener, properties)
    // Cached locations may be stale relative to the new stage graph.
    clearCacheLocs()
    logInfo("Got job %s (%s) with %d output partitions".format(
      job.jobId, callSite.shortForm, partitions.length))
    logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")")
    logInfo("Parents of final stage: " + finalStage.parents)
    logInfo("Missing parents: " + getMissingParentStages(finalStage))
    val jobSubmissionTime = clock.getTimeMillis()
    jobIdToActiveJob(jobId) = job
    activeJobs += job
    finalStage.setActiveJob(job)
    val stageIds = jobIdToStageIds(jobId).toArray
    val stageInfos = stageIds.flatMap(id => stageIdToStage.get(id).map(_.latestInfo))
    listenerBus.post(
      SparkListenerJobStart(job.jobId, jobSubmissionTime, stageInfos, properties))
    submitStage(finalStage)
  }
  /**
   * Handles a MapStageSubmitted event: resolves or creates the target shuffle map stage,
   * registers the new ActiveJob, notifies listeners, and submits the stage. If the stage's
   * output is already fully available, the job is completed immediately.
   */
  private[scheduler] def handleMapStageSubmitted(jobId: Int,
      dependency: ShuffleDependency[_, _, _],
      callSite: CallSite,
      listener: JobListener,
      properties: Properties) {
    // Submitting this map stage might still require the creation of some parent stages, so make
    // sure that happens.
    var finalStage: ShuffleMapStage = null
    try {
      // New stage creation may throw an exception if, for example, jobs are run on a
      // HadoopRDD whose underlying HDFS files have been deleted.
      finalStage = getOrCreateShuffleMapStage(dependency, jobId)
    } catch {
      case e: Exception =>
        logWarning("Creating new stage failed due to exception - job: " + jobId, e)
        listener.jobFailed(e)
        return
    }
    val job = new ActiveJob(jobId, finalStage, callSite, listener, properties)
    // Cached locations may be stale relative to the new stage graph.
    clearCacheLocs()
    logInfo("Got map stage job %s (%s) with %d output partitions".format(
      jobId, callSite.shortForm, dependency.rdd.partitions.length))
    logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")")
    logInfo("Parents of final stage: " + finalStage.parents)
    logInfo("Missing parents: " + getMissingParentStages(finalStage))
    val jobSubmissionTime = clock.getTimeMillis()
    jobIdToActiveJob(jobId) = job
    activeJobs += job
    finalStage.addActiveJob(job)
    val stageIds = jobIdToStageIds(jobId).toArray
    val stageInfos = stageIds.flatMap(id => stageIdToStage.get(id).map(_.latestInfo))
    listenerBus.post(
      SparkListenerJobStart(job.jobId, jobSubmissionTime, stageInfos, properties))
    submitStage(finalStage)
    // If the whole stage has already finished, tell the listener and remove it
    if (finalStage.isAvailable) {
      markMapStageJobAsFinished(job, mapOutputTracker.getStatistics(dependency))
    }
  }
  /** Submits stage, but first recursively submits any missing parents. */
  private def submitStage(stage: Stage) {
    val jobId = activeJobForStage(stage)
    if (jobId.isDefined) {
      logDebug("submitStage(" + stage + ")")
      // Skip stages that are already waiting, running, or marked as failed; their existing
      // lifecycle will take care of them.
      if (!waitingStages(stage) && !runningStages(stage) && !failedStages(stage)) {
        val missing = getMissingParentStages(stage).sortBy(_.id)
        logDebug("missing: " + missing)
        if (missing.isEmpty) {
          logInfo("Submitting " + stage + " (" + stage.rdd + "), which has no missing parents")
          submitMissingTasks(stage, jobId.get)
        } else {
          // Submit parents first; this stage waits until they complete.
          for (parent <- missing) {
            submitStage(parent)
          }
          waitingStages += stage
        }
      }
    } else {
      abortStage(stage, "No active job for stage " + stage.id, None)
    }
  }
  /** Called when stage's parents are available and we can now do its task. */
  private def submitMissingTasks(stage: Stage, jobId: Int) {
    logDebug("submitMissingTasks(" + stage + ")")
    // First figure out the indexes of partition ids to compute.
    val partitionsToCompute: Seq[Int] = stage.findMissingPartitions()
    // Use the scheduling pool, job group, description, etc. from an ActiveJob associated
    // with this Stage
    val properties = jobIdToActiveJob(jobId).properties
    runningStages += stage
    // SparkListenerStageSubmitted should be posted before testing whether tasks are
    // serializable. If tasks are not serializable, a SparkListenerStageCompleted event
    // will be posted, which should always come after a corresponding SparkListenerStageSubmitted
    // event.
    stage match {
      case s: ShuffleMapStage =>
        outputCommitCoordinator.stageStart(stage = s.id, maxPartitionId = s.numPartitions - 1)
      case s: ResultStage =>
        outputCommitCoordinator.stageStart(
          stage = s.id, maxPartitionId = s.rdd.partitions.length - 1)
    }
    // Compute preferred locations for each task before creating the stage attempt.
    val taskIdToLocations: Map[Int, Seq[TaskLocation]] = try {
      stage match {
        case s: ShuffleMapStage =>
          partitionsToCompute.map { id => (id, getPreferredLocs(stage.rdd, id))}.toMap
        case s: ResultStage =>
          partitionsToCompute.map { id =>
            // For result stages, the task index must be mapped to the actual partition id.
            val p = s.partitions(id)
            (id, getPreferredLocs(stage.rdd, p))
          }.toMap
      }
    } catch {
      case NonFatal(e) =>
        stage.makeNewStageAttempt(partitionsToCompute.size)
        listenerBus.post(SparkListenerStageSubmitted(stage.latestInfo, properties))
        abortStage(stage, s"Task creation failed: $e\\n${Utils.exceptionString(e)}", Some(e))
        runningStages -= stage
        return
    }
    stage.makeNewStageAttempt(partitionsToCompute.size, taskIdToLocations.values.toSeq)
    // If there are tasks to execute, record the submission time of the stage. Otherwise,
    // post the event without the submission time, which indicates that this stage was
    // skipped.
    if (partitionsToCompute.nonEmpty) {
      stage.latestInfo.submissionTime = Some(clock.getTimeMillis())
    }
    listenerBus.post(SparkListenerStageSubmitted(stage.latestInfo, properties))
    // TODO: Maybe we can keep the taskBinary in Stage to avoid serializing it multiple times.
    // Broadcasted binary for the task, used to dispatch tasks to executors. Note that we broadcast
    // the serialized copy of the RDD and for each task we will deserialize it, which means each
    // task gets a different copy of the RDD. This provides stronger isolation between tasks that
    // might modify state of objects referenced in their closures. This is necessary in Hadoop
    // where the JobConf/Configuration object is not thread-safe.
    var taskBinary: Broadcast[Array[Byte]] = null
    try {
      // For ShuffleMapTask, serialize and broadcast (rdd, shuffleDep).
      // For ResultTask, serialize and broadcast (rdd, func).
      val taskBinaryBytes: Array[Byte] = stage match {
        case stage: ShuffleMapStage =>
          JavaUtils.bufferToArray(
            closureSerializer.serialize((stage.rdd, stage.shuffleDep): AnyRef))
        case stage: ResultStage =>
          JavaUtils.bufferToArray(closureSerializer.serialize((stage.rdd, stage.func): AnyRef))
      }
      taskBinary = sc.broadcast(taskBinaryBytes)
    } catch {
      // In the case of a failure during serialization, abort the stage.
      case e: NotSerializableException =>
        abortStage(stage, "Task not serializable: " + e.toString, Some(e))
        runningStages -= stage
        // Abort execution
        return
      case NonFatal(e) =>
        abortStage(stage, s"Task serialization failed: $e\\n${Utils.exceptionString(e)}", Some(e))
        runningStages -= stage
        return
    }
    val tasks: Seq[Task[_]] = try {
      val serializedTaskMetrics = closureSerializer.serialize(stage.latestInfo.taskMetrics).array()
      stage match {
        case stage: ShuffleMapStage =>
          // Reset pending partitions for the new attempt before enumerating tasks.
          stage.pendingPartitions.clear()
          partitionsToCompute.map { id =>
            val locs = taskIdToLocations(id)
            val part = stage.rdd.partitions(id)
            stage.pendingPartitions += id
            new ShuffleMapTask(stage.id, stage.latestInfo.attemptId,
              taskBinary, part, locs, properties, serializedTaskMetrics, Option(jobId),
              Option(sc.applicationId), sc.applicationAttemptId)
          }
        case stage: ResultStage =>
          partitionsToCompute.map { id =>
            val p: Int = stage.partitions(id)
            val part = stage.rdd.partitions(p)
            val locs = taskIdToLocations(id)
            new ResultTask(stage.id, stage.latestInfo.attemptId,
              taskBinary, part, locs, id, properties, serializedTaskMetrics,
              Option(jobId), Option(sc.applicationId), sc.applicationAttemptId)
          }
      }
    } catch {
      case NonFatal(e) =>
        abortStage(stage, s"Task creation failed: $e\\n${Utils.exceptionString(e)}", Some(e))
        runningStages -= stage
        return
    }
    if (tasks.size > 0) {
      logInfo(s"Submitting ${tasks.size} missing tasks from $stage (${stage.rdd}) (first 15 " +
        s"tasks are for partitions ${tasks.take(15).map(_.partitionId)})")
      taskScheduler.submitTasks(new TaskSet(
        tasks.toArray, stage.id, stage.latestInfo.attemptId, jobId, properties))
    } else {
      // Because we posted SparkListenerStageSubmitted earlier, we should mark
      // the stage as completed here in case there are no tasks to run
      markStageAsFinished(stage, None)
      val debugString = stage match {
        case stage: ShuffleMapStage =>
          s"Stage ${stage} is actually done; " +
            s"(available: ${stage.isAvailable}," +
            s"available outputs: ${stage.numAvailableOutputs}," +
            s"partitions: ${stage.numPartitions})"
        case stage : ResultStage =>
          s"Stage ${stage} is actually done; (partitions: ${stage.numPartitions})"
      }
      logDebug(debugString)
      submitWaitingChildStages(stage)
    }
  }
  /**
   * Merge local values from a task into the corresponding accumulators previously registered
   * here on the driver.
   *
   * Although accumulators themselves are not thread-safe, this method is called only from one
   * thread, the one that runs the scheduling loop. This means we only handle one task
   * completion event at a time so we don't need to worry about locking the accumulators.
   * This still doesn't stop the caller from updating the accumulator outside the scheduler,
   * but that's not our problem since there's nothing we can do about that.
   */
  private def updateAccumulators(event: CompletionEvent): Unit = {
    val task = event.task
    val stage = stageIdToStage(task.stageId)
    try {
      event.accumUpdates.foreach { updates =>
        val id = updates.id
        // Find the corresponding accumulator on the driver and update it
        val acc: AccumulatorV2[Any, Any] = AccumulatorContext.get(id) match {
          case Some(accum) => accum.asInstanceOf[AccumulatorV2[Any, Any]]
          case None =>
            throw new SparkException(s"attempted to access non-existent accumulator $id")
        }
        acc.merge(updates.asInstanceOf[AccumulatorV2[Any, Any]])
        // To avoid UI cruft, ignore cases where value wasn't updated
        if (acc.name.isDefined && !updates.isZero) {
          stage.latestInfo.accumulables(id) = acc.toInfo(None, Some(acc.value))
          event.taskInfo.setAccumulables(
            acc.toInfo(Some(updates.value), Some(acc.value)) +: event.taskInfo.accumulables)
        }
      }
    } catch {
      // Accumulator updates are best-effort: a failure to merge must not fail the task
      // completion handling, so log and continue.
      case NonFatal(e) =>
        logError(s"Failed to update accumulators for task ${task.partitionId}", e)
    }
  }
/**
* Responds to a task finishing. This is called inside the event loop so it assumes that it can
* modify the scheduler's internal state. Use taskEnded() to post a task end event from outside.
*/
private[scheduler] def handleTaskCompletion(event: CompletionEvent) {
val task = event.task
val taskId = event.taskInfo.id
val stageId = task.stageId
val taskType = Utils.getFormattedClassName(task)
outputCommitCoordinator.taskCompleted(
stageId,
task.partitionId,
event.taskInfo.attemptNumber, // this is a task attempt number
event.reason)
// Reconstruct task metrics. Note: this may be null if the task has failed.
val taskMetrics: TaskMetrics =
if (event.accumUpdates.nonEmpty) {
try {
TaskMetrics.fromAccumulators(event.accumUpdates)
} catch {
case NonFatal(e) =>
logError(s"Error when attempting to reconstruct metrics for task $taskId", e)
null
}
} else {
null
}
// The stage may have already finished when we get this event -- eg. maybe it was a
// speculative task. It is important that we send the TaskEnd event in any case, so listeners
// are properly notified and can chose to handle it. For instance, some listeners are
// doing their own accounting and if they don't get the task end event they think
// tasks are still running when they really aren't.
listenerBus.post(SparkListenerTaskEnd(
stageId, task.stageAttemptId, taskType, event.reason, event.taskInfo, taskMetrics))
if (!stageIdToStage.contains(task.stageId)) {
// Skip all the actions if the stage has been cancelled.
return
}
val stage = stageIdToStage(task.stageId)
event.reason match {
case Success =>
task match {
case rt: ResultTask[_, _] =>
// Cast to ResultStage here because it's part of the ResultTask
// TODO Refactor this out to a function that accepts a ResultStage
val resultStage = stage.asInstanceOf[ResultStage]
resultStage.activeJob match {
case Some(job) =>
if (!job.finished(rt.outputId)) {
updateAccumulators(event)
job.finished(rt.outputId) = true
job.numFinished += 1
// If the whole job has finished, remove it
if (job.numFinished == job.numPartitions) {
markStageAsFinished(resultStage)
cleanupStateForJobAndIndependentStages(job)
listenerBus.post(
SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobSucceeded))
}
// taskSucceeded runs some user code that might throw an exception. Make sure
// we are resilient against that.
try {
job.listener.taskSucceeded(rt.outputId, event.result)
} catch {
case e: Exception =>
// TODO: Perhaps we want to mark the resultStage as failed?
job.listener.jobFailed(new SparkDriverExecutionException(e))
}
}
case None =>
logInfo("Ignoring result from " + rt + " because its job has finished")
}
case smt: ShuffleMapTask =>
val shuffleStage = stage.asInstanceOf[ShuffleMapStage]
updateAccumulators(event)
val status = event.result.asInstanceOf[MapStatus]
val execId = status.location.executorId
logDebug("ShuffleMapTask finished on " + execId)
if (stageIdToStage(task.stageId).latestInfo.attemptId == task.stageAttemptId) {
// This task was for the currently running attempt of the stage. Since the task
// completed successfully from the perspective of the TaskSetManager, mark it as
// no longer pending (the TaskSetManager may consider the task complete even
// when the output needs to be ignored because the task's epoch is too small below.
// In this case, when pending partitions is empty, there will still be missing
// output locations, which will cause the DAGScheduler to resubmit the stage below.)
shuffleStage.pendingPartitions -= task.partitionId
}
if (failedEpoch.contains(execId) && smt.epoch <= failedEpoch(execId)) {
logInfo(s"Ignoring possibly bogus $smt completion from executor $execId")
} else {
// The epoch of the task is acceptable (i.e., the task was launched after the most
// recent failure we're aware of for the executor), so mark the task's output as
// available.
mapOutputTracker.registerMapOutput(
shuffleStage.shuffleDep.shuffleId, smt.partitionId, status)
// Remove the task's partition from pending partitions. This may have already been
// done above, but will not have been done yet in cases where the task attempt was
// from an earlier attempt of the stage (i.e., not the attempt that's currently
// running). This allows the DAGScheduler to mark the stage as complete when one
// copy of each task has finished successfully, even if the currently active stage
// still has tasks running.
shuffleStage.pendingPartitions -= task.partitionId
}
if (runningStages.contains(shuffleStage) && shuffleStage.pendingPartitions.isEmpty) {
markStageAsFinished(shuffleStage)
logInfo("looking for newly runnable stages")
logInfo("running: " + runningStages)
logInfo("waiting: " + waitingStages)
logInfo("failed: " + failedStages)
// This call to increment the epoch may not be strictly necessary, but it is retained
// for now in order to minimize the changes in behavior from an earlier version of the
// code. This existing behavior of always incrementing the epoch following any
// successful shuffle map stage completion may have benefits by causing unneeded
// cached map outputs to be cleaned up earlier on executors. In the future we can
// consider removing this call, but this will require some extra investigation.
// See https://github.com/apache/spark/pull/17955/files#r117385673 for more details.
mapOutputTracker.incrementEpoch()
clearCacheLocs()
if (!shuffleStage.isAvailable) {
// Some tasks had failed; let's resubmit this shuffleStage.
// TODO: Lower-level scheduler should also deal with this
logInfo("Resubmitting " + shuffleStage + " (" + shuffleStage.name +
") because some of its tasks had failed: " +
shuffleStage.findMissingPartitions().mkString(", "))
submitStage(shuffleStage)
} else {
// Mark any map-stage jobs waiting on this stage as finished
if (shuffleStage.mapStageJobs.nonEmpty) {
val stats = mapOutputTracker.getStatistics(shuffleStage.shuffleDep)
for (job <- shuffleStage.mapStageJobs) {
markMapStageJobAsFinished(job, stats)
}
}
submitWaitingChildStages(shuffleStage)
}
}
}
case Resubmitted =>
logInfo("Resubmitted " + task + ", so marking it as still running")
stage match {
case sms: ShuffleMapStage =>
sms.pendingPartitions += task.partitionId
case _ =>
assert(false, "TaskSetManagers should only send Resubmitted task statuses for " +
"tasks in ShuffleMapStages.")
}
case FetchFailed(bmAddress, shuffleId, mapId, reduceId, failureMessage) =>
val failedStage = stageIdToStage(task.stageId)
val mapStage = shuffleIdToMapStage(shuffleId)
if (failedStage.latestInfo.attemptId != task.stageAttemptId) {
logInfo(s"Ignoring fetch failure from $task as it's from $failedStage attempt" +
s" ${task.stageAttemptId} and there is a more recent attempt for that stage " +
s"(attempt ID ${failedStage.latestInfo.attemptId}) running")
} else {
// It is likely that we receive multiple FetchFailed for a single stage (because we have
// multiple tasks running concurrently on different executors). In that case, it is
// possible the fetch failure has already been handled by the scheduler.
if (runningStages.contains(failedStage)) {
logInfo(s"Marking $failedStage (${failedStage.name}) as failed " +
s"due to a fetch failure from $mapStage (${mapStage.name})")
markStageAsFinished(failedStage, Some(failureMessage))
} else {
logDebug(s"Received fetch failure from $task, but its from $failedStage which is no " +
s"longer running")
}
failedStage.fetchFailedAttemptIds.add(task.stageAttemptId)
val shouldAbortStage =
failedStage.fetchFailedAttemptIds.size >= maxConsecutiveStageAttempts ||
disallowStageRetryForTest
if (shouldAbortStage) {
val abortMessage = if (disallowStageRetryForTest) {
"Fetch failure will not retry stage due to testing config"
} else {
s"""$failedStage (${failedStage.name})
|has failed the maximum allowable number of
|times: $maxConsecutiveStageAttempts.
|Most recent failure reason: $failureMessage""".stripMargin.replaceAll("\\n", " ")
}
abortStage(failedStage, abortMessage, None)
} else { // update failedStages and make sure a ResubmitFailedStages event is enqueued
// TODO: Cancel running tasks in the failed stage -- cf. SPARK-17064
val noResubmitEnqueued = !failedStages.contains(failedStage)
failedStages += failedStage
failedStages += mapStage
if (noResubmitEnqueued) {
// We expect one executor failure to trigger many FetchFailures in rapid succession,
// but all of those task failures can typically be handled by a single resubmission of
// the failed stage. We avoid flooding the scheduler's event queue with resubmit
// messages by checking whether a resubmit is already in the event queue for the
// failed stage. If there is already a resubmit enqueued for a different failed
// stage, that event would also be sufficient to handle the current failed stage, but
// producing a resubmit for each failed stage makes debugging and logging a little
// simpler while not producing an overwhelming number of scheduler events.
logInfo(
s"Resubmitting $mapStage (${mapStage.name}) and " +
s"$failedStage (${failedStage.name}) due to fetch failure"
)
messageScheduler.schedule(
new Runnable {
override def run(): Unit = eventProcessLoop.post(ResubmitFailedStages)
},
DAGScheduler.RESUBMIT_TIMEOUT,
TimeUnit.MILLISECONDS
)
}
}
// Mark the map whose fetch failed as broken in the map stage
if (mapId != -1) {
mapOutputTracker.unregisterMapOutput(shuffleId, mapId, bmAddress)
}
// TODO: mark the executor as failed only if there were lots of fetch failures on it
if (bmAddress != null) {
val hostToUnregisterOutputs = if (env.blockManager.externalShuffleServiceEnabled &&
unRegisterOutputOnHostOnFetchFailure) {
// We had a fetch failure with the external shuffle service, so we
// assume all shuffle data on the node is bad.
Some(bmAddress.host)
} else {
// Unregister shuffle data just for one executor (we don't have any
// reason to believe shuffle data has been lost for the entire host).
None
}
removeExecutorAndUnregisterOutputs(
execId = bmAddress.executorId,
fileLost = true,
hostToUnregisterOutputs = hostToUnregisterOutputs,
maybeEpoch = Some(task.epoch))
}
}
case commitDenied: TaskCommitDenied =>
// Do nothing here, left up to the TaskScheduler to decide how to handle denied commits
case exceptionFailure: ExceptionFailure =>
// Tasks failed with exceptions might still have accumulator updates.
updateAccumulators(event)
case TaskResultLost =>
// Do nothing here; the TaskScheduler handles these failures and resubmits the task.
case _: ExecutorLostFailure | _: TaskKilled | UnknownReason =>
// Unrecognized failure - also do nothing. If the task fails repeatedly, the TaskScheduler
// will abort the job.
}
}
/**
 * Handles the loss of an executor. Runs inside the event loop, so it may freely mutate the
 * scheduler's internal state; use executorLost() to post a loss event from the outside.
 *
 * All shuffle blocks on the executor are presumed lost when the executor served its own
 * blocks (no external shuffle service) or when the entire worker process — which may host
 * the shuffle service — is gone.
 *
 * @param execId the executor that was lost
 * @param workerLost whether the cluster manager reported the whole worker process as lost
 */
private[scheduler] def handleExecutorLost(
    execId: String,
    workerLost: Boolean): Unit = {
  // Shuffle output must be unregistered when the cluster manager explicitly tells us the
  // entire worker was lost (in Standalone mode the shuffle service lives in the Worker),
  // or when there is no external shuffle service and the executor held its own files.
  val shuffleFilesLost = workerLost || !env.blockManager.externalShuffleServiceEnabled
  removeExecutorAndUnregisterOutputs(
    execId = execId,
    fileLost = shuffleFilesLost,
    hostToUnregisterOutputs = None,
    maybeEpoch = None)
}
/**
 * Removes a lost executor from the scheduler's failure bookkeeping and, when `fileLost` is
 * true, unregisters the map outputs presumed lost with it.
 *
 * @param execId executor that was lost
 * @param fileLost whether the executor's shuffle files should also be considered lost
 * @param hostToUnregisterOutputs if set, unregister outputs for the whole host rather than
 *                                just this executor (external shuffle service case)
 * @param maybeEpoch epoch at which the failure was observed; defaults to the map output
 *                   tracker's current epoch when absent
 */
private def removeExecutorAndUnregisterOutputs(
    execId: String,
    fileLost: Boolean,
    hostToUnregisterOutputs: Option[String],
    maybeEpoch: Option[Long] = None): Unit = {
  val currentEpoch = maybeEpoch.getOrElse(mapOutputTracker.getEpoch)
  // Only act the first time we see a failure for this executor at (or after) this epoch;
  // duplicate or stale notifications fall through without doing anything.
  if (!failedEpoch.contains(execId) || failedEpoch(execId) < currentEpoch) {
    failedEpoch(execId) = currentEpoch
    logInfo("Executor lost: %s (epoch %d)".format(execId, currentEpoch))
    blockManagerMaster.removeExecutor(execId)
    if (fileLost) {
      hostToUnregisterOutputs match {
        case Some(host) =>
          logInfo("Shuffle files lost for host: %s (epoch %d)".format(host, currentEpoch))
          mapOutputTracker.removeOutputsOnHost(host)
        case None =>
          logInfo("Shuffle files lost for executor: %s (epoch %d)".format(execId, currentEpoch))
          mapOutputTracker.removeOutputsOnExecutor(execId)
      }
      // Cached block locations may reference the removed outputs, so invalidate them.
      clearCacheLocs()
    } else {
      logDebug("Additional executor lost message for %s (epoch %d)".format(execId, currentEpoch))
    }
  }
}
/** Clears any recorded failure epoch for an executor that has (re)joined the cluster. */
private[scheduler] def handleExecutorAdded(execId: String, host: String) {
  // An executor coming (back) up means any earlier failure record for it is stale.
  failedEpoch.get(execId).foreach { _ =>
    logInfo("Host added was in lost list earlier: " + host)
    failedEpoch -= execId
  }
}
/** Cancels a stage by cancelling every job that uses it, with an explanatory reason. */
private[scheduler] def handleStageCancellation(stageId: Int, reason: Option[String]) {
  stageIdToStage.get(stageId) match {
    case Some(stage) =>
      // The reason string is the same for every dependent job, so build it once up front.
      val cancellationReason = reason
        .map(originalReason => s"because $originalReason")
        .getOrElse(s"because Stage $stageId was cancelled")
      // Snapshot the job ids first: cancelling a job mutates the stage/job bookkeeping.
      stage.jobIds.toArray.foreach { jobId =>
        handleJobCancellation(jobId, Option(cancellationReason))
      }
    case None =>
      logInfo("No active jobs to kill for Stage " + stageId)
  }
}
/** Cancels a single job, failing it and all stages used only by that job. */
private[scheduler] def handleJobCancellation(jobId: Int, reason: Option[String]) {
  if (jobIdToStageIds.contains(jobId)) {
    failJobAndIndependentStages(
      jobIdToActiveJob(jobId), "Job %d cancelled %s".format(jobId, reason.getOrElse("")))
  } else {
    // Nothing registered under this id — most likely the job already finished.
    logDebug("Trying to cancel unregistered job " + jobId)
  }
}
/**
 * Marks a stage as finished and removes it from the list of running stages.
 *
 * @param stage the stage that completed (successfully when `errorMessage` is empty)
 * @param errorMessage the failure reason, when the stage failed
 */
private def markStageAsFinished(stage: Stage, errorMessage: Option[String] = None): Unit = {
  // Wall-clock duration since submission, formatted for the log message only.
  val serviceTime = stage.latestInfo.submissionTime match {
    case Some(t) => "%.03f".format((clock.getTimeMillis() - t) / 1000.0)
    case _ => "Unknown"
  }
  if (errorMessage.isEmpty) {
    logInfo("%s (%s) finished in %s s".format(stage, stage.name, serviceTime))
    stage.latestInfo.completionTime = Some(clock.getTimeMillis())
    // Clear failure count for this stage, now that it's succeeded.
    // We only limit consecutive failures of stage attempts, so that if a stage is
    // re-used many times in a long-running job, unrelated failures don't eventually cause the
    // stage to be aborted.
    stage.clearFailures()
  } else {
    stage.latestInfo.stageFailed(errorMessage.get)
    logInfo(s"$stage (${stage.name}) failed in $serviceTime s due to ${errorMessage.get}")
  }
  // Notify the commit coordinator and listeners before dropping the stage from runningStages.
  outputCommitCoordinator.stageEnd(stage.id)
  listenerBus.post(SparkListenerStageCompleted(stage.latestInfo))
  runningStages -= stage
}
/**
 * Aborts all jobs depending on a particular Stage. This is called in response to a task set
 * being canceled by the TaskScheduler. Use taskSetFailed() to inject this event from outside.
 *
 * @param failedStage the stage whose failure triggers the abort
 * @param reason human-readable failure reason included in the job failure message
 * @param exception optional underlying cause propagated to the failed jobs
 */
private[scheduler] def abortStage(
    failedStage: Stage,
    reason: String,
    exception: Option[Throwable]): Unit = {
  // If the stage has already been removed from the registry there is nothing left to abort.
  if (stageIdToStage.contains(failedStage.id)) {
    val dependentJobs: Seq[ActiveJob] =
      activeJobs.filter(job => stageDependsOn(job.finalStage, failedStage)).toSeq
    failedStage.latestInfo.completionTime = Some(clock.getTimeMillis())
    if (dependentJobs.isEmpty) {
      logInfo("Ignoring failure of " + failedStage + " because all jobs depending on it are done")
    } else {
      dependentJobs.foreach { job =>
        failJobAndIndependentStages(job, s"Job aborted due to stage failure: $reason", exception)
      }
    }
  }
}
/** Fails a job and all stages that are only used by that job, and cleans up relevant state. */
private def failJobAndIndependentStages(
    job: ActiveJob,
    failureReason: String,
    exception: Option[Throwable] = None): Unit = {
  val error = new SparkException(failureReason, exception.getOrElse(null))
  // Tracks whether every running stage could actually be cancelled; cleanup and listener
  // notification below only happen when all cancellations succeeded.
  var ableToCancelStages = true
  // Whether task threads should be interrupted, taken from the job's submit-time properties.
  val shouldInterruptThread =
    if (job.properties == null) false
    else job.properties.getProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "false").toBoolean
  // Cancel all independent, running stages.
  val stages = jobIdToStageIds(job.jobId)
  if (stages.isEmpty) {
    logError("No stages registered for job " + job.jobId)
  }
  stages.foreach { stageId =>
    val jobsForStage: Option[HashSet[Int]] = stageIdToStage.get(stageId).map(_.jobIds)
    if (jobsForStage.isEmpty || !jobsForStage.get.contains(job.jobId)) {
      // Bookkeeping inconsistency: the job points at a stage that doesn't point back.
      logError(
        "Job %d not registered for stage %d even though that stage was registered for the job"
          .format(job.jobId, stageId))
    } else if (jobsForStage.get.size == 1) {
      if (!stageIdToStage.contains(stageId)) {
        logError(s"Missing Stage for stage with id $stageId")
      } else {
        // This is the only job that uses this stage, so fail the stage if it is running.
        val stage = stageIdToStage(stageId)
        if (runningStages.contains(stage)) {
          try { // cancelTasks will fail if a SchedulerBackend does not implement killTask
            taskScheduler.cancelTasks(stageId, shouldInterruptThread)
            markStageAsFinished(stage, Some(failureReason))
          } catch {
            case e: UnsupportedOperationException =>
              logInfo(s"Could not cancel tasks for stage $stageId", e)
              ableToCancelStages = false
          }
        }
      }
    }
  }
  if (ableToCancelStages) {
    // SPARK-15783 important to cleanup state first, just for tests where we have some asserts
    // against the state. Otherwise we have a *little* bit of flakiness in the tests.
    cleanupStateForJobAndIndependentStages(job)
    job.listener.jobFailed(error)
    listenerBus.post(SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobFailed(error)))
  }
}
/** Returns true if `target` is `stage` itself or one of its (unsatisfied) ancestors. */
private def stageDependsOn(stage: Stage, target: Stage): Boolean = {
  if (stage == target) {
    true
  } else {
    val seen = new HashSet[RDD[_]]
    // Explicit stack instead of recursion so that very deep RDD lineages cannot
    // overflow the JVM call stack.
    val pending = new Stack[RDD[_]]
    pending.push(stage.rdd)
    while (pending.nonEmpty) {
      val rdd = pending.pop()
      // HashSet.add returns false for already-visited RDDs, skipping re-expansion.
      if (seen.add(rdd)) {
        rdd.dependencies.foreach {
          case shufDep: ShuffleDependency[_, _, _] =>
            val mapStage = getOrCreateShuffleMapStage(shufDep, stage.firstJobId)
            // An available (fully computed) map stage cuts the dependency chain here;
            // otherwise keep walking through its RDD.
            if (!mapStage.isAvailable) {
              pending.push(mapStage.rdd)
            }
          case narrowDep: NarrowDependency[_] =>
            pending.push(narrowDep.rdd)
        }
      }
    }
    seen.contains(target.rdd)
  }
}
/**
 * Gets the locality information associated with a partition of a particular RDD.
 *
 * This method is thread-safe and is called from both DAGScheduler and SparkContext.
 *
 * @param rdd the RDD whose partitions are to be looked at
 * @param partition the partition to look up locality information for
 * @return list of machines that are preferred by the partition
 */
private[spark]
def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = {
  // Fresh visited set per top-level query; the recursion shares it to avoid
  // revisiting (rdd, partition) pairs.
  val visited = new HashSet[(RDD[_], Int)]
  getPreferredLocsInternal(rdd, partition, visited)
}
/**
 * Recursive implementation for getPreferredLocs.
 *
 * Preference order: (1) cached block locations, (2) the RDD's own placement preferences,
 * (3) the first narrow-dependency parent partition that has any preference.
 *
 * This method is thread-safe because it only accesses DAGScheduler state through thread-safe
 * methods (getCacheLocs()); please be careful when modifying this method, because any new
 * DAGScheduler state accessed by it may require additional synchronization.
 */
private def getPreferredLocsInternal(
    rdd: RDD[_],
    partition: Int,
    visited: HashSet[(RDD[_], Int)]): Seq[TaskLocation] = {
  // If the partition has already been visited, no need to re-visit.
  // This avoids exponential path exploration. SPARK-695
  if (!visited.add((rdd, partition))) {
    // Nil has already been returned for previously visited partitions.
    return Nil
  }
  // If the partition is cached, return the cache locations
  val cached = getCacheLocs(rdd)(partition)
  if (cached.nonEmpty) {
    return cached
  }
  // If the RDD has some placement preferences (as is the case for input RDDs), get those
  val rddPrefs = rdd.preferredLocations(rdd.partitions(partition)).toList
  if (rddPrefs.nonEmpty) {
    return rddPrefs.map(TaskLocation(_))
  }
  // If the RDD has narrow dependencies, pick the first partition of the first narrow dependency
  // that has any placement preferences. Ideally we would choose based on transfer sizes,
  // but this will do for now.
  rdd.dependencies.foreach {
    case n: NarrowDependency[_] =>
      for (inPart <- n.getParents(partition)) {
        val locs = getPreferredLocsInternal(n.rdd, inPart, visited)
        if (locs != Nil) {
          // NOTE: nonlocal return — this exits getPreferredLocsInternal from inside the lambda.
          return locs
        }
      }
    case _ =>
  }
  Nil
}
/** Mark a map stage job as finished with the given output stats, and report to its listener. */
def markMapStageJobAsFinished(job: ActiveJob, stats: MapOutputStatistics): Unit = {
  // In map stage jobs, we only create a single "task", which is to finish all of the stage
  // (including reusing any previous map outputs, etc); so we just mark task 0 as done
  job.finished(0) = true
  job.numFinished += 1
  // The map output statistics are delivered to the listener as the "result" of that one task.
  job.listener.taskSucceeded(0, stats)
  // Clean up scheduler state before announcing the job's end on the listener bus.
  cleanupStateForJobAndIndependentStages(job)
  listenerBus.post(SparkListenerJobEnd(job.jobId, clock.getTimeMillis(), JobSucceeded))
}
/**
 * Shuts down the scheduler: the resubmit-timer thread pool, the event processing loop,
 * and finally the underlying TaskScheduler.
 */
def stop() {
  messageScheduler.shutdownNow()
  eventProcessLoop.stop()
  taskScheduler.stop()
}
eventProcessLoop.start()
}
/**
 * Single-threaded event loop that serializes all DAGScheduler state changes: every event is
 * dispatched to the matching `dagScheduler.handle*` method on the loop's own thread.
 */
private[scheduler] class DAGSchedulerEventProcessLoop(dagScheduler: DAGScheduler)
  extends EventLoop[DAGSchedulerEvent]("dag-scheduler-event-loop") with Logging {

  // Timer metric measuring how long each event spends in doOnReceive.
  private[this] val timer = dagScheduler.metricsSource.messageProcessingTimer

  /**
   * The main event loop of the DAG scheduler.
   */
  override def onReceive(event: DAGSchedulerEvent): Unit = {
    val timerContext = timer.time()
    try {
      doOnReceive(event)
    } finally {
      // Record processing time even when the handler throws.
      timerContext.stop()
    }
  }

  // Dispatch table: routes each scheduler event type to its DAGScheduler handler.
  private def doOnReceive(event: DAGSchedulerEvent): Unit = event match {
    case JobSubmitted(jobId, rdd, func, partitions, callSite, listener, properties) =>
      dagScheduler.handleJobSubmitted(jobId, rdd, func, partitions, callSite, listener, properties)
    case MapStageSubmitted(jobId, dependency, callSite, listener, properties) =>
      dagScheduler.handleMapStageSubmitted(jobId, dependency, callSite, listener, properties)
    case StageCancelled(stageId, reason) =>
      dagScheduler.handleStageCancellation(stageId, reason)
    case JobCancelled(jobId, reason) =>
      dagScheduler.handleJobCancellation(jobId, reason)
    case JobGroupCancelled(groupId) =>
      dagScheduler.handleJobGroupCancelled(groupId)
    case AllJobsCancelled =>
      dagScheduler.doCancelAllJobs()
    case ExecutorAdded(execId, host) =>
      dagScheduler.handleExecutorAdded(execId, host)
    case ExecutorLost(execId, reason) =>
      // Only SlaveLost with workerLost = true indicates the entire worker process is gone.
      val workerLost = reason match {
        case SlaveLost(_, true) => true
        case _ => false
      }
      dagScheduler.handleExecutorLost(execId, workerLost)
    case BeginEvent(task, taskInfo) =>
      dagScheduler.handleBeginEvent(task, taskInfo)
    case GettingResultEvent(taskInfo) =>
      dagScheduler.handleGetTaskResult(taskInfo)
    case completion: CompletionEvent =>
      dagScheduler.handleTaskCompletion(completion)
    case TaskSetFailed(taskSet, reason, exception) =>
      dagScheduler.handleTaskSetFailed(taskSet, reason, exception)
    case ResubmitFailedStages =>
      dagScheduler.resubmitFailedStages()
  }

  override def onError(e: Throwable): Unit = {
    logError("DAGSchedulerEventProcessLoop failed; shutting down SparkContext", e)
    try {
      dagScheduler.doCancelAllJobs()
    } catch {
      case t: Throwable => logError("DAGScheduler failed to cancel all jobs.", t)
    }
    // The event loop cannot make progress anymore, so tear down the whole SparkContext.
    dagScheduler.sc.stopInNewThread()
  }

  override def onStop(): Unit = {
    // Cancel any active jobs in postStop hook
    dagScheduler.cleanUpAfterSchedulerStop()
  }
}
/** Companion object holding scheduling constants shared by the DAGScheduler implementation. */
private[spark] object DAGScheduler {
  // The time, in millis, to wait for fetch failure events to stop coming in after one is detected;
  // this is a simplistic way to avoid resubmitting tasks in the non-fetchable map stage one by one
  // as more failure events come in
  val RESUBMIT_TIMEOUT = 200

  // Number of consecutive stage attempts allowed before a stage is aborted
  val DEFAULT_MAX_CONSECUTIVE_STAGE_ATTEMPTS = 4
}
| liutang123/spark | core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala | Scala | apache-2.0 | 76,630 |
package circuitsimulator.core
import java.io.{File, FileNotFoundException}
import org.json4s.JsonAST.JValue
import org.json4s.native.JsonMethods
import scala.io.Source
/** Small I/O and JSON helpers for the circuit simulator. */
object util {
  /**
   * Parses `json` into a json4s AST.
   *
   * NOTE(review): `application` is only referenced by the commented-out error-handling
   * branch below; it is currently unused at runtime.
   */
  def parseJSON(json: String, application: String): JValue = JsonMethods.parse(json)/* match {
    //if(false) only so intellij does not complain about never used declarations
    case Some(m: Map[String, Any]) => m
    case _ => throw new Exception("Error parsing " + application + ", json: |" + json + "|")
  }*/

  /**
   * Loads a classpath resource as a single string.
   *
   * The source is now closed after reading (the previous version leaked the handle).
   * NOTE(review): lines are joined with the two-character sequence backslash + 'n'
   * ("\\n"), not a real newline — preserved as-is, but confirm this is intended.
   */
  def loadRes(name: String): String = {
    val source = Source.fromResource(name)
    try source.getLines().mkString("\\n") finally source.close()
  }

  /**
   * Loads a file from disk as a single string.
   *
   * @throws FileNotFoundException if the file does not exist
   */
  def loadFile(name: String): String = {
    if (!new File(name).exists()) throw new FileNotFoundException("Failed to load file: " + name)
    val source = Source.fromFile(name)
    // Close the file handle even if reading fails (previously leaked).
    try source.getLines().mkString("\\n") finally source.close()
  }

  /**
   * Base class for JSON-backed configuration enumerations.
   *
   * NOTE(review): `file` is never used, and `data` is assigned the `JValue` companion
   * object itself (not a parsed value) — both look like latent bugs; kept as-is to
   * preserve behavior, but worth confirming with the author.
   */
  abstract class jsonConfig(file: String) extends Enumeration {
    var data = JValue
    protected class myVal extends Val {
      override def toString: String = super.toString()
    }
    def myVal = new myVal
  }
}
| colonlc/CircuitSimulator | core/src/main/scala/circuitsimulator/core/util.scala | Scala | mit | 980 |
package spatial.lang
import argon.core._
import argon.nodes._
import spatial.nodes._
import forge._
/**
 * Staged conversions between arbitrary `Bits`-capable values and flat bit vectors
 * (`VectorN[Bit]`). Structs and vectors are flattened field-by-field / element-by-element;
 * everything is treated as big-endian internally.
 */
object BitOps {
  /** Constructors **/
  // Stages a raw reinterpret of `a`'s bits as a BitVector node in the IR.
  @internal def data_as_bits[A:Type:Bits](a: Exp[A])(implicit mV: Type[BitVector]): Exp[BitVector] = stage(DataAsBits[A](a))(ctx)
  // Stages the inverse reinterpret: a BitVector viewed as a value of type A.
  @internal def bits_as_data[A:Type:Bits](a: Exp[BitVector]): Exp[A] = stage(BitsAsData[A](a,typ[A]))(ctx)

  /**
   * Recursively flattens `x` into a single BitVector. Structs concatenate their fields'
   * bits; vectors concatenate their elements' bits; anything else is reinterpreted directly.
   */
  @internal def dataAsBitVector[A:Type:Bits](x: A)(implicit ctx: SrcCtx): BitVector = typ[A] match {
    case tp: StructType[_] =>
      val fieldTypes = tp.fields.map{case (name,mT) => mT }
      val fields = tp.ev(x).fields.map(_._2)
      // Recursively convert each field, collecting the underlying IR symbols.
      val fieldBits = fields.zip(fieldTypes).map{case (field,mT@Bits(bT)) =>
        dataAsBitVector(field)(mtyp(mT),mbits(bT),ctx,state).s
      }
      // Total width is the sum of each field's bit length.
      val width = fieldTypes.map{case Bits(bT) => bT.length }.sum
      implicit val vT = VectorN.typeFromLen[Bit](width)
      wrap(Vector.concat[Bit,VectorN](fieldBits)) // big endian (List)
    case tp: VectorType[_] =>
      val width = bits[A].length
      implicit val vT = VectorN.typeFromLen[Bit](width)
      // HACK: Specialize for converting from other types of vectors of bits
      if (tp.child == BooleanType) {
        // A vector of Bit already has the target representation; just re-wrap the symbol.
        val sym = x.s.asInstanceOf[Exp[VectorN[Bit]]]
        vT.wrapped(sym)
      }
      else {
        // Convert each element to bits, then concatenate.
        val vector = x.asInstanceOf[Vector[_]]
        val mT = tp.child
        val Bits(bT) = mT
        val elems = List.tabulate(tp.width) { i => dataAsBitVector(vector.apply(i))(mtyp(mT), mbits(bT), ctx, state).s }
        wrap(Vector.concat[Bit,VectorN](elems)) // big endian (List)
      }
    case _ =>
      // Scalar (or otherwise unstructured) value: reinterpret its bits directly.
      val len = bits[A].length
      implicit val vT = VectorN.typeFromLen[Bit](len)
      wrap(data_as_bits(x.s))
  }

  /**
   * Extracts the bits of an `A` starting at `offset` in `x`. Slices that extend past the
   * end of `x` are zero-padded; a slice entirely out of range yields `A`'s zero value.
   */
  @internal def bitVectorSliceOrElseZero[A:Type:Bits](x: BitVector, offset: Int)(implicit ctx: SrcCtx): Exp[A] = {
    val mT = typ[A]
    val bT = bits[A]
    val vecSize = x.width
    val length = bT.length
    implicit val fvT: Type[VectorN[Bit]] = VectorN.typeFromLen[Bit](length)
    if (offset < vecSize && length+offset-1 < vecSize) {
      // Fully in range: slice [offset+length-1 :: offset] and reinterpret as A.
      val fieldBits = fvT.wrapped(Vector.slice(x.s, length + offset - 1, offset))
      val field = bitVectorAsData(fieldBits, enWarn = false)(mT, bT, fvT, ctx, state)
      mT.unwrapped(field)
    }
    else if (offset >= vecSize) {
      // Entirely out of range: produce A's zero value.
      mT.unwrapped(bT.zero)
    }
    else {
      // Partially out of range: take the available bits and zero-pad the remainder.
      val max = vecSize - 1
      val remain = length + offset - vecSize // e.g. asked for 34::5 (30 bits) of 32b number, should have 3 bits left
      val fvT2 = VectorN.typeFromLen[Bit](remain)
      val fvB2 = VectorN.bitsFromLen[Bit](remain,fvT2)
      val trueBits = Vector.slice(x.s, max, offset)
      val zeroBits = fvB2.zero.s
      // Note that this is big-endian concatenation (everything internally is big-endian)
      val fieldBits = fvT.wrapped(Vector.concat[Bit,VectorN](Seq(trueBits,zeroBits))(typ[Bit],bits[Bit],fvT,ctx,state))
      val field = bitVectorAsData(fieldBits, enWarn = false)(mT, bT, fvT, ctx, state)
      mT.unwrapped(field)
    }
  }

  /**
   * Reassembles a value of type `B` from a flat BitVector — the inverse of
   * [[dataAsBitVector]]. When `enWarn` is set, warns if the bit lengths differ.
   */
  @internal def bitVectorAsData[B:Type:Bits](x: BitVector, enWarn: Boolean)(implicit vT: Type[BitVector]): B = {
    val Bits(bT) = vT
    if (enWarn) checkLengthMismatch()(vT,bT,typ[B],bits[B],ctx,state)
    val vecSize = x.width
    typ[B] match {
      case tp: StructType[_] =>
        val fieldNames = tp.fields.map{case (name,_) => name }
        val fieldTypes = tp.fields.map{case (_, mT) => mT }
        val sizes = tp.fields.map{case (_, mT@Bits(bT)) => bT.length }
        // Big-endian layout: each field's offset is the total width of the fields after it.
        val offsets = List.tabulate(sizes.length){i => sizes.drop(i+1).sum }
        val fields = (fieldTypes,offsets).zipped.map{case (mT@Bits(bT),offset) =>
          bitVectorSliceOrElseZero(x,offset)(mtyp(mT),mbits(bT),ctx,state)
        }
        val namedFields = fieldNames.zip(fields)
        implicit val sT: StructType[B] = tp.asInstanceOf[StructType[B]]
        Struct[B](namedFields:_*)
      case tp: VectorType[_] =>
        // HACK: Specialize for converting to other vectors of bits
        // Assumes that Vector* has the same underlying representation as VectorN
        if (tp.child == BooleanType) {
          val sym = x.s.asInstanceOf[Exp[B]]
          tp.wrapped(sym)
        }
        else {
          // Slice out each element's bits at stride bT.length and rebuild the vector.
          val width = tp.width
          val mT = tp.child
          val Bits(bT) = mT
          val elems = List.tabulate(width) { i =>
            val offset = i * bT.length
            val length = bT.length
            bitVectorSliceOrElseZero(x, offset)(mtyp(mT), mbits(bT), ctx, state)
          }
          tp.wrapped(Vector.fromseq(elems)(mtyp(mT), mbits(bT), mtyp(tp), ctx, state).asInstanceOf[Exp[B]])
        }
      case _ => wrap(bits_as_data[B](x.s))
    }
  }

  /**
   * Warns when converting between types of differing bit widths: extra destination bits
   * are zero-filled, extra source bits are dropped.
   */
  @internal def checkLengthMismatch[A:Type:Bits,B:Type:Bits]()(implicit ctx: SrcCtx): Unit = {
    val lenA = bits[A].length
    val lenB = bits[B].length
    if (lenA != lenB)
      warn(ctx, u"Bit length mismatch in conversion between ${typ[A]} and ${typ[B]}.")
    if (lenA < lenB) {
      warn(s"Bits ($lenB::$lenA) will be set to zero in result.")
      warn(ctx)
    }
    else if (lenA > lenB) {
      warn(s"Bits ($lenA::$lenB) will be dropped.")
      warn(ctx)
    }
  }
}
| stanford-ppl/spatial-lang | spatial/core/src/spatial/lang/BitOps.scala | Scala | mit | 5,179 |
package org.atnos.eff.syntax
import org.atnos.eff._
import org.atnos.eff.concurrent.Scheduler
import scala.concurrent.{ExecutionContext, Future}
/** Syntax mix-in enriching `Eff` values with `TimedFuture`-related operations. */
trait future {
  // Implicitly wraps any Eff[R, A] in FutureOps so its future methods are available.
  implicit final def toFutureOps[R, A](e: Eff[R, A]): FutureOps[R, A] = new FutureOps[R, A](e)
}
/** Importable object form of the [[future]] syntax trait (`import org.atnos.eff.syntax.future._`). */
object future extends future
/**
 * Operations for `Eff` computations whose effect stack contains `TimedFuture`.
 * A value class (extends AnyVal), so the enrichment adds no allocation; every method
 * delegates to the corresponding function in `FutureInterpretation`.
 */
final class FutureOps[R, A](val e: Eff[R, A]) extends AnyVal {

  // Delegates to FutureInterpretation.futureAttempt: surfaces failures as an Either value.
  def futureAttempt(implicit future: TimedFuture /= R): Eff[R, Throwable Either A] =
    FutureInterpretation.futureAttempt(e)

  // Delegates to FutureInterpretation.futureMemo: memoizes under `key` in `cache`.
  def futureMemo(key: AnyRef, cache: Cache)(implicit future: TimedFuture /= R): Eff[R, A] =
    FutureInterpretation.futureMemo(key, cache, e)

  // Runs the computation to a Future; requires TimedFuture to be the only remaining effect.
  def runAsync(implicit scheduler: Scheduler, exc: ExecutionContext, m: Member.Aux[TimedFuture, R, NoFx]): Future[A] =
    FutureInterpretation.runAsync(e)

  // Like runAsync, but with the Scheduler/ExecutionContext taken from `executorServices`.
  def runAsyncOn(executorServices: ExecutorServices)(implicit m: Member.Aux[TimedFuture, R, NoFx]): Future[A] =
    FutureInterpretation.runAsyncOn(executorServices)(e)

  // Sequential variant of runAsyncOn (delegates to FutureInterpretation.runSequentialOn).
  def runSequentialOn(executorServices: ExecutorServices)(implicit m: Member.Aux[TimedFuture, R, NoFx]): Future[A] =
    FutureInterpretation.runSequentialOn(executorServices)(e)

  // Sequential variant of runAsync (delegates to FutureInterpretation.runSequential).
  def runSequential(implicit scheduler: Scheduler, exc: ExecutionContext, m: Member.Aux[TimedFuture, R, NoFx]): Future[A] =
    FutureInterpretation.runSequential(e)
}
| etorreborre/eff | shared/src/main/scala/org/atnos/eff/syntax/future.scala | Scala | mit | 1,306 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import scala.collection.mutable
import scala.io.{Codec, Source}
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.json4s.jackson.JsonMethods.parse
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.deploy.history.EventLogTestHelper.writeEventsToRollingWriter
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.status.ListenerEventsTestHelper._
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.{JsonProtocol, Utils}
class EventLogFileCompactorSuite extends SparkFunSuite {
import EventLogFileCompactorSuite._
private val sparkConf = new SparkConf()
private val hadoopConf = SparkHadoopUtil.newConfiguration(sparkConf)
test("No event log files") {
withTempDir { dir =>
val fs = new Path(dir.getAbsolutePath).getFileSystem(hadoopConf)
val compactor = new EventLogFileCompactor(sparkConf, hadoopConf, fs,
TEST_ROLLING_MAX_FILES_TO_RETAIN, TEST_COMPACTION_SCORE_THRESHOLD)
assertNoCompaction(fs, Seq.empty, compactor.compact(Seq.empty),
CompactionResultCode.NOT_ENOUGH_FILES)
}
}
test("No compact file, less origin files available than max files to retain") {
withTempDir { dir =>
val fs = new Path(dir.getAbsolutePath).getFileSystem(hadoopConf)
val fileStatuses = writeEventsToRollingWriter(fs, "app", dir, sparkConf, hadoopConf,
(1 to 2).map(_ => testEvent): _*)
val compactor = new EventLogFileCompactor(sparkConf, hadoopConf, fs,
TEST_ROLLING_MAX_FILES_TO_RETAIN, TEST_COMPACTION_SCORE_THRESHOLD)
assertNoCompaction(fs, fileStatuses, compactor.compact(fileStatuses),
CompactionResultCode.NOT_ENOUGH_FILES)
}
}
test("No compact file, more origin files available than max files to retain") {
withTempDir { dir =>
val fs = new Path(dir.getAbsolutePath).getFileSystem(hadoopConf)
val fileStatuses = writeEventsToRollingWriter(fs, "app", dir, sparkConf, hadoopConf,
(1 to 5).map(_ => testEvent): _*)
val compactor = new EventLogFileCompactor(sparkConf, hadoopConf, fs,
TEST_ROLLING_MAX_FILES_TO_RETAIN, TEST_COMPACTION_SCORE_THRESHOLD)
assertCompaction(fs, fileStatuses, compactor.compact(fileStatuses),
expectedNumOfFilesCompacted = 2)
}
}
test("compact file exists, less origin files available than max files to retain") {
withTempDir { dir =>
val fs = new Path(dir.getAbsolutePath).getFileSystem(hadoopConf)
val fileStatuses = writeEventsToRollingWriter(fs, "app", dir, sparkConf, hadoopConf,
(1 to 2).map(_ => testEvent): _*)
val fileToCompact = fileStatuses.head.getPath
val compactedPath = new Path(fileToCompact.getParent,
fileToCompact.getName + EventLogFileWriter.COMPACTED)
assert(fs.rename(fileToCompact, compactedPath))
val newFileStatuses = Seq(fs.getFileStatus(compactedPath)) ++ fileStatuses.drop(1)
val compactor = new EventLogFileCompactor(sparkConf, hadoopConf, fs,
TEST_ROLLING_MAX_FILES_TO_RETAIN, TEST_COMPACTION_SCORE_THRESHOLD)
assertNoCompaction(fs, newFileStatuses, compactor.compact(newFileStatuses),
CompactionResultCode.NOT_ENOUGH_FILES)
}
}
test("compact file exists, number of origin files are same as max files to retain") {
withTempDir { dir =>
val fs = new Path(dir.getAbsolutePath).getFileSystem(hadoopConf)
val fileStatuses = writeEventsToRollingWriter(fs, "app", dir, sparkConf, hadoopConf,
(1 to 4).map(_ => testEvent): _*)
val fileToCompact = fileStatuses.head.getPath
val compactedPath = new Path(fileToCompact.getParent,
fileToCompact.getName + EventLogFileWriter.COMPACTED)
assert(fs.rename(fileToCompact, compactedPath))
val newFileStatuses = Seq(fs.getFileStatus(compactedPath)) ++ fileStatuses.drop(1)
val compactor = new EventLogFileCompactor(sparkConf, hadoopConf, fs,
TEST_ROLLING_MAX_FILES_TO_RETAIN, TEST_COMPACTION_SCORE_THRESHOLD)
assertNoCompaction(fs, newFileStatuses, compactor.compact(newFileStatuses),
CompactionResultCode.NOT_ENOUGH_FILES)
}
}
test("compact file exists, more origin files available than max files to retain") {
withTempDir { dir =>
val fs = new Path(dir.getAbsolutePath).getFileSystem(hadoopConf)
val fileStatuses = writeEventsToRollingWriter(fs, "app", dir, sparkConf, hadoopConf,
(1 to 10).map(_ => testEvent): _*)
val fileToCompact = fileStatuses.head.getPath
val compactedPath = new Path(fileToCompact.getParent,
fileToCompact.getName + EventLogFileWriter.COMPACTED)
assert(fs.rename(fileToCompact, compactedPath))
val newFileStatuses = Seq(fs.getFileStatus(compactedPath)) ++ fileStatuses.drop(1)
val compactor = new EventLogFileCompactor(sparkConf, hadoopConf, fs,
TEST_ROLLING_MAX_FILES_TO_RETAIN, TEST_COMPACTION_SCORE_THRESHOLD)
assertCompaction(fs, newFileStatuses, compactor.compact(newFileStatuses),
expectedNumOfFilesCompacted = 7)
}
}
test("events for finished job are dropped in new compact file") {
  withTempDir { dir =>
    val fs = new Path(dir.getAbsolutePath).getFileSystem(hadoopConf)
    // 1, 2 will be compacted into one file, 3~5 are dummies to ensure max files to retain.
    // File 1 carries an executor-added event plus a job start; file 2 carries the matching
    // job end plus another executor-added event, so the job is fully finished by file 2.
    val fileStatuses = writeEventsToRollingWriter(fs, "app", dir, sparkConf, hadoopConf,
      Seq(
        SparkListenerExecutorAdded(0, "exec1", new ExecutorInfo("host1", 1, Map.empty)),
        SparkListenerJobStart(1, 0, Seq.empty)),
      Seq(
        SparkListenerJobEnd(1, 1, JobSucceeded),
        SparkListenerExecutorAdded(2, "exec2", new ExecutorInfo("host2", 1, Map.empty))),
      testEvent,
      testEvent,
      testEvent)
    val compactor = new EventLogFileCompactor(sparkConf, hadoopConf, fs,
      TEST_ROLLING_MAX_FILES_TO_RETAIN, TEST_COMPACTION_SCORE_THRESHOLD)
    // 5 files minus 3 retained => the first 2 files get compacted.
    assertCompaction(fs, fileStatuses, compactor.compact(fileStatuses),
      expectedNumOfFilesCompacted = 2)
    // The compact file is named after the last compacted file, plus the compacted suffix.
    val expectCompactFileBasePath = fileStatuses.take(2).last.getPath
    val compactFilePath = getCompactFilePath(expectCompactFileBasePath)
    Utils.tryWithResource(EventLogFileReader.openEventLog(compactFilePath, fs)) { is =>
      val lines = Source.fromInputStream(is)(Codec.UTF8).getLines().toList
      // Only the two executor-added events should survive; start/end of the finished
      // job are expected to be filtered out by compaction.
      assert(lines.length === 2, "Compacted file should have only two events being accepted")
      lines.foreach { line =>
        val event = JsonProtocol.sparkEventFromJson(parse(line))
        assert(!event.isInstanceOf[SparkListenerJobStart] &&
          !event.isInstanceOf[SparkListenerJobEnd])
      }
    }
  }
}
test("Don't compact file if score is lower than threshold") {
  withTempDir { dir =>
    val fs = new Path(dir.getAbsolutePath).getFileSystem(hadoopConf)
    // job 1 having 4 tasks
    val rddsForStage1 = createRddsWithId(1 to 2)
    val stage1 = createStage(1, rddsForStage1, Nil)
    val tasks = createTasks(4, Array("exec1"), 0L).map(createTaskStartEvent(_, 1, 0))
    // job 2 having 4 tasks
    val rddsForStage2 = createRddsWithId(3 to 4)
    val stage2 = createStage(2, rddsForStage2, Nil)
    val tasks2 = createTasks(4, Array("exec1"), 0L).map(createTaskStartEvent(_, 2, 0))
    // here job 1 is finished and job 2 is still live, hence half of total tasks are considered
    // as live
    val fileStatuses = writeEventsToRollingWriter(fs, "app", dir, sparkConf, hadoopConf,
      Seq(SparkListenerJobStart(1, 0, Seq(stage1)), SparkListenerStageSubmitted(stage1)),
      tasks,
      Seq(SparkListenerJobStart(2, 0, Seq(stage2)), SparkListenerStageSubmitted(stage2)),
      tasks2,
      Seq(SparkListenerJobEnd(1, 0, JobSucceeded)),
      testEvent,
      testEvent,
      testEvent)
    // Threshold 0.7 is above the ~0.5 live ratio produced above, so the compactor
    // should refuse to compact and report LOW_SCORE_FOR_COMPACTION.
    val compactor = new EventLogFileCompactor(sparkConf, hadoopConf, fs,
      TEST_ROLLING_MAX_FILES_TO_RETAIN, 0.7d)
    assertNoCompaction(fs, fileStatuses, compactor.compact(fileStatuses),
      CompactionResultCode.LOW_SCORE_FOR_COMPACTION)
  }
}
test("rewrite files with test filters") {
  // Filter 1: accepts ApplicationEnd and BlockManagerAdded, rejects ApplicationStart,
  // abstains (partial function undefined) on everything else.
  class TestEventFilter1 extends EventFilter {
    override def acceptFn(): PartialFunction[SparkListenerEvent, Boolean] = {
      case _: SparkListenerApplicationEnd => true
      case _: SparkListenerBlockManagerAdded => true
      case _: SparkListenerApplicationStart => false
    }

    override def statistics(): Option[EventFilter.FilterStatistics] = None
  }

  // Filter 2: overlaps filter 1 on some event types with opposite votes, to exercise
  // how rewrite combines multiple filters' decisions.
  class TestEventFilter2 extends EventFilter {
    override def acceptFn(): PartialFunction[SparkListenerEvent, Boolean] = {
      case _: SparkListenerApplicationEnd => true
      case _: SparkListenerEnvironmentUpdate => true
      case _: SparkListenerNodeExcluded => true
      case _: SparkListenerBlockManagerAdded => false
      case _: SparkListenerApplicationStart => false
      case _: SparkListenerNodeUnexcluded => false
    }

    override def statistics(): Option[EventFilter.FilterStatistics] = None
  }

  // Serializes one event into the log and returns the written line so the test can
  // compare the rewritten file's contents against it.
  def writeEventToWriter(writer: EventLogFileWriter, event: SparkListenerEvent): String = {
    val line = EventLogTestHelper.convertEvent(event)
    writer.writeEvent(line, flushLogger = true)
    line
  }

  withTempDir { tempDir =>
    val sparkConf = new SparkConf
    val hadoopConf = SparkHadoopUtil.newConfiguration(sparkConf)
    val fs = new Path(tempDir.getAbsolutePath).getFileSystem(hadoopConf)

    val writer = new SingleEventLogFileWriter("app", None, tempDir.toURI, sparkConf, hadoopConf)
    writer.start()

    // Lines expected to survive the rewrite. Each comment below records the two
    // filters' votes for that event type.
    val expectedLines = new mutable.ArrayBuffer[String]

    // filterApplicationEnd: Some(true) & Some(true) => filter in
    expectedLines += writeEventToWriter(writer, SparkListenerApplicationEnd(0))

    // filterBlockManagerAdded: Some(true) & Some(false) => filter in
    expectedLines += writeEventToWriter(writer, SparkListenerBlockManagerAdded(
      0, BlockManagerId("1", "host1", 1), 10))

    // filterApplicationStart: Some(false) & Some(false) => filter out
    writeEventToWriter(writer, SparkListenerApplicationStart("app", None, 0, "user", None))

    // filterNodeExcluded: None & Some(true) => filter in
    expectedLines += writeEventToWriter(writer, SparkListenerNodeExcluded(0, "host1", 1))

    // filterNodeUnexcluded: None & Some(false) => filter out
    writeEventToWriter(writer, SparkListenerNodeUnexcluded(0, "host1"))

    // other events: None & None => filter in
    expectedLines += writeEventToWriter(writer, SparkListenerUnpersistRDD(0))

    writer.stop()

    val filters = Seq(new TestEventFilter1, new TestEventFilter2)
    val logPath = new Path(writer.logPath)
    val compactor = new EventLogFileCompactor(sparkConf, hadoopConf, fs,
      TEST_ROLLING_MAX_FILES_TO_RETAIN, TEST_COMPACTION_SCORE_THRESHOLD)
    val newPath = compactor.rewrite(filters, Seq(fs.getFileStatus(logPath)))
    // Rewritten file keeps the original name, with the compacted suffix appended.
    assert(new Path(newPath).getName === logPath.getName + EventLogFileWriter.COMPACTED)

    Utils.tryWithResource(EventLogFileReader.openEventLog(new Path(newPath), fs)) { is =>
      val lines = Source.fromInputStream(is)(Codec.UTF8).getLines()
      var linesLength = 0
      lines.foreach { line =>
        linesLength += 1
        assert(expectedLines.contains(line))
      }
      // Every expected line appeared exactly as written, and nothing else did.
      assert(linesLength === expectedLines.length)
    }
  }
}
/**
 * Verifies a successful compaction: the first `expectedNumOfFilesCompacted` originals
 * are gone, the remaining originals still exist, and the compact file (named after the
 * last compacted original) exists with a matching index in the result.
 */
private def assertCompaction(
    fs: FileSystem,
    originalFiles: Seq[FileStatus],
    compactRet: CompactionResult,
    expectedNumOfFilesCompacted: Int): Unit = {
  assert(CompactionResultCode.SUCCESS === compactRet.code)
  // Partition the originals into the compacted prefix and the retained suffix.
  val (removedFiles, retainedFiles) = originalFiles.splitAt(expectedNumOfFilesCompacted)
  retainedFiles.foreach(status => assert(fs.exists(status.getPath)))
  removedFiles.foreach(status => assert(!fs.exists(status.getPath)))
  // The compact file takes its base name (and index) from the last compacted file.
  val compactBasePath = removedFiles.last.getPath
  val compactIndex = RollingEventLogFilesWriter.getEventLogFileIndex(compactBasePath.getName)
  assert(Some(compactIndex) === compactRet.compactIndex)
  assert(fs.exists(getCompactFilePath(compactBasePath)))
}
/** Returns the expected path of the compact file derived from the given base file. */
private def getCompactFilePath(expectCompactFileBasePath: Path): Path = {
  val parentDir = expectCompactFileBasePath.getParent
  new Path(parentDir, s"${expectCompactFileBasePath.getName}${EventLogFileWriter.COMPACTED}")
}
/**
 * Verifies that no compaction took place: the result carries the expected reason code,
 * no compact index is reported, and every original file is untouched.
 */
private def assertNoCompaction(
    fs: FileSystem,
    originalFiles: Seq[FileStatus],
    compactRet: CompactionResult,
    expectedCompactRet: CompactionResultCode.Value): Unit = {
  assert(compactRet.code === expectedCompactRet)
  assert(compactRet.compactIndex === None)
  originalFiles.foreach(status => assert(fs.exists(status.getPath)))
}
/** A single minimal event sequence used as filler when writing rolled event log files. */
private def testEvent: Seq[SparkListenerEvent] =
  SparkListenerApplicationStart("app", Some("app"), 0, "user", None) :: Nil
}
/** Shared constants for the compactor tests. */
object EventLogFileCompactorSuite {
  // Number of rolled files the compactor keeps uncompacted in these tests.
  val TEST_ROLLING_MAX_FILES_TO_RETAIN = 3

  // To simplify the tests, we set the score threshold as 0.0d.
  // Individual test can use the other value to verify the functionality.
  val TEST_COMPACTION_SCORE_THRESHOLD = 0.0d
}
| shaneknapp/spark | core/src/test/scala/org/apache/spark/deploy/history/EventLogFileCompactorSuite.scala | Scala | apache-2.0 | 14,356 |
/*
* Stratio Meta
*
* Copyright (c) 2014, Stratio, All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library.
*/
package com.stratio.meta.server.actors
import akka.actor.{Props, ActorRef, ActorLogging, Actor}
import com.stratio.meta.core.utils.MetaQuery
import com.stratio.meta.core.validator.Validator
import org.apache.log4j.Logger
import com.stratio.meta.common.result.{Result, CommandResult}
object ValidatorActor {
  /** Builds the Props used to create a [[ValidatorActor]] wired to the given planner. */
  def props(planner: ActorRef, validator: Validator): Props =
    Props(new ValidatorActor(planner, validator))
}
/**
 * Actor that validates incoming [[MetaQuery]] messages.
 *
 * Queries without errors are validated and forwarded to the planner actor; queries
 * that already carry an error are answered directly with their result. Any other
 * message yields an "unsupported operation" error result.
 *
 * @param planner   next actor in the pipeline, receiving the validated query.
 * @param validator performs the actual query validation.
 */
class ValidatorActor(planner: ActorRef, validator: Validator) extends Actor with TimeTracker {
  val log = Logger.getLogger(classOf[ValidatorActor])
  // Timer name published through the TimeTracker mixin.
  override val timerName = this.getClass.getName

  override def receive: Receive = {
    case query: MetaQuery if !query.hasError => {
      log.debug("Init Validator Task")
      val timer = initTimer()
      // validateQuery runs synchronously here; its result is forwarded so the planner
      // replies to the original sender, then the timer is stopped.
      planner forward validator.validateQuery(query)
      finishTimer(timer)
      log.debug("Finish Validator Task")
    }
    case query: MetaQuery if query.hasError => {
      // Query already failed earlier in the pipeline: short-circuit with its result.
      sender ! query.getResult
    }
    case _ => {
      sender ! Result.createUnsupportedOperationErrorResult("Message not recognized")
    }
  }
}
| dhiguero/stratio-meta | meta-server/src/main/scala/com/stratio/meta/server/actors/ValidatorActor.scala | Scala | gpl-3.0 | 1,790 |
package breeze.math
import breeze.generic.UFunc
import breeze.generic.UFunc.UImpl2
import breeze.linalg._
import breeze.linalg.operators._
import breeze.linalg.support.CanTraverseValues.ValuesVisitor
import breeze.linalg.support._
import breeze.util.Isomorphism
import breeze.compat._
import breeze.compat.Scala3Compat._
import scala.language.{higherKinds, implicitConversions}
/**
 * Adapts a (possibly immutable) space `VS[V, S]` to a mutable space `MVS[Wrapper, S]`
 * over a wrapper type, together with an isomorphism between `V` and `Wrapper` so
 * values can be moved across the boundary in both directions.
 *
 * @tparam VS  the underlying (source) space type constructor.
 * @tparam MVS the mutable (target) space type constructor.
 * @tparam V   the vector type of the underlying space.
 * @tparam S   the scalar type.
 *
 * @author dlwh
 */
trait MutablizingAdaptor[+VS[_, _], MVS[_, _], V, S] {
  // Concrete adaptors choose the mutable stand-in for V (e.g. a Ref cell, or V itself).
  type Wrapper
  val underlying: VS[V, S]
  implicit val mutaVspace: MVS[Wrapper, S]

  // Bidirectional conversion between the original and wrapped representations.
  implicit val isomorphism: Isomorphism[V, Wrapper] = new Isomorphism[V, Wrapper] {
    def forward(value: V): Wrapper = wrap(value)

    def backward(u: Wrapper): V = unwrap(u)
  }

  // Lets a Wrapper value summon the mutable space implicitly.
  implicit def mutaVSpaceIdent(wrapper: Wrapper): MVS[Wrapper, S] = mutaVspace

  def wrap(v: V): Wrapper
  def unwrap(w: Wrapper): V
}
object MutablizingAdaptor {
/**
 * Returns an adaptor exposing a mutable view of `vs`: already-mutable spaces are
 * wrapped as-is, immutable ones are boxed via [[VectorSpaceAdaptor]].
 */
def ensureMutable[V, S](vs: VectorSpace[V, S]): MutablizingAdaptor[VectorSpace, MutableVectorSpace, V, S] = {
  // Pattern match instead of isInstanceOf/asInstanceOf; the type arguments are
  // unchecked because of erasure, matching the cast the previous code performed.
  vs match {
    case mvs: MutableVectorSpace[V, S] @unchecked =>
      IdentityWrapper[MutableVectorSpace, V, S](mvs)
    case _ => VectorSpaceAdaptor(vs)
  }
}
/**
 * Returns an adaptor exposing a mutable view of `vs`: already-mutable spaces are
 * wrapped as-is, immutable ones are boxed via [[InnerProductSpaceAdaptor]].
 */
def ensureMutable[V, S](vs: InnerProductVectorSpace[V, S])
  : MutablizingAdaptor[InnerProductVectorSpace, MutableInnerProductVectorSpace, V, S] = {
  // Pattern match instead of isInstanceOf/asInstanceOf; type arguments are unchecked
  // because of erasure, matching the cast the previous code performed.
  vs match {
    case mvs: MutableInnerProductVectorSpace[V, S] @unchecked =>
      IdentityWrapper[MutableInnerProductVectorSpace, V, S](mvs)
    case _ => InnerProductSpaceAdaptor(vs)
  }
}
/**
 * Returns an adaptor exposing a mutable view of `vs`: already-mutable fields are
 * wrapped as-is, immutable ones are boxed via [[VectorFieldAdaptor]].
 */
def ensureMutable[V, S](vs: VectorField[V, S])(
    implicit canIterate: CanTraverseValues[V, S],
    canMap: CanMapValues[V, S, S, V],
    canZipMap: CanZipMapValues[V, S, S, V]): MutablizingAdaptor[VectorField, MutableVectorField, V, S] = {
  // Pattern match instead of isInstanceOf/asInstanceOf; type arguments are unchecked
  // because of erasure, matching the cast the previous code performed.
  vs match {
    case mvf: MutableVectorField[V, S] @unchecked =>
      IdentityWrapper[MutableVectorField, V, S](mvf)
    case _ => VectorFieldAdaptor(vs)
  }
}
/**
 * Returns an adaptor exposing a mutable view of `vs`: already-mutable rings are
 * wrapped as-is, immutable ones are boxed via [[VectorRingAdaptor]].
 */
def ensureMutable[V, S](vs: VectorRing[V, S])(
    implicit canIterate: CanTraverseValues[V, S],
    canMap: CanMapValues[V, S, S, V],
    canZipMap: CanZipMapValues[V, S, S, V]): MutablizingAdaptor[VectorRing, MutableVectorRing, V, S] = {
  // Pattern match instead of isInstanceOf/asInstanceOf; type arguments are unchecked
  // because of erasure, matching the cast the previous code performed.
  vs match {
    case mvr: MutableVectorRing[V, S] @unchecked =>
      IdentityWrapper[MutableVectorRing, V, S](mvr)
    case _ => VectorRingAdaptor(vs)
  }
}
/**
 * Returns an adaptor exposing a mutable view of `vs`: already-mutable coordinate
 * fields are wrapped as-is, immutable ones are boxed via [[CoordinateFieldAdaptor]].
 */
def ensureMutable[V, S](vs: CoordinateField[V, S])(
    implicit canIterate: CanTraverseValues[V, S],
    canMap: CanMapValues[V, S, S, V],
    canZipMap: CanZipMapValues[V, S, S, V]): MutablizingAdaptor[CoordinateField, MutableCoordinateField, V, S] = {
  // Pattern match instead of isInstanceOf/asInstanceOf; type arguments are unchecked
  // because of erasure, matching the cast the previous code performed.
  vs match {
    case mcf: MutableCoordinateField[V, S] @unchecked =>
      IdentityWrapper[MutableCoordinateField, V, S](mcf)
    case _ => CoordinateFieldAdaptor(vs)
  }
}
/**
 * Adaptor for spaces that are already mutable: the wrapper type is V itself and
 * wrap/unwrap are identities.
 */
case class IdentityWrapper[VS[_, _], V, S](underlying: VS[V, S]) extends MutablizingAdaptor[VS, VS, V, S] {
  type Wrapper = V
  implicit val mutaVspace: VS[Wrapper, S] = underlying

  def wrap(v: V): Wrapper = v
  def unwrap(v: Wrapper): V = v
}
/**
 * Mutablizes an immutable [[VectorSpace]] by boxing each vector in a mutable [[Ref]]
 * cell: "in-place" operations reassign the cell's `value` with the result of the
 * underlying immutable operation.
 */
case class VectorSpaceAdaptor[V, S](underlying: VectorSpace[V, S])
    extends MutablizingAdaptor[VectorSpace, MutableVectorSpace, V, S] {
  type Wrapper = Ref[V]

  def wrap(v: V): Wrapper = Ref(v)

  def unwrap(w: Wrapper): V = w.value

  implicit val mutaVspace: MutableVectorSpace[Wrapper, S] = new MutableVectorSpace[Wrapper, S] {
    val u = underlying
    def scalars: Field[S] = underlying.scalars
    override val hasOps: ConversionOrSubtype[Wrapper, NumericOps[Wrapper]] = identity

    implicit def zeroLike: CanCreateZerosLike[Wrapper, Wrapper] = new CanCreateZerosLike[Wrapper, Wrapper] {
      def apply(from: Wrapper): Wrapper = from.map(underlying.zeroLike.apply)
    }

    implicit def copy: CanCopy[Wrapper] = new CanCopy[Wrapper] {
      // Should not inherit from Form=>To because the compiler will try to use it to coerce types.
      def apply(from: Wrapper): Wrapper = from
    }

    // Lifts an immutable (V, S) => V op into an in-place op on the Ref cell.
    def liftUpdate[Op <: OpType](implicit op: UFunc.UImpl2[Op, V, S, V]): UFunc.InPlaceImpl2[Op, Wrapper, S] =
      new UFunc.InPlaceImpl2[Op, Wrapper, S] {
        def apply(a: Wrapper, b: S): Unit = {
          a.value = op(a.value, b)
        }
      }

    // Lifts an immutable (V, V) => V op into an in-place op on the Ref cell.
    def liftUpdateV[Op <: OpType](implicit op: UFunc.UImpl2[Op, V, V, V]): UFunc.InPlaceImpl2[Op, Wrapper, Wrapper] =
      new UFunc.InPlaceImpl2[Op, Wrapper, Wrapper] {
        def apply(a: Wrapper, b: Wrapper): Unit = {
          a.value = op(a.value, b.value)
        }
      }

    // Lifts an immutable (V, S) => V op into a pure op producing a fresh Ref.
    def liftOp[Op <: OpType](implicit op: UFunc.UImpl2[Op, V, S, V]): UFunc.UImpl2[Op, Wrapper, S, Wrapper] =
      new UImpl2[Op, Wrapper, S, Wrapper] {
        def apply(a: Wrapper, b: S) = {
          a.map(op(_, b))
        }
      }

    // Lifts an immutable (V, V) => V op into a pure op producing a fresh Ref.
    def liftOpV[Op <: OpType](implicit op: UFunc.UImpl2[Op, V, V, V]): UFunc.UImpl2[Op, Wrapper, Wrapper, Wrapper] =
      new UFunc.UImpl2[Op, Wrapper, Wrapper, Wrapper] {
        def apply(a: Wrapper, b: Wrapper) = {
          a.map(op(_, b.value))
        }
      }

    implicit def mulIntoVS: OpMulScalar.InPlaceImpl2[Wrapper, S] = liftUpdate(u.mulVS)
    implicit def divIntoVS: OpDiv.InPlaceImpl2[Wrapper, S] = liftUpdate(u.divVS)

    // TODO: we should be able to get rid of these...
    implicit def addIntoVV: OpAdd.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.addVV)
    implicit def subIntoVV: OpSub.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.subVV)

    implicit def setIntoVV: OpSet.InPlaceImpl2[Wrapper, Wrapper] = new OpSet.InPlaceImpl2[Wrapper, Wrapper] {
      def apply(a: Wrapper, b: Wrapper): Unit = {
        a.value = b.value
      }
    }

    // axpy: y += x * a, expressed via the lifted ops above.
    implicit def scaleAddVV: scaleAdd.InPlaceImpl3[Wrapper, S, Wrapper] = {
      new scaleAdd.InPlaceImpl3[Wrapper, S, Wrapper] {
        def apply(y: Wrapper, a: S, x: Wrapper): Unit = { y += x * a }
      }
    }

    implicit def mulVS: OpMulScalar.Impl2[Wrapper, S, Wrapper] = liftOp(u.mulVS)
    implicit def divVS: OpDiv.Impl2[Wrapper, S, Wrapper] = liftOp(u.divVS)
    implicit def addVV: OpAdd.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.addVV)
    implicit def subVV: OpSub.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.subVV)

    // Delegates approximate-equality to the underlying space.
    def close(a: Wrapper, b: Wrapper, tolerance: Double): Boolean = u.close(a.value, b.value, tolerance)
  }
}
/**
 * Mutablizes an immutable [[InnerProductVectorSpace]] via [[Ref]] cells; same scheme
 * as [[VectorSpaceAdaptor]], plus the inner product (`dotVV`) delegated to the
 * underlying space.
 */
case class InnerProductSpaceAdaptor[V, S](val underlying: InnerProductVectorSpace[V, S])
    extends MutablizingAdaptor[InnerProductVectorSpace, MutableInnerProductVectorSpace, V, S] {
  type Wrapper = Ref[V]

  def wrap(v: V): Wrapper = Ref(v)

  def unwrap(w: Wrapper): V = w.value

  implicit val mutaVspace: MutableInnerProductVectorSpace[Wrapper, S] =
    new MutableInnerProductVectorSpace[Wrapper, S] {
      val u = underlying
      def scalars: Field[S] = underlying.scalars
      val hasOps: ConversionOrSubtype[Wrapper, NumericOps[Wrapper]] = identity

      implicit def zeroLike: CanCreateZerosLike[Wrapper, Wrapper] = new CanCreateZerosLike[Wrapper, Wrapper] {
        // Should not inherit from Form=>To because the compiler will try to use it to coerce types.
        def apply(from: Wrapper): Wrapper = from.map(underlying.zeroLike.apply)
      }

      //    implicit def zero: CanCreateZeros[Wrapper,I] = new CanCreateZeros[Wrapper,I] {
      //      override def apply(d: I): Wrapper = wrap(u.zero(d))
      //    }

      implicit def copy: CanCopy[Wrapper] = new CanCopy[Wrapper] {
        // Should not inherit from Form=>To because the compiler will try to use it to coerce types.
        def apply(from: Wrapper): Wrapper = from
      }

      //    implicit def normImplDouble: norm.Impl2[Wrapper, Double, Double] = new norm.Impl2[Wrapper, Double, Double] {
      //      def apply(v1: Wrapper, v2: Double): Double = u.normImplDouble(v1.value, v2)
      //    }

      //    implicit def iterateValues: CanTraverseValues[Wrapper, S] = new CanTraverseValues[Wrapper,S] {
      //      /** Traverses all values from the given collection. */
      //      override def traverse(from: Wrapper, fn: ValuesVisitor[S]): Unit = {
      //        from.map(u.iterateValues.traverse(_,fn))
      //      }
      //
      //      override def isTraversableAgain(from: Wrapper): Boolean = u.iterateValues.isTraversableAgain(from.value)
      //    }
      //
      //    implicit def mapValues: CanMapValues[Wrapper, S, S, Wrapper] = new CanMapValues[Wrapper, S, S, Wrapper] {
      //      /** Maps all key-value pairs from the given collection. */
      //      def map(from: Wrapper, fn: (S) => S): Wrapper = {
      //        from.map(u.mapValues.map(_, fn))
      //      }
      //
      //      /** Maps all active key-value pairs from the given collection. */
      //      def mapActive(from: Wrapper, fn: (S) => S): Wrapper = {
      //        from.map(u.mapValues.mapActive(_, fn))
      //      }
      //    }
      //
      //    implicit def zipMapValues: CanZipMapValues[Wrapper, S, S, Wrapper] = new CanZipMapValues[Wrapper, S, S, Wrapper] {
      //      /** Maps all corresponding values from the two collections. */
      //      def map(from: Wrapper, from2: Wrapper, fn: (S, S) => S): Wrapper = {
      //        from.map(u.zipMapValues.map(_, from2.value, fn))
      //      }
      //    }

      // Lifts an immutable (V, S) => V op into an in-place op on the Ref cell.
      def liftUpdate[Op <: OpType](implicit op: UImpl2[Op, V, S, V]): UFunc.InPlaceImpl2[Op, Wrapper, S] =
        new UFunc.InPlaceImpl2[Op, Wrapper, S] {
          def apply(a: Wrapper, b: S): Unit = {
            a.value = op(a.value, b)
          }
        }

      // Lifts an immutable (V, V) => V op into an in-place op on the Ref cell.
      def liftUpdateV[Op <: OpType](implicit op: UImpl2[Op, V, V, V]): UFunc.InPlaceImpl2[Op, Wrapper, Wrapper] =
        new UFunc.InPlaceImpl2[Op, Wrapper, Wrapper] {
          def apply(a: Wrapper, b: Wrapper): Unit = {
            a.value = op(a.value, b.value)
          }
        }

      // Lifts an immutable (V, S) => V op into a pure op producing a fresh Ref.
      def liftOp[Op <: OpType](implicit op: UImpl2[Op, V, S, V]): UImpl2[Op, Wrapper, S, Wrapper] =
        new UImpl2[Op, Wrapper, S, Wrapper] {
          def apply(a: Wrapper, b: S) = {
            a.map(op(_, b))
          }
        }

      // Lifts an immutable (V, V) => V op into a pure op producing a fresh Ref.
      def liftOpV[Op <: OpType](implicit op: UImpl2[Op, V, V, V]): UImpl2[Op, Wrapper, Wrapper, Wrapper] =
        new UImpl2[Op, Wrapper, Wrapper, Wrapper] {
          def apply(a: Wrapper, b: Wrapper) = {
            a.map(op(_, b.value))
          }
        }

      implicit def mulIntoVS: OpMulScalar.InPlaceImpl2[Wrapper, S] = liftUpdate(u.mulVS)
      implicit def divIntoVS: OpDiv.InPlaceImpl2[Wrapper, S] = liftUpdate(u.divVS)
      implicit def addIntoVV: OpAdd.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.addVV)
      implicit def subIntoVV: OpSub.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.subVV)

      implicit def setIntoVV: OpSet.InPlaceImpl2[Wrapper, Wrapper] = new OpSet.InPlaceImpl2[Wrapper, Wrapper] {
        def apply(a: Wrapper, b: Wrapper): Unit = {
          a.value = b.value
        }
      }

      // axpy: y += x * a, expressed via the lifted ops above.
      implicit def scaleAddVV: scaleAdd.InPlaceImpl3[Wrapper, S, Wrapper] = {
        new scaleAdd.InPlaceImpl3[Wrapper, S, Wrapper] {
          def apply(y: Wrapper, a: S, x: Wrapper): Unit = { y += x * a }
        }
      }

      implicit def mulVS: OpMulScalar.Impl2[Wrapper, S, Wrapper] = liftOp(u.mulVS)
      implicit def divVS: OpDiv.Impl2[Wrapper, S, Wrapper] = liftOp(u.divVS)
      implicit def addVV: OpAdd.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.addVV)
      implicit def subVV: OpSub.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.subVV)

      //    override implicit def addVS: OpAdd.Impl2[Wrapper, S, Wrapper] = liftOp(u.addVS)
      //
      //    override implicit def addIntoVS: OpAdd.InPlaceImpl2[Wrapper, S] = liftUpdate(u.addVS)
      //
      //    override implicit def subIntoVS: OpSub.InPlaceImpl2[Wrapper, S] = liftUpdate(u.subVS)
      //
      //    override implicit def subVS: OpSub.Impl2[Wrapper, S, Wrapper] = liftOp(u.subVS)

      // Delegates approximate-equality to the underlying space.
      override def close(a: Wrapper, b: Wrapper, tolerance: Double): Boolean = u.close(a.value, b.value, tolerance)

      // Inner product unwraps both cells and delegates.
      implicit def dotVV: OpMulInner.Impl2[Wrapper, Wrapper, S] = new OpMulInner.Impl2[Wrapper, Wrapper, S] {
        def apply(a: Wrapper, b: Wrapper): S = {
          u.dotVV(a.value, b.value)
        }
      }
    }
}
/**
 * Mutablizes an immutable [[VectorField]] via [[Ref]] cells. In addition to the
 * vector-space ops it lifts elementwise multiply/divide, negation, dot product, and
 * the traversal/mapping capabilities supplied as implicit parameters.
 */
case class VectorFieldAdaptor[V, S](val underlying: VectorField[V, S])(
    implicit canIterate: CanTraverseValues[V, S],
    canMap: CanMapValues[V, S, S, V],
    canZipMap: CanZipMapValues[V, S, S, V])
    extends MutablizingAdaptor[VectorField, MutableVectorField, V, S] {
  type Wrapper = Ref[V]

  def wrap(v: V): Wrapper = Ref(v)

  def unwrap(w: Wrapper): V = w.value

  implicit val mutaVspace: MutableVectorField[Wrapper, S] = new MutableVectorField[Wrapper, S] {
    val u = underlying
    def scalars: Field[S] = underlying.scalars
    val hasOps: ConversionOrSubtype[Wrapper, NumericOps[Wrapper]] = identity

    implicit def zeroLike: CanCreateZerosLike[Wrapper, Wrapper] = new CanCreateZerosLike[Wrapper, Wrapper] {
      // Should not inherit from Form=>To because the compiler will try to use it to coerce types.
      def apply(from: Wrapper): Wrapper = from.map(underlying.zeroLike.apply)
    }

    implicit def copy: CanCopy[Wrapper] = new CanCopy[Wrapper] {
      // Should not inherit from Form=>To because the compiler will try to use it to coerce types.
      def apply(from: Wrapper): Wrapper = from
    }

    //    implicit def normImplDouble: norm.Impl2[Wrapper, Double, Double] = new norm.Impl2[Wrapper, Double, Double] {
    //      def apply(v1: Wrapper, v2: Double): Double = u.normImplDouble(v1.value, v2)
    //    }

    // Traversal, mapping and zip-mapping forward to the capabilities captured for V.
    implicit def iterateValues: CanTraverseValues[Wrapper, S] = new CanTraverseValues[Wrapper, S] {

      /** Traverses all values from the given collection. */
      override def traverse(from: Wrapper, fn: ValuesVisitor[S]): fn.type = {
        from.map(canIterate.traverse(_, fn))
        fn
      }

      override def isTraversableAgain(from: Wrapper): Boolean = canIterate.isTraversableAgain(from.value)
    }

    implicit def mapValues: CanMapValues[Wrapper, S, S, Wrapper] = new CanMapValues[Wrapper, S, S, Wrapper] {

      override def map(from: Wrapper, fn: (S) => S): Wrapper = {
        from.map(canMap.map(_, fn))
      }

      def mapActive(from: Wrapper, fn: (S) => S): Wrapper = {
        from.map(canMap.mapActive(_, fn))
      }
    }

    implicit def zipMapValues: CanZipMapValues[Wrapper, S, S, Wrapper] = new CanZipMapValues[Wrapper, S, S, Wrapper] {

      /** Maps all corresponding values from the two collections. */
      def map(from: Wrapper, from2: Wrapper, fn: (S, S) => S): Wrapper = {
        from.map(canZipMap.map(_, from2.value, fn))
      }
    }

    // Lifts an immutable (V, S) => V op into an in-place op on the Ref cell.
    def liftUpdate[Op <: OpType](implicit op: UImpl2[Op, V, S, V]): UFunc.InPlaceImpl2[Op, Wrapper, S] =
      new UFunc.InPlaceImpl2[Op, Wrapper, S] {
        def apply(a: Wrapper, b: S): Unit = {
          a.value = op(a.value, b)
        }
      }

    // Lifts an immutable (V, V) => V op into an in-place op on the Ref cell.
    def liftUpdateV[Op <: OpType](implicit op: UImpl2[Op, V, V, V]): UFunc.InPlaceImpl2[Op, Wrapper, Wrapper] =
      new UFunc.InPlaceImpl2[Op, Wrapper, Wrapper] {
        def apply(a: Wrapper, b: Wrapper): Unit = {
          a.value = op(a.value, b.value)
        }
      }

    // Lifts an immutable (V, S) => V op into a pure op producing a fresh Ref.
    def liftOp[Op <: OpType](implicit op: UImpl2[Op, V, S, V]): UImpl2[Op, Wrapper, S, Wrapper] =
      new UImpl2[Op, Wrapper, S, Wrapper] {
        def apply(a: Wrapper, b: S) = {
          a.map(op(_, b))
        }
      }

    // Lifts an immutable (V, V) => V op into a pure op producing a fresh Ref.
    def liftOpV[Op <: OpType](implicit op: UImpl2[Op, V, V, V]): UImpl2[Op, Wrapper, Wrapper, Wrapper] =
      new UImpl2[Op, Wrapper, Wrapper, Wrapper] {
        def apply(a: Wrapper, b: Wrapper) = {
          a.map(op(_, b.value))
        }
      }

    implicit def mulIntoVS: OpMulScalar.InPlaceImpl2[Wrapper, S] = liftUpdate(u.mulVS)
    implicit def divIntoVS: OpDiv.InPlaceImpl2[Wrapper, S] = liftUpdate(u.divVS)
    implicit def addIntoVV: OpAdd.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.addVV)
    implicit def subIntoVV: OpSub.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.subVV)

    implicit def setIntoVV: OpSet.InPlaceImpl2[Wrapper, Wrapper] = new OpSet.InPlaceImpl2[Wrapper, Wrapper] {
      def apply(a: Wrapper, b: Wrapper): Unit = {
        a.value = b.value
      }
    }

    //    implicit def addIntoVS: OpAdd.InPlaceImpl2[Wrapper, S] = liftUpdate(u.addVS)
    //
    //    implicit def subIntoVS: OpSub.InPlaceImpl2[Wrapper, S] = liftUpdate(u.subVS)
    //
    //
    //    implicit def setIntoVS: OpSet.InPlaceImpl2[Wrapper, S] = new OpSet.InPlaceImpl2[Wrapper,S] {
    //      override def apply(v: Wrapper, v2: S): Unit = ???
    //    }

    implicit def mulVV: OpMulScalar.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.mulVV)
    implicit def mulIntoVV: OpMulScalar.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.mulVV)

    //    implicit def subVS: OpSub.Impl2[Wrapper, S, Wrapper] = liftOp(u.subVS)
    //
    //    implicit def addVS: OpAdd.Impl2[Wrapper, S, Wrapper] = liftOp(u.addVS)

    implicit def divVV: OpDiv.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.divVV)
    implicit def divIntoVV: OpDiv.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.divVV)

    // axpy: y += x * a, expressed via the lifted ops above.
    implicit def scaleAddVV: scaleAdd.InPlaceImpl3[Wrapper, S, Wrapper] = {
      new scaleAdd.InPlaceImpl3[Wrapper, S, Wrapper] {
        def apply(y: Wrapper, a: S, x: Wrapper): Unit = { y += x * a }
      }
    }

    implicit def mulVS: OpMulScalar.Impl2[Wrapper, S, Wrapper] = liftOp(u.mulVS)
    implicit def divVS: OpDiv.Impl2[Wrapper, S, Wrapper] = liftOp(u.divVS)
    implicit def addVV: OpAdd.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.addVV)
    implicit def subVV: OpSub.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.subVV)

    // Delegates approximate-equality to the underlying space.
    override def close(a: Wrapper, b: Wrapper, tolerance: Double): Boolean = u.close(a.value, b.value, tolerance)

    // default implementations
    implicit def neg: OpNeg.Impl[Wrapper, Wrapper] = new OpNeg.Impl[Wrapper, Wrapper] {
      def apply(a: Wrapper): Wrapper = a.map(u.neg.apply)
    }

    implicit def dotVV: OpMulInner.Impl2[Wrapper, Wrapper, S] = new OpMulInner.Impl2[Wrapper, Wrapper, S] {
      def apply(a: Wrapper, b: Wrapper): S = {
        u.dotVV(a.value, b.value)
      }
    }
  }
}
/**
 * A mutable cell holding a value of type T; the Wrapper type used by the adaptors.
 * `map` produces a new cell with the transformed value, leaving this one untouched.
 */
case class Ref[T](var value: T) extends NumericOps[Ref[T]] {
  def repr: Ref[T] = this

  def map[U](f: T => U) = Ref(f(value))
}
/**
 * Mutablizes an immutable [[VectorRing]] via [[Ref]] cells. Same scheme as
 * [[VectorFieldAdaptor]], but over a [[Ring]] of scalars, so no division ops are lifted.
 */
case class VectorRingAdaptor[V, S](val underlying: VectorRing[V, S])(
    implicit canIterate: CanTraverseValues[V, S],
    canMap: CanMapValues[V, S, S, V],
    canZipMap: CanZipMapValues[V, S, S, V])
    extends MutablizingAdaptor[VectorRing, MutableVectorRing, V, S] {
  type Wrapper = Ref[V]

  def wrap(v: V): Wrapper = Ref(v)

  def unwrap(w: Wrapper): V = w.value

  implicit val mutaVspace: MutableVectorRing[Wrapper, S] = new MutableVectorRing[Wrapper, S] {
    val u = underlying
    def scalars: Ring[S] = underlying.scalars
    val hasOps: ConversionOrSubtype[Wrapper, NumericOps[Wrapper]] = identity

    implicit def zeroLike: CanCreateZerosLike[Wrapper, Wrapper] = new CanCreateZerosLike[Wrapper, Wrapper] {
      // Should not inherit from Form=>To because the compiler will try to use it to coerce types.
      def apply(from: Wrapper): Wrapper = from.map(underlying.zeroLike.apply)
    }

    implicit def copy: CanCopy[Wrapper] = new CanCopy[Wrapper] {
      // Should not inherit from Form=>To because the compiler will try to use it to coerce types.
      def apply(from: Wrapper): Wrapper = from
    }

    //    implicit def normImplDouble: norm.Impl2[Wrapper, Double, Double] = new norm.Impl2[Wrapper, Double, Double] {
    //      def apply(v1: Wrapper, v2: Double): Double = u.normImplDouble(v1.value, v2)
    //    }

    // Traversal, mapping and zip-mapping forward to the capabilities captured for V.
    implicit def iterateValues: CanTraverseValues[Wrapper, S] = new CanTraverseValues[Wrapper, S] {

      /** Traverses all values from the given collection. */
      override def traverse(from: Wrapper, fn: ValuesVisitor[S]): fn.type = {
        from.map(canIterate.traverse(_, fn))
        fn
      }

      override def isTraversableAgain(from: Wrapper): Boolean = canIterate.isTraversableAgain(from.value)
    }

    implicit def mapValues: CanMapValues[Wrapper, S, S, Wrapper] = new CanMapValues[Wrapper, S, S, Wrapper] {

      /** Maps all key-value pairs from the given collection. */
      override def map(from: Wrapper, fn: (S) => S): Wrapper = {
        from.map(canMap.map(_, fn))
      }

      override def mapActive(from: Wrapper, fn: (S) => S): Wrapper = {
        from.map(canMap.mapActive(_, fn))
      }
    }

    implicit def zipMapValues: CanZipMapValues[Wrapper, S, S, Wrapper] = new CanZipMapValues[Wrapper, S, S, Wrapper] {

      /** Maps all corresponding values from the two collections. */
      def map(from: Wrapper, from2: Wrapper, fn: (S, S) => S): Wrapper = {
        from.map(canZipMap.map(_, from2.value, fn))
      }
    }

    // Lifts an immutable (V, S) => V op into an in-place op on the Ref cell.
    def liftUpdate[Op <: OpType](implicit op: UImpl2[Op, V, S, V]): UFunc.InPlaceImpl2[Op, Wrapper, S] =
      new UFunc.InPlaceImpl2[Op, Wrapper, S] {
        def apply(a: Wrapper, b: S): Unit = {
          a.value = op(a.value, b)
        }
      }

    // Lifts an immutable (V, V) => V op into an in-place op on the Ref cell.
    def liftUpdateV[Op <: OpType](implicit op: UImpl2[Op, V, V, V]): UFunc.InPlaceImpl2[Op, Wrapper, Wrapper] =
      new UFunc.InPlaceImpl2[Op, Wrapper, Wrapper] {
        def apply(a: Wrapper, b: Wrapper): Unit = {
          a.value = op(a.value, b.value)
        }
      }

    // Lifts an immutable (V, S) => V op into a pure op producing a fresh Ref.
    def liftOp[Op <: OpType](implicit op: UImpl2[Op, V, S, V]): UImpl2[Op, Wrapper, S, Wrapper] =
      new UImpl2[Op, Wrapper, S, Wrapper] {
        def apply(a: Wrapper, b: S) = {
          a.map(op(_, b))
        }
      }

    // Lifts an immutable (V, V) => V op into a pure op producing a fresh Ref.
    def liftOpV[Op <: OpType](implicit op: UImpl2[Op, V, V, V]): UImpl2[Op, Wrapper, Wrapper, Wrapper] =
      new UImpl2[Op, Wrapper, Wrapper, Wrapper] {
        def apply(a: Wrapper, b: Wrapper) = {
          a.map(op(_, b.value))
        }
      }

    implicit def mulIntoVS: OpMulScalar.InPlaceImpl2[Wrapper, S] = liftUpdate(u.mulVS)
    implicit def addIntoVV: OpAdd.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.addVV)
    implicit def subIntoVV: OpSub.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.subVV)

    implicit def setIntoVV: OpSet.InPlaceImpl2[Wrapper, Wrapper] = new OpSet.InPlaceImpl2[Wrapper, Wrapper] {
      def apply(a: Wrapper, b: Wrapper): Unit = {
        a.value = b.value
      }
    }

    //    implicit def addIntoVS: OpAdd.InPlaceImpl2[Wrapper, S] = liftUpdate(u.addVS)
    //
    //    implicit def subIntoVS: OpSub.InPlaceImpl2[Wrapper, S] = liftUpdate(u.subVS)
    //
    //
    //    implicit def setIntoVS: OpSet.InPlaceImpl2[Wrapper, S] = new OpSet.InPlaceImpl2[Wrapper,S] {
    //      override def apply(v: Wrapper, v2: S): Unit = ???
    //    }

    implicit def mulVV: OpMulScalar.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.mulVV)
    implicit def mulIntoVV: OpMulScalar.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.mulVV)

    //    implicit def subVS: OpSub.Impl2[Wrapper, S, Wrapper] = liftOp(u.subVS)
    //
    //    implicit def addVS: OpAdd.Impl2[Wrapper, S, Wrapper] = liftOp(u.addVS)

    // axpy: y += x * a, expressed via the lifted ops above.
    implicit def scaleAddVV: scaleAdd.InPlaceImpl3[Wrapper, S, Wrapper] = {
      new scaleAdd.InPlaceImpl3[Wrapper, S, Wrapper] {
        def apply(y: Wrapper, a: S, x: Wrapper): Unit = { y += x * a }
      }
    }

    implicit def mulVS: OpMulScalar.Impl2[Wrapper, S, Wrapper] = liftOp(u.mulVS)
    implicit def addVV: OpAdd.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.addVV)
    implicit def subVV: OpSub.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.subVV)

    // Delegates approximate-equality to the underlying space.
    override def close(a: Wrapper, b: Wrapper, tolerance: Double): Boolean = u.close(a.value, b.value, tolerance)

    // default implementations
    implicit def neg: OpNeg.Impl[Wrapper, Wrapper] = new OpNeg.Impl[Wrapper, Wrapper] {
      def apply(a: Wrapper): Wrapper = a.map(u.neg.apply)
    }

    implicit def dotVV: OpMulInner.Impl2[Wrapper, Wrapper, S] = new OpMulInner.Impl2[Wrapper, Wrapper, S] {
      def apply(a: Wrapper, b: Wrapper): S = {
        u.dotVV(a.value, b.value)
      }
    }
  }
}
/**
 * Adapts an immutable [[CoordinateField]] into a [[MutableCoordinateField]].
 *
 * Values are boxed in a mutable [[Ref]] cell ("Wrapper"); every "in place" operation is
 * implemented by replacing the cell's contents with the result of the corresponding
 * immutable operation on the underlying field.
 *
 * @param underlying the immutable coordinate field being adapted
 * @param canIterate evidence for traversing the scalar values of a V
 * @param canMap     evidence for mapping the scalar values of a V
 * @param canZipMap  evidence for element-wise zipping of two V's
 */
case class CoordinateFieldAdaptor[V, S](underlying: CoordinateField[V, S])(
    implicit canIterate: CanTraverseValues[V, S],
    canMap: CanMapValues[V, S, S, V],
    canZipMap: CanZipMapValues[V, S, S, V])
  extends MutablizingAdaptor[CoordinateField, MutableCoordinateField, V, S] {

  // the mutable wrapper is just a cell holding the current immutable value
  type Wrapper = Ref[V]

  def wrap(v: V): Wrapper = Ref(v)
  def unwrap(w: Wrapper): V = w.value

  implicit val mutaVspace: MutableCoordinateField[Wrapper, S] = new MutableCoordinateField[Wrapper, S] {
    // short alias for the underlying immutable field, used by the lift helpers below
    val u = underlying
    def scalars = underlying.scalars
    // `identity` suffices here, implying Wrapper already is (or converts to) its NumericOps
    val hasOps: ConversionOrSubtype[Wrapper, NumericOps[Wrapper]] = identity

    implicit def zeroLike: CanCreateZerosLike[Wrapper, Wrapper] = new CanCreateZerosLike[Wrapper, Wrapper] {
      // Should not inherit from From=>To because the compiler will try to use it to coerce types.
      def apply(from: Wrapper): Wrapper = from.map(underlying.zeroLike.apply)
    }

    implicit def copy: CanCopy[Wrapper] = new CanCopy[Wrapper] {
      // Should not inherit from From=>To because the compiler will try to use it to coerce types.
      // NOTE(review): this returns the same Ref instead of a fresh cell, so a later in-place
      // update on the "copy" is visible through the original reference — confirm this
      // aliasing is intended.
      def apply(from: Wrapper): Wrapper = from
    }

    //    implicit def normImplDouble: norm.Impl2[Wrapper, Double, Double] = new norm.Impl2[Wrapper, Double, Double] {
    //      def apply(v1: Wrapper, v2: Double): Double = u.normImplDouble(v1.value, v2)
    //    }

    // traversal delegates to the evidence for the underlying V
    implicit def iterateValues: CanTraverseValues[Wrapper, S] = new CanTraverseValues[Wrapper, S] {
      override def traverse(from: Wrapper, fn: ValuesVisitor[S]): fn.type = {
        from.map(canIterate.traverse(_, fn))
        fn
      }
      override def isTraversableAgain(from: Wrapper): Boolean = canIterate.isTraversableAgain(from.value)
    }

    implicit def mapValues: CanMapValues[Wrapper, S, S, Wrapper] = new CanMapValues[Wrapper, S, S, Wrapper] {
      override def map(from: Wrapper, fn: (S) => S): Wrapper = {
        from.map(canMap.map(_, fn))
      }
      override def mapActive(from: Wrapper, fn: (S) => S): Wrapper = {
        from.map(canMap.mapActive(_, fn))
      }
    }

    override implicit def scalarOf: ScalarOf[Wrapper, S] = ScalarOf.dummy

    implicit def zipMapValues: CanZipMapValues[Wrapper, S, S, Wrapper] = new CanZipMapValues[Wrapper, S, S, Wrapper] {
      def map(from: Wrapper, from2: Wrapper, fn: (S, S) => S): Wrapper = {
        from.map(canZipMap.map(_, from2.value, fn))
      }
    }

    // lifts an immutable (V, S) => V op into an in-place update on the Ref cell
    def liftUpdate[Op <: OpType](implicit op: UImpl2[Op, V, S, V]): UFunc.InPlaceImpl2[Op, Wrapper, S] =
      new UFunc.InPlaceImpl2[Op, Wrapper, S] {
        def apply(a: Wrapper, b: S): Unit = {
          a.value = op(a.value, b)
        }
      }

    // lifts an immutable (V, V) => V op into an in-place update on the Ref cell
    def liftUpdateV[Op <: OpType](implicit op: UImpl2[Op, V, V, V]): UFunc.InPlaceImpl2[Op, Wrapper, Wrapper] =
      new UFunc.InPlaceImpl2[Op, Wrapper, Wrapper] {
        def apply(a: Wrapper, b: Wrapper): Unit = {
          a.value = op(a.value, b.value)
        }
      }

    // lifts an immutable binary op so it returns a new wrapped value (non-mutating)
    def liftOp[Op <: OpType, RHS](implicit op: UImpl2[Op, V, RHS, V]): UImpl2[Op, Wrapper, RHS, Wrapper] =
      new UImpl2[Op, Wrapper, RHS, Wrapper] {
        def apply(a: Wrapper, b: RHS) = {
          a.map(op(_, b))
        }
      }

    def liftOpV[Op <: OpType](implicit op: UImpl2[Op, V, V, V]): UImpl2[Op, Wrapper, Wrapper, Wrapper] =
      new UImpl2[Op, Wrapper, Wrapper, Wrapper] {
        def apply(a: Wrapper, b: Wrapper) = {
          a.map(op(_, b.value))
        }
      }

    implicit def mulIntoVS: OpMulScalar.InPlaceImpl2[Wrapper, S] = liftUpdate(u.mulVS)
    implicit def addIntoVV: OpAdd.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.addVV)
    implicit def subIntoVV: OpSub.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.subVV)

    implicit def setIntoVV: OpSet.InPlaceImpl2[Wrapper, Wrapper] = new OpSet.InPlaceImpl2[Wrapper, Wrapper] {
      def apply(a: Wrapper, b: Wrapper): Unit = {
        a.value = b.value
      }
    }

    implicit def mulVV: OpMulScalar.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.mulVV)
    implicit def mulIntoVV: OpMulScalar.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(u.mulVV)

    // y += x * a, built from the wrapper's own += and * operators
    implicit def scaleAddVV: scaleAdd.InPlaceImpl3[Wrapper, S, Wrapper] = {
      new scaleAdd.InPlaceImpl3[Wrapper, S, Wrapper] {
        def apply(y: Wrapper, a: S, x: Wrapper): Unit = { y += x * a }
      }
    }

    implicit def mulVS: OpMulScalar.Impl2[Wrapper, S, Wrapper] = liftOp(u.mulVS)
    implicit def addVV: OpAdd.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.addVV)
    implicit def subVV: OpSub.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(u.subVV)

    override def close(a: Wrapper, b: Wrapper, tolerance: Double): Boolean = u.close(a.value, b.value, tolerance)

    // default implementations
    implicit def neg: OpNeg.Impl[Wrapper, Wrapper] = new OpNeg.Impl[Wrapper, Wrapper] {
      def apply(a: Wrapper): Wrapper = a.map(u.neg.apply)
    }

    implicit def dotVV: OpMulInner.Impl2[Wrapper, Wrapper, S] = new OpMulInner.Impl2[Wrapper, Wrapper, S] {
      def apply(a: Wrapper, b: Wrapper): S = {
        u.dotVV(a.value, b.value)
      }
    }

    override implicit def normImpl2: norm.Impl2[Wrapper, Double, Double] = new norm.Impl2[Wrapper, Double, Double] {
      override def apply(v: Wrapper, v2: Double): Double = underlying.normImpl2(v.value, v2)
    }

    override implicit def divIntoVV: OpDiv.InPlaceImpl2[Wrapper, Wrapper] = liftUpdateV(underlying.divVV)
    override implicit def divVV: OpDiv.Impl2[Wrapper, Wrapper, Wrapper] = liftOpV(underlying.divVV)
    override implicit def divIntoVS: OpDiv.InPlaceImpl2[Wrapper, S] = liftUpdate(underlying.divVS)
    override implicit def divVS: OpDiv.Impl2[Wrapper, S, Wrapper] = liftOp(underlying.divVS)
  }
}
}
| scalanlp/breeze | math/src/main/scala/breeze/math/MutablizingAdaptor.scala | Scala | apache-2.0 | 30,254 |
package test.kofre
import org.scalatest.freespec.AnyFreeSpec
import kofre.protocol.RaftState.Vote
import kofre.Lattice
import kofre.protocol.RaftState
/**
 * Tests for the [[RaftState]] CRDT: proposals only become visible values once a
 * quorum (consensusSize) supports them, and leadership changes with new terms.
 * The step-by-step merges below are order-dependent state transitions — each
 * assertion documents the expected state after the preceding merge.
 */
class RaftLatticeTest extends AnyFreeSpec {

  "basic interaction" in {
    // three participants, all voting for "a" in term 0 — "a" becomes leader
    val initial = RaftState[String](Set("a", "b", "c"), Set(Vote(0, "a", "a"), Vote(0, "a", "b"), Vote(0, "a", "c")))

    assert(initial.leader === "a")
    assert(initial.nextProposal === 0)
    // quorum of 3 participants is 2
    assert(initial.consensusSize === 2)
    val proposition = initial.proposeDelta("a", "new proposal")

    val proposaled = Lattice.merge(initial, proposition)

    val p2 = proposaled.proposeDelta("a", "another proposal")

    val proposaled2 = Lattice.merge(proposaled, p2)

    // only the leader supports the proposals so far — no quorum, so no values yet
    assert(proposaled2.values === List())

    val s1 = proposaled2.supportProposalDelta("b")
    val s2 = proposaled2.supportProposalDelta("c")

    // either supporter alone reaches the quorum of 2; merging both is idempotent
    assert(Lattice.merge(proposaled2, s1).values === List("new proposal", "another proposal"))
    assert(Lattice.merge(proposaled2, s2).values === List("new proposal", "another proposal"))
    assert(Lattice.merge(Lattice.merge(proposaled2, s2), s1).values === List("new proposal", "another proposal"))
  }

  "another interaction" in {
    val participants = Set("a", "b", "c")
    // "a" runs for leader and is accepted by both peers
    val afterFirstVote = RaftState[String](participants)
      .becomeCandidate("a")
      .supportLeader("b")
      .supportLeader("c")

    assert(afterFirstVote.leader === "a")
    assert(afterFirstVote.currentTerm === 1)

    // kinda split between a and b, but c is still fine with everyone
    val afterProposalAndSplit = afterFirstVote
      .propose("a", "As first proposal")
      .becomeCandidate("b")
      .supportLeader("b")
      .supportProposal("c")

    // b's candidacy lacks a quorum, so a stays leader in term 1
    assert(afterProposalAndSplit.leader == "a")
    assert(afterProposalAndSplit.currentTerm == 1)
    assert(afterProposalAndSplit.values == List("As first proposal"))

    val bsRiseToPower = afterProposalAndSplit
      .propose("b", "Bs proposal before acceptance")
      .propose("a", "As second proposal while still leader")
      .supportLeader("c")
      .propose("b", "Bs proposal after acceptance")
      .supportProposal("c")

    // once c supports b, term 2 starts; proposals made by non-leaders are dropped
    assert(bsRiseToPower.leader == "b")
    assert(bsRiseToPower.currentTerm == 2)
    assert(bsRiseToPower.values == List("As first proposal", "Bs proposal after acceptance"))
  }
}
| guidosalva/REScala | Code/Extensions/Kofre/src/test/scala/test/kofre/RaftLatticeTest.scala | Scala | apache-2.0 | 2,315 |
package org.broadinstitute.dsde.vault
import java.util.concurrent.TimeUnit
import akka.actor.{Actor, ActorRef, Props}
import akka.event.Logging
import akka.util.Timeout
import org.broadinstitute.dsde.vault.GenericDmClientService._
import org.broadinstitute.dsde.vault.model.{GenericRelEnt, GenericIngest, GenericEntity}
import org.broadinstitute.dsde.vault.services.ClientFailure
import spray.client.pipelining._
import spray.http.HttpHeaders.Cookie
import spray.routing.RequestContext
import spray.httpx.SprayJsonSupport._
import org.broadinstitute.dsde.vault.model.GenericJsonProtocol._
import scala.util.{Failure, Success}
/**
 * Message protocol for [[GenericDmClientService]].
 *
 * Request messages (DMGeneric*) are sent to the actor; on completion it replies with the
 * corresponding *Response message, or with ClientFailure on error. `describeKey` is an
 * opaque correlation id echoed back in the response so callers can match replies to requests.
 */
object GenericDmClientService {

  // ingest a batch of entities/relations; replies with DMGenericIngestResponse
  case class DMGenericIngest(ingest: GenericIngest, version: Int)
  case class DMGenericIngestResponse(guids: List[String])

  // describe a single entity; replies with DMGenericDescribeResponse
  case class DMGenericDescribe(guid: String, version: Int, describeKey: Int)
  // describe entities related to `guid` (upstream or downstream);
  // both reply with DMGenericDescribeMultipleResponse
  case class DMGenericDescribeUp(guid: String, version: Int, describeKey: Int)
  case class DMGenericDescribeDown(guid: String, version: Int, describeKey: Int)
  case class DMGenericDescribeResponse(entity: GenericEntity, describeKey: Int)
  case class DMGenericDescribeMultipleResponse(entities: List[GenericRelEnt], describeKey: Int) // up or down

  def props(requestContext: RequestContext): Props = Props(new GenericDmClientService(requestContext))
}
/**
 * Actor that proxies Data Management (DM) generic-entity calls over HTTP (spray-client),
 * forwarding the caller's cookies from the original request context.
 *
 * Replies to the original sender with the matching *Response message on success,
 * or ClientFailure(message) on any HTTP/unmarshalling error.
 *
 * @param requestContext the incoming request, used to propagate session cookies to DM
 */
case class GenericDmClientService(requestContext: RequestContext) extends Actor {

  import system.dispatcher

  // cap each DM HTTP round-trip at 5 seconds
  implicit val timeout: Timeout = Timeout(5, TimeUnit.SECONDS)
  implicit val system = context.system
  val log = Logging(system, getClass)

  override def receive: Receive = {
    case DMGenericIngest(ingest, version) =>
      genericIngest(sender(), ingest, version)
    case DMGenericDescribe(guid, version, key) =>
      genericDescribe(sender(), guid, version, key)
    case DMGenericDescribeUp(guid, version, key) =>
      genericDescribeUp(sender(), guid, version, key)
    case DMGenericDescribeDown(guid, version, key) =>
      genericDescribeDown(sender(), guid, version, key)
  }

  /** POSTs the ingest payload to DM; replies with the list of created guids. */
  def genericIngest(senderRef: ActorRef, ingest: GenericIngest, version: Int): Unit = {
    log.debug("DM Generic Ingest")
    val pipeline = addHeader(Cookie(requestContext.request.cookies)) ~> sendReceive ~> unmarshal[List[String]]
    val responseFuture = pipeline {
      Post(VaultConfig.DataManagement.genericIngestUrl(version), ingest)
    }
    responseFuture onComplete {
      case Success(guids) =>
        log.debug("DM Generic Ingest successful")
        senderRef ! DMGenericIngestResponse(guids)
      case Failure(error) =>
        log.error(error, "DM Generic Ingest Failure")
        senderRef ! ClientFailure(error.getMessage)
    }
  }

  /** GETs a single entity description; replies with DMGenericDescribeResponse. */
  def genericDescribe(senderRef: ActorRef, guid: String, version: Int, describeKey: Int): Unit = {
    log.debug("DM Generic Describe for " + guid)
    val pipeline = addHeader(Cookie(requestContext.request.cookies)) ~> sendReceive ~> unmarshal[GenericEntity]
    val responseFuture = pipeline {
      Get(VaultConfig.DataManagement.genericDescribeUrl(version, guid))
    }
    responseFuture onComplete {
      case Success(entity) =>
        log.debug("DM Generic Describe for %s Successful".format(guid))
        senderRef ! DMGenericDescribeResponse(entity, describeKey)
      case Failure(error) =>
        log.error(error, "DM Generic Describe for %s Failed".format(guid))
        senderRef ! ClientFailure(error.getMessage)
    }
  }

  /** GETs entities upstream of `guid`; replies with DMGenericDescribeMultipleResponse. */
  def genericDescribeUp(senderRef: ActorRef, guid: String, version: Int, describeKey: Int): Unit =
    describeRelated(senderRef, guid, describeKey, "DM Generic Describe Up",
      VaultConfig.DataManagement.genericDescribeUpUrl(version, guid))

  /** GETs entities downstream of `guid`; replies with DMGenericDescribeMultipleResponse. */
  def genericDescribeDown(senderRef: ActorRef, guid: String, version: Int, describeKey: Int): Unit =
    describeRelated(senderRef, guid, describeKey, "DM Generic Describe Down",
      VaultConfig.DataManagement.genericDescribeDownUrl(version, guid))

  /**
   * Shared implementation for the up/down relation queries, which previously duplicated
   * this logic verbatim; they differ only in the DM endpoint and the log label.
   * Log output is identical to the original per-direction implementations.
   */
  private def describeRelated(senderRef: ActorRef, guid: String, describeKey: Int, label: String, url: String): Unit = {
    log.debug(label + " for " + guid)
    val pipeline = addHeader(Cookie(requestContext.request.cookies)) ~> sendReceive ~> unmarshal[List[GenericRelEnt]]
    val responseFuture = pipeline {
      Get(url)
    }
    responseFuture onComplete {
      case Success(entities) =>
        log.debug("%s for %s Successful".format(label, guid))
        senderRef ! DMGenericDescribeMultipleResponse(entities, describeKey)
      case Failure(error) =>
        log.error(error, "%s for %s Failed".format(label, guid))
        senderRef ! ClientFailure(error.getMessage)
    }
  }
}
| broadinstitute/vault-api | src/main/scala/org/broadinstitute/dsde/vault/GenericDmClientService.scala | Scala | bsd-3-clause | 4,965 |
/*
* Copyright 2011 Goldman Sachs.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gs.collections.impl
import com.gs.collections.api.InternalIterable
import block.procedure.CollectionAddProcedure
import list.mutable.FastList
import test.Verify
import Prelude._
import org.junit.{Assert, Test}
/**
 * Shared JUnit tests for any InternalIterable[String] implementation: implementors
 * supply `classUnderTest` (expected to contain exactly the elements "1", "2", "3" —
 * the assertions below rely on that fixture).
 *
 * The closures passed to forEach* are adapted to GS-collections procedure types by the
 * implicit conversions imported from Prelude._. Note the methods use Scala 2 procedure
 * syntax (no `=` / result type), which is deprecated in later Scala versions.
 */
trait InternalIterableTestTrait extends IterableTestTrait
{
    // fixture provided by the concrete test class; must contain "1", "2", "3"
    val classUnderTest: InternalIterable[String]

    @Test
    def forEach
    {
        val result = FastList.newList[String]

        // collect every element into result via the add procedure
        this.classUnderTest.forEach(CollectionAddProcedure.on(result))
        Verify.assertSize(3, result)
        Verify.assertContainsAll(result, "1", "2", "3")
    }

    @Test
    def forEachWithIndex
    {
        val result = FastList.newList[String]
        val indices = FastList.newList[Integer]
        var count = 0

        this.classUnderTest.forEachWithIndex
        {
            // indices must be delivered in order: 0, 1, 2
            (each: String, index: Int) =>
                Assert.assertEquals(index, count)
                count += 1
                result.add(each)
                indices.add(index)
                ()  // explicit Unit so the closure matches the expected procedure type
        }
        Verify.assertSize(3, result)
        Verify.assertContainsAll(result, "1", "2", "3")
        Verify.assertSize(3, indices)
        Verify.assertContainsAll(indices, Integer.valueOf(0), Integer.valueOf(1), Integer.valueOf(2))
    }

    @Test
    def forEachWith
    {
        val result = FastList.newList[String]

        // the extra argument "!" is passed unchanged to every invocation
        this.classUnderTest.forEachWith(
            (each: String, parameter: String) =>
            {
                result.add(each + parameter)
                ()
            },
            "!")
        Verify.assertSize(3, result)
        Verify.assertContainsAll(result, "1!", "2!", "3!")
    }
}
| jlz27/gs-collections | scala-unit-tests/src/test/scala/com/gs/collections/impl/InternalIterableTestTrait.scala | Scala | apache-2.0 | 2,244 |
package com.codacy.client.bitbucket.v1
import java.time.LocalDateTime
import play.api.libs.functional.syntax._
import play.api.libs.json._
/**
 * A Bitbucket (API v1) pull request as parsed from the REST payload.
 *
 * Author fields are optional because the payload may omit author details
 * (see the lenient readers in the companion object). Field names mirror the
 * JSON payload (e.g. `created_on`), so they are intentionally not camelCase.
 */
case class PullRequest(id: Long, title: String, description: String,
                       authorUsername: Option[String], authorAvatar: Option[String],
                       state: String, created_on: LocalDateTime, updated_on: LocalDateTime,
                       sourceRepository: String, sourceBranch: String, sourceCommit: String,
                       destRepository: String, destBranch: String, destCommit: Option[String],
                       apiUrls: Seq[ApiUrl], authorUUID: Option[String] = None) {
  // web UI location of this pull request on bitbucket.org
  val url = s"https://bitbucket.org/$destRepository/pull-request/$id"
}
/**
 * The kinds of hypermedia links a pull-request payload can carry.
 * Each value's name matches the link key used in the Bitbucket JSON
 * (e.g. "commits", "decline"), which is what [[find]] matches against.
 */
object ApiUrlType extends Enumeration {
  val Commits = Value("commits")
  val Decline = Value("decline")
  val Self = Value("self")
  val Comments = Value("comments")
  val Patch = Value("patch")
  val Merge = Value("merge")
  val Html = Value("html")
  val Activity = Value("activity")
  val Diff = Value("diff")
  val Approve = Value("approve")

  /** Looks up the enumeration value whose name equals `urlType` (case-sensitive); None if unknown. */
  def find(urlType: String): Option[Value] =
    values.find(candidate => candidate.toString == urlType)
}
/** A single hypermedia link from a pull-request payload: its kind plus the href. */
case class ApiUrl(urlType: ApiUrlType.Value, link: String)
/**
 * play-json readers for [[PullRequest]].
 *
 * The readers are deliberately lenient about author information, since Bitbucket
 * can omit it (deleted accounts): non-string author fields decode to None rather
 * than failing the whole payload.
 */
object PullRequest {

  // NOTE(review): pattern includes a zone offset (XXX) but is parsed into a
  // LocalDateTime, which has no zone — confirm timestamps are meant as wall-clock time.
  val dateFormat = "yyyy-MM-dd'T'HH:mm:ss.SSSSSSXXX"
  implicit val dateTimeReads: Reads[LocalDateTime] = Reads.localDateTimeReads(dateFormat)

  // maps any non-string JSON value (missing, null, object, ...) to None instead of an error
  implicit def optionStringReader: Reads[Option[String]] = Reads { (json: JsValue) =>
    json match {
      case JsString(value) => JsSuccess(Some(value))
      case _ => JsSuccess(None)
    }
  }

  implicit val reader: Reads[PullRequest] = (
    (__ \\ "id").read[Long] and
      (__ \\ "title").read[String] and
      (__ \\ "description").read[String] and
      (__ \\ "author" \\ "username").readNullable[String] and
      // fall back to the raw links node when the avatar href is absent
      (__ \\ "author" \\ "links" \\ "avatar" \\ "href").readNullable[String].orElse((__ \\ "author" \\ "links").readNullable[String]) and
      (__ \\ "state").read[String] and
      (__ \\ "created_on").read[LocalDateTime] and
      (__ \\ "updated_on").read[LocalDateTime] and
      (__ \\ "source" \\ "repository" \\ "full_name").read[String] and
      (__ \\ "source" \\ "branch" \\ "name").read[String] and
      (__ \\ "source" \\ "commit" \\ "hash").read[String] and
      (__ \\ "destination" \\ "repository" \\ "full_name").read[String] and
      (__ \\ "destination" \\ "branch" \\ "name").read[String] and
      (__ \\ "destination" \\ "commit" \\ "hash").readNullable[String] and
      // TODO: (__ \\ "destination" \\ "commit" \\ "hash").read[Option[String]] and
      (__ \\ "links").read[Map[String, Map[String, String]]].map(parseLinks) and
      (__ \\ "author" \\ "uuid").readNullable[String]
    ) (PullRequest.apply _)

  // turns the raw links map into ApiUrls, silently dropping unknown link types
  // and links without an "href" entry
  private def parseLinks(links: Map[String, Map[String, String]]): Seq[ApiUrl] = {
    (for {
      (linkName, linkMap) <- links
      urlType <- ApiUrlType.find(linkName)
      linkUrl <- linkMap.get("href")
    } yield ApiUrl(urlType, linkUrl)).toSeq
  }
}
| rtfpessoa/bitbucket-scala-client | src/main/scala/com/codacy/client/bitbucket/v1/PullRequest.scala | Scala | apache-2.0 | 2,982 |
/* ____ __ ____ ____ ____,,___ ____ __ __ ____
* ( _ \ /__\ (_ )(_ _)( ___)/ __) ( _ \( )( )( _ \ Read
* ) / /(__)\ / /_ _)(_ )__) \__ \ )___/ )(__)( ) _ < README.txt
* (_)\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt
*/
package razie.diesel.expr
/** A simple parser for our simple specs
*
* DomParser is the actual Diesel/Dom parser.
* We extend from it to include its functionality and then we add its parsing rules with withBlocks()
*/
/**
 * Convenience entry points for parsing stand-alone expression fragments.
 *
 * Extends the full [[ExprParser]] grammar; each method requires the ENTIRE input
 * to match its production (parseAll) and returns None on any parse failure.
 */
class SimpleExprParser extends ExprParser {

  /** Parses `input` as a complete expression, or None if it does not fully parse. */
  def parseExpr(input: String): Option[Expr] = parseOpt(expr, input)

  /** Parses `input` as a qualified identifier expression, or None on failure. */
  def parseIdent(input: String): Option[AExprIdent] = parseOpt(aidentExpr, input)

  /** Parses `input` as a boolean condition, or None on failure. */
  def parseCond(input: String): Option[BoolExpr] = parseOpt(cond, input)

  /**
   * Shared driver for the three public entry points, which previously repeated
   * this exact match verbatim. Failures are swallowed into None.
   * todo ? throw new DieselExprException("Parsing error: " + msg)
   */
  private def parseOpt[T](parser: Parser[T], input: String): Option[T] =
    parseAll(parser, input) match {
      case Success(value, _) => Some(value)
      case NoSuccess(msg, next) => None
    }
}
| razie/diesel-hydra | diesel/src/main/scala/razie/diesel/expr/SimpleExprParser.scala | Scala | apache-2.0 | 1,346 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.index.z3
import java.util.Date
import com.typesafe.scalalogging.LazyLogging
import org.geotools.util.factory.Hints
import org.locationtech.geomesa.curve.BinnedTime.TimeToBinnedTime
import org.locationtech.geomesa.curve.{BinnedTime, XZ3SFC}
import org.locationtech.geomesa.filter.FilterValues
import org.locationtech.geomesa.index.api.IndexKeySpace.IndexKeySpaceFactory
import org.locationtech.geomesa.index.api.ShardStrategy.{NoShardStrategy, ZShardStrategy}
import org.locationtech.geomesa.index.api._
import org.locationtech.geomesa.index.conf.QueryProperties
import org.locationtech.geomesa.index.geotools.GeoMesaDataStoreFactory.GeoMesaDataStoreConfig
import org.locationtech.geomesa.index.utils.Explainer
import org.locationtech.geomesa.utils.geotools.{GeometryUtils, WholeWorldPolygon}
import org.locationtech.geomesa.utils.index.ByteArrays
import org.locationtech.jts.geom.{Geometry, Point}
import org.locationtech.sfcurve.IndexRange
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter.Filter
import scala.util.control.NonFatal
/**
 * Key space for the XZ3 index over a (geometry, date) attribute pair, suitable for
 * non-point geometries (XZ-ordering indexes extents, not just points).
 *
 * Key layout, as written by toIndexKey:
 *   [optional shard byte][epoch bin: 2 bytes][xz3 value: 8 bytes][feature id]
 * where the epoch bin is the time period (week/month/... per the sft's Z3 interval)
 * and the xz3 value encodes the geometry's bounding box plus the offset within the bin.
 *
 * @param sft       the simple feature type being indexed
 * @param sharding  shard strategy (prefix bytes used to spread load); may be empty
 * @param geomField name of the geometry attribute (must bind to Geometry)
 * @param dtgField  name of the date attribute (must bind to Date)
 */
class XZ3IndexKeySpace(val sft: SimpleFeatureType, val sharding: ShardStrategy, geomField: String, dtgField: String)
    extends IndexKeySpace[XZ3IndexValues, Z3IndexKey] with LazyLogging {

  import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType

  require(classOf[Geometry].isAssignableFrom(sft.getDescriptor(geomField).getType.getBinding),
    s"Expected field $geomField to have a geometry binding, but instead it has: " +
        sft.getDescriptor(geomField).getType.getBinding.getSimpleName)
  require(classOf[Date].isAssignableFrom(sft.getDescriptor(dtgField).getType.getBinding),
    s"Expected field $dtgField to have a date binding, but instead it has: " +
        sft.getDescriptor(dtgField).getType.getBinding.getSimpleName)

  protected val geomIndex: Int = sft.indexOf(geomField)
  protected val dtgIndex: Int = sft.indexOf(dtgField)

  // space-filling curve and time-binning helpers, configured from the sft's precision/interval
  protected val sfc: XZ3SFC = XZ3SFC(sft.getXZPrecision, sft.getZ3Interval)
  protected val timeToIndex: TimeToBinnedTime = BinnedTime.timeToBinnedTime(sft.getZ3Interval)

  private val dateToIndex = BinnedTime.dateToBinnedTime(sft.getZ3Interval)
  private val boundsToDates = BinnedTime.boundsToIndexableDates(sft.getZ3Interval)
  private val isPoints = classOf[Point].isAssignableFrom(sft.getDescriptor(geomIndex).getType.getBinding)

  override val attributes: Seq[String] = Seq(geomField, dtgField)

  // fixed length: 2 (bin) + 8 (xz) + optional shard prefix
  override val indexKeyByteLength: Right[(Array[Byte], Int, Int) => Int, Int] = Right(10 + sharding.length)

  override val sharing: Array[Byte] = Array.empty

  /**
   * Builds the index row for a feature. Throws IllegalArgumentException for a null
   * geometry or a geometry/time combination the curve cannot encode (unless lenient).
   * A null date is indexed at time 0.
   */
  override def toIndexKey(writable: WritableFeature,
                          tier: Array[Byte],
                          id: Array[Byte],
                          lenient: Boolean): RowKeyValue[Z3IndexKey] = {
    val geom = writable.getAttribute[Geometry](geomIndex)
    if (geom == null) {
      throw new IllegalArgumentException(s"Null geometry in feature ${writable.feature.getID}")
    }
    val envelope = geom.getEnvelopeInternal
    // TODO support date intervals (remember to remove disjoint data check in getRanges)
    val dtg = writable.getAttribute[Date](dtgIndex)
    val time = if (dtg == null) { 0L } else { dtg.getTime }
    val BinnedTime(b, t) = timeToIndex(time)
    val xz = try {
      // note: min and max time are the same, since we don't support date intervals yet
      sfc.index(envelope.getMinX, envelope.getMinY, t, envelope.getMaxX, envelope.getMaxY, t, lenient)
    } catch {
      case NonFatal(e) => throw new IllegalArgumentException(s"Invalid xz value from geometry/time: $geom,$dtg", e)
    }
    val shard = sharding(writable)

    // create the byte array - allocate a single array up front to contain everything
    // ignore tier, not used here
    val bytes = Array.ofDim[Byte](shard.length + 10 + id.length)

    if (shard.isEmpty) {
      ByteArrays.writeShort(b, bytes, 0)
      ByteArrays.writeLong(xz, bytes, 2)
      System.arraycopy(id, 0, bytes, 10, id.length)
    } else {
      bytes(0) = shard.head // shard is only a single byte
      ByteArrays.writeShort(b, bytes, 1)
      ByteArrays.writeLong(xz, bytes, 3)
      System.arraycopy(id, 0, bytes, 11, id.length)
    }

    SingleRowKeyValue(bytes, sharing, shard, Z3IndexKey(b, xz), tier, id, writable.values)
  }

  /**
   * Extracts the spatial and temporal predicates from `filter` and pre-computes the
   * per-bin time bounds used to generate scan ranges. Missing geometry predicates
   * default to the whole world; disjoint date predicates short-circuit to empty values.
   */
  override def getIndexValues(filter: Filter, explain: Explainer): XZ3IndexValues = {
    import org.locationtech.geomesa.filter.FilterHelper._

    // standardize the two key query arguments: polygon and date-range

    val geometries: FilterValues[Geometry] = {
      val extracted = extractGeometries(filter, geomField, isPoints)
      if (extracted.nonEmpty) { extracted } else { FilterValues(Seq(WholeWorldPolygon)) }
    }

    // since we don't apply a temporal filter, we pass handleExclusiveBounds to
    // make sure we exclude the non-inclusive endpoints of a during filter.
    // note that this isn't completely accurate, as we only index down to the second
    val intervals = extractIntervals(filter, dtgField, handleExclusiveBounds = true)

    explain(s"Geometries: $geometries")
    explain(s"Intervals: $intervals")

    // disjoint geometries are ok since they could still intersect a polygon
    if (intervals.disjoint) {
      explain("Disjoint dates extracted, short-circuiting to empty query")
      return XZ3IndexValues(sfc, FilterValues.empty, Seq.empty, FilterValues.empty, Map.empty, Seq.empty)
    }

    // compute our ranges based on the coarse bounds for our query
    val xy: Seq[(Double, Double, Double, Double)] = {
      val multiplier = QueryProperties.PolygonDecompMultiplier.toInt.get
      val bits = QueryProperties.PolygonDecompBits.toInt.get
      geometries.values.flatMap(GeometryUtils.bounds(_, multiplier, bits))
    }

    // calculate map of weeks to time intervals in that week
    val timesByBin = scala.collection.mutable.Map.empty[Short, (Double, Double)]
    // bins for one-sided (unbounded) intervals, as (lowest bin, highest bin) pairs
    val unboundedBins = Seq.newBuilder[(Short, Short)]

    // widens the tracked time bounds for `bin` to include [lt, ut]
    def updateTime(bin: Short, lt: Double, ut: Double): Unit = {
      val times = timesByBin.get(bin) match {
        case None => (lt, ut)
        case Some((min, max)) => (math.min(min, lt), math.max(max, ut))
      }
      timesByBin(bin) = times
    }

    // note: intervals shouldn't have any overlaps
    intervals.foreach { interval =>
      val (lower, upper) = boundsToDates(interval.bounds)
      val BinnedTime(lb, lt) = dateToIndex(lower)
      val BinnedTime(ub, ut) = dateToIndex(upper)

      if (interval.isBoundedBothSides) {
        if (lb == ub) {
          updateTime(lb, lt, ut)
        } else {
          // partial first bin, full middle bins, partial last bin
          updateTime(lb, lt, sfc.zBounds._2)
          updateTime(ub, sfc.zBounds._1, ut)
          Range.inclusive(lb + 1, ub - 1).foreach(b => timesByBin(b.toShort) = sfc.zBounds)
        }
      } else if (interval.lower.value.isDefined) {
        // no upper bound: partial lower bin, then everything after it
        updateTime(lb, lt, sfc.zBounds._2)
        unboundedBins += (((lb + 1).toShort, Short.MaxValue))
      } else if (interval.upper.value.isDefined) {
        // no lower bound: everything before the upper bin, then a partial upper bin
        updateTime(ub, sfc.zBounds._1, ut)
        unboundedBins += ((0, (ub - 1).toShort))
      }
    }

    // make our underlying index values available to other classes in the pipeline for processing
    XZ3IndexValues(sfc, geometries, xy, intervals, timesByBin.toMap, unboundedBins.result())
  }

  /**
   * Decomposes the extracted index values into scan ranges over (bin, xz) keys.
   * `multiplier` scales down the per-bin range target when several scans are combined.
   */
  override def getRanges(values: XZ3IndexValues, multiplier: Int): Iterator[ScanRange[Z3IndexKey]] = {

    val XZ3IndexValues(sfc, _, xy, _, timesByBin, unboundedBins) = values

    // note: `target` will always be Some, as ScanRangesTarget has a default value
    val target = QueryProperties.ScanRangesTarget.option.map { t =>
      math.max(1, if (timesByBin.isEmpty) { t.toInt } else { t.toInt / timesByBin.size } / multiplier)
    }

    def toZRanges(t: (Double, Double)): Seq[IndexRange] =
      sfc.ranges(xy.map { case (xmin, ymin, xmax, ymax) => (xmin, ymin, t._1, xmax, ymax, t._2) }, target)

    // whole-period decomposition is shared by every fully-covered bin (eq check below)
    lazy val wholePeriodRanges = toZRanges(sfc.zBounds)

    val bounded = timesByBin.iterator.flatMap { case (bin, times) =>
      val zs = if (times.eq(sfc.zBounds)) { wholePeriodRanges } else { toZRanges(times) }
      zs.map(r => BoundedRange(Z3IndexKey(bin, r.lower), Z3IndexKey(bin, r.upper)))
    }

    val unbounded = unboundedBins.iterator.map {
      case (lower, Short.MaxValue) => LowerBoundedRange(Z3IndexKey(lower, 0L))
      case (0, upper)              => UpperBoundedRange(Z3IndexKey(upper, Long.MaxValue))
      case (lower, upper) =>
        logger.error(s"Unexpected unbounded bin endpoints: $lower:$upper")
        UnboundedRange(Z3IndexKey(0, 0L))
    }

    bounded ++ unbounded
  }

  /**
   * Serializes scan ranges to raw byte ranges, expanding each range across every
   * shard prefix when sharding is configured.
   */
  override def getRangeBytes(ranges: Iterator[ScanRange[Z3IndexKey]], tier: Boolean): Iterator[ByteRange] = {
    if (sharding.length == 0) {
      ranges.map {
        case BoundedRange(lo, hi) =>
          BoundedByteRange(ByteArrays.toBytes(lo.bin, lo.z), ByteArrays.toBytesFollowingPrefix(hi.bin, hi.z))

        case LowerBoundedRange(lo) =>
          BoundedByteRange(ByteArrays.toBytes(lo.bin, lo.z), ByteRange.UnboundedUpperRange)

        case UpperBoundedRange(hi) =>
          BoundedByteRange(ByteRange.UnboundedLowerRange, ByteArrays.toBytesFollowingPrefix(hi.bin, hi.z))

        case UnboundedRange(_) =>
          BoundedByteRange(ByteRange.UnboundedLowerRange, ByteRange.UnboundedUpperRange)

        case r =>
          throw new IllegalArgumentException(s"Unexpected range type $r")
      }
    } else {
      ranges.flatMap {
        case BoundedRange(lo, hi) =>
          val lower = ByteArrays.toBytes(lo.bin, lo.z)
          val upper = ByteArrays.toBytesFollowingPrefix(hi.bin, hi.z)
          sharding.shards.map(p => BoundedByteRange(ByteArrays.concat(p, lower), ByteArrays.concat(p, upper)))

        case LowerBoundedRange(lo) =>
          val lower = ByteArrays.toBytes(lo.bin, lo.z)
          val upper = ByteRange.UnboundedUpperRange
          sharding.shards.map(p => BoundedByteRange(ByteArrays.concat(p, lower), ByteArrays.concat(p, upper)))

        case UpperBoundedRange(hi) =>
          val lower = ByteRange.UnboundedLowerRange
          val upper = ByteArrays.toBytesFollowingPrefix(hi.bin, hi.z)
          sharding.shards.map(p => BoundedByteRange(ByteArrays.concat(p, lower), ByteArrays.concat(p, upper)))

        case UnboundedRange(_) =>
          Seq(BoundedByteRange(ByteRange.UnboundedLowerRange, ByteRange.UnboundedUpperRange))

        case r =>
          throw new IllegalArgumentException(s"Unexpected range type $r")
      }
    }
  }

  // always apply the full filter to xz queries
  override def useFullFilter(values: Option[XZ3IndexValues],
                             config: Option[GeoMesaDataStoreConfig],
                             hints: Hints): Boolean = true
}
/**
 * Factory for [[XZ3IndexKeySpace]] instances: expects exactly two attributes,
 * a geometry followed by a date.
 */
object XZ3IndexKeySpace extends IndexKeySpaceFactory[XZ3IndexValues, Z3IndexKey] {

  /** True iff both attributes exist on the sft and bind to Geometry and Date, respectively. */
  override def supports(sft: SimpleFeatureType, attributes: Seq[String]): Boolean = {
    if (attributes.lengthCompare(2) != 0 || attributes.exists(sft.indexOf(_) == -1)) { false } else {
      val geomBinding = sft.getDescriptor(attributes.head).getType.getBinding
      val dtgBinding = sft.getDescriptor(attributes.last).getType.getBinding
      classOf[Geometry].isAssignableFrom(geomBinding) && classOf[Date].isAssignableFrom(dtgBinding)
    }
  }

  override def apply(sft: SimpleFeatureType, attributes: Seq[String], tier: Boolean): XZ3IndexKeySpace = {
    // no sharding when used as a secondary tier
    val sharding = if (tier) { NoShardStrategy } else { ZShardStrategy(sft) }
    new XZ3IndexKeySpace(sft, sharding, attributes.head, attributes.last)
  }
}
| locationtech/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/index/z3/XZ3IndexKeySpace.scala | Scala | apache-2.0 | 11,893 |
package Chapter13
import org.scalameter
object Collections {
// topics:
// the main collections traits
// mutable and immutable collections
// sequences
// lists
// sets
// operators for adding or removing elements
// common methods
// mapping a function
// reducing, folding and scanning
// zipping
// iterators
// streams
// lazy views
// interoperability with java collections
// parallel collections
// library users point of view;
// coll. extends the 'Iterable' trait;
// three major categories: sequence, set, map;
// mutable/immutable versions;
// list: head :: tail;
// LinkedHashSet: insertion order; SortedSet: sorted iterator;
// operators: + add to unordered; +:, :+ prepend or append; ++ concatenate, - and -- remove elements;
// Iterable, Seq traits with dozens of useful methods;
// use mapping, folding, zipping techniques
// the main collections traits
/** Demo: the Iterable/Seq/Set/Map hierarchy, iterators, the uniform creation
  * principle, conversions between collection types, and equality vs sameElements. */
def theMainCollectionsTraits = {
    import scala.collection.mutable
    // https://docs.scala-lang.org/overviews/collections/overview.html

    // Iterable:
    //  Seq, Set, Map
    //  Seq <- IndexedSeq
    //  Set <- SortedSet
    //  Map <- SortedMap

    // Iterable can yield an Iterator { hasNext; next }
    val coll = Seq(1,2,3)
    val iter = coll.iterator
    while (iter.hasNext) println( iter.next() )
    // most basic way of traversing a collection

    // Seq: ordered sequence (array, list, etc);
    // IndexedSeq: fast random access through an index;

    // Set: unordered collection of distinct values; SortedSet -- visited in sorted order;

    // Map: is a set of pairs (key, value); SortedMap -- visits as sorted by keys;

    // similar to java but with improvements:
    // - maps are a part of the hierarchy;
    // - IndexedSeq is the supertype of arrays but not lists.
    // in java ArrayList and LinkedList implement a common List interface,
    // RandomAccess marker interface was added later.

    // uniform creation principle:
    // companion objects with 'apply' for constructing instances
    val i = Iterable(1, 2, 3)
    val s = Set(1, 2, 3)
    val m = Map('a'->1, 'b'->2)

    // translate between coll. types
    val (s1, s2, s3) = (i.toSet, s.toSeq, m.to[mutable.ArrayBuffer])

    // can use '==' operator to compare any iterables with same type;
    // use 'sameElements' method for other
    s2 == coll
    s2 sameElements s1
}
// mutable and immutable collections
/** Demo: the collection.Map / immutable.Map / mutable.Map split, the `Map`
  * default alias from Predef, and why immutable collections suit recursion. */
def mutableAndImmutableCollections = {
    import scala.collection.mutable
    // https://docs.scala-lang.org/overviews/collections/overview.html

    // implemented with immutable.Map
    val supertype = scala.collection.Map.empty[String, String]
    // extends scala.collection.Map
    val immutabletype = scala.collection.immutable.Map.empty[String, String]
    // extends scala.collection.Map
    val mutabletype = scala.collection.mutable.Map.empty[String, String]
    // Predef val Map = immutable.Map
    val defaulttype = Map.empty[String, String]

    // immutable collections are useful in recursions
    // digits(1729) == Set(1, 7, 2, 9) -- each step builds a NEW set, no mutation
    def digits(n: Int): Set[Int] = {
        if (n < 0) digits(-n)
        else if (n < 10) Set(n)
        else digits(n / 10) + (n % 10) // + for unordered collections
        // construct a new set
    }
}
// sequences
/** Demo: the immutable and mutable Seq hierarchies — Vector, Range, List, etc. */
def sequences = {
    // Seq:
    //  IndexedSeq <- Vector, Range
    //  (unindexed) List, Stream, Stack, Queue

    // Vector: immutable version of ArrayBuffer, indexed sequence with fast random access;
    // implemented as a tree with up to 32 values in each node; 4 hops max for apply(i) in 1M elements
    // scala> math.pow(32, 4) // res0: Double = 1 048 576

    // Range: monotonic sequence represented by start, stop, step;
    // stores only the three numbers, not the elements
    val r = 1 to 100 by 10

    // mutable sequences, most useful:
    // Seq:
    //  IndexedSeq <- ArrayBuffer
    //  Stack, Queue, PriorityQueue, ListBuffer
}
// lists
/** Demo: List as the recursive structure `head :: tail`, right-associative `::`,
  * pattern-matching recursion, and notes on mutable list buffers. */
def lists = {
    // recursive data structure:
    // list is Nil or head :: tail, where tail is list
    val digits = List(4,2)
    digits.head // 4
    digits.tail // List(2)
    digits.tail.head // 2
    digits.tail.tail // Nil

    // :: operator, right associative
    val list = 9 :: List(4, 2) // List(9,4,2)
    val list2 = 9 :: 4 :: 2 :: Nil // starts from Nil and goes to left

    // natural for recursion
    def sum(lst: List[Int]): Int = lst match {
        case Nil => 0
        case h :: t => h + sum(t) // h: head, t: tail
    }

    // mutable list: ListBuffer, linked list with ref. to the last node.
    // with java LinkedList you can remove item after every second call to next;
    // no such operator in ListBuffer, better generate a new list ? not really ? see exercises
    // deprecated LinkedList, DoubleLinkedList
}
    // sets
    // Set variants: hash-based default, LinkedHashSet, SortedSet, BitSet.
    def sets = {
        import scala.collection.mutable
        // set: unordered collection of distinct elements;
        // adding an existing el. has no effect
        // (the expressions below are evaluated for demonstration, results discarded)
        Set(1,2) + 1 == Set(1,2)
        // sets implemented as hash sets by default (hashCode method)
        // LinkedHashSet: keeps in linked list the order of insertion
        val weekdays = mutable.LinkedHashSet("Mo", "Tu", "We", "Th", "Fr")
        // sorted order: SortedSet
        val numbers = mutable.SortedSet(5, 4, 3, 1, 2)
        // bit set: set of non-negative integers as a sequence of bits,
        // effective as long as max value is not too large
        val bits = scala.collection.immutable.BitSet(1,2,3)
        // contains, subsetOf, union, intersect, diff, etc
        weekdays contains "Sa" // false
        Set("Mo") subsetOf weekdays // true
        Set("Sa", "Su") union weekdays ++ weekdays -- weekdays diff weekdays intersect weekdays
    }
    // operators for adding or removing elements
    // Cheat sheet for add/remove operators across collection flavours.
    def operatorsForAddingOrRemovingElements = {
        import scala.collection.mutable
        // adding, removing operators: depending on the collection type
        // apply(i) :+ + - ++ ++: :: :::
        // mutable: += ++= -= +-: ++=:
        // + for unordered collection, generally
        // +: :+ for prepend append to ordered coll.
        Vector(1,2,3) :+ 5 // colon to sequence
        1 +: Vector(1,2,3)
        // mutable
        val numbers = mutable.ArrayBuffer(1,2,3)
        numbers += 5
        // immutable collection, mutable reference
        var numbers2 = Set(1,2,3)
        numbers2 += 5 // creates a new set (desugars to numbers2 = numbers2 + 5)
        var numberV = Vector(1,2,3)
        numberV :+= 5 // creates a new vector; += does not work since Vector immutable
        // summary:
        //  :+ or +: for append or prepend
        //  + for unordered coll.
        //  - to remove
        //  ++ -- for bulk add remove
        //  mutations: += ++= -= --=
        //  for lists :: ::: // pattern matching won't work with +:
        //  stay away from ++: +=: ++=:
    }
    // common methods
    // Index of noteworthy Iterable/Seq methods; see the linked API docs.
    def commonMethods = {
        // Iterable methods
        // https://www.scala-lang.org/api/current/scala/collection/Iterable.html
        // n.b.
        //  headOption/lastOption
        //  tail/init
        //  transform, collect
        //  aggregate
        //  partition, span
        //  splitAt
        //  slice, view
        //  grouped, sliding
        //  groupBy
        //  addString
        // Seq methods
        // https://www.scala-lang.org/api/current/scala/collection/Seq.html
        // n.b.
        //  containsSlice
        //  lastIndexOfSlice
        //  indexWhere
        //  prefixLength, segmentLength
        //  intersect, diff
        //  permutations, combinations
        // uniform return type principle:
        // methods return a new collection of the same type
    }
    // mapping a function
    // map/flatMap/foreach/collect/groupBy and their relation to 'for' expressions.
    // (name kept in snake_case for history, unlike sibling methods)
    def mapping_a_function = {
        import scala.collection.mutable
        // transform elements of a collection, apply unary function
        val names = List("Peter", "Paul", "Mary")
        names.map(_.toUpperCase) // yields a collection of transformed items
        // exactly the same:
        for (n <- names) yield n.toUpperCase
        // if transformation yields a collection, you may want to concatenate: flatMap
        def ulcase(s: String) = Seq(s.toUpperCase, s.toLowerCase) // yields a collection
        names.map(ulcase) // List[Seq[String]] = List(List(PETER, peter), List(PAUL, paul), List(MARY, mary))
        names.flatMap(ulcase) // List[String] = List(PETER, peter, PAUL, paul, MARY, mary)
        // useful: flatMap with Option transformation result: map + filter
        // map, flatMap are important: used for translating 'for expressions'
        for (i <- 1 to 10) yield i * i
        // translated to
        (1 to 10).map(i => i * i)
        // and
        for (i <- 1 to 10; j <- 1 to i) yield i * j
        // becomes
        (1 to 10).flatMap(i => (1 to i).map(j => i * j))
        // method 'transform' like 'map' only in-place, for mutable collections
        val buf = mutable.ArrayBuffer("Peter", "Paul", "Mary")
        buf.transform(_.toUpperCase)
        // foreach: elem => Unit, for side effect
        names foreach println
        // collect: works with partial functions
        "-3+4".collect { case '+' => 1; case '-' => -1 } // Vector(-1, 1)
        // groupBy: yields a map
        val map = buf.groupBy(_.substring(0, 1).toUpperCase)
        // Map(M -> ArrayBuffer(Mary), P -> ArrayBuffer(Peter, Paul))
    }
    // reducing, folding and scanning
    // reduceLeft/Right, foldLeft (/:), folds as loop replacement, scanLeft.
    def reducingFoldingAndScanning = {
        import scala.collection.mutable
        // combine elements with a binary function,
        // operations on adjacent elements
        // reduceLeft // can't work with empty list
        List(1,7,2,9).reduceLeft(_ - _) // ((1 - 7) -2) - 9
        // reduceRight : useful for growing a list
        List(1,7,2,9).reduceRight(_ - _) // 1 - (7 - (2 - 9))
        // but foldLeft or foldRight can work with empty list
        (0 /: List(1,7,2,9))(_ - _) // is equivalent to (colon to collection)
        List(1,7,2,9).foldLeft(0)(_ - _) // (((0 - 1) - 7) -2) - 9
        // init and op are curried: for type inference
        // folding as replacement for a loop:
        // instead of
        val freq = mutable.Map.empty[Char, Int].withDefaultValue(0)
        for (char <- "Mississippi") freq(char) = 1 + freq(char)
        // freq = Map(M -> 1, s -> 4, p -> 2, i -> 4)
        // you can do in f style // n.b. immutable map
        (Map.empty[Char, Int].withDefaultValue(0) /: "Mississippi") { (map, char) => map + (char -> (1 + map(char)))}
        // Map(M -> 1, i -> 4, s -> 4, p -> 2)
        // it is possible to replace any loop with a fold,
        // just build a data structure to hold a state and define an operation that
        // implements one step
        // scanLeft, scanRight: combine fold and map, yielding a coll. of all intermediate results
        (1 to 10).scanLeft(0)(_ + _) // Vector(0, 1, 3, 6, 10, 15, 21, 28, 36, 45, 55)
    }
    // zipping
    // zip, zipAll, zipWithIndex: pairwise operations over two collections.
    def zipping = {
        // two collections, operations on pairs of corresponding elements
        val prices = Seq(5d, 20d, 9.95)
        val quantities = Seq(10, 2, 1)
        // zip: res.length = shortest.length
        prices zip quantities // Seq[(Double, Int)] = List((5.0,10), (20.0,2), (9.95,1))
        // list of prices
        (prices zip quantities) map { case (p, c) => p * c } // List(50.0, 40.0, 9.95)
        // total price (postfix 'sum' call)
        (prices zip quantities) map { case (p, c) => p * c } sum // Double = 99.95
        // zipAll: let you specify defaults for shorter collection
        Seq(1,2,3).zipAll(Seq(4,5), -42, 42) // Seq[(Int, Int)] = List((1,4), (2,5), (3,42))
        // zipWithIndex: useful if you want index for an element with a certain property
        // (tuples compare lexicographically, so max finds the largest char)
        "Scala".zipWithIndex.max // (Char, Int) = (l,3)
    }
    // iterators
    // One-pass lazy traversal; BufferedIterator adds a peekable 'head'.
    def iterators = {
        // not very useful, but basic: 'iterator' method;
        // useful for "lazy" computations: read file or expensive computations;
        // fragile, 'next' mutates the iterator, no cache, one pass only.
        // 'grouped', 'sliding' returns an iterator
        // while (iter.hasNext) ... iter.next()
        // or
        // for (elem <- iter) ...
        // 'buffered': cache iter.head
        // NOTE(review): assumes /tmp/test exists — fromFile throws otherwise
        val iter = scala.io.Source.fromFile("/tmp/test").buffered
        while (iter.hasNext && iter.head.isWhitespace) iter.next()
        // iter points to the first non-whitespace char
        // you can copy iter to collection
        iter.toVector
    }
    // streams
    // Stream: immutable list with a lazily computed, memoized tail.
    def streams = {
        // immutable list in which the tail is computed lazily;
        // stream methods are evaluated lazily;
        // stream caches the visited values;
        // scala streams != java8 streams
        // scala lazy views == java8 streams
        // stream lazy tail
        def numsFrom(n: BigInt): Stream[BigInt] = n #:: numsFrom(n + 1)
        val tenOrMore = numsFrom(10) // Stream(10, ?)
        // tail is unevaluated
        tenOrMore.tail.tail.tail // Stream(13, ?)
        // and cache
        // scala> tenOrMore
        // res19: Stream[BigInt] = Stream(10, 11, 12, 13, ?)
        // lazy methods
        val squares = numsFrom(1).map(x => x * x) // Stream(1, ?)
        // take + force methods to get a collection
        squares.take(5).force // Stream(1, 4, 9, 16, 25)
        // squares.force // No! — would try to evaluate an infinite stream
        // stream from iterator
        val words = scala.io.Source.fromString(
            """a
              |b
              |c
              |d
              |e
              |f
            """.stripMargin).getLines.toStream
        // stream caches the visited lines!
        words // Stream(a, ?)
        words(5) // f
        words // Stream(a, b, c, d, e, f, ?)
    }
    // lazy views
    // view: lazy, non-caching transformation pipeline over a collection.
    def lazyViews = {
        // 'view' method: yields a collection on which methods are applied lazily;
        // no cache; even first elem is unevaluated;
        // 'apply' method forces evaluation of the entire view;
        // mutating view of mutable collection you mutate original coll;
        val palindromicSquares = (1 to 10000000).view
            .map(x => x*x)
            .filter(x => x.toString == x.toString.reverse)
        // scala.collection.SeqView[Int,Seq[_]] = SeqViewMF(...)
        palindromicSquares.take(10).mkString(",") // String = 1,4,9,121,484,676,10201,12321,14641,40804
        palindromicSquares.take(10).force // Seq[Int] = Vector(1, 4, 9, 121, 484, 676, 10201, 12321, 14641, 40804)
        // don't call
        // palindromicSquares(3)
        // 'apply' method forces evaluation of the entire view;
        // call
        palindromicSquares.take(3).last
        // mutating view of mutable collection you mutate original coll;
        import scala.collection.mutable
        val buff = (1 to 30).to[mutable.ArrayBuffer]
        // view(from, until) gives a writable window; transform writes through to buff
        buff.view(10, 20).transform(x => 0)
        // scala> buff
        // ArrayBuffer(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 21, ...
    }
    // interoperability with java collections
    // asScala/asJava wrappers from JavaConverters (no copying involved).
    def interoperabilityWithJavaCollections = {
        import scala.collection.mutable
        // https://www.scala-lang.org/api/current/scala/collection/JavaConverters$.html
        // import scala.collection.JavaConversions // deprecated
        // NOTE(review): in Scala 2.13+ prefer scala.jdk.CollectionConverters — verify against build
        import scala.collection.JavaConverters._
        // use 'asScala'
        val props: mutable.Map[String, String] = System.getProperties.asScala
        // or, import converters you needed only
        import scala.collection.JavaConverters.propertiesAsScalaMap
        // n.b. conversions yield wrappers, no transformation processes involved
    }
    // parallel collections
    // .par / .seq, parallel folds, and the shared-mutable-state pitfall.
    def parallelCollections = {
        // concurrent programs
        // parallel collections use a global fork-join pool,
        // well suited for processor-bound programs.
        // for IO tasks or some other blocking/waiting you should choose a different
        // execution context
        // n.b. to run this code in REPL you will need a crutch (I'm using a def foo = ...; foo)
        // https://github.com/scala/scala-parallel-collections/issues/34
        // https://stackoverflow.com/questions/15176199/scala-parallel-collection-in-object-initializer-causes-a-program-to-hang/15176433#15176433
        // https://github.com/scala/bug/issues/8119
        // split collection to chunks, process chunks in parallel, combine chunks back to single collection
        val largecoll = (1 to 10000000).toArray
        val largecollPar = largecoll.par // wrapper for array, other coll. types may need copying
        // computes concurrently
        println(largecollPar.sum)
        println(largecollPar.count(_ % 2 == 0))
        // parallelize a 'for loop'
        for (i <- (0 until 1000).par) print(s" $i")
        // n.b. numbers are printed
        // out of order: first in, first printed
        // but, in 'for loop' constructing a new collection, results are
        // assembled in order
        assert( (for (i <- (0 until 10000000).par) yield i) == (0 until 10000000) )
        // in parallel computations do NOT mutate shared variables
        // (the line below is a deliberate example of the race-condition anti-pattern)
        var count = 0; for (i <- (1 to 1000000).par) { if (i % 2 == 0) count += 1 }
        // 'seq' method:
        // method 'par' return ParSeq, ParSet, ParMap objects
        // these are NOT subtypes of Seq, Set, Map and can't be passed to not par methods.
        // so, you need to convert it back
        val res = largecollPar.seq
        // not all methods can be parallelized:
        // operator must be associative: (a op b) op c == a op (b op c)
        // parallel: fold, reduce, aggregate
        val str = (' ' to 'z').foldLeft("")(_ :+ _)
        str.par.aggregate(Set.empty[Char])(_ + _, _ ++ _)
    }
}
object Collections_Exercises {
// 1. Write a function that, given a string, produces a map of the indexes of all characters.
// For example, indexes("Mississippi") should return a map associating
// 'M' with the set {0},
// 'i' with the set {1, 4, 7, 10}, and so on.
// Use a mutable map of characters to mutable sets.
// How can you ensure that the set is sorted?
def ex1 = {
// SortedSet
import scala.collection.mutable
def charsToIndexes(str: String) = {
val map = mutable.Map.empty[Char, mutable.SortedSet[Int]].withDefault(_ => mutable.SortedSet.empty)
str.zipWithIndex.foreach { case (char, idx) =>
map += (char -> (map(char) += idx))
}
map
}
// test
val str = "Mississippi"
val map = charsToIndexes(str)
println(map)
assert(map('M') == Set(0))
assert(map('i') == Set(1,4,7,10))
assert(map('s') == Set(2,3,5,6))
assert(map('p') == Set(8,9))
}
//2. Repeat the preceding exercise, using an immutable map of characters to lists.
def ex2 = {
def charsToIndexes(str: String): Map[Char, List[Int]] = {
// group by char, get map char=>(char,idx)
val pairsmap = str.zipWithIndex.groupBy { case (char, idx) => char }
// extract result
pairsmap.map { case (char, pairs) =>
char -> pairs.map{ case (_, idx) => idx}.toList }
}
// test
val str = "Mississippi"
val map = charsToIndexes(str)
println(map)
assert(map('M') == List(0))
assert(map('i') == List(1,4,7,10))
assert(map('s') == List(2,3,5,6))
assert(map('p') == List(8,9))
}
    //3. Write a function that removes every second element from a ListBuffer.
    // Try it two ways.
    // Call remove(i) for all even i starting at the end of the list.
    // Copy every second element to a new list.
    // Compare the performance.
    def ex3 = {
        import scala.collection.mutable
        // Returns a NEW buffer with elements at odd indexes dropped;
        // 'first' selects the removal strategy (true: remove in place on a copy).
        def removeEverySecond(lst: mutable.ListBuffer[Int], first: Boolean): mutable.ListBuffer[Int] = {
            // copy the input, then remove odd indexes from the end backwards
            // (backwards so earlier removals don't shift the indexes still to visit)
            def firstway = {
                println("way 1")
                val res = lst map identity
                for (i <- res.indices.reverse if i % 2 != 0) res.remove(i)
                res
            }
            def secondway = {
                println("way 2")
                // fastest method
                // lst.zipWithIndex.withFilter { case (x, idx) => idx % 2 == 0 }.map(_._1)
                // first way (remove bad): 7263 ms
                // second way (copy good): 10 ms
                // lst.zipWithIndex.flatMap { case (x, idx) => if (idx % 2 == 0) Some(x) else None }
                // first way (remove bad): 7256 ms
                // second way (copy good): 45 ms
                // slowest method
                val res = mutable.ListBuffer.empty[Int]
                // NOTE(review): sizeHint is presumably a no-op for ListBuffer — verify
                res.sizeHint(1 + lst.length / 2)
                // n.b. lst(i) on a ListBuffer is a linear-time lookup, hence the timings below
                for (i <- lst.indices if i % 2 == 0) res += lst(i)
                res
                // first way (remove bad): 7296 ms
                // second way (copy good): 11953 ms
            }
            if (first) firstway
            else secondway
        }
        // test
        val lst = (0 to 10).to[mutable.ListBuffer]
        val expected = Seq(0, 2, 4, 6, 8, 10).to[mutable.ListBuffer]
        Seq(true, false) foreach { way =>
            val res = removeEverySecond(lst, way)
            println(res)
            assert(res == expected)
        }
        // the input buffer must not have been mutated by either strategy
        assert(lst == (0 to 10).to[mutable.ListBuffer])
        // wall-clock comparison of the two strategies using System.nanoTime
        def performanceTest(): Unit = {
            // https://stackoverflow.com/questions/9160001/how-to-profile-methods-in-scala
            case class Result[T](result: T, elapsedNs: Long)
            def time[R](block: => R): Result[R] = {
                val t0 = System.nanoTime()
                val result = block
                val t1 = System.nanoTime()
                Result(result, t1 - t0)
            }
            val lstsize = 50000
            val count = 5
            val lst = (0 to lstsize).to[mutable.ListBuffer]
            val first = time {
                (0 to count) foreach (_ => removeEverySecond(lst, first = true))
            }
            val second = time {
                (0 to count) foreach (_ => removeEverySecond(lst, first = false))
            }
            println(s"first way (remove bad): ${first.elapsedNs / 1000000} ms")
            println(s"second way (copy good): ${second.elapsedNs / 1000000} ms")
            // first way (remove bad): 7296 ms
            // second way (copy good): 11953 ms
            // second is slower because for each copied element list creates a new Cons object;
            // on first way list just switch references.
        }
        // same comparison via the scalameter micro-benchmarking library (third-party)
        def performanceTest2(): Unit = {
            // http://scalameter.github.io/home/gettingstarted/0.7/inline/index.html
            import org.scalameter._
            val timeBench = config(
                Key.exec.minWarmupRuns -> 5,
                Key.exec.maxWarmupRuns -> 20,
                Key.exec.benchRuns -> 15,
                Key.verbose -> true
            ).withWarmer(new Warmer.Default)
            val memBench = timeBench.withMeasurer(new Measurer.MemoryFootprint)
            val lst = (0 to 30000).to[mutable.ListBuffer]
            val firstTime = timeBench measure { removeEverySecond(lst, first = true) }
            val secondTime = timeBench measure { removeEverySecond(lst, first = false) }
            val firstMem = memBench measure { removeEverySecond(lst, first = true) }
            val secondMem = memBench measure { removeEverySecond(lst, first = false) }
            println(Console.YELLOW)
            println(s"first way (remove bad from-end-to-start): $firstTime, $firstMem")
            println(s"second way (copy good from start-to-end): $secondTime, $secondMem")
            println(Console.RESET)
            // first way (remove bad from-end-to-start): 467.3844768 ms, 599.8 kB
            // second way (copy good from start-to-end): 849.0186980666668 ms, 358.05 kB
        }
        performanceTest()
        performanceTest2()
    }
// 4. Write a function that receives a collection of strings and a map from strings to integers.
// Return a collection of integers that are values of the map corresponding to
// one of the strings in the collection.
// For example, given Array("Tom", "Fred", "Harry") and
// Map("Tom" -> 3, "Dick" -> 4, "Harry" -> 5),
// return Array(3, 5).
// Hint: Use flatMap to combine the Option values returned by get.
def ex4 = {
def str2int(strs: Iterable[String], map: Map[String, Int]): Iterable[Int] =
strs.flatMap(map.get)
// test
val res = str2int(Array("Tom", "Fred", "Harry"), Map("Tom" -> 3, "Dick" -> 4, "Harry" -> 5))
println(res.toList)
assert(res == Array(3, 5).toIterable)
}
// 5. Implement a function that works just like mkString, using reduceLeft.
def ex5 = {
def mkString(lst: Iterable[String], start: String = "", sep: String = "", end: String = ""): String =
start + lst.reduceLeft(_ + sep + _) + end
// test
val lst = Seq("a", "b", "c")
assert(lst.mkString("[", ",", "]") == mkString(lst, "[", ",", "]"))
}
// 6. Given a list of integers lst, what is
// (lst :\ List[Int]())(_ :: _) ?
// (List[Int]() /: lst)(_ :+ _) ?
// How can you modify one of them to reverse the list?
def ex6 = {
val lst = List(1, 2, 3)
def emptyList = List.empty[Int]
// apply 'cons' to lst elements, right to left, building copy of lst
val foldRightCons = (lst :\ emptyList)(_ :: _)
assert(foldRightCons == lst)
// apply 'append' to lst elements, left to right, building copy of lst
val foldLeftAppend = (emptyList /: lst)(_ :+ _)
assert(foldRightCons == lst)
// to revert lst: apply foldLeft with cons or foldRight with append
assert(lst.reverse == (emptyList /: lst)((xs, x) => x :: xs ) )
assert(lst.reverse == (lst :\ emptyList)((x, xs) => xs :+ x) )
}
// 7. In Section 13.10, “Zipping,” on page 187, the expression
// (prices zip quantities) map { p => p._1 * p._2 }
// is a bit inelegant.
// We can’t do
// (prices zip quantities) map { _ * _ }
// because _ * _ is a function with two arguments, and we need a function with
// one argument that is a tuple.
// The tupled method of the Function object changes a function with two arguments
// to one that takes a tuple.
// Apply tupled to the multiplication function so you can map it over the list of pairs.
def ex7 = {
val prices = Seq(5d, 20d, 9.95)
val quantities = Seq(10, 2, 1)
val res = (prices zip quantities) map Function.tupled(_ * _)
assert(res == List(50.0, 40.0, 9.95))
}
// 8. Write a function that turns an array of Double values into a two-dimensional array.
// Pass the number of columns as a parameter.
// For example, with
// Array(1, 2, 3, 4, 5, 6) and three columns,
// return Array(Array(1, 2, 3), Array(4, 5, 6)).
// Use the grouped method.
def ex8 = {
implicit def int2double(i: Int): Double = i.toDouble
def splitToRows(arr: Array[Double], ncols: Int): Array[Array[Double]] =
arr.grouped(ncols).toArray
// test
val data: Array[Double] = Array(1, 2, 3, 4, 5, 6)
val expected: Array[Array[Double]] = Array(
Array(1, 2, 3),
Array(4, 5, 6)
)
val res = splitToRows(data, 3)
assert(res.length == 2)
assert(res.head.toList == expected.head.toList)
assert(res.last.toList == expected.last.toList)
}
// 9. The Scala compiler transforms a for/yield expression
// for (i <- 1 to 10; j <- 1 to i) yield i * j
// to invocations of flatMap and map, like this:
// (1 to 10).flatMap(i => (1 to i).map(j => i * j))
// Explain the use of flatMap.
// Hint: What is (1 to i).map(j => i * j) when i is 1, 2, 3?
// What happens when there are three generators in the for/yield expression?
def ex9 = {
// second generator makes collections, we don't want 10 collections,
// we want unwrapped/flattened values.
// for three generators we need two flatMap
val res = for (i <- 1 to 3; j <- 1 to i; k <- 1 to j) yield s"i: $i, j: $j, k: $k"
res foreach println
assert(res.mkString ==
(1 to 3).flatMap(i =>
(1 to i).flatMap(j =>
(1 to j).map(k =>
s"i: $i, j: $j, k: $k"))).mkString
)
}
// 10. The method
// java.util.TimeZone.getAvailableIDs
// yields time zones such as Africa/Cairo and Asia/Chungking.
// Which continent has the most time zones?
// Hint: groupBy.
def ex10 = {
import java.util.TimeZone
val continentZonesMap = TimeZone.getAvailableIDs.groupBy(
//_.split("/").headOption.getOrElse("unknown")
_.takeWhile(_ != '/')
)
val res = continentZonesMap.maxBy(_._2.length)
// America
println(res._1 + " : " + res._2.toList)
}
// 11. Harry Hacker reads a file into a string and wants to use a parallel collection
// to update the letter frequencies concurrently on portions of the string.
// He uses the following code:
// val frequencies = new scala.collection.mutable.HashMap[Char, Int]
// for (c <- str.par) frequencies(c) = frequencies.getOrElse(c, 0) + 1
// Why is this a terrible idea?
// How can he really parallelize the computation? (Hint: Use aggregate.)
def ex11 = {
// Why is this a terrible idea?
// mutating a shared value/state in concurrent environment: always a bad idea.
// we need to combine partial results: immutable maps [Char, Int]
def frequencesConcurrent(str: String): Map[Char, Int] = {
// HashMap has 'merged' method
import scala.collection.immutable.HashMap
type FMap = HashMap[Char, Int]
val zero = new FMap
val op: (FMap, Char) => FMap = { case (map, char) =>
map + Tuple2(char, 1 + map.getOrElse(char, 0))
}
val combine: (FMap, FMap) => FMap = { case (a, b) =>
// println(s"combine a,b: a: ${a}; b: ${b}")
a.merged(b) { case ((c, f1), (_, f2)) => (c, f1 + f2) }
//(a /: b)((map, rec) => map + (rec._1 -> (rec._2 + map.getOrElse(rec._1, 0))))
}
str.par.aggregate(zero)(op, combine)
}
// test
val res = frequencesConcurrent("Mississippi")
assert(res == Map(
'M' -> 1,
'i' -> 4,
's' -> 4,
'p' -> 2
))
}
}
| vasnake/scala-for-the-impatient | src/main/scala/Chapter13/Collections.scala | Scala | gpl-3.0 | 31,245 |
package keemun.models
import play.api.libs.json._
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2013-2014
* See LICENSE.txt for details.
*/
/**
 * A source-control repository with optional metadata.
 *
 * @param name repo short name
 * @param owner account the repo belongs to; also knows how to build repo URLs
 * @param description optional free-text description
 * @param isPrivate optional visibility flag (None when unknown)
 * @param primaryLang optional dominant programming language
 * @param langsStat optional per-language statistics
 */
case class Repo(
  name: String,
  owner: Account,
  description: Option[String] = None,
  isPrivate: Option[Boolean] = None,
  primaryLang: Option[ProgrammingLang] = None,
  langsStat: Option[Seq[ProgrammingLangStat]] = None) {

  // "ownerName/repoName"
  def fullName = s"${owner.name}/$name"
  // language stats indexed by language code; computed once on first access
  lazy val langsStatIndex = langsStat.map(seq => seq.map{stat => (stat.lang.code, stat)}.toMap)
  // URL building is delegated to the owning account
  def url: String = owner.getRepoUrl(this)
}
object Repo {

  /** JSON serializer for [[Repo]]; field names and order are kept stable. */
  implicit val repoWrites = new Writes[Repo] {
    import ProgrammingLangJson.programmingLangWrites

    def writes(repo: Repo): JsValue = Json.obj(
      "id" -> repo.fullName,
      "name" -> repo.name,
      "full_name" -> repo.fullName,
      "description" -> repo.description,
      "is_private" -> repo.isPrivate,
      "repo_url" -> repo.url,
      "primary_lang" -> repo.primaryLang
    )
  }
}
| maizy/keemun | app/keemun/models/Repo.scala | Scala | mit | 993 |
package skabele.screenshare.controllers
import org.scalatestplus.play._
import play.api.mvc.AnyContentAsEmpty
import play.api.test.Helpers._
import play.api.test._
class HomeControllerSpec extends PlaySpec with OneAppPerTest {

  "HomeController GET" should {

    "render the index page for Provider when requesting from localhost" in {
      // default FakeRequest originates from localhost
      val home = route(app, FakeRequest(GET, "/")).get

      status(home) mustBe OK
      contentType(home) mustBe Some("text/html")
      contentAsString(home) must include ("Simple Screen Share")
    }

    "render the index page for Master when requesting from other host" in {
      // a non-localhost remote address selects the Master view
      val request = FakeRequest(GET, "/", FakeHeaders(), AnyContentAsEmpty, remoteAddress = "some.io")
      val home = route(app, request).get

      status(home) mustBe OK
      contentType(home) mustBe Some("text/html")
      // n.b. "controlls" matches the (misspelled) text in the rendered view
      contentAsString(home) must include ("View controlls")
    }
  }
}
| skabele/simple-screen-share | test/skabele/screenshare/controllers/HomeControllerSpec.scala | Scala | mit | 904 |
package io.buoyant.linkerd.admin
import com.twitter.finagle.http.{Request, Status}
import io.buoyant.admin.ConfigHandler
import io.buoyant.linkerd._
import io.buoyant.linkerd.protocol.ThriftInitializer
import io.buoyant.namer.TestNamerInitializer
import io.buoyant.test.Awaits
import org.scalatest.FunSuite
// Verifies that ConfigHandler serializes a parsed linker config back to the
// expected JSON document (compared with all whitespace stripped).
class ConfigHandlerTest extends FunSuite with Awaits {

  test("reserializes config") {
    val initializers = Linker.Initializers(
      protocol = Seq(TestProtocol.Plain, TestProtocol.Fancy, ThriftInitializer),
      namer = Seq(TestNamerInitializer)
    )
    // one test namer plus two routers (plain and thrift)
    val linker = Linker.parse("""
namers:
- kind: test
  buh: true
routers:
- protocol: plain
  servers:
  - ip: 127.0.0.1
    port: 1
- protocol: thrift
  client:
    thriftProtocol: binary
  servers:
  - port: 2
    thriftProtocol: compact
""", initializers)
    val handler = new ConfigHandler(linker, initializers.iter)
    val req = Request()
    val rsp = await(handler(req))
    assert(rsp.status == Status.Ok)
    // whitespace is removed from the expected document before comparison,
    // so only structure and values matter here
    assert(rsp.contentString == """
      |{
      |  "namers":[
      |    {"buh":true, "kind": "test"}
      |  ],
      |  "routers":[
      |    {"protocol":"plain","servers":[{"port":1, "ip":"localhost"}]},
      |    {
      |      "protocol":"thrift",
      |      "servers":[{"thriftProtocol":"compact", "port":2}],
      |      "client":{"thriftProtocol":"binary"}
      |    }
      |  ]
      |}""".stripMargin.replaceAll("\\\\s", ""))
  }
}
| hhtpcd/linkerd | linkerd/admin/src/test/scala/io/buoyant/linkerd/admin/ConfigHandlerTest.scala | Scala | apache-2.0 | 1,466 |
package objsets
import org.scalatest.FunSuite
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TweetSetSuite extends FunSuite {

  // Shared fixture: a ladder of sets built by successive incl calls.
  trait TestSets {
    val set1 = new Empty
    val set2 = set1.incl(new Tweet("a", "a body", 20))
    val set3 = set2.incl(new Tweet("b", "b body", 20))
    val c = new Tweet("c", "c body", 7)
    val d = new Tweet("d", "d body", 9)
    val set4c = set3.incl(c)
    val set4d = set3.incl(d)
    val set5 = set4c.incl(d)
  }

  // Materialize a TweetSet into a standard Set so it can be counted.
  def asSet(tweets: TweetSet): Set[Tweet] = {
    val builder = Set.newBuilder[Tweet]
    tweets.foreach(builder += _)
    builder.result()
  }

  def size(set: TweetSet): Int = asSet(set).size

  test("filter: on empty set") {
    new TestSets {
      val filtered = set1.filter(tw => tw.user == "a")
      assert(size(filtered) === 0)
    }
  }

  test("filter: a on set5") {
    new TestSets {
      val filtered = set5.filter(tw => tw.user == "a")
      assert(size(filtered) === 1)
    }
  }

  test("filter: 20 on set5") {
    new TestSets {
      val filtered = set5.filter(tw => tw.retweets == 20)
      assert(size(filtered) === 2)
    }
  }

  test("union: set4c and set4d") {
    new TestSets {
      assert(size(set4c.union(set4d)) === 4)
    }
  }

  test("union: with empty set (1)") {
    new TestSets {
      assert(size(set5.union(set1)) === 4)
    }
  }

  test("union: with empty set (2)") {
    new TestSets {
      assert(size(set1.union(set5)) === 4)
    }
  }

  test("mostRetweeted: set5") {
    new TestSets {
      // two tweets are tied at 20 retweets, either may win
      val mr = set5.mostRetweeted
      assert(mr.user == "a" || mr.user == "b")
      assert(mr.retweets == 20)
    }
  }

  test("descending: set5") {
    new TestSets {
      val trends = set5.descendingByRetweet
      assert(!trends.isEmpty)
      assert(trends.head.user == "a" || trends.head.user == "b")
    }
  }
}
| marlanbar/scala-coursera | objsets/src/test/scala/objsets/TweetSetSuite.scala | Scala | unlicense | 1,758 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.spark.datasources
import java.io.ByteArrayInputStream
import org.apache.avro.Schema
import org.apache.avro.Schema.Type._
import org.apache.avro.generic.GenericDatumReader
import org.apache.avro.generic.GenericDatumWriter
import org.apache.avro.generic.GenericRecord
import org.apache.avro.generic.{GenericDatumReader, GenericDatumWriter, GenericRecord}
import org.apache.avro.io._
import org.apache.commons.io.output.ByteArrayOutputStream
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.sql.types._
// TODO: This is not really used in code.
/** Serialization contract: encode a value to bytes and decode it back. */
trait SerDes {
  /** Encodes a value into its byte representation. */
  def serialize(value: Any): Array[Byte]
  /** Decodes a value from `bytes`, reading between offsets `start` and `end`. */
  def deserialize(bytes: Array[Byte], start: Int, end: Int): Any
}
// TODO: This is not really used in code.
/** [[SerDes]] for Double values, backed by the HBase Bytes utility. */
class DoubleSerDes extends SerDes {

  override def serialize(value: Any): Array[Byte] = {
    val d = value.asInstanceOf[Double]
    Bytes.toBytes(d)
  }

  // `end` is unused: a serialized Double has a fixed width, so only the
  // start offset is needed
  override def deserialize(bytes: Array[Byte], start: Int, end: Int): Any =
    Bytes.toDouble(bytes, start)
}
| vincentpoon/hbase | hbase-spark/src/main/scala/org/apache/hadoop/hbase/spark/datasources/SerDes.scala | Scala | apache-2.0 | 1,811 |
/*
* Copyright 2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.actormonitor
import java.lang.management.ManagementFactory
import javax.management.ObjectName
import akka.actor._
import akka.pattern.ask
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.LazyLogging
import org.scalatest._
import org.scalatest.concurrent.AsyncAssertions
import org.squbs.lifecycle.GracefulStop
import org.squbs.unicomplex.JMX._
import org.squbs.unicomplex.{JMX, Unicomplex, UnicomplexBoot}
import spray.util.Utils
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Try
// Test fixture: boots a Unicomplex with the two test cubes and exposes JMX
// lookup helpers for actor-monitor beans.
object ActorMonitorSpec extends LazyLogging {

  // directory on the test classpath that contains the cube dirs
  val dummyJarsDir = getClass.getClassLoader.getResource("classpaths").getPath
  val classPaths = Array(
    "ActorMonitorCube",
    "TestCube"
  ) map (dummyJarsDir + "/" + _)
  // pick a free port so parallel test runs don't collide
  val (_, port) = Utils.temporaryServerHostnameAndPort()
  val config = ConfigFactory.parseString(
    s"""
       |squbs {
       |  actorsystem-name = ActorMonitorSpec
       |  ${JMX.prefixConfig} = true
       |}
       |default-listener.bind-port = $port
    """.stripMargin)
  val boot = UnicomplexBoot(config)
    .createUsing {(name, config) => ActorSystem(name, config)}
    .scanComponents(classPaths)
    .initExtensions.start()

  // Reads a String attribute from the monitor bean of the given actor;
  // None when the bean or attribute is not (yet) registered.
  def getActorMonitorBean(actorName: String, att: String) =
    Try {
      ManagementFactory.getPlatformMBeanServer.getAttribute(getObjName(actorName), att).asInstanceOf[String]
    } .toOption

  // Reads an Int attribute from the ActorMonitor config bean; None on failure.
  def getActorMonitorConfigBean(att: String) =
    Try {
      val o = new ObjectName(prefix(boot.actorSystem) + "org.squbs.unicomplex:type=ActorMonitor")
      ManagementFactory.getPlatformMBeanServer.getAttribute(o, att).asInstanceOf[Int]
    } .toOption

  // Builds the JMX ObjectName of the monitor bean for an actor path
  def getObjName(name: String) = new ObjectName(prefix(boot.actorSystem) + ActorMonitorBean.Pattern + name)
}
/**
 * Exercises the JMX beans registered by the squbs actor monitor: one
 * ActorMonitorBean per watched actor (attributes: Actor, ClassName, Parent,
 * Children, Dispatcher, MailBoxSize, RouteConfig) plus a single
 * ActorMonitorConfigBean with aggregate settings and the live-actor count.
 * Bean lookups go through the helpers on the companion object.
 */
class ActorMonitorSpec extends TestKit(ActorMonitorSpec.boot.actorSystem) with ImplicitSender
  with WordSpecLike with Matchers with BeforeAndAfterAll
  with AsyncAssertions with LazyLogging {

  import org.squbs.testkit.Timeouts._
  import system.dispatcher

  /** Identify every test actor up front so the monitor has live targets before any test runs. */
  override def beforeAll() {
    // Make sure all actors are indeed alive.
    val idFuture1 = (system.actorSelection("/user/TestCube/TestActor") ? Identify(None)).mapTo[ActorIdentity]
    val idFuture2 = (system.actorSelection("/user/TestCube/TestActorWithRoute") ? Identify(None)).mapTo[ActorIdentity]
    val idFuture3 = (system.actorSelection("/user/TestCube/TestActorWithRoute/$a") ? Identify(None)).mapTo[ActorIdentity]
    val idFuture4 = (system.actorSelection("/user/TestCube/TestActor1") ? Identify(None)).mapTo[ActorIdentity]
    val futures = Future.sequence(Seq(idFuture1, idFuture2, idFuture3, idFuture4))
    val idList = Await.result(futures, awaitMax)
    idList foreach {
      case ActorIdentity(_, Some(actor)) => logger.info(s"beforeAll identity: $actor")
      case other => logger.warn(s"beforeAll invalid identity: $other")
    }
  }

  /** Shut the unicomplex down cleanly so JMX beans are unregistered. */
  override def afterAll() {
    Unicomplex(system).uniActor ! GracefulStop
  }

  "ActorMonitor" must {

    "0.0) Register all necessary base actors and have an up-to-date count" in {
      awaitAssert({
        import ActorMonitorSpec.{getActorMonitorBean, getActorMonitorConfigBean}
        getActorMonitorBean("system", "Actor") should be (Some("Actor[akka://ActorMonitorSpec/system]"))
        getActorMonitorBean("user", "Actor") should be (Some("Actor[akka://ActorMonitorSpec/user]"))
        getActorMonitorBean("system/deadLetterListener", "Actor")
          .getOrElse("") should startWith ("Actor[akka://ActorMonitorSpec/system/deadLetterListener#")
        getActorMonitorBean("user/unicomplex", "Actor")
          .getOrElse("") should startWith ("Actor[akka://ActorMonitorSpec/user/unicomplex#")
        getActorMonitorBean("user/ActorMonitorCube", "Actor")
          .getOrElse("") should startWith ("Actor[akka://ActorMonitorSpec/user/ActorMonitorCube#")
        getActorMonitorBean("user/squbs-actormonitor", "Actor")
          .getOrElse("") should startWith ("Actor[akka://ActorMonitorSpec/user/squbs-actormonitor#")
        /*
          Note: The following actor beans are just checked in the other tests below, no need to repeat:
          1. user/TestCube
          2. user/TestCube/TestActor
          3. user/TestCube/TestActorWithRoute
          4. user/TestCube/TestActorWithRoute/$a
          5. user/TestCube/TestActor1
          We just check the count to make sure we have adequate beans registered.
         */
        val cfgBeanCount = getActorMonitorConfigBean("Count").getOrElse(-100)
        if (cfgBeanCount < 11) {
          // Ask the monitor to rescan; registration may lag actor creation.
          system.actorSelection("/user/squbs-actormonitor") ! "refresh"
          logger.warn("Did not register all relevant actors just yet. Refreshing...")
        }
        cfgBeanCount should be >= 11
      }, max = awaitMax, interval = 2 seconds)
    }

    "1.0) getMailBoxSize of unicomplex" in {
      ActorMonitorSpec.getActorMonitorBean("user/unicomplex", "MailBoxSize") should be (Some("0"))
    }

    "1.1) getActor of TestCube/TestActor" in {
      awaitAssert(
        ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActor", "Actor")
          .getOrElse("") should startWith ("Actor[akka://ActorMonitorSpec/user/TestCube/TestActor#"),
        max = awaitMax)
    }

    "2.1) getClassName of TestCube/TestActor" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActor", "ClassName")
      bean should be (Some("org.squbs.actormonitor.testcube.TestActor"))
    }

    "2.2) getRouteConfig of TestCube/TestActor" in {
      ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActor", "RouteConfig") should be (Some("NoRouter"))
    }

    "2.3) getParent of TestCube/TestActor" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActor", "Parent")
      bean shouldBe defined
      bean getOrElse "" should startWith ("Actor[akka://ActorMonitorSpec/user/TestCube#")
    }

    "2.4) getChildren of TestCube/TestActor" in {
      ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActor", "Children") should be (Some(""))
    }

    "2.5) getDispatcher of TestCube/TestActor" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActor", "Dispatcher")
      bean should be (Some("akka.actor.default-dispatcher"))
    }

    "2.6) getMailBoxSize of TestCube/TestActor" in {
      ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActor", "MailBoxSize") should be (Some("0"))
    }

    "3.0) getActor of TestCube" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube", "Actor")
      bean shouldBe defined
      bean getOrElse "" should startWith ("Actor[akka://ActorMonitorSpec/user/TestCube#")
    }

    "3.1) check ActorBean ClassName of TestCube" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube", "ClassName")
      bean should be (Some("org.squbs.unicomplex.CubeSupervisor"))
    }

    "3.2) getRouteConfig of TestCube" in {
      ActorMonitorSpec.getActorMonitorBean("user/TestCube", "RouteConfig") should be (Some("NoRouter"))
    }

    "3.3) getParent of TestCube" in {
      ActorMonitorSpec.
        getActorMonitorBean("user/TestCube", "Parent") should be (Some("Actor[akka://ActorMonitorSpec/user]"))
    }

    "3.4) getChildren of TestCube" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube", "Children")
      bean getOrElse "" should include ("Actor[akka://ActorMonitorSpec/user/TestCube/TestActor#")
      bean getOrElse "" should include ("Actor[akka://ActorMonitorSpec/user/TestCube/TestActorWithRoute#")
    }

    "3.5) getDispatcher of TestCube" in {
      ActorMonitorSpec.
        getActorMonitorBean("user/TestCube", "Dispatcher") should be (Some("akka.actor.default-dispatcher"))
    }

    "3.6) getMailBoxSize of TestCube" in {
      ActorMonitorSpec.getActorMonitorBean("user/TestCube", "MailBoxSize") should be (Some("0"))
    }

    "4.0) getActor of TestCube/TestActorWithRoute" in {
      awaitAssert (
        ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute", "Actor")
          .getOrElse("") should startWith ("Actor[akka://ActorMonitorSpec/user/TestCube/TestActorWithRoute#"),
        max = awaitMax)
    }

    "4.1) getClassName of TestCube/TestActorWithRoute" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute", "ClassName")
      bean should be (Some("akka.routing.RouterActor"))
    }

    "4.2) getRouteConfig of TestCube/TestActorWithRoute" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute", "RouteConfig")
      bean should be (Option("RoundRobinPool(1,Some(DefaultResizer(1,10,1,0.2,0.3,0.1,10))," +
        "OneForOneStrategy(-1,Duration.Inf,true),akka.actor.default-dispatcher,false)"))
    }

    "4.3) getParent of TestCube/TestActorWithRoute" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute", "Parent")
      bean getOrElse "" should startWith ("Actor[akka://ActorMonitorSpec/user/TestCube#")
    }

    "4.4) getChildren of TestCube/TestActorWithRoute" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute", "Children")
      bean getOrElse "" should include ("Actor[akka://ActorMonitorSpec/user/TestCube/TestActorWithRoute/$a#")
    }

    "4.5) getDispatcher of TestCube/TestActorWithRoute" in {
      // Fixed: this previously queried user/TestCube/TestActor, so the test never
      // exercised the router actor its name claims to cover. The router runs on
      // the default dispatcher per the pool config asserted in test 4.2.
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute", "Dispatcher")
      bean should be (Some("akka.actor.default-dispatcher"))
    }

    "4.6) getMailBoxSize of TestCube/TestActorWithRoute" in {
      // Fixed: query the router actor itself rather than TestCube/TestActor.
      ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute", "MailBoxSize") should be (Some("0"))
    }

    "5.0) getActor of TestCube/TestActorWithRoute/$a" in {
      awaitAssert(
        ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute/$a", "Actor")
          .getOrElse("") should startWith ("Actor[akka://ActorMonitorSpec/user/TestCube/TestActorWithRoute/$a#"),
        max = awaitMax)
    }

    "5.1) getClassName of TestCube/TestActorWithRoute/$a" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute/$a", "ClassName")
      bean should be (Some("org.squbs.actormonitor.testcube.TestActorWithRoute"))
    }

    "5.2) getRouteConfig of TestCube/TestActorWithRoute/$a" in {
      ActorMonitorSpec.
        getActorMonitorBean("user/TestCube/TestActorWithRoute/$a", "RouteConfig") should be (Some("NoRouter"))
    }

    "5.3) getParent of TestCube/TestActorWithRoute/$a" in {
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute/$a", "Parent") getOrElse ""
      bean should startWith ("Actor[akka://ActorMonitorSpec/user/TestCube/TestActorWithRoute#")
    }

    "5.4) getChildren of TestCube/TestActorWithRoute/$a" in {
      ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute/$a", "Children") should be (Some(""))
    }

    "5.5) getDispatcher of TestCube/TestActorWithRoute/$a" in {
      // The routee (unlike its router, test 4.5) is configured on blocking-dispatcher.
      val bean = ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute/$a", "Dispatcher")
      bean should be (Some("blocking-dispatcher"))
    }

    "5.6) getMailBoxSize of TestCube/TestActorWithRoute/$a" in {
      ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActorWithRoute/$a", "MailBoxSize") should be (Some("0"))
    }

    "6.1) getBean after actor has been stop" in {
      awaitAssert(ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActor1", "Actor") shouldBe defined,
        max = awaitMax)
      import ActorMonitorSpec._
      val originalNum = getActorMonitorConfigBean("Count").getOrElse(-100)
      originalNum should be > 0
      // Killing the actor must unregister its bean and decrement the count.
      system.actorSelection("/user/TestCube/TestActor1") ! PoisonPill
      awaitAssert({
        ActorMonitorSpec.getActorMonitorBean("user/TestCube/TestActor1", "Actor") shouldBe 'empty
        getActorMonitorConfigBean("Count") should contain (originalNum - 1)
      }, max = awaitMax)
    }

    "7.0) ActorMonitorConfigBean" in {
      import ActorMonitorSpec._
      getActorMonitorConfigBean("MaxCount") should be (Some(500))
      getActorMonitorConfigBean("MaxChildrenDisplay") should be (Some(20))
    }
  }
}
| tutufool/squbs | squbs-actormonitor/src/test/scala/org/squbs/actormonitor/ActorMonitorSpec.scala | Scala | apache-2.0 | 12,991 |
package org.jetbrains.plugins.scala.autoImport.quickFix
import com.intellij.psi._
import org.jetbrains.annotations.Nls
import org.jetbrains.plugins.scala.extensions.{PsiClassExt, PsiNamedElementExt}
import org.jetbrains.plugins.scala.lang.psi.api.ScPackage
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScNamedElement, ScPackaging}
/**
 * Something (class, member, package, implicit instance) that an import quick
 * fix can bring into scope. Concrete variants supply the PSI element and its
 * qualified name; presentation strings are derived from those.
 */
sealed trait ElementToImport {
  protected type E <: PsiNamedElement

  /** The PSI element the import would make available. */
  def element: E

  /** Fully qualified name used for the import statement. */
  def qualifiedName: String

  /** HTML-wrapped popup text shown to the user. */
  final def presentation: String = "<html><body>" + presentationBody + "</body></html>"

  /** Inner popup text; variants override this to add deprecation markup. */
  def presentationBody: String = qualifiedName

  /** Short (unqualified) name of the element. */
  final def name: String = element.name

  /** Whether the underlying PSI element is still valid. */
  final def isValid: Boolean = element.isValid
}

object ElementToImport {

  /** Chooses a message depending on whether every candidate is a class,
    * every candidate is a package, or the candidates are mixed. */
  @Nls
  def messageByType(toImport: Seq[ElementToImport])
                   (@Nls classes: String,
                    @Nls packages: String,
                    @Nls mixed: String): String = {
    val elements = toImport.map(_.element)
    if (elements.forall(_.isInstanceOf[PsiClass])) classes
    else if (elements.forall(_.isInstanceOf[PsiPackage])) packages
    else mixed
  }
}
/** A class that can be imported directly; the popup text carries deprecation
  * highlighting for the class itself. */
final case class ClassToImport(override val element: PsiClass) extends ElementToImport {
  override protected type E = PsiClass
  override def qualifiedName: String = element.qualifiedName
  override def presentationBody: String = Presentation.withDeprecation(element)
}
/** An extension method importable as `pathToOwner.name`; `owner` is the
  * packaging or class that declares it. */
final case class ExtensionMethodToImport(override val element: ScFunction,
                                         owner: PsiElement,
                                         pathToOwner: String) extends ElementToImport {

  override protected type E = ScFunction

  override def qualifiedName: String = s"$pathToOwner.$name"

  override def presentationBody: String =
    Presentation.withDeprecations(element, owner, pathToOwner)
}

object ExtensionMethodToImport {

  /** Factory for a method declared inside a packaging clause. */
  def apply(element: ScFunction, owner: ScPackaging): ExtensionMethodToImport =
    ExtensionMethodToImport(element, owner, owner.fqn)

  /** Factory for a method declared inside a class or object. */
  def apply(element: ScFunction, owner: PsiClass): ExtensionMethodToImport =
    ExtensionMethodToImport(element, owner, owner.qualifiedName)
}
/** An arbitrary named member importable as `pathToOwner.name`. */
final case class MemberToImport(override val element: PsiNamedElement,
                                owner: PsiNamedElement,
                                pathToOwner: String) extends ElementToImport {

  override protected type E = PsiNamedElement

  override def qualifiedName: String = s"$pathToOwner.$name"

  override def presentationBody: String =
    Presentation.withDeprecations(element, owner, pathToOwner)
}

object MemberToImport {

  /** Convenience factory: the path is the owner class's qualified name. */
  def apply(element: PsiNamedElement, owner: PsiClass): MemberToImport =
    new MemberToImport(element, owner, owner.qualifiedName)
}
/** A package whose prefix can be imported; the qualified name comes straight
  * from the PSI package element. */
final case class PrefixPackageToImport(override val element: ScPackage) extends ElementToImport {
  override protected type E = ScPackage
  override def qualifiedName: String = element.getQualifiedName
}
/** An implicit instance located by the import search; identity and
  * presentation delegate to the found instance's named element and owner path. */
final case class ImplicitToImport(found: FoundImplicit) extends ElementToImport {
  protected type E = ScNamedElement
  override def element: ScNamedElement = found.instance.named
  override def qualifiedName: String = found.instance.qualifiedName
  override def presentationBody: String =
    Presentation.withDeprecations(element, found.instance.owner, found.instance.pathToOwner)
}
package edu.uchicago.cs.encsel.dataset.feature
import org.junit.Assert._
import org.junit.Test
/**
* Created by harper on 4/27/17.
*/
/** Unit tests for the record filters in [[Filter]]: prefix take, iid sampling,
  * byte-budget truncation, and minimum-sample-size sampling. */
class FilterTest {

  @Test
  def testFirstNFilter: Unit = {
    val source = (0 to 1000).map(_.toString).toIterator
    val taken = Filter.firstNFilter(50)(source).toArray
    assertEquals(50, taken.length)
    // The filter must preserve order: element i is the string "i".
    taken.zipWithIndex.foreach { case (value, index) =>
      assertEquals(index.toString, value)
    }
  }

  @Test
  def testIidSamplingFilter: Unit = {
    val source = (0 to 5000).map(_.toString).toIterator
    val sampled = Filter.iidSamplingFilter(0.1)(source).toArray
    // Roughly 10% of 5001 records, with slack for randomness.
    assertTrue(400 <= sampled.length)
    assertTrue(sampled.length <= 600)
    sampled.foreach { record =>
      val numeric = record.toInt
      assertTrue(numeric >= 0)
      assertTrue(numeric <= 5000)
    }
    // No record should be sampled twice.
    assertEquals(sampled.toSet.size, sampled.length)
  }

  @Test
  def testSizeFilter: Unit = {
    val small = (0 until 20).map(_ => "abcde").toIterator
    val large = (0 until 1000).map(_ => "abcde").toIterator
    // Under the byte budget everything passes; over it the stream is cut off.
    assertEquals(20, Filter.sizeFilter(500)(small).toArray.length)
    assertEquals(100, Filter.sizeFilter(500)(large).toArray.length)
  }

  @Test
  def testMinSizeFilter: Unit = {
    val wideRows = (0 until 20).map(_ => "abcde").toIterator
    // Fewer records than the minimum: all of them are kept.
    assertEquals(20, Filter.minSizeFilter(100, 0.1)(wideRows).toArray.length)
    val narrowRows = (0 until 1000).map(_ => "a").toIterator
    val sampled = Filter.minSizeFilter(100, 0.1)(narrowRows).toArray
    assertTrue(170 <= sampled.length)
    assertTrue(sampled.length <= 210)
  }
}
| harperjiang/enc-selector | src/test/scala/edu/uchicago/cs/encsel/dataset/feature/FilterTest.scala | Scala | apache-2.0 | 1,621 |
/**
* This file is part of mycollab-web.
*
* mycollab-web is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-web is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-web. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.module.project.view.parameters
import com.esofthead.mycollab.module.project.domain.Project
import com.esofthead.mycollab.vaadin.mvp.ScreenData
/**
* @author MyCollab Ltd.
* @since 5.0.3
*/
object ProjectScreenData {
  // Navigation request carrying an Integer payload — presumably a project id;
  // confirm against callers.
  class Goto(params: Integer) extends ScreenData[Integer](params) {}
  // Opens the edit screen for the given Project.
  class Edit(params: Project) extends ScreenData[Project](params) {}
  // Navigates to the tag list; the payload is untyped (Object) here.
  class GotoTagList(params: Object) extends ScreenData[Object](params) {}
  // Search request carrying the query string.
  class SearchItem(params: String) extends ScreenData[String](params) {}
}
| uniteddiversity/mycollab | mycollab-web/src/main/scala/com.esofthead.mycollab.module.project.view.parameters/ProjectScreenData.scala | Scala | agpl-3.0 | 1,239 |
package downstream
import japgolly.microlibs.testutil.TestUtil._
import japgolly.microlibs.testutil.TestUtilInternals._
import scala.Console._
import sourcecode.Line
import utest._
/**
 * Inspects the emitted JS (`Props.content`) and asserts which strings must or
 * must not appear, depending on build properties (component-name modes,
 * fastOptJS, reusability dev settings, config class).
 */
object JsOutputTest extends TestSuite {
  import Props._
  // Fails when `str` is not one of the allowed property values.
  private def assertLegalValue(str: String)(legal: String*)(implicit l: Line): Unit =
    if (!legal.contains(str))
      fail(s"Illegal value: '$str'. Legal values are: " + legal.sorted.mkString("", ", ", "."))
  // For each (pattern -> substring) pair: the substring is expected in the JS
  // iff the current property value matches the pattern (null pattern = never).
  private def contentTest(prop: String, legalValuesByComma: String)
                         (propValueToSubstr: (String, String)*)
                         (implicit l: Line): Unit = {
    assertLegalValue(prop)(legalValuesByComma.split(',').toSeq: _*)
    val expectToSubstr =
      propValueToSubstr.map { case (pv, substr) =>
        val expect = (pv != null) && prop.matches(pv)
        expect -> substr
      }
    contentTest(expectToSubstr: _*)
  }
  // Sentinel pattern meaning "this substring must never be present".
  private def never: String =
    null
  // Debug helper: prints every JS line containing `term`, ANSI-highlighted.
  private def fgrep(term: String): Unit = {
    println(s"> fgrep '$term'")
    content
      .linesIterator
      .zipWithIndex
      .filter(_._1.contains(term))
      .map { case (s, l) => s"$GREEN$l:$RESET " + s.replace(term, MAGENTA_B + WHITE + term + RESET) }
      .foreach(println)
  }
  // Core check: verify presence/absence of each substring in the JS; on any
  // mismatch, fgrep every substring for context and fail with all errors.
  private def contentTest(expectToSubstr: (Boolean, String)*)(implicit l: Line): Unit = {
    System.out.flush()
    System.err.flush()
    var errors = List.empty[String]
    for ((expect, substr) <- expectToSubstr) {
      val actual = content.contains(substr)
      val pass = actual == expect
      val result = if (pass) s"${GREEN}pass$RESET" else s"${RED_B}${WHITE}FAIL$RESET"
      val should = if (expect) "should" else "shouldn't"
      val strCol = if (expect) (GREEN + BRIGHT_GREEN) else BRIGHT_BLACK
      println(s"[$result] JS $should contain $strCol$substr$RESET")
      if (!pass) errors ::= s"JS $should contain $substr"
    }
    System.out.flush()
    if (errors.nonEmpty) {
      for ((_, substr) <- expectToSubstr)
        fgrep(substr)
      fail(errors.sorted.mkString(", "))
    }
  }
  override def tests = Tests {
    "size" - "%,d bytes".format(content.length)
    // Explicitly-named component: name survives (possibly modified) per mode.
    "carrot" - {
      val t = if (dsCfg1) "custom" else compnameAll
      contentTest(t, "allow,blank,custom")(
        "allow" -> "\"CarRot!\"",
        "custom" -> "\"CarRot!-MOD\"",
      )
    }
    // Automatically-derived component name: short/full/blank/custom modes.
    "pumpkin" - {
      val t = if (dsCfg1) "custom" else if (compnameAll == "blank") "blank" else compnameAuto
      contentTest(t, "full,short,blank,custom")(
        "short" -> "\"Pumpkin\"",
        "full" -> "\"downstream.Pumpkin\"",
        "custom" -> "\"downstream.Pumpkin-AUTO-MOD\"",
        never -> "automaticComponentName__T__T"
      )
    }
    // The overlay is only linked into fastOptJS builds with the overlay
    // setting on and no custom config class.
    "ReusabilityOverlay" - {
      val expect = fastOptJS && reusabilityDev.contains("overlay") && configClass.isEmpty
      contentTest(expect -> "ReusabilityOverlay")
    }
    "devAssertWarn" - contentTest(
      true -> "http://some.url",
      fastOptJS -> "Consider using BaseUrl.fromWindowOrigin",
    )
  }
}
| japgolly/scalajs-react | downstream-tests/jvm/src/test/scala/downstream/JsOutputTest.scala | Scala | apache-2.0 | 3,031 |
package jp.ijufumi.openreports.vo
import scala.beans.BeanProperty
/** View-model row for a group. `@BeanProperty` exposes Java-style getters
  * (getGroupId etc.) for the view layer; `isBelong` flags membership. */
case class GroupInfo(@BeanProperty groupId: Long,
                     @BeanProperty groupName: String,
                     @BeanProperty versions: Long,
                     @BeanProperty isBelong: Boolean) {

  /** Convenience constructor: membership defaults to false. */
  def this(groupId: Long, groupName: String, versions: Long) =
    this(groupId, groupName, versions, isBelong = false)
}

object GroupInfo {

  /** Factory mirroring the three-argument auxiliary constructor. */
  def apply(groupId: Long, groupName: String, versions: Long): GroupInfo =
    new GroupInfo(groupId, groupName, versions)
}
| ijufumi/openreports_scala | src/main/scala/jp/ijufumi/openreports/vo/GroupInfo.scala | Scala | mit | 551 |
package Adventure.o1.adventure
/** The class `Action` represents actions that a player may take in a text adventure game.
* `Action` objects are constructed on the basis of textual commands and are, in effect,
* parsers for such commands. An action object is immutable after creation.
*
* @param input a textual in-game command such as "go east" or "rest"
*/
/** The class `Action` represents a single textual command issued by the player.
  * The constructor normalises the raw input and splits it into a verb and its
  * modifiers; `execute` dispatches the verb to the matching `Player` method.
  * An action object is immutable after creation.
  *
  * @param input a textual in-game command such as "go east" or "rest"
  */
class Action(input: String) {

  private val commandText = input.trim.toLowerCase
  private val verb = commandText.takeWhile(_ != ' ')
  private val modifiers = commandText.drop(verb.length).trim

  /** Causes the given player to take the action represented by this object,
    * assuming that the command was understood. Returns `Some(description)` of
    * what happened (such as "You go west."), or `None` when the verb is not
    * recognized. */
  def execute(actor: Player) = this.verb match {
    case "go"        => Some(actor.go(this.modifiers))
    case "rest"      => Some(actor.rest())
    case "xyzzy"     => Some("The grue tastes yummy.")
    case "get"       => Some(actor.get(this.modifiers))
    case "drop"      => Some(actor.drop(this.modifiers))
    case "examine"   => Some(actor.examine(this.modifiers))
    case "inventory" => Some(actor.inventory)
    case "quit"      => Some(actor.quit())
    case _           => None
  }

  /** Returns a textual description of the action object, for debugging purposes. */
  override def toString = s"$verb (modifiers: $modifiers)"
}
| Ronin748/MOOC_s15 | src/Adventure/o1/adventure/Action.scala | Scala | gpl-2.0 | 1,763 |
package system.cell.processor.route.actors
import com.actors.CustomActor
import system.ontologies.messages._
import scala.collection.mutable
/**
* This actor manages a cache of the Routes. Routes are valid for a time of X seconds
*
*/
/**
 * Caches computed routes for `cacheKeepAlive` milliseconds.
 *
 * On a RouteInfo request it replies with a cached RouteResponse when a fresh
 * entry exists for either direction (a cached route is reused reversed for the
 * opposite direction); otherwise it echoes the RouteInfo back so the caller
 * computes the route. Computed routes arrive as RouteResponse messages and are
 * stored together with their creation timestamp.
 */
class CacheManager(val cacheKeepAlive: Long) extends CustomActor {

    private val routeCache: mutable.Map[(String, String), List[RoomID]] = mutable.HashMap.empty
    private val routesTimelife: mutable.Map[(String, String), Long] = mutable.HashMap.empty

    // True when a route for this (from, to) key was cached < cacheKeepAlive ms ago.
    private def isFresh(key: (String, String)): Boolean =
        routesTimelife.get(key).exists(System.currentTimeMillis - _ < cacheKeepAlive)

    override def receive: Receive = {
        case msg@RouteInfo(req@RouteRequest(_, from, to, _), _) =>
            log.info("Evaluating cache...")
            sender ! (
                if (isFresh((from.name, to.name))) {
                    log.info("Match found in Cache...")
                    RouteResponse(req, routeCache((from.name, to.name)))
                } else if (isFresh((to.name, from.name))) {
                    log.info("Match found in Cache...")
                    // Bug fix: read the key that was actually verified as fresh,
                    // (to, from), and reverse it. The previous code looked up
                    // (from, to), which may be absent in this branch and would
                    // throw NoSuchElementException, crashing the actor.
                    RouteResponse(req, routeCache((to.name, from.name)).reverse)
                } else {
                    log.info("No match found in Cache...")
                    msg
                })
        case RouteResponse(RouteRequest(_, from, to, _), route) =>
            log.info("Caching new route... ")
            routeCache += (from.name, to.name) -> route
            routesTimelife += (from.name, to.name) -> System.currentTimeMillis
        case _ =>
    }
}
| albertogiunta/arianna | src/main/scala/system/cell/processor/route/actors/CacheManager.scala | Scala | gpl-3.0 | 1,758 |
/*
* Copyright (c) 2016. StulSoft, Yuriy Stul
*/
package com.stulsoft.ysps.pimplicit
import com.stulsoft.ysps.pimplicit.StringUtils._
import com.typesafe.scalalogging.LazyLogging
/**
* Created by Yuriy Stul on 11/8/2016.
*/
/** Demonstrates the extension methods that [[StringUtils]]' implicit class
  * adds to String. The exact semantics of increment/decrement/replacement
  * live in StringUtils and are not visible here. */
object ImplicitClassTest extends App with LazyLogging {
  logger.info("Start")
  logger.info(""""Hey".increment: {}""", "Hey".increment)
  logger.info(""""Hey".decrement: {}""", "Hey".decrement)
  logger.info(""""Hey".replacement: {}""", "Hey".replacement('X'))
  logger.info("End")
}
| ysden123/ysps | src/main/scala/com/stulsoft/ysps/pimplicit/ImplicitClassTest.scala | Scala | mit | 518 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.directdictionary
import java.sql.Date
import org.apache.spark.sql.Row
import org.apache.spark.sql.hive.HiveContext
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.test.util.QueryTest
/**
* Test Class for detailed query on timestamp datatypes
*
*
*/
class DateDataTypeNullDataTest extends QueryTest with BeforeAndAfterAll {
  var hiveContext: HiveContext = _

  // Creates the test table and loads a CSV containing null date values,
  // using the yyyy/MM/dd date format for the load.
  override def beforeAll {
    try {
      sql(
        """CREATE TABLE IF NOT EXISTS timestampTyeNullData
                     (ID Int, dateField date, country String,
                     name String, phonetype String, serialname String, salary Int)
                    STORED BY 'org.apache.carbondata.format'"""
      )
      CarbonProperties.getInstance()
        .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy/MM/dd")
      val csvFilePath = s"$resourcesPath/datasamplenull.csv"
      sql("LOAD DATA LOCAL INPATH '" + csvFilePath + "' INTO TABLE timestampTyeNullData").collect();
    } catch {
      // NOTE(review): catching Throwable swallows fatal errors too; a NonFatal
      // guard would be safer, but kept as-is to preserve setup behavior.
      case x: Throwable =>
        x.printStackTrace()
        // Restore the default date format if the load failed mid-way.
        CarbonProperties.getInstance()
          .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
            CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
    }
  }

  // Null date rows must be excluded by "is not null" aggregation.
  test("SELECT max(dateField) FROM timestampTyeNullData where dateField is not null") {
    checkAnswer(
      sql("SELECT max(dateField) FROM timestampTyeNullData where dateField is not null"),
      Seq(Row(Date.valueOf("2015-07-23"))
      )
    )
  }

  // Null date rows must be selectable and come back as SQL NULL.
  test("SELECT * FROM timestampTyeNullData where dateField is null") {
    checkAnswer(
      sql("SELECT dateField FROM timestampTyeNullData where dateField is null"),
      Seq(Row(null)
      ))
  }

  // Drops the table and restores the default date format / dictionary setting.
  override def afterAll {
    sql("drop table timestampTyeNullData")
    CarbonProperties.getInstance()
      .addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
        CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
    CarbonProperties.getInstance().addProperty("carbon.direct.dictionary", "false")
  }
}
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.api.util
/**
 * Credentials to authenticate with.
 *
 * Typically consumed by credential-based authentication providers; the
 * identifier is usually a username or email address.
 *
 * @param identifier The unique identifier to authenticate with.
 * @param password The password to authenticate with.
 */
case class Credentials(identifier: String, password: String)
| mohiva/play-silhouette | silhouette/app/com/mohiva/play/silhouette/api/util/Credentials.scala | Scala | apache-2.0 | 905 |
package ee.cone.c4gate
import java.time.Instant
import java.util.UUID
import ee.cone.c4actor.QProtocol.S_Firstborn
import ee.cone.c4actor.Types.SrcId
import ee.cone.c4actor._
import ee.cone.c4assemble.Types.{Each, Values}
import ee.cone.c4assemble._
import ee.cone.c4di.{c4, c4multi}
import ee.cone.c4gate.ActorAccessProtocol.C_ActorAccessKey
import ee.cone.c4gate.AvailabilitySettingProtocol.C_AvailabilitySetting
import ee.cone.c4gate.HttpProtocol._
import ee.cone.c4proto.{Id, protocol}
import okio.ByteString
/** Wire protocol: a persisted access key for the actor system. `srcId` is the
  * firstborn's id; `value` holds a random UUID (see ActorAccessCreateTx).
  * The @Id hex codes are part of the serialized format — do not change. */
@protocol("ActorAccessApp") object ActorAccessProtocol {
  @Id(0x006A) case class C_ActorAccessKey(
    @Id(0x006B) srcId: String,
    @Id(0x006C) value: String
  )
}
/** Emits a one-shot ActorAccessCreateTx for the firstborn while no
  * C_ActorAccessKey exists yet; once a key is stored the join yields Nil,
  * so the key is created exactly once. */
@c4assemble("ActorAccessApp") class ActorAccessAssembleBase(
  actorAccessCreateTxFactory: ActorAccessCreateTxFactory,
){
  def join(
    key: SrcId,
    first: Each[S_Firstborn],
    accessKeys: Values[C_ActorAccessKey]
  ): Values[(SrcId, TxTransform)] =
    if (accessKeys.nonEmpty) Nil
    else List(WithPK(actorAccessCreateTxFactory.create(s"ActorAccessCreateTx-${first.srcId}", first)))
}
/** One-shot transform: stores a fresh random UUID as the system's access key.
  * After the key exists the assemble above stops producing this transform. */
@c4multi("ActorAccessApp") final case class ActorAccessCreateTx(srcId: SrcId, first: S_Firstborn)(
  txAdd: LTxAdd,
) extends TxTransform {
  def transform(local: Context): Context =
    txAdd.add(LEvent.update(C_ActorAccessKey(first.srcId, s"${UUID.randomUUID}")))(local)
}
/*
@c4assemble("PrometheusApp") class PrometheusAssembleBase(compressor: PublishFullCompressor, metricsFactories: List[MetricsFactory]) {
def join(
key: SrcId,
first: Each[S_Firstborn],
accessKey: Each[C_ActorAccessKey]
): Values[(SrcId,TxTransform)] = {
val path = s"/${accessKey.value}-metrics"
println(s"Prometheus metrics at $path")
List(WithPK(PrometheusTx(path)(compressor.value, metricsFactories)))
}
}
case class PrometheusTx(path: String)(compressor: Compressor, metricsFactories: List[MetricsFactory]) extends TxTransform {
def transform(local: Context): Context = {
val time = System.currentTimeMillis
val metrics = metricsFactories.flatMap(_.measure(local))
val bodyStr = PrometheusMetricBuilder.withTimeStamp(metrics, time)
val body = compressor.compress(okio.ByteString.encodeUtf8(bodyStr))
val headers = List(N_Header("content-encoding", compressor.name))
Monitoring.publish(time, 15000, 5000, path, headers, body)(local)
}
}*/
@c4("AvailabilityApp") final class Monitoring(
  publisher: Publisher,
  txAdd: LTxAdd,
) {
  /**
   * Publishes `body` under `path` with the given headers and schedules the
   * next wake-up at `time + updatePeriod` via SleepUntilKey. Each publication
   * is kept alive until its publish time plus `updatePeriod + timeout`, so it
   * stays valid across one missed refresh.
   */
  def publish(
    time: Long, updatePeriod: Long, timeout: Long,
    path: String, headers: List[N_Header], body: okio.ByteString
  ): Context => Context = {
    val nextTime = time + updatePeriod
    val pubEvents = publisher.publish(ByPathHttpPublication(path, headers, body), _+updatePeriod+timeout)
    txAdd.add(pubEvents).andThen(SleepUntilKey.set(Instant.ofEpochMilli(nextTime)))
  }
}
/** Wires one AvailabilityTx per firstborn. Update period and timeout come
  * from an optional C_AvailabilitySetting, falling back to the defaults
  * given to this assemble (3s / 3s). */
@c4assemble("AvailabilityApp") class AvailabilityAssembleBase(updateDef: Long = 3000, timeoutDef: Long = 3000)(
  monitoring: Monitoring
) {
  def join(
    key: SrcId,
    first: Each[S_Firstborn],
    settings: Values[C_AvailabilitySetting]
  ): Values[(SrcId, TxTransform)] = {
    // Single.option presumably expects at most one setting record — confirm its semantics.
    val (updatePeriod, timeout) = Single.option(settings.map(s => s.updatePeriod -> s.timeout)).getOrElse((updateDef, timeoutDef))
    List(WithPK(AvailabilityTx(s"AvailabilityTx-${first.srcId}", updatePeriod, timeout)(monitoring)))
  }
}
/** Wire protocol: optional override for the availability heartbeat's update
  * period and timeout (milliseconds). @Id codes are part of the format. */
@protocol("AvailabilityApp") object AvailabilitySettingProtocol {
  @Id(0x00f0) case class C_AvailabilitySetting(
    @Id(0x0001) srcId: String,
    @Id(0x0002) updatePeriod: Long,
    @Id(0x0003) timeout: Long
  )
}
/** Periodic heartbeat: republishes an empty body at /availability every
  * `updatePeriod` ms; Monitoring.publish also schedules the next run. */
case class AvailabilityTx(srcId: SrcId, updatePeriod: Long, timeout: Long)(
  monitoring: Monitoring
) extends TxTransform {
  def transform(local: Context): Context =
    monitoring.publish(
      System.currentTimeMillis, updatePeriod, timeout,
      "/availability", Nil, ByteString.EMPTY
    )(local)
}
// These are meant to be typed into the REPL. You can also run
// scala -Xnojline < repl-session.scala to run them all at once.
// A partial function defined only for the characters '+' and '-'.
val f: PartialFunction[Char, Int] = { case '+' => 1 ; case '-' => -1 }
f('-')
// isDefinedAt checks applicability without invoking the function.
f.isDefinedAt('0')
f('0') // Throws MatchError
// collect applies the partial function only where it is defined, skipping the rest.
"-3+4".collect { case '+' => 1 ; case '-' => -1 }
| P7h/ScalaPlayground | Scala for the Impatient/examples/ch14/sec17/repl-session.scala | Scala | apache-2.0 | 307 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import scala.reflect.ClassTag
import org.scalacheck.Gen
import org.scalactic.TripleEqualsSupport.Spread
import org.scalatest.exceptions.TestFailedException
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow}
import org.apache.spark.sql.catalyst.analysis.{ResolveTimeZone, SimpleAnalyzer}
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.optimizer.SimpleTestOptimizer
import org.apache.spark.sql.catalyst.plans.PlanTestBase
import org.apache.spark.sql.catalyst.plans.logical.{OneRowRelation, Project}
import org.apache.spark.sql.catalyst.util.{ArrayData, MapData}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
 * A few helper functions for expression evaluation testing. Mixin this trait to use them.
 */
trait ExpressionEvalHelper extends GeneratorDrivenPropertyChecks with PlanTestBase {
  self: SparkFunSuite =>

  // Builds an InternalRow from external (Scala) values by converting each value
  // to its Catalyst representation first.
  protected def create_row(values: Any*): InternalRow = {
    InternalRow.fromSeq(values.map(CatalystTypeConverters.convertToCatalyst))
  }

  // Round-trips the expression through Java serialization (which also verifies it
  // is serializable) and resolves any timezone-aware sub-expressions.
  private def prepareEvaluation(expression: Expression): Expression = {
    val serializer = new JavaSerializer(new SparkConf()).newInstance
    val resolver = ResolveTimeZone(new SQLConf)
    resolver.resolveTimeZones(serializer.deserialize(serializer.serialize(expression)))
  }

  /**
   * Checks that `expression` evaluates to `expected` under every evaluation path:
   * interpreted, codegen'd mutable projection, unsafe projection (when the data type
   * supports it), and after optimization.
   */
  protected def checkEvaluation(
      expression: => Expression, expected: Any, inputRow: InternalRow = EmptyRow): Unit = {
    // Make it as method to obtain fresh expression everytime.
    def expr = prepareEvaluation(expression)
    val catalystValue = CatalystTypeConverters.convertToCatalyst(expected)
    checkEvaluationWithoutCodegen(expr, catalystValue, inputRow)
    checkEvaluationWithGeneratedMutableProjection(expr, catalystValue, inputRow)
    if (GenerateUnsafeProjection.canSupport(expr.dataType)) {
      checkEvaluationWithUnsafeProjection(expr, catalystValue, inputRow)
    }
    checkEvaluationWithOptimization(expr, catalystValue, inputRow)
  }

  /**
   * Check the equality between result of expression and expected value, it will handle
   * Array[Byte], Spread[Double], MapData and Row.
   */
  protected def checkResult(result: Any, expected: Any, exprDataType: DataType): Boolean = {
    // Unwrap UDTs so that comparisons are performed on the underlying SQL type.
    val dataType = UserDefinedType.sqlType(exprDataType)
    (result, expected) match {
      case (result: Array[Byte], expected: Array[Byte]) =>
        java.util.Arrays.equals(result, expected)
      case (result: Double, expected: Spread[Double @unchecked]) =>
        expected.asInstanceOf[Spread[Double]].isWithin(result)
      case (result: ArrayData, expected: ArrayData) =>
        // Element-wise recursive comparison; bails out at the first mismatch.
        result.numElements == expected.numElements && {
          val et = dataType.asInstanceOf[ArrayType].elementType
          var isSame = true
          var i = 0
          while (isSame && i < result.numElements) {
            isSame = checkResult(result.get(i, et), expected.get(i, et), et)
            i += 1
          }
          isSame
        }
      case (result: MapData, expected: MapData) =>
        // Compare key and value arrays independently (relies on matching ordering).
        val kt = dataType.asInstanceOf[MapType].keyType
        val vt = dataType.asInstanceOf[MapType].valueType
        checkResult(result.keyArray, expected.keyArray, ArrayType(kt)) &&
          checkResult(result.valueArray, expected.valueArray, ArrayType(vt))
      case (result: Double, expected: Double) =>
        // NaN != NaN under ==, so handle it explicitly.
        if (expected.isNaN) result.isNaN else expected == result
      case (result: Float, expected: Float) =>
        if (expected.isNaN) result.isNaN else expected == result
      case (result: UnsafeRow, expected: GenericInternalRow) =>
        val structType = exprDataType.asInstanceOf[StructType]
        result.toSeq(structType) == expected.toSeq(structType)
      case (result: Row, expected: InternalRow) => result.toSeq == expected.toSeq(result.schema)
      case _ =>
        result == expected
    }
  }

  /**
   * Checks that evaluating `expression` throws a `T` whose message contains
   * `expectedErrMsg`, using an empty input row.
   */
  protected def checkExceptionInExpression[T <: Throwable : ClassTag](
      expression: => Expression,
      expectedErrMsg: String): Unit = {
    checkExceptionInExpression[T](expression, InternalRow.empty, expectedErrMsg)
  }

  /**
   * Checks that evaluating `expression` against `inputRow` throws a `T` whose message
   * contains `expectedErrMsg`, in every evaluation mode (interpreted, codegen, unsafe).
   */
  protected def checkExceptionInExpression[T <: Throwable : ClassTag](
      expression: => Expression,
      inputRow: InternalRow,
      expectedErrMsg: String): Unit = {
    def checkException(eval: => Unit, testMode: String): Unit = {
      withClue(s"($testMode)") {
        val errMsg = intercept[T] {
          eval
        }.getMessage
        if (!errMsg.contains(expectedErrMsg)) {
          fail(s"Expected error message is `$expectedErrMsg`, but `$errMsg` found")
        }
      }
    }
    // Make it as method to obtain fresh expression everytime.
    def expr = prepareEvaluation(expression)
    checkException(evaluateWithoutCodegen(expr, inputRow), "non-codegen mode")
    checkException(evaluateWithGeneratedMutableProjection(expr, inputRow), "codegen mode")
    if (GenerateUnsafeProjection.canSupport(expr.dataType)) {
      checkException(evaluateWithUnsafeProjection(expr, inputRow), "unsafe mode")
    }
  }

  // Interpreted evaluation: initializes any nondeterministic sub-expressions
  // (partition index 0) before calling eval().
  protected def evaluateWithoutCodegen(
      expression: Expression, inputRow: InternalRow = EmptyRow): Any = {
    expression.foreach {
      case n: Nondeterministic => n.initialize(0)
      case _ =>
    }
    expression.eval(inputRow)
  }

  // Forces the by-name `generator`, converting any code-generation failure
  // (deliberately including Errors, which Janino can throw) into a test failure
  // with the offending expression and full stack trace.
  protected def generateProject(
      generator: => Projection,
      expression: Expression): Projection = {
    try {
      generator
    } catch {
      case e: Throwable =>
        fail(
          s"""
            |Code generation of $expression failed:
            |$e
            |${Utils.exceptionString(e)}
          """.stripMargin)
    }
  }

  // Asserts the interpreted result matches `expected` (already in Catalyst form).
  protected def checkEvaluationWithoutCodegen(
      expression: Expression,
      expected: Any,
      inputRow: InternalRow = EmptyRow): Unit = {
    val actual = try evaluateWithoutCodegen(expression, inputRow) catch {
      case e: Exception => fail(s"Exception evaluating $expression", e)
    }
    if (!checkResult(actual, expected, expression.dataType)) {
      val input = if (inputRow == EmptyRow) "" else s", input: $inputRow"
      fail(s"Incorrect evaluation (codegen off): $expression, " +
        s"actual: $actual, " +
        s"expected: $expected$input")
    }
  }

  // Asserts the codegen'd mutable-projection result matches `expected`.
  protected def checkEvaluationWithGeneratedMutableProjection(
      expression: Expression,
      expected: Any,
      inputRow: InternalRow = EmptyRow): Unit = {
    val actual = evaluateWithGeneratedMutableProjection(expression, inputRow)
    if (!checkResult(actual, expected, expression.dataType)) {
      val input = if (inputRow == EmptyRow) "" else s", input: $inputRow"
      fail(s"Incorrect evaluation: $expression, actual: $actual, expected: $expected$input")
    }
  }

  // Evaluates via a generated mutable projection and returns the single output column.
  protected def evaluateWithGeneratedMutableProjection(
      expression: Expression,
      inputRow: InternalRow = EmptyRow): Any = {
    val plan = generateProject(
      GenerateMutableProjection.generate(Alias(expression, s"Optimized($expression)")() :: Nil),
      expression)
    plan.initialize(0)
    plan(inputRow).get(0, expression.dataType)
  }

  // Asserts the unsafe-projection result matches `expected`, under both
  // CODEGEN_ONLY and NO_CODEGEN factory modes.
  protected def checkEvaluationWithUnsafeProjection(
      expression: Expression,
      expected: Any,
      inputRow: InternalRow = EmptyRow): Unit = {
    val modes = Seq(CodegenObjectFactoryMode.CODEGEN_ONLY, CodegenObjectFactoryMode.NO_CODEGEN)
    for (fallbackMode <- modes) {
      withSQLConf(SQLConf.CODEGEN_FACTORY_MODE.key -> fallbackMode.toString) {
        val unsafeRow = evaluateWithUnsafeProjection(expression, inputRow)
        val input = if (inputRow == EmptyRow) "" else s", input: $inputRow"
        if (expected == null) {
          if (!unsafeRow.isNullAt(0)) {
            val expectedRow = InternalRow(expected, expected)
            fail("Incorrect evaluation in unsafe mode: " +
              s"$expression, actual: $unsafeRow, expected: $expectedRow$input")
          }
        } else {
          // Two columns because evaluateWithUnsafeProjection projects the
          // expression twice (see SPARK-16489 note below).
          val lit = InternalRow(expected, expected)
          val expectedRow =
            UnsafeProjection.create(Array(expression.dataType, expression.dataType)).apply(lit)
          if (unsafeRow != expectedRow) {
            fail("Incorrect evaluation in unsafe mode: " +
              s"$expression, actual: $unsafeRow, expected: $expectedRow$input")
          }
        }
      }
    }
  }

  protected def evaluateWithUnsafeProjection(
      expression: Expression,
      inputRow: InternalRow = EmptyRow): InternalRow = {
    // SPARK-16489 Explicitly doing code generation twice so code gen will fail if
    // some expression is reusing variable names across different instances.
    // This behavior is tested in ExpressionEvalHelperSuite.
    val plan = generateProject(
      UnsafeProjection.create(
        Alias(expression, s"Optimized($expression)1")() ::
          Alias(expression, s"Optimized($expression)2")() :: Nil),
      expression)
    plan.initialize(0)
    plan(inputRow)
  }

  // Runs the expression through analysis + the test optimizer, then checks the
  // optimized expression still evaluates to `expected`.
  protected def checkEvaluationWithOptimization(
      expression: Expression,
      expected: Any,
      inputRow: InternalRow = EmptyRow): Unit = {
    val plan = Project(Alias(expression, s"Optimized($expression)")() :: Nil, OneRowRelation())
    // We should analyze the plan first, otherwise we possibly optimize an unresolved plan.
    val analyzedPlan = SimpleAnalyzer.execute(plan)
    val optimizedPlan = SimpleTestOptimizer.execute(analyzedPlan)
    checkEvaluationWithoutCodegen(optimizedPlan.expressions.head, expected, inputRow)
  }

  // Like checkEvaluation but for Double-valued expressions checked within a
  // tolerance (Spread), covering interpreted, mutable and unsafe projections.
  protected def checkDoubleEvaluation(
      expression: => Expression,
      expected: Spread[Double],
      inputRow: InternalRow = EmptyRow): Unit = {
    checkEvaluationWithoutCodegen(expression, expected)
    checkEvaluationWithGeneratedMutableProjection(expression, expected)
    checkEvaluationWithOptimization(expression, expected)
    var plan = generateProject(
      GenerateMutableProjection.generate(Alias(expression, s"Optimized($expression)")() :: Nil),
      expression)
    plan.initialize(0)
    var actual = plan(inputRow).get(0, expression.dataType)
    assert(checkResult(actual, expected, expression.dataType))
    plan = generateProject(
      GenerateUnsafeProjection.generate(Alias(expression, s"Optimized($expression)")() :: Nil),
      expression)
    plan.initialize(0)
    actual = FromUnsafeProjection(expression.dataType :: Nil)(
      plan(inputRow)).get(0, expression.dataType)
    assert(checkResult(actual, expected, expression.dataType))
  }

  /**
   * Test evaluation results between Interpreted mode and Codegen mode, making sure we have
   * consistent result regardless of the evaluation method we use.
   *
   * This method test against unary expressions by feeding them arbitrary literals of `dataType`.
   */
  def checkConsistencyBetweenInterpretedAndCodegen(
      c: Expression => Expression,
      dataType: DataType): Unit = {
    forAll (LiteralGenerator.randomGen(dataType)) { (l: Literal) =>
      cmpInterpretWithCodegen(EmptyRow, c(l))
    }
  }

  /**
   * Test evaluation results between Interpreted mode and Codegen mode, making sure we have
   * consistent result regardless of the evaluation method we use.
   *
   * This method test against binary expressions by feeding them arbitrary literals of `dataType1`
   * and `dataType2`.
   */
  def checkConsistencyBetweenInterpretedAndCodegen(
      c: (Expression, Expression) => Expression,
      dataType1: DataType,
      dataType2: DataType): Unit = {
    forAll (
      LiteralGenerator.randomGen(dataType1),
      LiteralGenerator.randomGen(dataType2)
    ) { (l1: Literal, l2: Literal) =>
      cmpInterpretWithCodegen(EmptyRow, c(l1, l2))
    }
  }

  /**
   * Test evaluation results between Interpreted mode and Codegen mode, making sure we have
   * consistent result regardless of the evaluation method we use.
   *
   * This method test against ternary expressions by feeding them arbitrary literals of `dataType1`,
   * `dataType2` and `dataType3`.
   */
  def checkConsistencyBetweenInterpretedAndCodegen(
      c: (Expression, Expression, Expression) => Expression,
      dataType1: DataType,
      dataType2: DataType,
      dataType3: DataType): Unit = {
    forAll (
      LiteralGenerator.randomGen(dataType1),
      LiteralGenerator.randomGen(dataType2),
      LiteralGenerator.randomGen(dataType3)
    ) { (l1: Literal, l2: Literal, l3: Literal) =>
      cmpInterpretWithCodegen(EmptyRow, c(l1, l2, l3))
    }
  }

  /**
   * Test evaluation results between Interpreted mode and Codegen mode, making sure we have
   * consistent result regardless of the evaluation method we use.
   *
   * This method test against expressions take Seq[Expression] as input by feeding them
   * arbitrary length Seq of arbitrary literal of `dataType`.
   */
  def checkConsistencyBetweenInterpretedAndCodegen(
      c: Seq[Expression] => Expression,
      dataType: DataType,
      minNumElements: Int = 0): Unit = {
    forAll (Gen.listOf(LiteralGenerator.randomGen(dataType))) { (literals: Seq[Literal]) =>
      whenever(literals.size >= minNumElements) {
        cmpInterpretWithCodegen(EmptyRow, c(literals))
      }
    }
  }

  // Evaluates `expr` both interpreted and via a generated mutable projection,
  // failing the test when the two results differ (per compareResults below).
  private def cmpInterpretWithCodegen(inputRow: InternalRow, expr: Expression): Unit = {
    val interpret = try {
      evaluateWithoutCodegen(expr, inputRow)
    } catch {
      case e: Exception => fail(s"Exception evaluating $expr", e)
    }
    val plan = generateProject(
      GenerateMutableProjection.generate(Alias(expr, s"Optimized($expr)")() :: Nil),
      expr)
    val codegen = plan(inputRow).get(0, expr.dataType)
    if (!compareResults(interpret, codegen)) {
      fail(s"Incorrect evaluation: $expr, interpret: $interpret, codegen: $codegen")
    }
  }

  /**
   * Check the equality between result of expression and expected value, it will handle
   * Array[Byte] and Spread[Double].
   */
  private[this] def compareResults(result: Any, expected: Any): Boolean = {
    (result, expected) match {
      case (result: Array[Byte], expected: Array[Byte]) =>
        java.util.Arrays.equals(result, expected)
      case (result: Double, expected: Double) if result.isNaN && expected.isNaN =>
        true
      case (result: Double, expected: Double) =>
        // Doubles may legitimately differ slightly between modes; compare with
        // relative tolerance instead of exact equality.
        relativeErrorComparison(result, expected)
      case (result: Float, expected: Float) if result.isNaN && expected.isNaN =>
        true
      case _ => result == expected
    }
  }

  /**
   * Private helper function for comparing two values using relative tolerance.
   * Note that if x or y is extremely close to zero, i.e., smaller than Double.MinPositiveValue,
   * the relative tolerance is meaningless, so the exception will be raised to warn users.
   *
   * TODO: this duplicates functions in spark.ml.util.TestingUtils.relTol and
   * spark.mllib.util.TestingUtils.relTol, they could be moved to common utils sub module for the
   * whole spark project which does not depend on other modules. See more detail in discussion:
   * https://github.com/apache/spark/pull/15059#issuecomment-246940444
   */
  private def relativeErrorComparison(x: Double, y: Double, eps: Double = 1E-8): Boolean = {
    val absX = math.abs(x)
    val absY = math.abs(y)
    val diff = math.abs(x - y)
    if (x == y) {
      true
    } else if (absX < Double.MinPositiveValue || absY < Double.MinPositiveValue) {
      throw new TestFailedException(
        s"$x or $y is extremely close to zero, so the relative tolerance is meaningless.", 0)
    } else {
      diff < eps * math.min(absX, absY)
    }
  }
}
| lxsmnv/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/ExpressionEvalHelper.scala | Scala | apache-2.0 | 16,545 |
object Problem {

  /** Returns true if `num` is a pentagonal number.
    *
    * Since P(n) = n(3n - 1) / 2, we have 24 * P(n) + 1 = (6n - 1)^2, so `num` is
    * pentagonal iff 24*num + 1 is a perfect square whose root is congruent to
    * 5 modulo 6. The exact integer check replaces the original floating-point
    * equality test (`foo == foo.toInt`), which is fragile near representation
    * boundaries. Non-positive inputs are correctly rejected.
    */
  def isPentagonal(num: Int): Boolean = {
    val d = 1L + 24L * num
    val r = Math.sqrt(d.toDouble).round
    r * r == d && r % 6 == 5
  }

  /** The n-th pentagonal number, P(n) = n(3n - 1) / 2. */
  def pentagonal(n: Int): Int = n * (3 * n - 1) / 2

  /** Project Euler #44: find pentagonal numbers P(j) < P(k) whose sum and
    * difference are both pentagonal, and print D = P(k) - P(j).
    */
  def main(args: Array[String]): Unit = {
    val start = System.currentTimeMillis
    val pentagonalNumbers = (1 to 2500).map(pentagonal)
    // Candidate value `x` paired against the pentagonal at zipWithIndex position `index`.
    def isOk(x: Int, index: Int): Boolean =
      isPentagonal(x - pentagonalNumbers(index)) && isPentagonal(x + pentagonalNumbers(index))
    // First P(k) that admits a smaller partner P(j) with both sum and difference pentagonal.
    val p_k = pentagonalNumbers.zipWithIndex.find(x => (x._2 - 1 to 0 by -1).exists(index => isOk(x._1, index))).get
    val p_j_index = (p_k._2 - 1 to 0 by -1).find(index => isOk(p_k._1, index)).get
    val p_j = (pentagonal(p_j_index + 1), p_j_index)
    println(p_k._1 - p_j._1)
    val stop = System.currentTimeMillis
    println("Time taken: " + (stop - start) + "ms")
  }
}
| Jiri-Kremser/euler | 044/Problem.scala | Scala | gpl-2.0 | 834 |
package workflow
/**
* A PipelineResult is a lazy wrapper around the result of applying a [[Pipeline]] to data.
* Internally it contains the Pipeline's execution plan with data sources inserted,
* and the sink that the Pipeline's output is expected to be produced by.
*
* @param executor The Pipeline's underlying execution plan,
* with the Pipeline's sources inserted into the [[Graph]]
* @param sink The Pipeline's sink
* @tparam T The type of the result.
*/
abstract class PipelineResult[T] private[workflow] (
private[workflow] val executor: GraphExecutor,
private[workflow] val sink: SinkId
) {
private lazy val result: T = executor.execute(sink).get.asInstanceOf[T]
final def get(): T = result
}
| tomerk/keystone | src/main/scala/workflow/PipelineResult.scala | Scala | apache-2.0 | 744 |
package org.jetbrains.plugins.scala
package decompiler
import java.io.IOException
import com.intellij.openapi.vfs.VirtualFile
import scala.annotation.tailrec
/**
 * @author ilyas
 */
object ScClsStubBuilderFactory {

  /** Reads the file's content eagerly and delegates to the byte-based overload.
    * Any I/O or unsupported-content failure is treated as "cannot be processed".
    */
  def canBeProcessed(file: VirtualFile): Boolean =
    try canBeProcessed(file, file.contentsToByteArray())
    catch {
      case _: IOException => false
      case _: UnsupportedOperationException => false // NOTE(review): some VFS implementations may not expose content — confirm why this is needed
    }

  /** Decides whether `file` is a compiled Scala class file worth stub-building.
    *
    * Names containing '$' denote inner/synthetic classes; for those we walk up
    * the chain of enclosing class names, probing sibling `.class` files in the
    * same directory, and answer yes as soon as one is recognised as Scala.
    */
  def canBeProcessed(file: VirtualFile, bytes: => Array[Byte]): Boolean = {
    val name: String = file.getNameWithoutExtension
    if (!name.contains("$")) DecompilerUtil.isScalaFile(file, bytes)
    else {
      val parent: VirtualFile = file.getParent
      @tailrec
      def enclosingIsScala(candidate: String): Boolean = {
        val sibling: VirtualFile = parent.findChild(candidate + ".class")
        if (sibling != null && DecompilerUtil.isScalaFile(sibling)) {
          true // handled here directly rather than handing the file to Java
        } else {
          val cut = candidate.lastIndexOf("$")
          if (cut == -1) false
          else {
            // Strip the last '$'-segment plus any trailing '$' separators.
            var shorter = candidate.substring(0, cut)
            while (shorter.endsWith("$")) shorter = shorter.dropRight(1)
            enclosingIsScala(shorter)
          }
        }
      }
      enclosingIsScala(name)
    }
  }
}
package ghpages.secret.tests
import cats.instances.either._
import cats.instances.list._
import cats.instances.option._
import cats.instances.string._
import cats.instances.tuple._
import cats.instances.vector._
import cats.kernel.Eq
import cats.syntax.eq._
import japgolly.scalajs.react._
import japgolly.scalajs.react.extra.Ajax
import japgolly.scalajs.react.vdom.html_<^._
import org.scalajs.dom.{XMLHttpRequest, console}
import scala.util.{Random, Success, Try}
object AsyncTest {
  import AsyncCallback.delay
  import QuickTest.{Status, TestSuite, TestSuiteBuilder}

  // Builds a checker: Pass when the value equals `expect`, otherwise a Fail
  // message showing both sides.
  def cmpFn[A: Eq](expect: A): A => Status.Result =
    a => if (a === expect) Status.Pass else Status.Fail(s"Actual: $a. Expect: $expect.")

  // Turns a (computation, expected value) pair into a test producing a Status.Result.
  def testCmp[A: Eq](x: (AsyncCallback[A], A)) = {
    val (body, expect) = x
    body.map(cmpFn(expect))
  }

  // Runs `body` twice: once expecting `expect1`, then after `prep2()` mutates the
  // test fixtures, once more expecting `expect2`. The `complete` var is rebound
  // per run so the shared `subjectCB` reports into the current run's promise.
  def testCmp2[A: Eq](body: AsyncCallback[A], expect1: A, prep2: () => Unit, expect2: A) = {
    var complete = null: (Try[A] => Callback)
    val subjectCB = body.attemptTry.flatMap(complete(_).asAsyncCallback).toCallback
    def run(expect: A): AsyncCallback[Status.Result] =
      for {
        (p, f) <- AsyncCallback.promise[A].asAsyncCallback
        _ <- AsyncCallback.delay{complete = f}
        _ <- subjectCB.asAsyncCallback
        a <- p
      } yield cmpFn(expect).apply(a)
    for {
      s1 <- run(expect1)
      _ <- AsyncCallback.delay(prep2())
      s2 <- run(expect2)
    } yield s1 && s2
  }

  // GET a user from the public reqres.in test API; non-2xx statuses become failures.
  private def getUser(userId: Int) =
    Ajax("GET", s"https://reqres.in/api/users/$userId")
      .setRequestContentTypeJsonUtf8
      .send
      .validateStatusIsSuccessful(Callback.throwException(_))
      .asAsyncCallback

  // user 0 does not exist (used for the failure cases); 1 and 2 do.
  private val get0 = getUser(0)
  private val get1 = getUser(1)
  private val get2 = getUser(2)

  // Summarises an XHR as "[status] id:N" by extracting the id field from the
  // JSON response text; throws when the response doesn't match.
  private def xhrToText(xhr: XMLHttpRequest): String = {
    val idRegex = "(\\"id\\":\\\\d+)".r
    idRegex.findFirstIn(xhr.responseText) match {
      case Some(m) =>
        val id = m.replace("\\"", "")
        s"[${xhr.status}] $id"
      case None =>
        console.info("Ajax response: ", xhr)
        throw new RuntimeException(s"Unable to parse ajax response: [${xhr.responseText}]")
    }
  }

  // Reference equality is sufficient here: tests compare the very same Throwable instance.
  private implicit val equalThrowable: Eq[Throwable] =
    _ eq _

  // The suite itself. Timing-based tests (zip/race) record an event log and
  // assert on its order; ajax tests exercise combinator semantics over real HTTP.
  val TestSuite: TestSuite =
    TestSuiteBuilder()
      .add("zip")(testCmp {
        var logs = Vector.empty[String]
        def go(n: String) = delay(logs :+= n) >> Callback(logs :+= s"${n}2").delayMs(10)
        val t = go("a").zip(go("b")) >> delay(logs :+= "|") >> delay(logs)
        t -> "a b a2 b2 |".split(" ").toVector
      })
      .add("race (1)")(testCmp {
        var logs = Vector.empty[String]
        def go(n: String, d: Double) = delay(logs :+= n) >> Callback(logs :+= s"${n}2").delayMs(d)
        val t = go("a", 10).race(go("b", 20)) >> delay(logs :+= "|") >> delay(logs).delayMs(30)
        t -> "a b a2 | b2".split(" ").toVector
      })
      .add("race (2)")(testCmp {
        var logs = Vector.empty[String]
        def go(n: String, d: Double) = delay(logs :+= n) >> Callback(logs :+= s"${n}2").delayMs(d)
        val t = go("a", 20).race(go("b", 10)) >> delay(logs :+= "|") >> delay(logs).delayMs(30)
        t -> "a b b2 | a2".split(" ").toVector
      })
      .add("race (3)") {
        // Double-run variant: the delays are swapped between run 1 and run 2.
        var logs = Vector.empty[String]
        def go(n: String, d: => Double) = delay{logs :+= n; Callback(logs :+= s"${n}2").delayMs(d)}.flatten
        var da = 10
        var db = 20
        val t = go("a", da).race(go("b", db)) >> delay(logs :+= "|") >> delay(logs).delayMs(30)
        val expect1 = "a b a2 | b2".split(" ").toVector
        val expect2 = "a b b2 | a2".split(" ").toVector
        val prep2 = () => {da = 20; db = 10; logs = Vector.empty[String]}
        testCmp2(t, expect1, prep2, expect2)
      }
      .add("ajax (ok *> ok)")(testCmp {
        val t = (get1 *> get2).map(xhrToText)
        t -> s"[200] id:2"
      })
      .add("ajax (ok <* ok)")(testCmp {
        val t = (get1 <* get2).map(xhrToText)
        t -> s"[200] id:1"
      })
      .add("ajax (ko *> ok)")(testCmp {
        val t = (get0 *> get2).map(xhrToText).attempt.map(_.toOption)
        t -> None
      })
      .add("ajax (ok *> ko)")(testCmp {
        val t = (get2 *> get0).map(xhrToText).attempt.map(_.toOption)
        t -> None
      })
      .add("traverse")(testCmp {
        val r = new Random()
        val is = (1 to 10).toList
        val t = AsyncCallback.traverse(is)(i => AsyncCallback.pure(i * 100).delayMs(r.nextInt(64)))
        t -> is.map(_ * 100)
      })
      .add("jsPromise: to & from")(testCmp {
        val a = AsyncCallback.pure(123).delayMs(20)
        val t = AsyncCallback.fromJsPromise(a.unsafeToJsPromise())
        t -> 123
      })
      .add("jsPromise: from fixed ok")(testCmp {
        // An already-settled promise must yield its value to every consumer.
        val p = AsyncCallback.pure(123).unsafeToJsPromise()
        val t1,t2 = AsyncCallback.fromJsPromise(p)
        t1.zip(t2) -> ((123, 123))
      })
      .add("jsPromise: from fixed ko")(testCmp {
        val e = new RuntimeException("AH")
        val p = AsyncCallback.throwException[Int](e).unsafeToJsPromise()
        val t1,t2 = AsyncCallback.fromJsPromise(p).attempt
        t1.zip(t2) -> ((Left(e), Left(e)))
      })
      .add("future")(testCmp {
        import org.scalajs.macrotaskexecutor.MacrotaskExecutor.Implicits._
        val a = AsyncCallback.pure(123).delayMs(20)
        val t = AsyncCallback.fromFuture(a.unsafeToFuture())
        t -> 123
      })
      .add("memo")(testCmp {
        var count = 0
        val getCount = AsyncCallback.delay(count)
        val incCount = AsyncCallback.delay(count += 1).delayMs(400)
        val m = incCount.memo()
        // start 1
        // start 2
        // complete 1
        // complete 2
        // start 3
        // complete 3
        val t = (m *> m) >> m.memo() >> getCount
        t -> 1
      })
      .add("init")(testCmp {
        val x = AsyncCallback.init[Boolean, Int] { f =>
          f(Success(123)).delayMs(500).toCallback.ret(true)
        }
        val y = for {
          (b, ac) <- x.asAsyncCallback
          i <- ac
        } yield (b, i)
        y -> ((true, 123))
      })
      .result()

  // Renders the suite with a concurrency level of 4.
  def Component(): VdomElement =
    QuickTest.Component(
      QuickTest.Props(
        TestSuite, 4))
}
| japgolly/scalajs-react | ghpages/src/main/scala/ghpages/secret/tests/AsyncTest.scala | Scala | apache-2.0 | 6,288 |
package com.ntsdev.run
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.servlet.DefaultServlet
import org.eclipse.jetty.webapp.WebAppContext
import org.scalatra.servlet.ScalatraListener
object JettyLauncher {

  /** Boots an embedded Jetty server hosting the Scalatra app on the configured
    * port and blocks the main thread until the server stops.
    */
  def main(args: Array[String]): Unit = {
    val port = getServerPort
    val context = buildContext
    val server = new Server(port)
    server.setHandler(context)
    server.start()
    server.join()
  }

  /** Builds the web-app context: serves static resources from `src/main/webapp`
    * at the root path and registers the ScalatraListener, which wires up the
    * Scalatra servlets at startup.
    */
  private def buildContext = {
    val context = new WebAppContext()
    context.setContextPath("/")
    context.setResourceBase("src/main/webapp")
    context.addEventListener(new ScalatraListener)
    context.addServlet(classOf[DefaultServlet], "/")
    context
  }

  /** Port taken from the PORT environment variable, defaulting to 8080 when it
    * is unset or not a valid integer. (The original `_.toInt` threw a
    * NumberFormatException on malformed values such as an empty string.)
    */
  private def getServerPort =
    Option(System.getenv("PORT"))
      .flatMap(p => scala.util.Try(p.trim.toInt).toOption)
      .getOrElse(8080)
}
| neilshannon/devnexus-microservices | scala/src/main/scala/com/ntsdev/run/JettyLauncher.scala | Scala | mit | 808 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package common.rest
import scala.collection.JavaConverters._
import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.HttpResponse
import com.atlassian.oai.validator.SwaggerRequestResponseValidator
import com.atlassian.oai.validator.model.SimpleRequest
import com.atlassian.oai.validator.model.SimpleResponse
import com.atlassian.oai.validator.report.ValidationReport
import com.atlassian.oai.validator.whitelist.ValidationErrorsWhitelist
import com.atlassian.oai.validator.whitelist.rule.WhitelistRules
trait SwaggerValidator {

  // Validation findings that are knowingly ignored: payload shapes the spec
  // cannot express (action/trigger/web POST bodies, arbitrary action kinds)
  // and negative tests that deliberately issue disallowed verbs.
  private val specWhitelist = ValidationErrorsWhitelist
    .create()
    .withRule(
      "Ignore action and trigger payloads",
      WhitelistRules.allOf(
        WhitelistRules.messageContains("Object instance has properties which are not allowed by the schema"),
        WhitelistRules.anyOf(
          WhitelistRules.pathContains("/web/"),
          WhitelistRules.pathContains("/actions/"),
          WhitelistRules.pathContains("/triggers/")),
        WhitelistRules.methodIs(io.swagger.models.HttpMethod.POST)))
    .withRule(
      "Ignore invalid action kinds",
      WhitelistRules.allOf(
        WhitelistRules.messageContains("kind"),
        WhitelistRules.messageContains("Instance value"),
        WhitelistRules.messageContains("not found"),
        WhitelistRules.pathContains("/actions/"),
        WhitelistRules.methodIs(io.swagger.models.HttpMethod.PUT)))
    .withRule(
      "Ignore tests that check for invalid DELETEs and PUTs on actions",
      WhitelistRules.anyOf(
        WhitelistRules.messageContains("DELETE operation not allowed on path '/api/v1/namespaces/_/actions/'"),
        WhitelistRules.messageContains("PUT operation not allowed on path '/api/v1/namespaces/_/actions/'")))

  // Validator backed by the bundled apiv1swagger.json spec, with the
  // whitelist above applied.
  private val specValidator = SwaggerRequestResponseValidator
    .createFor("apiv1swagger.json")
    .withWhitelist(specWhitelist)
    .build()

  /**
   * Validate a HTTP request and response against the Swagger spec. Request
   * and response bodies are passed separately so that this validation
   * does not have to consume the body content directly from the request
   * and response, which would prevent callers from later consuming it.
   *
   * @param request the HttpRequest
   * @param response the HttpResponse
   * @return The list of validation error messages, if any
   */
  def validateRequestAndResponse(request: HttpRequest, response: HttpResponse): Seq[String] = {
    val specRequest = {
      val builder = new SimpleRequest.Builder(request.method.value, request.uri.path.toString())
      val body = strictEntityBodyAsString(request.entity)
      // NOTE(review): subsequent folds start from `builder`, not `withBody`.
      // This only works if SimpleRequest.Builder mutates and returns `this`
      // (so both names alias one object) — confirm against the Atlassian
      // swagger-request-validator API before restructuring.
      val withBody =
        if (body.isEmpty) builder
        else
          builder
            .withBody(body)
            .withHeader("content-type", request.entity.contentType.value)
      val withHeaders = request.headers.foldLeft(builder)((b, header) => b.withHeader(header.name, header.value))
      val andQuery =
        request.uri.query().foldLeft(withHeaders) { case (b, (key, value)) => b.withQueryParam(key, value) }
      andQuery.build()
    }
    val specResponse = {
      val builder = SimpleResponse.Builder
        .status(response.status.intValue)
      val body = strictEntityBodyAsString(response.entity)
      // NOTE(review): same builder-aliasing assumption as for the request above.
      val withBody =
        if (body.isEmpty) builder
        else
          builder
            .withBody(body)
            .withHeader("content-type", response.entity.contentType.value)
      val withHeaders = response.headers.foldLeft(builder)((b, header) => b.withHeader(header.name, header.value))
      withHeaders.build()
    }
    // Only ERROR-level findings are reported; whitelisted items are already excluded.
    specValidator
      .validate(specRequest, specResponse)
      .getMessages
      .asScala
      .filter(m => m.getLevel == ValidationReport.Level.ERROR)
      .map(_.toString)
  }

  // Extracts the body of an already-strict entity; non-strict (streamed)
  // entities yield "" so validation skips the body rather than consuming the stream.
  def strictEntityBodyAsString(entity: HttpEntity): String = entity match {
    case s: HttpEntity.Strict => s.data.utf8String
    case _ => ""
  }
}
| sven-lange-last/openwhisk | tests/src/test/scala/common/rest/SwaggerValidator.scala | Scala | apache-2.0 | 4,799 |
package com.codingkapoor.codingbat
import org.scalatest.FlatSpec
import org.scalatest.Matchers
/**
 * Unit tests for the `StringI` exercises (CodingBat String-1 set).
 *
 * Each test's FlatSpec string restates the exercise prompt, and the
 * assertions mirror the sample cases from the exercise, so the spec text
 * doubles as documentation of the expected behaviour of each method.
 */
class StringISpec extends FlatSpec with Matchers {
  """Given a string name, e.g. "Bob", method "helloName"""" should """return a greeting of the form "Hello Bob!".""" in {
    StringI.helloName("Bob") should equal("Hello Bob!")
    StringI.helloName("Alice") should equal("Hello Alice!")
    StringI.helloName("X") should equal("Hello X!")
  }
  """Given two strings, a and b, method "makeAbba"""" should """return the result of putting them together in the order abba, e.g. "Hi" and "Bye" returns "HiByeByeHi".""" in {
    StringI.makeAbba("Hi", "Bye") should equal("HiByeByeHi")
    StringI.makeAbba("Yo", "Alice") should equal("YoAliceAliceYo")
    StringI.makeAbba("What", "Up") should equal("WhatUpUpWhat")
  }
  """Given tag and word strings, method "makeTags"""" should """create the HTML string with tags around the word, e.g. "<i>Yay</i>".""" in {
    StringI.makeTags("i", "Yay") should equal("<i>Yay</i>")
    StringI.makeTags("i", "Hello") should equal("<i>Hello</i>")
    StringI.makeTags("cite", "Yay") should equal("<cite>Yay</cite>")
  }
  """Given an "out" string length 4, such as "<<>>", and a word, method "makeOutWord"""" should """return a new string where the word is in the middle of the out string, e.g. "<<word>>".""" in {
    StringI.makeOutWord("<<>>", "Yay") should equal("<<Yay>>")
    StringI.makeOutWord("<<>>", "WooHoo") should equal("<<WooHoo>>")
    StringI.makeOutWord("[[]]", "word") should equal("[[word]]")
  }
  """Given a string, method "extraEnd"""" should """return a new string made of 3 copies of the last 2 chars of the original string. The string length will be at least 2.""" in {
    StringI.extraEnd("Hello") should equal("lololo")
    StringI.extraEnd("ab") should equal("ababab")
    StringI.extraEnd("Hi") should equal("HiHiHi")
  }
  """Given a string, method "firstTwo"""" should """return the string made of its first two chars, so the String "Hello" yields "He". If the string is shorter than length 2, return whatever there is, so "X" yields "X", and the empty string "" yields the empty string "".""" in {
    StringI.firstTwo("Hello") should equal("He")
    StringI.firstTwo("abcdefg") should equal("ab")
    StringI.firstTwo("ab") should equal("ab")
  }
  """Given a string of even length, method "firstHalf"""" should """return the first half. So the string "WooHoo" yields "Woo".""" in {
    StringI.firstHalf("WooHoo") should equal("Woo")
    StringI.firstHalf("HelloThere") should equal("Hello")
    StringI.firstHalf("abcdef") should equal("abc")
  }
  """Given a string, method "withoutEnd"""" should """return a version without the first and last char, so "Hello" yields "ell". The string length will be at least 2.""" in {
    StringI.withoutEnd("Hello") should equal("ell")
    StringI.withoutEnd("java") should equal("av")
    StringI.withoutEnd("coding") should equal("odin")
  }
  """Given 2 strings, a and b, method "comboString"""" should """return a string of the form short+long+short, with the shorter string on the outside and the longer string on the inside. The strings will not be the same length, but they may be empty (length 0).""" in {
    StringI.comboString("Hello", "hi") should equal("hiHellohi")
    StringI.comboString("hi", "Hello") should equal("hiHellohi")
    StringI.comboString("aaa", "b") should equal("baaab")
  }
  """Given 2 strings, method "nonStart"""" should """return their concatenation, except omit the first char of each. The strings will be at least length 1.""" in {
    StringI.nonStart("Hello", "There") should equal("ellohere")
    StringI.nonStart("java", "code") should equal("avaode")
    StringI.nonStart("shotl", "java") should equal("hotlava")
  }
  """Given a string, method "left2"""" should """return a "rotated left 2" version where the first 2 chars are moved to the end. The string length will be at least 2.""" in {
    StringI.left2("Hello") should equal("lloHe")
    StringI.left2("java") should equal("vaja")
    StringI.left2("Hi") should equal("Hi")
  }
  """Given a string, method "right2"""" should """return a "rotated right 2" version where the last 2 chars are moved to the start. The string length will be at least 2.""" in {
    StringI.right2("Hello") should equal("loHel")
    StringI.right2("java") should equal("vaja")
    StringI.right2("Hi") should equal("Hi")
  }
  """Given a string, method "theEnd"""" should """return a string of length 1 from its front, unless front is false, in which case return a string length 1 from its back. The string will be non-empty.""" in {
    StringI.theEnd("Hello", true) should equal("H")
    StringI.theEnd("Hello", false) should equal("o")
    StringI.theEnd("oh", true) should equal("o")
  }
  """Given a string, method "withouEnd2"""" should """return a version without both the first and last char of the string. The string may be any length, including 0.""" in {
    StringI.withouEnd2("Hello") should equal("ell")
    StringI.withouEnd2("abc") should equal("b")
    StringI.withouEnd2("ab") should equal("")
  }
  """Given a string of even length, method "middleTwo"""" should """return a string made of the middle two chars, so the string "string" yields "ri". The string length will be at least 2.""" in {
    StringI.middleTwo("string") should equal("ri")
    StringI.middleTwo("code") should equal("od")
    StringI.middleTwo("Practice") should equal("ct")
  }
  """Given a string, method "endsLy"""" should """return true if it ends in "ly".""" in {
    StringI.endsLy("oddly") should equal(true)
    StringI.endsLy("y") should equal(false)
    StringI.endsLy("oddy") should equal(false)
  }
  """Given a string and an int n, method "nTwice"""" should """return a string made of the first and last n chars from the string. The string length will be at least n.""" in {
    StringI.nTwice("Hello", 2) should equal("Helo")
    StringI.nTwice("Chocolate", 3) should equal("Choate")
    StringI.nTwice("Chocolate", 1) should equal("Ce")
  }
  """Given a string and an index, method "twoChar"""" should """return a string length 2 starting at the given index. If the index is too big or too small to define a string length 2, use the first 2 chars. The string length will be at least 2.""" in {
    StringI.twoChar("java", 0) should equal("ja")
    StringI.twoChar("java", 2) should equal("va")
    StringI.twoChar("java", 3) should equal("ja")
  }
  """Given a string of odd length, method "middleThree"""" should """return the string length 3 from its middle, so "Candy" yields "and". The string length will be at least 3.""" in {
    StringI.middleThree("Candy") should equal("and")
    StringI.middleThree("and") should equal("and")
    StringI.middleThree("solving") should equal("lvi")
  }
  """Given a string, method "hasBad"""" should """return true if "bad" appears starting at index 0 or 1 in the string, such as with "badxxx" or "xbadxx" but not "xxbadxx". The string may be any length, including 0. Note: use .equals() to compare 2 strings.""" in {
    StringI.hasBad("badxx") should equal(true)
    StringI.hasBad("xbadxx") should equal(true)
    StringI.hasBad("xxbadxx") should equal(false)
  }
  """Given a string, method "atFirst"""" should """return a string length 2 made of its first 2 chars. If the string length is less than 2, use '@' for the missing chars.""" in {
    StringI.atFirst("hello") should equal("he")
    StringI.atFirst("hi") should equal("hi")
    StringI.atFirst("h") should equal("h@")
  }
  """Given 2 strings, a and b, method "lastChars"""" should """return a new string made of the first char of a and the last char of b, so "yo" and "java" yields "ya". If either string is length 0, use '@' for its missing char.""" in {
    StringI.lastChars("last", "chars") should equal("ls")
    StringI.lastChars("yo", "java") should equal("ya")
    StringI.lastChars("hi", "") should equal("h@")
  }
  """Given two strings, method "conCat"""" should """append them together (known as "concatenation") and return the result. However, if the concatenation creates a double-char, then omit one of the chars, so "abc" and "cat" yields "abcat".""" in {
    StringI.conCat("abc", "cat") should equal("abcat")
    StringI.conCat("dog", "cat") should equal("dogcat")
    StringI.conCat("abc", "") should equal("abc")
  }
  """Given a string of any length, method "lastTwo"""" should """return a new string where the last 2 chars, if present, are swapped, so "coding" yields "codign".""" in {
    StringI.lastTwo("coding") should equal("codign")
    StringI.lastTwo("cat") should equal("cta")
    StringI.lastTwo("ab") should equal("ba")
  }
  """Given a string, method "seeColor"""" should """if the string begins with "red" or "blue" return that color string, otherwise return the empty string.""" in {
    StringI.seeColor("redxx") should equal("red")
    StringI.seeColor("xxred") should equal("")
    StringI.seeColor("blueTimes") should equal("blue")
  }
  """Given a string, method "frontAgain"""" should """return true if the first 2 chars in the string also appear at the end of the string, such as with "edited".""" in {
    StringI.frontAgain("edited") should equal(true)
    StringI.frontAgain("edit") should equal(false)
    StringI.frontAgain("ed") should equal(true)
  }
  """Given two strings, method "minCat"""" should """append them together (known as "concatenation") and return the result. However, if the strings are different lengths, omit chars from the longer string so it is the same length as the shorter string. So "Hello" and "Hi" yield "loHi". The strings may be any length.""" in {
    StringI.minCat("Hello", "Hi") should equal("loHi")
    StringI.minCat("Hello", "java") should equal("ellojava")
    StringI.minCat("java", "Hello") should equal("javaello")
  }
  """Given a string, method "extraFront"""" should """return a new string made of 3 copies of the first 2 chars of the original string. The string may be any length. If there are fewer than 2 chars, use whatever is there.""" in {
    StringI.extraFront("Hello") should equal("HeHeHe")
    StringI.extraFront("ab") should equal("ababab")
    StringI.extraFront("H") should equal("HHH")
  }
  """Given a string, if a length 2 substring appears at both its beginning and end, method "without2"""" should """return a string without the substring at the beginning, so "HelloHe" yields "lloHe". The substring may overlap with itself, so "Hi" yields "". Otherwise, return the original string unchanged.""" in {
    StringI.without2("HelloHe") should equal("lloHe")
    StringI.without2("HelloHi") should equal("HelloHi")
    StringI.without2("Hi") should equal("")
  }
  """Given a string, method "deFront"""" should """return a version without the first 2 chars. Except keep the first char if it is 'a' and keep the second char if it is 'b'. The string may be any length. Harder than it looks.""" in {
    StringI.deFront("Hello") should equal("llo")
    StringI.deFront("java") should equal("va")
    StringI.deFront("away") should equal("aay")
    StringI.deFront("abay") should equal("abay")
  }
  """Given a string and a second "word" string, method "startWord"""" should """return the front of the string if the word appears at the front of the string, except its first char does not need to match exactly otherwise return the empty string""" in {
    StringI.startWord("hippo", "hi") should equal("hi")
    StringI.startWord("hippo", "xip") should equal("hip")
    StringI.startWord("hippo", "i") should equal("h")
  }
  """Given a string, if the first or last chars are 'x', method "withoutX"""" should """return the string without those 'x' chars, and otherwise return the string unchanged.""" in {
    StringI.withoutX("xHix") should equal("Hi")
    StringI.withoutX("xHi") should equal("Hi")
    StringI.withoutX("Hxix") should equal("Hxi")
  }
  """Given a string, if one or both of the first 2 chars is 'x', method "withoutX2"""" should """return the string without those 'x' chars, and otherwise return the string unchanged.""" in {
    StringI.withoutX2("xHi") should equal("Hi")
    StringI.withoutX2("Hxi") should equal("Hi")
    StringI.withoutX2("Hi") should equal("Hi")
  }
}
| codingkapoor/scala-coding-bat | src/test/scala/com/codingkapoor/codingbat/StringISpec.scala | Scala | mit | 12,228 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2.csv
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.csv.{CSVHeaderChecker, CSVOptions, UnivocityParser}
import org.apache.spark.sql.connector.read.PartitionReader
import org.apache.spark.sql.execution.datasources.PartitionedFile
import org.apache.spark.sql.execution.datasources.csv.CSVDataSource
import org.apache.spark.sql.execution.datasources.v2._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.SerializableConfiguration
/**
 * A factory used to create CSV readers.
 *
 * @param sqlConf SQL configuration.
 * @param broadcastedConf Broadcasted serializable Hadoop Configuration.
 * @param dataSchema Schema of CSV files.
 * @param readDataSchema Required data schema in the batch scan.
 * @param partitionSchema Schema of partitions.
 * @param parsedOptions Options for parsing CSV files.
 */
case class CSVPartitionReaderFactory(
    sqlConf: SQLConf,
    broadcastedConf: Broadcast[SerializableConfiguration],
    dataSchema: StructType,
    readDataSchema: StructType,
    partitionSchema: StructType,
    parsedOptions: CSVOptions) extends FilePartitionReaderFactory {
  private val columnPruning = sqlConf.csvColumnPruning

  override def buildReader(file: PartitionedFile): PartitionReader[InternalRow] = {
    val hadoopConf = broadcastedConf.value.value
    // The corrupt-record column is synthesized by the parser rather than read
    // from the file, so strip it from both schemas before parsing.
    def dropCorruptRecordColumn(schema: StructType): StructType =
      StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
    val actualDataSchema = dropCorruptRecordColumn(dataSchema)
    val actualReadDataSchema = dropCorruptRecordColumn(readDataSchema)
    val parser = new UnivocityParser(
      actualDataSchema,
      actualReadDataSchema,
      parsedOptions)
    // With column pruning enabled the header only has to match the columns we read.
    val headerSchema = if (columnPruning) actualReadDataSchema else actualDataSchema
    val headerChecker = new CSVHeaderChecker(
      headerSchema, parsedOptions, source = s"CSV file: ${file.filePath}", file.start == 0)
    val rows = CSVDataSource(parsedOptions).readFile(
      hadoopConf,
      file,
      parser,
      headerChecker,
      readDataSchema)
    val baseReader = new PartitionReaderFromIterator[InternalRow](rows)
    // Append the partition-column values to every row produced by the base reader.
    new PartitionReaderWithPartitionValues(baseReader, readDataSchema,
      partitionSchema, file.partitionValues)
  }
}
| jkbradley/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/csv/CSVPartitionReaderFactory.scala | Scala | apache-2.0 | 3,204 |
package polyite.util
import java.util.logging.Logger
import org.junit.Assert.assertTrue
import org.junit.Test
class EchelonFormTest {
@Test
def test {
val matrix : Array[Array[Rat]] = Array(
Array(Rat(1), Rat(1), Rat(1)),
Array(Rat(1), Rat(0), Rat(1)),
Array(Rat(0), Rat(1), Rat(1)),
Array(Rat(0), Rat(2), Rat(2)))
val echelonMatrix : Array[Array[Rat]] = Util.calcRowEchelonForm(matrix, 4, 3)
println(echelonMatrix.map(_.mkString(", ")).mkString("\\n"))
assertTrue(Util.calcRowRank(echelonMatrix) == 3)
}
} | stganser/polyite | test/polyite/util/EchelonFormTest.scala | Scala | mit | 555 |
package t1000976.c
class C(key: String = "", componentStates: String = "") | Kwestor/scala-ide | org.scala-ide.sdt.core.tests/test-workspace/pc/src/t1000976/c/C.scala | Scala | bsd-3-clause | 75 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.optimization
import breeze.collection.mutable.OpenAddressHashArray
import scala.collection.mutable
import breeze.linalg.{DenseVector => BDV, Vector => BV, SparseVector => BSV, HashVector}
import breeze.optimize.{CachedDiffFunction, DiffFunction, LBFGS => BreezeLBFGS}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.internal.Logging
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.linalg.BLAS.axpy
import org.apache.spark.rdd.RDD
/**
 * :: DeveloperApi ::
 * Class used to solve an optimization problem using Limited-memory BFGS.
 * Reference: [[http://en.wikipedia.org/wiki/Limited-memory_BFGS]]
 * @param gradient Gradient function to be used.
 * @param updater Updater to be used to update weights after every iteration.
 */
@DeveloperApi
class SparseLBFGS(private var gradient: Gradient, private var updater: Updater)
  extends LBFGS(gradient, updater) with Logging {

  // NOTE(review): this field presumably shadows a correction count kept by the
  // parent LBFGS class — only this copy is forwarded to SparseLBFGS.runLBFGS
  // in optimize() below. Confirm the parent's copy is unused on this path.
  private var numCorrections = 10

  // Number of label classes for the sparse logistic gradient; mutable so
  // callers can set it before invoking optimize(). Defaults to binary.
  var numClasses = 2

  /**
   * Set the number of corrections used in the LBFGS update. Default 10.
   * Values of numCorrections less than 3 are not recommended; large values
   * of numCorrections will result in excessive computing time.
   * 3 < numCorrections < 10 is recommended.
   * Restriction: numCorrections > 0
   */
  override def setNumCorrections(corrections: Int): this.type = {
    require(corrections > 0,
      s"Number of corrections must be positive but got ${corrections}")
    this.numCorrections = corrections
    this
  }

  /** Current number of L-BFGS corrections (see [[setNumCorrections]]). */
  def getNumCorrections: Int = this.numCorrections

  /**
   * Runs L-BFGS over `data` starting from `initialWeights` by delegating to
   * [[SparseLBFGS.runLBFGS]] with this instance's configuration. Returns only
   * the final weight vector; the loss history is discarded.
   */
  override def optimize(data: RDD[(Double, Vector)], initialWeights: Vector): Vector = {
    val (weights, _) = SparseLBFGS.runLBFGS(
      data,
      gradient,
      updater,
      numCorrections,
      this.getConvergenceTol(),
      this.getNumIterations(),
      this.getRegParam(),
      initialWeights,
      numClasses
    )
    weights
  }
}
/**
 * :: DeveloperApi ::
 * Top-level method to run L-BFGS.
 */
@DeveloperApi
object SparseLBFGS extends Logging {
  /**
   * Run Limited-memory BFGS (L-BFGS) in parallel.
   * Averaging the subgradients over different partitions is performed using one standard
   * spark map-reduce in each iteration.
   *
   * @param data - Input data for L-BFGS. RDD of the set of data examples, each of
   *               the form (label, [feature values]).
   * @param gradient - Gradient object (used to compute the gradient of the loss function of
   *                   one single data example)
   * @param updater - Updater function to actually perform a gradient step in a given direction.
   * @param numCorrections - The number of corrections used in the L-BFGS update.
   * @param convergenceTol - The convergence tolerance of iterations for L-BFGS which is must be
   *                         nonnegative. Lower values are less tolerant and therefore generally
   *                         cause more iterations to be run.
   * @param maxNumIterations - Maximal number of iterations that L-BFGS can be run.
   * @param regParam - Regularization parameter
   * @param initialWeights - Starting point of the optimization.
   * @param numClasses - Number of label classes forwarded to SparseLogisticGradient.
   *
   * @return A tuple containing two elements. The first element is a column matrix containing
   *         weights for every feature, and the second element is an array containing the loss
   *         computed for every iteration.
   */
  def runLBFGS(
      data: RDD[(Double, Vector)],
      gradient: Gradient,
      updater: Updater,
      numCorrections: Int,
      convergenceTol: Double,
      maxNumIterations: Int,
      regParam: Double,
      initialWeights: Vector,
      numClasses: Int): (Vector, Array[Double]) = {
    val lossHistory = mutable.ArrayBuilder.make[Double]
    val numExamples = data.count()
    val costFun =
      new CostFun(data, gradient, updater, regParam, numExamples, numClasses)
    val lbfgs = new BreezeLBFGS[BV[Double]](maxNumIterations, numCorrections, convergenceTol)
    // Lazily drive Breeze's optimizer state iterator to completion, recording
    // the loss of each state along the way.
    val states =
      lbfgs.iterations(new CachedDiffFunction(costFun), initialWeights.asBreeze)
    /**
     * NOTE: lossSum and loss is computed using the weights from the previous iteration
     * and regVal is the regularization value computed in the previous iteration as well.
     */
    var state = states.next()
    while (states.hasNext) {
      lossHistory += state.value
      state = states.next()
    }
    lossHistory += state.value
    val weights = Vectors.fromBreeze(state.x)
    val lossHistoryArray = lossHistory.result()
    logInfo("LBFGS.runLBFGS finished. Last 10 losses %s".format(
      lossHistoryArray.takeRight(10).mkString(", ")))
    (weights, lossHistoryArray)
  }
  // NOTE(review): mutable iteration counter shared by every optimization in
  // this JVM; it only feeds the debug println in CostFun.calculate and is not
  // thread-safe. Consider removing once debugging is finished.
  var nc = 0
  /**
   * CostFun implements Breeze's DiffFunction[T], which returns the loss and gradient
   * at a particular point (weights). It's used in Breeze's convex optimization routines.
   */
  private class CostFun(
      data: RDD[(Double, Vector)],
      gradient: Gradient,
      updater: Updater,
      regParam: Double,
      numExamples: Long,
      numClasses: Int) extends DiffFunction[BV[Double]] {
    override def calculate(weights: BV[Double]): (Double, BDV[Double]) = {
      nc += 1
      // Debug output; see the NOTE on `nc` above.
      println(s"iteration $nc")
      // Have a local copy to avoid the serialization of CostFun object which is not serializable.
      val w = Vectors.fromBreeze(weights)
      val n = w.size
      // NOTE(review): a new broadcast is created each iteration and never
      // destroyed/unpersisted here — TODO confirm the accumulated broadcasts
      // are acceptable for long-running optimizations.
      val bcW = data.context.broadcast(w)
      val localGradient = new SparseLogisticGradient(numClasses)
      // Sparse accumulator for the gradient sum, keyed by feature index.
      val initCumGrad = new OpenAddressHashArray[Double](n)
      val (gradientSum, lossSum) = data.treeAggregate((initCumGrad, 0.0))(
        seqOp = (c, v) => (c, v) match { case ((grad, loss), (label, features)) =>
          val l = localGradient.compute(
            features, label, bcW.value, grad)
          (grad, loss + l)
        },
        combOp = (c1, c2) => (c1, c2) match { case ((grad1, loss1), (grad2, loss2)) =>
          // axpy(1.0, grad2, grad1)
          // Manual sparse merge instead of axpy: only the active entries of
          // grad2 are folded into grad1.
          grad2.activeIterator.foreach { case (index, value) =>
            grad1(index) += value
          }
          (grad1, loss1 + loss2)
        })
      /**
       * regVal is sum of weight squares if it's L2 updater;
       * for other updater, the same logic is followed.
       */
      val regVal = updater.compute(w, Vectors.zeros(n), 0, 1, regParam)._2
      val loss = lossSum / numExamples + regVal
      /**
       * It will return the gradient part of regularization using updater.
       *
       * Given the input parameters, the updater basically does the following,
       *
       * w' = w - thisIterStepSize * (gradient + regGradient(w))
       * Note that regGradient is function of w
       *
       * If we set gradient = 0, thisIterStepSize = 1, then
       *
       * regGradient(w) = w - w'
       *
       * TODO: We need to clean it up by separating the logic of regularization out
       * from updater to regularizer.
       */
      // The following gradientTotal is actually the regularization part of gradient.
      // Will add the gradientSum computed from the data with weights in the next step.
      val gradientTotal = w.toDense.copy
      axpy(-1.0, updater.compute(w, Vectors.zeros(n), 1, 1, regParam)._1, gradientTotal)
      // gradientTotal = gradientSum / numExamples + gradientTotal
      axpy(1.0 / numExamples, Vectors.dense(gradientSum.toArray), gradientTotal)
      (loss, gradientTotal.asBreeze.asInstanceOf[BDV[Double]])
    }
  }
}
| hhbyyh/SparseML | LogisticRegression/sparseLR_2.0/SparseLBFGS.scala | Scala | apache-2.0 | 8,230 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.ml
// $example on$
import org.apache.spark.ml.feature.SQLTransformer
// $example off$
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
/** Minimal example showing how `SQLTransformer` rewrites a DataFrame with SQL. */
object SQLTransformerExample {
  def main(args: Array[String]) {
    val sparkConf = new SparkConf().setAppName("SQLTransformerExample")
    val sparkContext = new SparkContext(sparkConf)
    val sqlContext = new SQLContext(sparkContext)

    // $example on$
    val dataset = sqlContext.createDataFrame(
      Seq((0, 1.0, 3.0), (2, 2.0, 5.0))).toDF("id", "v1", "v2")

    // __THIS__ is substituted with the input DataFrame at transform time.
    val transformer = new SQLTransformer().setStatement(
      "SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__")

    transformer.transform(dataset).show()
    // $example off$
  }
}
// scalastyle:on println
| chenc10/Spark-PAF | examples/src/main/scala/org/apache/spark/examples/ml/SQLTransformerExample.scala | Scala | apache-2.0 | 1,582 |
package tuner.gui.util
import tuner.Config
import scala.util.Try
import org.sittingbull.gt.util.XWilkinson
import org.sittingbull.gt.util.NiceStepSizeGenerator
import scala.collection.JavaConverters._
import com.typesafe.scalalogging.slf4j.LazyLogging
object AxisTicks extends LazyLogging {

  // Wilkinson's extended-label search, configured to allow "loose" labels.
  val labeler = new XWilkinson(new NiceStepSizeGenerator)
  labeler.setLooseFlag(true)

  /** Number of tick labels that fit in `width` pixels at the given font size. */
  def numTicks(width:Float, fontSize:Float) : Int = {
    val labelSpace = 3
    val fitting = math.floor((width - labelSpace) / (fontSize + labelSpace)).toInt - 1
    math.min(Config.axisNumTicks, fitting)
  }

  /** Tick positions sized to the available axis width. */
  def ticks(min:Float, max:Float, width:Float, fontSize:Float) : List[Float] =
    ticks(min, max, numTicks(width, fontSize))

  /**
   * Computes at most `n` "nice" tick positions covering [min, max], or Nil when
   * fewer than two ticks are requested. Falls back to the raw endpoints when
   * the label search fails.
   */
  def ticks(min:Float, max:Float, n:Int=Config.axisNumTicks) : List[Float] = {
    if(min == max) throw new Exception("min cannot equal max")
    if(min.isNaN) throw new Exception("min cannot be NaN")
    if(max.isNaN) throw new Exception("max cannot be NaN")
    if(n < 2) {
      Nil
    } else {
      Try {
        labeler.search(min, max, n).toList.asScala.map(_.toFloat).toList
      } getOrElse {
        logger.warn(s"wilkinson failed with (min,max,n) at ($min,$max,$n)")
        List(min, max)
      }
    }
  }

  /** Ticks plus the (low, high) range they actually span. */
  def ticksAndRange(min:Float, max:Float, width:Float, fontSize:Float)
    : (List[Float], (Float,Float)) = {
    val ts = ticks(min, max, width, fontSize)
    val bounds =
      if(ts.isEmpty) (min, max)
      else (ts.min, ts.max)
    (ts, bounds)
  }
}
| gabysbrain/tuner | src/main/scala/tuner/gui/util/AxisTicks.scala | Scala | mit | 1,640 |
/*
* This file is part of the sohva project.
* Copyright (c) 2016 Lucas Satabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gnieh.sohva
import strategy.Strategy
import akka.http.scaladsl.model._
import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import spray.json._
import scala.concurrent._
import org.slf4j.LoggerFactory
/** Core CRUD operations on CouchDB documents, with automatic conflict resolution. */
abstract class DocumentOps {

  import SohvaProtocol._
  import SprayJsonSupport._

  /** Execution context on which all futures are composed. */
  implicit val ec: ExecutionContext

  /** Number of conflict-resolution retries allowed when saving a document. */
  val credit: Int

  /** Merge strategy applied between base, last, and current revisions on conflict. */
  val strategy: Strategy

  /** Returns the document identified by the given id if it exists */
  def getDocById[T: JsonReader](id: String, revision: Option[String] = None): Future[Option[T]] =
    for (raw <- optHttp(HttpRequest(uri = uri / id <<? revision.flatMap(r => if (r.nonEmpty) Some("rev" -> r) else None))).withFailureMessage(f"Failed to fetch document by ID $id and revision $revision"))
      yield raw.map(_.convertTo[T])

  /** Creates or updates the given object as a document into this database
   * The given object must have an `_id` and an optional `_rev` fields
   * to conform to the couchdb document structure.
   * The saved revision is returned. If something went wrong, an exception is raised
   */
  def saveDoc[T: CouchFormat](doc: T): Future[T] = {
    val format = implicitly[CouchFormat[T]]
    (for {
      upd <- resolver(credit, format._id(doc), format._rev(doc), doc.toJson)
      res <- update[T](upd.convertTo[DocUpdate])
    } yield res) withFailureMessage f"Unable to save document with ID ${format._id(doc)} at revision ${format._rev(doc)}"
  }

  /** Saves a raw JSON document; the object must carry an `_id` field and may carry `_rev`. */
  protected[this] def saveRawDoc(doc: JsValue): Future[JsValue] = doc match {
    case JsObject(fields) =>
      val idRev = for {
        id <- fields.get("_id").map(_.convertTo[String])
        rev = fields.get("_rev").map(_.convertTo[String])
      } yield (id, rev)
      idRev match {
        case Some((id, rev)) =>
          (for {
            upd <- resolver(credit, id, rev, doc)
            res <- updateRaw(upd.convertTo[DocUpdate])
          } yield res) withFailureMessage f"Failed to update raw document with ID $id and revision $rev"
        case None =>
          Future.failed(new SohvaException(f"Not a couchdb document: ${doc.prettyPrint}"))
      }
    case _ =>
      Future.failed(new SohvaException(f"Not a couchdb document: ${doc.prettyPrint}"))
  }

  /* the resolver is responsible for applying the merging strategy on conflict and retrying
   * to save the document after resolution process */
  private def resolver(credit: Int, docId: String, baseRev: Option[String], current: JsValue): Future[JsValue] = current match {
    case JsNull =>
      // Nothing to persist; report success at the base revision.
      LoggerFactory.getLogger(getClass).info("No document to save")
      Future.successful(DocUpdate(true, docId, baseRev.getOrElse("")).toJson)
    case _ =>
      (for {
        entity <- Marshal(current).to[RequestEntity]
        res <- http(HttpRequest(HttpMethods.PUT, uri = uri / docId, entity = entity))
      } yield res).recoverWith {
        case exn @ ConflictException(_) if credit > 0 =>
          LoggerFactory.getLogger(getClass).info("Conflict occurred, try to resolve it")
          // try to resolve the conflict and save again, spending one unit of credit
          for {
            // get the base document if any
            base <- getDocById[JsValue](docId, baseRev)
            // get the last document
            last <- getDocById[JsValue](docId)
            // apply the merge strategy between base, last and current revision of the document
            lastRev = last collect {
              case JsObject(fs) if fs.contains("_rev") => fs("_rev").convertTo[String]
            }
            resolved = strategy(base, last, current)
            res <- resolved match {
              case Some(resolved) => resolver(credit - 1, docId, lastRev, resolved)
              case None           => Future.failed(exn)
            }
          } yield res
      } withFailureMessage f"Unable to resolve document with ID $docId at revision $baseRev"
  }

  // Fetches the freshly saved revision, or fails if CouchDB reported the save as not ok.
  private[this] def update[T: JsonReader](res: DocUpdate) = res match {
    case DocUpdate(true, id, rev) =>
      getDocById[T](id, Some(rev)).map(_.get)
    case DocUpdate(false, id, _) =>
      Future.failed(new SohvaException(f"Document $id could not be saved"))
  }

  private[this] def updateRaw(res: DocUpdate) = res match {
    case DocUpdate(true, id, rev) =>
      getDocById[JsValue](id, Some(rev)).map(_.get)
    case DocUpdate(false, id, _) =>
      // Fix: the message previously lacked the interpolator and emitted a literal "$id".
      Future.failed(new SohvaException(f"Document $id could not be saved"))
  }

  /** Deletes the document from the database.
   * The document will only be deleted if the caller provided the last revision
   */
  def deleteDoc[T: CouchFormat](doc: T): Future[Boolean] = {
    val format = implicitly[CouchFormat[T]]
    for (
      res <- http(HttpRequest(HttpMethods.DELETE, uri = uri / format._id(doc) <<? Map("rev" -> format._rev(doc).getOrElse("")))) withFailureMessage
        f"Failed to delete document with ID ${format._id(doc)} at revision ${format._rev(doc)} from $uri"
    ) yield res.convertTo[OkResult].ok
  }

  /** Performs the request and returns the parsed JSON response (failing on errors). */
  protected[sohva] def http(req: HttpRequest): Future[JsValue]

  /** Performs the request, mapping a missing resource to `None`. */
  protected[sohva] def optHttp(req: HttpRequest): Future[Option[JsValue]]

  /** Base URI of the database this instance operates on. */
  protected[sohva] val uri: Uri
}
| gnieh/sohva | src/main/scala/gnieh/sohva/DocumentOps.scala | Scala | apache-2.0 | 5,790 |
package edu.cmu.cs.oak.exceptions
/** Oak exception signalling that something looked up could not be found. */
class NotFoundException extends OakException {
} | smba/oak | edu.cmu.cs.oak/src/main/scala/edu/cmu/cs/oak/exceptions/NotFoundException.scala | Scala | lgpl-3.0 | 86 |
package models
import java.util.UUID
import com.mohiva.play.silhouette.api.{ Identity, LoginInfo }
import play.api.libs.json._
import play.api.libs.functional.syntax._
/** Anything that is uniquely identified by a Silhouette [[LoginInfo]]. */
trait Identifiable {
  def loginInfo: LoginInfo
}
/**
 * The user object.
 *
 * @param loginInfo The linked login info. Uniquely identifies a user.
 * @param username Maybe the github username, e.g. "callicles".
 * @param fullName Maybe the full name of the authenticated user.
 * @param email Maybe the email of the authenticated provider.
 * @param avatarURL Maybe the avatar URL of the authenticated provider.
 * @param karma Current karma score of the user.
 */
case class User(
  loginInfo: LoginInfo,
  username: Option[String],
  fullName: Option[String],
  email: Option[String],
  avatarURL: Option[String],
  karma: Int) extends Identity with Identifiable
object User {
  // JSON writer assembled with Play's functional combinator syntax. The field
  // order must mirror the constructor parameter order so the tuple built by
  // `and` lines up with `unlift(User.unapply)`.
  implicit val userWrites: Writes[User] = (
    (JsPath \\ "loginInfo").write[LoginInfo] and
    (JsPath \\ "username").writeNullable[String] and
    (JsPath \\ "fullName").writeNullable[String] and
    (JsPath \\ "email").writeNullable[String] and
    (JsPath \\ "avatarURL").writeNullable[String] and
    (JsPath \\ "karma").write[Int]
  )(unlift(User.unapply))
}
/** A user paired with the contributions they made. */
case class Contributor(
  user: User,
  contributions: Seq[Contribution])
| antonini/gitrank-web | app/models/User.scala | Scala | apache-2.0 | 1,406 |
package actors.supervised
import akka.actor.{ActorRef, ActorSystem, Props}
import akka.pattern.BackoffOpts
import scala.concurrent.duration.FiniteDuration
/** Factory for actors that are restarted with exponential backoff whenever
  * the supervised child stops. Backoff grows between `minBackoff` and
  * `maxBackoff` with no randomisation.
  */
case class RestartOnStop(minBackoff: FiniteDuration, maxBackoff: FiniteDuration) {

  /** Spawns `props` under a `RestartOnStopActor` supervisor named
    * `"$name-supervisor"` and returns the supervisor's ref.
    */
  def actorOf(props: Props, name: String)(implicit system: ActorSystem): ActorRef =
    system.actorOf(
      Props(
        classOf[RestartOnStopActor],
        BackoffOpts.onStop(
          childProps = props,
          childName = name,
          minBackoff = minBackoff,
          maxBackoff = maxBackoff,
          randomFactor = 0
        )
      ),
      s"$name-supervisor"
    )
}
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/main/scala/actors/supervised/RestartOnStop.scala | Scala | apache-2.0 | 610 |
/**
* Copyright 2013 Brian Porter (poornerd at gmail dot com) - twitter: @poornerd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package securesocial.core.providers
import play.api.libs.json.JsObject
import securesocial.core._
import securesocial.core.providers.XingProvider._
import securesocial.core.services.{CacheService, RoutesService}
import scala.concurrent.Future
/**
* A Xing Provider
*/
class XingProvider(
  routesService: RoutesService,
  cacheService: CacheService,
  client: OAuth1Client
) extends OAuth1Provider(
  routesService,
  cacheService,
  client
)
{
  override val id = XingProvider.Xing

  /** Retrieves the authenticated user's own profile from the Xing API and
   *  maps it onto a [[BasicProfile]]. Any failure is logged and re-raised as
   *  an [[AuthenticationException]].
   */
  override def fillProfile(info: OAuth1Info): Future[BasicProfile] = {
    import scala.concurrent.ExecutionContext.Implicits.global
    client.retrieveProfile(XingProvider.VerifyCredentials,info).map { json=>
      // The response wraps the profile in a "users" array; take the first
      // entry. `.head` assumes the array is non-empty — failures fall through
      // to the `recover` below.
      val me = (json \\ Users).as[Seq[JsObject]].head
      val userId = (me \\ Id).as[String]
      val displayName = (me \\ Name).asOpt[String]
      val lastName = (me \\ LastName).asOpt[String]
      val firstName = (me \\ FirstName).asOpt[String]
      val profileImage = (me \\ ProfileImage \\ Large).asOpt[String]
      val email = (me \\ ActiveEmail).asOpt[String]
      BasicProfile(id, userId, displayName, firstName, lastName, email, profileImage, authMethod, Some(info))
    } recover {
      case e =>
        logger.error("[securesocial] error retrieving profile information from Xing", e)
        throw new AuthenticationException()
    }
  }
}
object XingProvider {
  // Xing endpoint returning the authenticated user's own profile.
  val VerifyCredentials = "https://api.xing.com/v1/users/me"
  // Provider identifier registered with SecureSocial.
  val Xing = "xing"
  // JSON field names used when picking the profile apart in fillProfile.
  val Id = "id"
  val Name = "display_name"
  val FirstName = "first_name"
  val LastName = "last_name"
  val Users = "users"
  val ProfileImage = "photo_urls"
  val Large = "large"
  val ActiveEmail = "active_email"
}
| matthewchartier/securesocial | module-code/app/securesocial/core/providers/XingProvider.scala | Scala | apache-2.0 | 2,373 |
package akka.guice.annotation.test.examples
import akka.actor.{Actor, ActorLogging}
import akka.guice.annotation.injectableActor
/**
* Example Actor that can be injected via Guice.
*/
@injectableActor
class ActorWithReply extends Actor
with ActorLogging
{
override def receive: Receive = {
case x : String =>
log.info(x)
sender ! "reply"
}
}
| Equiem/akka-guice | macros/src/test/scala/akka/guice/annotation/test/examples/ActorWithReply.scala | Scala | mit | 370 |
package mesosphere.mesos
import mesosphere.marathon.MarathonTestHelper
import mesosphere.marathon.tasks.{ PortsMatcher, PortsMatch }
import org.scalatest.{ Matchers, GivenWhenThen, FunSuite }
import scala.collection.immutable.Seq
/** Verifies that `ResourceMatcher.ResourceMatch.resources` carries the
  * reservation info of every matched scalar and port resource.
  */
class ResourceMatchTest
    extends FunSuite with GivenWhenThen with Matchers {
  test("resources include all matched reservations") {
    Given("a resource match with reservations")
    val memReservation = MarathonTestHelper.reservation(principal = "memPrincipal", labels = Map("resource" -> "mem"))
    val portReservation = MarathonTestHelper.reservation(principal = "portPrincipal", labels = Map("resource" -> "ports"))

    // One matched scalar (mem on role1) and one matched port (80 on role2),
    // each tied to its reservation.
    val resourceMatch = ResourceMatcher.ResourceMatch(
      scalarMatches = Iterable(
        ScalarMatch(
          "mem", 128.0,
          consumed = Iterable(ScalarMatch.Consumption(128.0, "role1", reservation = Some(memReservation))),
          scope = ScalarMatchResult.Scope.NoneDisk
        )
      ),
      portsMatch = PortsMatch(Seq(PortsMatcher.PortWithRole("role2", 80, reservation = Some(portReservation))))
    )

    When("converting it to resources")
    val resources = resourceMatch.resources

    Then("the resources should refer to the reservations")
    resources should equal(
      Iterable(
        MarathonTestHelper.scalarResource("mem", 128, "role1", reservation = Some(memReservation)),
        MarathonTestHelper.portsResource(80, 80, "role2", reservation = Some(portReservation))
      )
    )
  }
}
| pgkelley4/marathon | src/test/scala/mesosphere/mesos/ResourceMatchTest.scala | Scala | apache-2.0 | 1,489 |
// Functor composition law: mapping a composed function is the same as
// composing the mapped functions.
fmap(g compose f) == fmap(g) compose fmap(f) | hmemcpy/milewski-ctfp-pdf | src/content/1.7/code/scala/snippet09.scala | Scala | gpl-3.0 | 44 |
package views.html.tournament
import controllers.routes
import play.api.libs.json.Json
import lila.api.Context
import lila.app.templating.Environment._
import lila.app.ui.ScalatagsTemplate._
import lila.common.String.html.safeJsonValue
import lila.tournament.Schedule.Freq
import lila.tournament.Tournament
object home {

  /** Renders the tournament home page: a winners/leaderboard sidebar, the
    * client-side schedule chart, and a table of recently finished arenas.
    *
    * @param scheduled upcoming tournaments; non-hourly ones appear in the sidebar
    * @param finished  recently finished tournaments for the bottom table
    * @param winners   recent winners rendered in the leaderboard block
    * @param json      schedule data handed to the LichessTournamentSchedule widget
    */
  def apply(
      scheduled: List[Tournament],
      finished: List[Tournament],
      winners: lila.tournament.AllWinners,
      json: play.api.libs.json.JsObject
  )(implicit ctx: Context) =
    views.html.base.layout(
      title = trans.tournaments.txt(),
      moreCss = cssTag("tournament.home"),
      wrapClass = "full-screen-force",
      moreJs = frag(
        infiniteScrollTag,
        jsModule("tournament.schedule"),
        // Bootstraps the schedule chart with the raw data and translations.
        embedJsUnsafeLoadThen(s"""LichessTournamentSchedule(${safeJsonValue(
          Json.obj(
            "data" -> json,
            "i18n" -> bits.jsI18n
          )
        )})""")
      ),
      openGraph = lila.app.ui
        .OpenGraph(
          url = s"$netBaseUrl${routes.Tournament.home.url}",
          title = trans.tournamentHomeTitle.txt(),
          description = trans.tournamentHomeDescription.txt()
        )
        .some
    ) {
      main(cls := "tour-home")(
        st.aside(cls := "tour-home__side")(
          h2(
            a(href := routes.Tournament.leaderboard)(trans.leaderboard())
          ),
          ul(cls := "leaderboard")(
            winners.top.map { w =>
              li(
                userIdLink(w.userId.some),
                a(title := w.tourName, href := routes.Tournament.show(w.tourId))(
                  scheduledTournamentNameShortHtml(w.tourName)
                )
              )
            }
          ),
          p(cls := "tour__links")(
            ctx.me map { me =>
              frag(
                a(href := routes.UserTournament.path(me.username, "created"))("My tournaments"),
                br
              )
            },
            a(href := routes.Tournament.calendar)(trans.tournamentCalendar()),
            br,
            a(href := routes.Tournament.history(Freq.Unique.name))(trans.arena.history()),
            br,
            a(href := routes.Tournament.help("arena".some))(trans.tournamentFAQ())
          ),
          h2(trans.lichessTournaments()),
          // Sidebar only lists non-hourly scheduled tournaments.
          div(cls := "scheduled")(
            scheduled.map { tour =>
              tour.schedule.filter(s => s.freq != lila.tournament.Schedule.Freq.Hourly) map { s =>
                a(href := routes.Tournament.show(tour.id), dataIcon := tournamentIconChar(tour))(
                  strong(tour.name(full = false)),
                  momentFromNow(s.at)
                )
              }
            }
          )
        ),
        st.section(cls := "tour-home__schedule box")(
          div(cls := "box__top")(
            h1(trans.tournaments()),
            ctx.isAuth option div(cls := "box__top__actions")(
              a(
                href := routes.Tournament.form,
                cls := "button button-green text",
                dataIcon := ""
              )(trans.createANewTournament())
            )
          ),
          div(cls := "tour-chart")
        ),
        div(cls := "arena-list box")(
          table(cls := "slist slist-pad")(
            thead(
              tr(
                th(colspan := 2, cls := "large")(trans.finished()),
                th(cls := "date"),
                th(cls := "players")
              )
            ),
            finishedList(finished)
          )
        )
      )
    }
}
| luanlv/lila | app/views/tournament/home.scala | Scala | mit | 3,544 |
// Copyright 2015,2016,2017,2018,2019,2020 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package commbank.grimlock.test
import commbank.grimlock.framework._
import commbank.grimlock.framework.content._
import commbank.grimlock.framework.encoding._
import commbank.grimlock.framework.environment.implicits._
import commbank.grimlock.framework.extract._
import commbank.grimlock.framework.metadata._
import commbank.grimlock.framework.position._
import shapeless.{ ::, HNil }
import shapeless.nat.{ _0, _1 }
/** `ExtractWithDimension`: looks up the external value keyed by one coordinate. */
class TestExtractWithDimension extends TestGrimlock {
  type P = Value[String] :: Value[String] :: HNil

  val cell = Cell(Position("abc", "def"), Content(ContinuousSchema[Double](), 1.0))
  val ext = Map(Position("abc") -> 3.14)

  "A ExtractWithDimension" should "extract with _0" in {
    ExtractWithDimension[P, _0, Double].extract(cell, ext) shouldBe Option(3.14)
  }

  it should "extract with _1" in {
    ExtractWithDimension[P, _1, Double].extract(cell, ext) shouldBe None
  }

  it should "extract and present" in {
    ExtractWithDimension[P, _0, Double].andThenPresent(d => Option(d * 2)).extract(cell, ext) shouldBe Option(6.28)
  }
}
/** `ExtractWithDimensionAndKey`: nested lookup by one coordinate, then a key. */
class TestExtractWithDimensionAndKey extends TestGrimlock {
  type P = Value[String] :: Value[String] :: HNil

  val cell = Cell(Position("abc", "def"), Content(ContinuousSchema[Double](), 1.0))
  val ext = Map(Position("abc") -> Map(Position(123) -> 3.14))

  "A ExtractWithDimensionAndKey" should "extract with _0" in {
    ExtractWithDimensionAndKey[P, _0, Int, Double](123).extract(cell, ext) shouldBe Option(3.14)
  }

  it should "extract with missing key" in {
    ExtractWithDimensionAndKey[P, _0, Int, Double](456).extract(cell, ext) shouldBe None
  }

  it should "extract with _1" in {
    ExtractWithDimensionAndKey[P, _1, Int, Double](123).extract(cell, ext) shouldBe None
  }

  it should "extract with _1 and missing key" in {
    ExtractWithDimensionAndKey[P, _1, Int, Double](456).extract(cell, ext) shouldBe None
  }

  it should "extract and present" in {
    ExtractWithDimensionAndKey[P, _0, Int, Double](123)
      .andThenPresent(d => Option(d * 2))
      .extract(cell, ext) shouldBe Option(6.28)
  }
}
/** `ExtractWithKey`: looks up the external value by a fixed key, ignoring the cell. */
class TestExtractWithKey extends TestGrimlock {
  type P = Value[String] :: Value[String] :: HNil

  val cell = Cell(Position("abc", "def"), Content(ContinuousSchema[Double](), 1.0))
  val ext = Map(Position("ghi") -> 3.14)

  "A ExtractWithKey" should "extract with key" in {
    ExtractWithKey[P, String, Double]("ghi").extract(cell, ext) shouldBe Option(3.14)
  }

  it should "extract with missing key" in {
    ExtractWithKey[P, String, Double]("jkl").extract(cell, ext) shouldBe None
  }

  it should "extract and present" in {
    ExtractWithKey[P, String, Double]("ghi").andThenPresent(d => Option(d * 2)).extract(cell, ext) shouldBe Option(6.28)
  }
}
/** `ExtractWithPosition`: looks up the external value by the cell's full position. */
class TestExtractWithPosition extends TestGrimlock {
  type P = Value[String] :: Value[String] :: HNil

  val cell1 = Cell(Position("abc", "def"), Content(ContinuousSchema[Double](), 1.0))
  val cell2 = Cell(Position("cba", "fed"), Content(ContinuousSchema[Double](), 1.0))
  val ext = Map(Position("abc", "def") -> 3.14)

  "A ExtractWithPosition" should "extract with key" in {
    ExtractWithPosition[P, Double]().extract(cell1, ext) shouldBe Option(3.14)
  }

  it should "extract with missing position" in {
    ExtractWithPosition[P, Double]().extract(cell2, ext) shouldBe None
  }

  it should "extract and present" in {
    ExtractWithPosition[P, Double]().andThenPresent(d => Option(d * 2)).extract(cell1, ext) shouldBe Option(6.28)
  }
}
/** `ExtractWithPositionAndKey`: nested lookup by full position, then a key. */
class TestExtractWithPositionAndKey extends TestGrimlock {
  type P = Value[String] :: Value[String] :: HNil

  val cell1 = Cell(Position("abc", "def"), Content(ContinuousSchema[Double](), 1.0))
  val cell2 = Cell(Position("cba", "fed"), Content(ContinuousSchema[Double](), 1.0))
  val ext = Map(Position("abc", "def") -> Map(Position("xyz") -> 3.14))

  "A ExtractWithPositionAndKey" should "extract with key" in {
    ExtractWithPositionAndKey[P, String, Double]("xyz").extract(cell1, ext) shouldBe Option(3.14)
  }

  it should "extract with missing position" in {
    ExtractWithPositionAndKey[P, String, Double]("xyz").extract(cell2, ext) shouldBe None
  }

  it should "extract with missing key" in {
    ExtractWithPositionAndKey[P, String, Double]("abc").extract(cell1, ext) shouldBe None
  }

  it should "extract with missing position and key" in {
    ExtractWithPositionAndKey[P, String, Double]("abc").extract(cell2, ext) shouldBe None
  }

  it should "extract and present" in {
    ExtractWithPositionAndKey[P, String, Double]("xyz")
      .andThenPresent(d => Option(d * 2))
      .extract(cell1, ext) shouldBe Option(6.28)
  }
}
/** `ExtractWithSelected`: lookup keyed by the slice-selected coordinates. */
class TestExtractWithSelected extends TestGrimlock {
  type P = Value[String] :: Value[String] :: HNil
  type S = Value[String] :: HNil
  type R = Value[String] :: HNil

  val cell = Cell(Position("abc", "def"), Content(ContinuousSchema[Double](), 1.0))
  val ext = Map(Position("abc") -> 3.14)

  "A ExtractWithSelected" should "extract with Over" in {
    ExtractWithSelected[P, S, R, Double](Over(_0)).extract(cell, ext) shouldBe Option(3.14)
  }

  it should "extract with Along" in {
    ExtractWithSelected[P, S, R, Double](Along(_0)).extract(cell, ext) shouldBe None
  }

  it should "extract and present" in {
    ExtractWithSelected[P, S, R, Double](Over(_0))
      .andThenPresent(d => Option(d * 2))
      .extract(cell, ext) shouldBe Option(6.28)
  }
}
/** `ExtractWithSelectedAndKey`: lookup by selected coordinates, then a key. */
class TestExtractWithSelectedAndKey extends TestGrimlock {
  type P = Value[String] :: Value[String] :: HNil
  type S = Value[String] :: HNil
  type R = Value[String] :: HNil

  val cell = Cell(Position("abc", "def"), Content(ContinuousSchema[Double](), 1.0))
  val ext = Map(Position("abc") -> Map(Position("xyz") -> 3.14))

  "A ExtractWithSelectedAndKey" should "extract with Over" in {
    ExtractWithSelectedAndKey[P, S, R, String, Double](Over(_0), "xyz").extract(cell, ext) shouldBe Option(3.14)
  }

  it should "extract with Along" in {
    ExtractWithSelectedAndKey[P, S, R, String, Double](Along(_0), "xyz").extract(cell, ext) shouldBe None
  }

  it should "extract with missing key" in {
    ExtractWithSelectedAndKey[P, S, R, String, Double](Over(_0), "abc").extract(cell, ext) shouldBe None
  }

  it should "extract with Along and missing key" in {
    ExtractWithSelectedAndKey[P, S, R, String, Double](Along(_0), "abc").extract(cell, ext) shouldBe None
  }

  it should "extract and present" in {
    ExtractWithSelectedAndKey[P, S, R, String, Double](Over(_0), "xyz")
      .andThenPresent(d => Option(d * 2))
      .extract(cell, ext) shouldBe Option(6.28)
  }
}
/** `ExtractWithSlice`: lookup by selected coordinates, then by the remainder. */
class TestExtractWithSlice extends TestGrimlock {
  type P = Value[String] :: Value[String] :: HNil
  type S = Value[String] :: HNil
  type R = Value[String] :: HNil

  val cell1 = Cell(Position("abc", "def"), Content(ContinuousSchema[Double](), 1.0))
  val cell2 = Cell(Position("cba", "def"), Content(ContinuousSchema[Double](), 1.0))
  val cell3 = Cell(Position("abc", "fed"), Content(ContinuousSchema[Double](), 1.0))
  val ext = Map(Position("abc") -> Map(Position("def") -> 3.14))

  "A ExtractWithSlice" should "extract with Over" in {
    ExtractWithSlice[P, S, R, Double](Over(_0)).extract(cell1, ext) shouldBe Option(3.14)
  }

  it should "extract with Along" in {
    ExtractWithSlice[P, S, R, Double](Along(_0)).extract(cell1, ext) shouldBe None
  }

  it should "extract with missing selected" in {
    ExtractWithSlice[P, S, R, Double](Over(_0)).extract(cell2, ext) shouldBe None
  }

  // Fix: test description previously read "remaider".
  it should "extract with missing remainder" in {
    ExtractWithSlice[P, S, R, Double](Over(_0)).extract(cell3, ext) shouldBe None
  }

  it should "extract and present" in {
    ExtractWithSlice[P, S, R, Double](Over(_0))
      .andThenPresent(d => Option(d * 2))
      .extract(cell1, ext) shouldBe Option(6.28)
  }
}
| CommBank/grimlock | grimlock-core/src/test/scala/commbank/grimlock/TestExtract.scala | Scala | apache-2.0 | 8,455 |
package com.github.etacassiopeia.s99.list
/**
* <h1>P02</h1>
* Find the last but one element of a list
*
* @author Mohsen Zainalpour
* @version 1.0
* @since 28/04/16
*/
object P02 {

  /** Demo entry point.
   *
   *  Fixes two defects of the original: the first computed result was bound
   *  but never printed, and `penultimate(List(1))` threw before the final
   *  `println`, so the program always crashed without producing output.
   */
  def main(args: Array[String]): Unit = {
    println(penultimate(List(1, 1, 2, 3, 5, 8))) // prints 5
    try println(penultimate(List(1)))
    catch {
      case _: NoSuchElementException =>
        println("a single-element list has no penultimate element")
    }
  }

  /** Returns the last-but-one element of `list`.
   *
   *  @throws NoSuchElementException when the list has fewer than two elements
   */
  @annotation.tailrec
  def penultimate[T](list: List[T]): T = list match {
    case pen :: _ :: Nil => pen
    case _ :: tail => penultimate(tail)
    case _ => throw new NoSuchElementException
  }
}
| EtaCassiopeia/S-99 | src/main/scala/com/github/etacassiopeia/s99/list/P02.scala | Scala | apache-2.0 | 587 |
/**
* ____ __ ____ ____ ____,,___ ____ __ __ ____
* ( _ \ /__\ (_ )(_ _)( ___)/ __) ( _ \( )( )( _ \ Read
* ) / /(__)\ / /_ _)(_ )__) \__ \ )___/ )(__)( ) _ < README.txt
* (_)\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt
*/
package razie.wiki.model
import com.mongodb.DBObject
import com.mongodb.casbah.Imports._
import razie.db.RazMongo
import razie.diesel.dom.{RDOM, RDomain, WikiDomain}
import razie.diesel.engine.nodes.{EMsg, ExpectM}
import razie.hosting.Website
import razie.tconf.TagQuery
import razie.tconf.Visibility.PUBLIC
import scala.Array.canBuildFrom
import scala.collection.mutable.ListBuffer
/** search utilities */
object WikiSearch {
/**
*
* @param category
* @param utags
* @param page
* @param qt - first array is and, second is or
* @return
*/
def filterTags (category:String, utags:String, page:Option[WikiEntry], qt:Array[Array[String]]) = {
def checkT (b:String) = {
utags.contains(b) ||
b == "*" ||
category.toLowerCase == b ||
(b == "draft" && page.exists(_.isDraft)) ||
(b == "public" && page.exists(_.visibility == PUBLIC))
}
qt.size > 0 &&
qt.foldLeft(true)((a, b) => a && (
if(b(0).startsWith("-")) ! checkT(b(0).substring(1))
else b.foldLeft(false)((a, b) => a || checkT(b)))
)
}
/** extract associations from the page
* @return Tuple(left, middle, right, mkLink)
*/
def extractAssocs (realm:String, page:Option[WikiEntry]) = {
// website prop p
def p(p:String) = Website.forRealm(realm).flatMap(_.prop(p)).mkString
if(p("wbrowser.query") == "dieselMsg") {
val domList = page.get.collector.getOrElse(RDomain.DOM_LIST, List[Any]()).asInstanceOf[List[Any]].reverse
val colEnt = new ListBuffer[(RDOM.A, String)]()
val colMsg = new ListBuffer[(RDOM.A, String)]()
val mkl = (s : String) => ""
// def mkLink (s:String) = routes.Wiki.wikiBrowse (s, path+"/"+s).toString()
val all = domList.collect {
case m:EMsg => {
colEnt append ((
RDOM.A(page.get.getLabel, page.get.name, m.entity, "me", "service"),
""
))
colMsg append ((
RDOM.A(page.get.getLabel, page.get.name, m.entity+"."+m.met, "me", "msg"),
""
))
}
case m:ExpectM => {
colEnt append ((
RDOM.A(page.get.getLabel, page.get.name, m.m.cls, "me", "service"),
""
))
colMsg append ((
RDOM.A(page.get.getLabel, page.get.name, m.m.cls + "." + m.m.met, "me", "msg"),
""
))
}
}
(colEnt.distinct.toList.map(_._1), colMsg.distinct.toList.map(_._1), Nil, mkl)
} else {
// normal browse mode tagQuery
val all = page.get.ilinks.distinct.collect {
case link if link.wid.page.isDefined =>
(RDOM.A(page.get.getLabel, page.get.name, link.wid.name, "me", link.role.mkString), link.wid.page.get.tags.mkString)
}
def qt(t:String) = {
t.split("[/&]").filter(_ != "tag").map(_.split("[,|]"))
}
def filter(cat:String, tags:String, s:String) = {
filterTags(cat, tags, page, qt(p(s)))
}
// todo beef it up - include topic and stuff
val mkl = (s : String) => "/wiki/"+s
// def mkLink (s:String) = routes.Wiki.wikiBrowse (s, path+"/"+s).toString()
// def mkLink (s:String) = routes.Wiki.showWid (CMDWID(Option(s), none, "", ""), 1, "?").toString()
(
all.filter(x=>filter("", x._2, "wbrowser.left")).map(_._1),
all.filter(x=>filter("", x._2, "wbrowser.middle")).map(_._1),
all.filter(x=>filter("", x._2, "wbrowser.right")).map(_._1),
mkl
)
}
}
/** search all topics provide either q or curTags
*
* @param realm the realm
* @param q - query string
* @param scope - wpath of scope, if any
* @param curTags = querytags
* @return list[WikiEntry]
*/
def getList(realm:String, q: String, scope:String, curTags:String="", max:Int=2000) : List[WikiEntry] = {
//TODO optimize - index or whatever
// val realm = if("all" != irealm) getRealm(irealm) else irealm
//TODO limit the number of searches - is this performance critical?
val qi = if(q.length > 0 && q(0) == '-') q.substring(1).toLowerCase else q.toLowerCase
val doesNotContain = q.length > 0 && q(0) == '-'
def qnot(x:Boolean) = if(doesNotContain) !x else x
//todo only filter if first is tag ?
// array of array - first is AND second is OR
val tagQuery = new TagQuery(curTags)
val qt = tagQuery.qt
//todo optimize: run query straight in the database ?
def filter (u:DBObject) = {
def uf(n:String) = if(u.containsField(n)) u.get(n).asInstanceOf[String] else ""
def hasTags = tagQuery.matches(u)
if (qi.length <= 0) // just a tag search
hasTags
else
qnot(
(qi.length > 1 && uf("name").toLowerCase.contains(qi)) ||
(qi.length > 1 && uf("label").toLowerCase.contains(qi)) ||
((qi.length() > 3) && uf("content").toLowerCase.contains(qi))
) && hasTags
}
lazy val parent = WID.fromPath(scope).flatMap(x=>Wikis.find(x).orElse(Wikis(realm).findAnyOne(x.name)))
val REALM = if("all" == realm) Map.empty[String,String] else Map("realm"->realm)
val wikis =
if(scope.length > 0 && parent.isDefined) {
val p = parent.get
def src (t:MongoCollection) = {
for (
u <- t.find(REALM ++ Map("parent" -> p._id)) if filter(u)
) yield u
}.toList
if(WikiDomain(realm).zEnds(p.category, "Child").contains("Item"))
RazMongo.withDb(RazMongo("weItem").m, "query") (src)
else
RazMongo.withDb(RazMongo("WikiEntry").m, "query") (src)
} else {
RazMongo.withDb(RazMongo("WikiEntry").m, "query") { t =>
for (
u <- t.find(REALM) if filter(u)
) yield u
}.toList
}
if (wikis.size == 1)
wikis.map(WikiEntry.grated _)
else {
val wl1 = wikis.map(WikiEntry.grated _).take(max)
//todo optimize - split and sort up-front, not as a separate step
val wl2 = wl1.partition(w=> qnot(w.name.toLowerCase.contains(qi) || w.label.toLowerCase.contains(qi)))
val wl = if(qi.length > 0) wl2._1.sortBy(_.name.length) ::: wl2._2 else wl1
wl
}
}
}
| razie/diesel-rx | diesel/src/main/scala/razie/wiki/model/WikiSearch.scala | Scala | apache-2.0 | 6,521 |
package varys.util
/**
* Created by franklab on 15-5-27.
*/
import java.util
import akka.actor.{ActorRef, Props, ActorSystem, Actor}
import org.jnetpcap.Pcap
import org.jnetpcap.PcapBpfProgram
import org.jnetpcap.PcapIf
import org.jnetpcap.packet.PcapPacket
import org.jnetpcap.packet.PcapPacketHandler
import org.jnetpcap.protocol.tcpip.Tcp
import varys.Logging
import varys.framework._
/** Actor that, on [[StartECN]], captures inbound TCP packets on `ifName` via
  * jNetPcap and reports the fraction of recently seen packets carrying the
  * CWR (congestion) flag back to the requester as [[UpdateRate]] messages.
  */
class ECN (val ifName: String) extends Actor with Logging{
  val _ifName = ifName

  /** Runnable wrapper so the blocking capture loop can run off-actor. */
  class controlTask(sender: ActorRef) extends Runnable {
    val sender_ = sender
    override def run: Unit = {
      start(sender_)
    }
  }

  override def receive ={
    case StartECN =>
      val task = new Thread(new controlTask(sender))
      // Fix: the original called task.run(), which executes the infinite
      // pcap.loop synchronously on the actor's thread (Thread.run does not
      // spawn a thread), permanently blocking the actor. start() runs the
      // capture on the new thread as intended.
      task.start()
    case _ =>
      logError("Controller of ECN receive something wrong")
  }

  /** Opens `_ifName`, filters for TCP destined to its IPv4 address, and loops
    * forever handing packets to the ECN-fraction handler. Blocking call.
    */
  def start(sender: ActorRef): Unit = {
    val devs = new util.ArrayList[PcapIf]()
    val errbuf = new java.lang.StringBuilder()
    val r = Pcap.findAllDevs(devs, errbuf)
    if (r == Pcap.NOT_OK || devs.isEmpty) {
      println("Cannot read devices, %s".format(errbuf.toString()))
    }
    // Select the device whose name matches the configured interface.
    val targetIf = for {i <- 0 to (devs.size() - 1)
         if (devs.get(i).getName.compareTo(_ifName) == 0)
    } yield devs.get(i)
    if (targetIf.isEmpty) {
      logError("Cannot find the device named %s".format(_ifName))
    }
    val interface = targetIf(0)
    val snaplen = 64 * 1024;
    val flags = Pcap.MODE_PROMISCUOUS
    val timeout = 10 * 1000
    val pcap = Pcap.openLive(interface.getName, snaplen, flags, timeout, errbuf)
    if (pcap == null) {
      logError("Cannot open devices, %s".format(errbuf.toString()))
    }

    // Pick the interface's IPv4 address (the INET4-family entry).
    val ips = for {i <- 0 to interface.getAddresses.size() - 1
                   if (interface.getAddresses.get(i).getAddr.toString.contains("INET4"))
    } yield interface.getAddresses.get(i).getAddr.toString
    if (ips.isEmpty) {
      logError("Cannot find the ip address of %s".format(_ifName))
    }
    // The address prints as "[...:a.b.c.d]"; slice out the dotted quad.
    val ip = ips(0).substring((ips(0).indexOf(':') + 1), (ips(0).size - 1))
    logInfo("IP of %s is %s".format(_ifName, ip))

    // BPF filter: only TCP packets addressed to this host.
    val program = new PcapBpfProgram()
    val expression = "tcp and dst ".concat(ip)
    val optimize = 0
    val netmask = 0xffffff00
    if (pcap.compile(program, expression, optimize, netmask) != Pcap.OK) {
      logError("Error: %s".format(pcap.getErr))
    }
    if (pcap.setFilter(program) != Pcap.OK) {
      logError("Error: %s".format(pcap.getErr))
    }

    val jpacketHandler = new PcapPacketHandler[String] {
      val tcp = new Tcp()
      // 16-bit sliding window of CWR observations (1 bit per packet).
      var total: Int = 0xffff
      var numCE: Int = 0x0
      var fraction = 0.0

      def nextPacket(packet: PcapPacket, user: String) {
        logDebug("Received packet at %s with %s".format(packet.getCaptureHeader.timestampInMillis(), user))
        if (packet.hasHeader(tcp)) {
          logDebug("\\t Find tcp with prot %d, congestion condition: %b".format(tcp.destination(), tcp.flags_CWR()))
          // NOTE(review): the brace-less `if` covers only the shift; the
          // fraction recomputation below runs for every TCP packet. Also,
          // `fraction` is not reset to 0 before the summation, so it carries
          // a decayed残 value from the previous packet — confirm whether this
          // smoothing is intended. Preserved as-is.
          if (tcp.flags_CWR())
            numCE = (numCE << 1 | 0x1) & 0xffff
          for (i <- 0 to 15)
            fraction = fraction + (numCE << i & 0x1).toDouble
          fraction = fraction / 16
          sender ! UpdateRate(fraction)
        }
      }
    }
    pcap.loop(Pcap.LOOP_INFINITE, jpacketHandler, _ifName)
    pcap.close
  }
}
| frankfzw/varys | core/src/main/scala/varys/util/ECN.scala | Scala | apache-2.0 | 3,317 |
package com.socrata.soql.parsing
import scala.util.parsing.input.NoPosition
import org.scalatest._
import org.scalatest.MustMatchers
import com.socrata.soql.ast._
import com.socrata.soql.parsing.standalone_exceptions.BadParse
import com.socrata.soql.environment.{FunctionName, ColumnName}
class ParserTest extends WordSpec with MustMatchers {
  // Helpers: parse `soql` as a bare expression / as a full select statement.
  def parseExpression(soql: String) = new StandaloneParser().expression(soql)
  def parseFull(soql: String) = new StandaloneParser().unchainedSelectStatement(soql)

  // Asserts that parsing `soql` as an expression fails with exactly `expectedMsg`.
  def expectFailure(expectedMsg: String, soql: String) =
    try {
      new StandaloneParser().expression(soql)
      fail("Unexpected success")
    } catch {
      case e: BadParse => e.message must equal (expectedMsg)
    }

  // AST builders pinned at NoPosition so equality checks ignore source positions.
  def ident(name: String) = ColumnOrAliasRef(None, ColumnName(name))(NoPosition)
  def functionCall(name: FunctionName, args: Seq[Expression], filter: Option[Expression], window: Option[WindowFunctionInfo]) = FunctionCall(name, args, filter, window)(NoPosition, NoPosition)
  def stringLiteral(s: String) = StringLiteral(s)(NoPosition)
  def numberLiteral(num: BigDecimal) = NumberLiteral(num)(NoPosition)
"Parsing" should {
"require a full `between' clause" in {
expectFailure("Expected an expression, but got end of input", "x between")
expectFailure("Expected `AND', but got end of input", "x between a")
expectFailure("Expected an expression, but got end of input", "x between a and")
expectFailure("Expected one of `BETWEEN', `IN', or `LIKE', but got end of input", "x not")
expectFailure("Expected an expression, but got end of input", "x not between")
expectFailure("Expected `AND', but got end of input", "x not between a")
}
"require a full `is null' clause" in {
expectFailure("Expected one of `NULL' or `NOT', but got end of input", "x is")
expectFailure("Expected `NULL', but got end of input", "x is not")
expectFailure("Expected `NULL', but got `5'", "x is not 5")
expectFailure("Expected one of `NULL' or `NOT', but got `5'", "x is 5")
}
"require an expression after `not'" in {
expectFailure("Expected an expression, but got end of input", "not")
}
"reject a more-than-complete expression" in {
expectFailure("Expected end of input, but got `y'", "x y")
}
"reject a null expression" in {
expectFailure("Expected an expression, but got end of input", "")
}
"accept a lone identifier" in {
parseExpression("a") must equal (ident("a"))
}
"require something after a dereference-dot" in {
expectFailure("Expected an identifier, but got end of input", "a.")
}
"reject a system id after a dereference-dot" in {
expectFailure("Expected a non-system identifier, but got `:id'", "a.:id")
}
"accept expr.identifier" in {
parseExpression("a.b") must equal (functionCall(SpecialFunctions.Subscript, Seq(ident("a"), stringLiteral("b")), None, None))
}
"reject expr.identifier." in {
expectFailure("Expected an identifier, but got end of input", "a.b.")
}
"reject expr[" in {
expectFailure("Expected an expression, but got end of input", "a[")
}
"reject expr[expr" in {
expectFailure("Expected `]', but got end of input", "a[2 * b")
}
"accept expr[expr]" in {
parseExpression("a[2 * b]") must equal (
functionCall(
SpecialFunctions.Subscript,
Seq(
ident("a"),
functionCall(
SpecialFunctions.Operator("*"),
Seq(
numberLiteral(2),
ident("b")), None, None)), None, None))
}
"reject expr[expr]." in {
expectFailure("Expected an identifier, but got end of input", "a[2 * b].")
}
"accept expr[expr].ident" in {
parseExpression("a[2 * b].c") must equal (functionCall(SpecialFunctions.Subscript, Seq(
functionCall(
SpecialFunctions.Subscript,
Seq(
ident("a"),
functionCall(SpecialFunctions.Operator("*"), Seq(
numberLiteral(2),
ident("b")), None, None)), None, None),
stringLiteral("c")), None, None))
}
"accept expr[expr].ident[expr]" in {
parseExpression("a[2 * b].c[3]") must equal (functionCall(SpecialFunctions.Subscript, Seq(
functionCall(
SpecialFunctions.Subscript,
Seq(
functionCall(
SpecialFunctions.Subscript,
Seq(
ident("a"),
functionCall(SpecialFunctions.Operator("*"), Seq(
numberLiteral(2),
ident("b")), None, None)), None, None),
stringLiteral("c")), None, None),
numberLiteral(3)), None, None))
}
"accept modulo" in {
parseExpression("11 % 2") must equal (
functionCall(SpecialFunctions.Operator("%"), Seq(numberLiteral(11), numberLiteral(2)), None, None))
}
"^ has higher precedence than *" in {
val expr = parseExpression("10 * 3 ^ 2")
expr must equal (
functionCall(SpecialFunctions.Operator("*"), Seq(
numberLiteral(10),
functionCall(SpecialFunctions.Operator("^"), Seq(numberLiteral(3), numberLiteral(2)), None, None)
), None, None)
)
}
"allow offset/limit" in {
val x = parseFull("select * offset 6 limit 5")
x.offset must be (Some(BigInt(6)))
x.limit must be (Some(BigInt(5)))
}
"allow limit/offset" in {
val x = parseFull("select * limit 6 offset 5")
x.limit must be (Some(BigInt(6)))
x.offset must be (Some(BigInt(5)))
}
"allow only limit" in {
val x = parseFull("select * limit 32")
x.limit must be (Some(BigInt(32)))
x.offset must be (None)
}
"allow only offset" in {
val x = parseFull("select * offset 7")
x.limit must be (None)
x.offset must be (Some(BigInt(7)))
}
"allow search" in {
val x = parseFull("select * search 'weather'")
x.search.get must be ("weather")
}
"allow search before order by" in {
val x = parseFull("select * search 'weather' order by x")
x.search.get must be ("weather")
x.orderBys must be (List(OrderBy(ident("x"), true, true)))
}
"allow order by before search" in {
val x = parseFull("select * order by x search 'weather'")
x.search.get must be ("weather")
x.orderBys must be (Seq(OrderBy(ident("x"), true, true)))
}
"disallow order by before AND after search" in {
a [BadParse] must be thrownBy { parseFull("select * order by x search 'weather' order by x") }
}
"disallow search before AND after order by" in {
a [BadParse] must be thrownBy { parseFull("select * search 'weather' order by x search 'weather'") }
}
"not round trip" in {
val x = parseFull("select * where not true")
x.where.get.toString must be ("NOT TRUE")
}
"like round trip" in {
val x = parseFull("select * where `a` like 'b'")
x.where.get.toString must be ("`a` LIKE 'b'")
}
"not like round trip" in {
val x = parseFull("select * where `a` not like 'b'")
x.where.get.toString must be ("`a` NOT LIKE 'b'")
}
"search round trip" in {
val x = parseFull("select * search 'weather'")
val y = parseFull(x.toString)
y must be (x)
}
"allow hints" in {
val soql = "select hint(materialized) * limit 32"
val x = parseFull(soql)
x.hints must be (Vector(Materialized(SoQLPosition(1, 13, soql, 12))))
}
"count(disinct column)" in {
val x = parseFull("select count(distinct column)")
x.selection.expressions.head.expression.asInstanceOf[FunctionCall].functionName.name must be ("count_distinct")
x.selection.expressions.head.expression.toString must be ("count(DISTINCT `column`)")
}
"count(column)" in {
val x = parseFull("select count(column)")
x.selection.expressions.head.expression.asInstanceOf[FunctionCall].functionName.name must be ("count")
x.selection.expressions.head.expression.toString must be ("count(`column`)")
}
"count(*)" in {
val x = parseFull("select count(*)")
x.selection.expressions.head.expression.asInstanceOf[FunctionCall].functionName.name must be ("count/*")
x.selection.expressions.head.expression.toString must be ("count(*)")
}
"count(*) window" in {
val x = parseFull("select count(*) over (partition by column)")
x.toString must be("SELECT count(*) OVER (PARTITION BY `column`)")
}
"count(column) window" in {
val x = parseFull("select count(column_a) over (partition by column_b)")
x.toString must be("SELECT count(`column_a`) OVER (PARTITION BY `column_b`)")
}
"window function over partition order round trip" in {
val x = parseFull("select row_number() over(partition by x, y order by m, n)")
x.selection.expressions.head.expression.toCompactString must be ("row_number() OVER (PARTITION BY `x`, `y` ORDER BY `m` ASC NULL LAST, `n` ASC NULL LAST)")
}
"window function over partition order desc round trip" in {
val x = parseFull("select row_number() over(partition by x, y order by m desc null last, n)")
x.selection.expressions.head.expression.toCompactString must be ("row_number() OVER (PARTITION BY `x`, `y` ORDER BY `m` DESC NULL LAST, `n` ASC NULL LAST)")
}
"window function over partition round trip" in {
val x = parseFull("select avg(x) over(partition by x, y)")
x.selection.expressions.head.expression.toCompactString must be ("avg(`x`) OVER (PARTITION BY `x`, `y`)")
}
"window function over order round trip" in {
val x = parseFull("select avg(x) over(order by m, n)")
x.selection.expressions.head.expression.toString must be ("avg(`x`) OVER (ORDER BY `m` ASC NULL LAST, `n` ASC NULL LAST)")
}
"window function empty over round trip" in {
val x = parseFull("select avg(x) over()")
x.selection.expressions.head.expression.toString must be ("avg(`x`) OVER ()")
}
"window function over partition frame" in {
val x = parseFull("select avg(x) over(order by m range 123 PRECEDING)")
x.selection.expressions.head.expression.toString must be ("avg(`x`) OVER (ORDER BY `m` ASC NULL LAST RANGE 123 PRECEDING)")
}
"window frame clause should start with rows or range, not row" in {
expectFailure("Expected one of `)', `RANGE', `ROWS', `,', `NULL', `NULLS', `ASC', or `DESC', but got `row'", "avg(x) over(order by m row 123 PRECEDING)")
}
"reject pipe query where right side is not a leaf." in {
val parser = new StandaloneParser()
try {
parser.binaryTreeSelect("SELECT 1 |> (SELECT 2 UNION SELECT 3 FROM @t2 )")
fail("Unexpected success")
} catch {
case e: BadParse.ExpectedLeafQuery =>
// ok good
}
}
"lateral join" in {
val x = parseFull("select c1 join (select c11 from @t1) as j1 on true join lateral (select c21 from @t2 where [email protected]) as j2 on true")
x.joins(0).lateral must be (false)
x.joins(1).lateral must be (true)
}
"select empty" in {
val x = parseFull("select")
val s = x.selection
s.expressions.isEmpty must be (true)
s.allSystemExcept.isEmpty must be (true)
s.allUserExcept.isEmpty must be (true)
x.toString must be ("SELECT")
}
// def show[T](x: => T) {
// try {
// println(x)
// } catch {
// case l: LexerError =>
// val p = l.position
// println("[" + p.line + "." + p.column + "] failure: "+ l.getClass.getSimpleName)
// println()
// println(p.longString)
// }
// }
// show(new LexerReader("").toStream.force)
// show(new LexerReader("- -1*a(1,b)*(3)==2 or x between 1 and 3").toStream.force)
// show(p.expression("- -1*a(1,b)*(3)==2 or x not between 1 and 3"))
// show(p.expression("x between a is null and a between 5 and 6 or f(x x"))
// show(p.expression("x x"))
// LEXER TESTS
// show(p.expression("étäøîn"))
// show(p.expression("_abc + -- hello world!\n123 -- gnu"))
// show(p.expression("\"gnu\""))
// show(p.expression("\"\\U01D000\""))
// show(p.selectStatement("SELECT x AS `where`, y AS `hither-thither`")) // this one's only a semi-lexer-test
// show(p.limit("12e1"))
// show(p.expression("`hello world`"))
// show(p.identifier("__gnu__"))
// show(p.expression("* 8"))
// show(p.orderings("a,b desc,c + d/e"))
// show(p.orderings(""))
// show(p.orderings("a,"))
// show(p.orderings("a ascdesc"))
// show(p.selection(""))
// show(p.selection("a,b,c,"))
// show(p.selection("a,b as,c"))
// show(p.selection("a,b as x,c"))
// show(p.selection("a,b as x,c as y"))
// show(p.selection("a,b as x,c as"))
// show(p.selectStatement(""))
// show(p.selectStatement("x"))
// show(p.selectStatement("select"))
// show(p.selectStatement("select a, b where"))
// show(p.selectStatement("select a, b group"))
// show(p.selectStatement("select a, b order"))
// show(p.selectStatement("select a, b having"))
// show(p.selectStatement("select * where x group by y having z order by w"))
// show(p.selectStatement("select *"))
// show(p.selectStatement("select *("))
// show(p.selectStatement("select *(except"))
// show(p.selectStatement("select *(except a"))
// show(p.selectStatement("select *(except a)"))
// show(p.selectStatement("select *(except a,"))
// show(p.selectStatement("select *(except a,b)"))
// show(p.orderings("x, y asc null last"))
// show(p.selectStatement("select * order by x, y gnu,"))
// show(p.expression("-a :: b :: c"))
// show(p.selectStatement("select * order by x, y limit 5"))
// show(p.expression("a(1) || 'blah'"))
// show(p.selectStatement("select x"))
// show(p.expression("count(__gnu__)+a+b").asInstanceOf[Parsers#Success[Expression]].result.toSyntheticIdentifierBase)
// show(p.expression("count(a) == 'hello, world! This is a smiling gnu.'").asInstanceOf[Parsers#Success[Expression]].result.toSyntheticIdentifierBase)
// show(p.expression("count(a) == `hello-world`").asInstanceOf[Parsers#Success[Expression]].result.toSyntheticIdentifierBase)
// show(p.expression("`-world` + 2").asInstanceOf[Parsers#Success[Expression]].result.toSyntheticIdentifierBase)
// show(p.expression("world is not null").asInstanceOf[Parsers#Success[Expression]].result.toSyntheticIdentifierBase)
// show(p.expression("1 + `-world` = `a-` - 1").asInstanceOf[Parsers#Success[Expression]].result.toSyntheticIdentifierBase)
// show(p.expression(":id - 1").asInstanceOf[Parsers#Success[Expression]].result.toSyntheticIdentifierBase)
// show(p.expression("count(a) == 'hello, world! This is a smiling gnu.'"))
// show(p.selectStatement("select x as :x"))
// show(p.selectStatement("select :*"))
// show(p.selectStatement("select *"))
// show(p.selectStatement("select :*,"))
// show(p.selectStatement("select :*,a"))
// show(p.selectStatement("select *,"))
// show(p.selectStatement("select *,a"))
// show(p.selectStatement("select :*,*"))
// show(p.selectStatement("select :*,*,"))
// show(p.selectStatement("select :*,*,a"))
// show(p.selectStatement("select :*(except a)"))
// show(p.selectStatement("select :*(except :a)"))
// show(p.selectStatement("select *(except :a)"))
// show(p.selectStatement("select *(except a)"))
}
}
| socrata-platform/soql-reference | soql-standalone-parser/src/test/scala/com/socrata/soql/parsing/ParserTest.scala | Scala | apache-2.0 | 15,815 |
/*
* Copyright 2016 Coral realtime streaming analytics (http://coral-streaming.github.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.coral.lib
/** Factory for a scripted [[java.util.Random]] that replays the given doubles in order. */
object NotSoRandom {
  def apply(xs: Double*): java.util.Random = new NotSoRandomDouble(xs: _*)
}
/**
 * A [[java.util.Random]] whose `nextDouble` replays a fixed script of values
 * instead of generating pseudo-random ones.
 * Note: we do not test the underlying source of random numbers here.
 */
class NotSoRandomDouble(xs: Double*) extends java.util.Random {
  // index of the most recently returned scripted value (-1 before the first call)
  var i = -1

  /** Returns the next scripted value, or NaN once the script is exhausted. */
  override def nextDouble(): Double = {
    i += 1
    // don't throw when exhausted: a stream consumer may compute one value ahead
    if (i >= xs.length) Double.NaN else xs(i)
  }
}
| coral-streaming/coral | src/test/scala/io/coral/lib/NotSoRandom.scala | Scala | apache-2.0 | 1,077 |
package org.deepdive.inference
import org.deepdive.Logging
import org.deepdive.serialization.InferenceResultProtos
import java.io.FileInputStream
class ProtobufInferenceResultDeserializier extends Logging {

  /**
   * Lazily reads delimited protobuf weight records from the given file.
   *
   * `parseDelimitedFrom` returns null at end-of-stream; the underlying file
   * stream is closed at that point (previously it was never closed). NOTE: if
   * the caller abandons the iterator before exhausting it, the stream leaks.
   */
  def getWeights(fileName: String) : Iterator[WeightInferenceResult] = {
    val stream = new FileInputStream(fileName)
    Iterator.continually {
      InferenceResultProtos.WeightInferenceResult.parseDelimitedFrom(stream)
    }.takeWhile { w =>
      val more = w != null
      if (!more) stream.close() // fix: close the stream once fully consumed
      more
    }.map { w =>
      WeightInferenceResult(w.getId, w.getValue)
    }
  }

  /**
   * Lazily reads delimited protobuf variable records from the given file,
   * closing the stream at end-of-stream as above.
   */
  def getVariables(fileName: String) : Iterator[VariableInferenceResult] = {
    val stream = new FileInputStream(fileName)
    Iterator.continually {
      InferenceResultProtos.VariableInferenceResult.parseDelimitedFrom(stream)
    }.takeWhile { v =>
      val more = v != null
      if (!more) stream.close() // fix: close the stream once fully consumed
      more
    }.map { v =>
      VariableInferenceResult(v.getId, v.getCategory, v.getExpectation)
    }
  }
}
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream.sql.validation
import org.apache.flink.api.scala._
import org.apache.flink.table.api.{TableException, ValidationException}
import org.apache.flink.table.api.scala._
import org.apache.flink.table.runtime.utils.JavaUserDefinedScalarFunctions.PythonScalarFunction
import org.apache.flink.table.utils.{StreamTableTestUtil, TableTestBase}
import org.hamcrest.Matchers
import org.junit.Test
class MatchRecognizeValidationTest extends TableTestBase {

  private val streamUtil: StreamTableTestUtil = streamTestUtil()
  streamUtil.addTable[(Int, String, Long)]("MyTable", 'a, 'b, 'c.rowtime, 'proctime.proctime)

  /** Function 'MATCH_ROWTIME()' can only be used in MATCH_RECOGNIZE **/
  @Test(expected = classOf[ValidationException])
  def testMatchRowtimeInSelect(): Unit = {
    val sql = "SELECT MATCH_ROWTIME() FROM MyTable"
    streamUtil.verifySql(sql, "n/a")
  }

  /** Function 'MATCH_PROCTIME()' can only be used in MATCH_RECOGNIZE **/
  @Test(expected = classOf[ValidationException])
  def testMatchProctimeInSelect(): Unit = {
    val sql = "SELECT MATCH_PROCTIME() FROM MyTable"
    streamUtil.verifySql(sql, "n/a")
  }

  /** Python Function can not be used in MATCH_RECOGNIZE for now **/
  @Test
  def testMatchPythonFunction(): Unit = {
    // the failure surfaces as a TableException wrapped by the planner
    expectedException.expectCause(Matchers.isA(classOf[TableException]))
    streamUtil.addFunction("pyFunc", new PythonScalarFunction("pyFunc"))
    val sql =
      """SELECT T.aa as ta
        |FROM MyTable
        |MATCH_RECOGNIZE (
        |  ORDER BY proctime
        |  MEASURES
        |    A.a as aa,
        |    pyFunc(1,2) as bb
        |  PATTERN (A B)
        |  DEFINE
        |    A AS a = 1,
        |    B AS b = 'b'
        |) AS T""".stripMargin
    streamUtil.verifySql(sql, "n/a")
  }
}
| hequn8128/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/sql/validation/MatchRecognizeValidationTest.scala | Scala | apache-2.0 | 2,586 |
package meritserver.http.routes
import akka.http.scaladsl.model.StatusCodes._
import meritserver.models.{CreateTransaction, Transaction}
import org.scalatest.Assertion
import org.scalatest.exceptions.TestFailedException
import spray.json.JsArray
class TransactionServiceRouteTest extends ServiceTest {

  "The service for the transactions path" when {

    s"calling GET /$apiVersion/transactions" should {
      "return an empty transaction list" in withTransactions(List()) {
        transactions =>
          Get(s"/$apiVersion/transactions") ~> routes ~> check {
            responseAs[JsArray] shouldEqual JsArray()
          }
      }
      "return all transactions" in withUsers(4) { users =>
        withTransactions(users) { transactions =>
          Get(s"/$apiVersion/transactions") ~> routes ~> check {
            // the endpoint returns one JSON element per stored transaction
            val response = responseAs[JsArray]
            response.elements.size shouldEqual transactions.length
          }
        }
      }
    }

    "calling POST /v1/transactions" should {
      "return newly created transaction" when {
        "created" in withUsers(2) { users =>
          val transaction = CreateTransaction(from = users.head.id,
                                              to = users.tail.head.id,
                                              amount = 1,
                                              reason = "Ey, its just a test!")
          Post(
            s"/$apiVersion/transactions?auth=42",
            transaction
          ) ~> routes ~> check {
            status shouldBe Created
            assertTransaction(responseAs[Transaction],
                              Transaction(from = transaction.from,
                                          to = transaction.to,
                                          amount = transaction.amount,
                                          reason = transaction.reason))
          }
        }
      }
      "fail" when {
        "not authorized" in withUsers(2) { users =>
          val transaction = CreateTransaction(from = users.head.id,
                                              to = users.tail.head.id,
                                              amount = 1,
                                              reason = "Ey, its just a test!")
          // the route rejects the request (no `auth` parameter), so `check`
          // throws a TestFailedException describing the unhandled rejection
          val exception = intercept[TestFailedException] {
            Post(
              s"/$apiVersion/transactions",
              transaction
            ) ~> routes ~> check {
              status shouldBe Unauthorized
            }
          }
          assert(exception.message.get.contains(
            "Request was rejected with rejection AuthorizationFailedRejection"))
        }
        "sender and receiver are the same" in withUsers(1) { users =>
          val transaction = CreateTransaction(from = users.head.id,
                                              to = users.head.id,
                                              amount = 1,
                                              reason = "Ey, its just a test!")
          Post(
            s"/$apiVersion/transactions?auth=42",
            transaction
          ) ~> routes ~> check {
            status shouldBe BadRequest
            assert(responseAs[String].contains(
              "Sender and Receiver cannot be the same!"))
          }
        }
      }
    }
  }

  /**
   * Verifies a created transaction: the server must have generated a non-empty
   * id, echoed all client-supplied fields verbatim, and left it un-booked.
   *
   * Each property is asserted separately so a failure pinpoints the exact
   * mismatching field instead of reporting one opaque boolean.
   */
  private def assertTransaction(response: Transaction,
                                against: Transaction): Assertion = {
    assert(response.id.length > 0, "expected a generated transaction id")
    assert(response.from == against.from, s"from: ${response.from} != ${against.from}")
    assert(response.to == against.to, s"to: ${response.to} != ${against.to}")
    assert(response.amount == against.amount, s"amount: ${response.amount} != ${against.amount}")
    assert(response.reason == against.reason, s"reason: ${response.reason} != ${against.reason}")
    assert(!response.booked, "a freshly created transaction must not be booked")
  }
}
| tafli/MeritServer | src/test/scala/meritserver/http/routes/TransactionServiceRouteTest.scala | Scala | mit | 3,620 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.fetch
import scala.collection.{ breakOut, mutable }
import scala.util.control.NonFatal
import io.gatling.core.check.extractor.css.Jodd
import io.gatling.http.util.HttpHelper
import com.typesafe.scalalogging.StrictLogging
import jodd.lagarto.{ TagUtil, TagType, EmptyTagVisitor, Tag }
import jodd.lagarto.dom.HtmlCCommentExpressionMatcher
import org.asynchttpclient.uri.Uri
// A resource url exactly as it appeared in the page, before resolution against the page's root uri
sealed abstract class RawResource {
  def rawUrl: String
  // resolution failures are swallowed and surface as None
  def uri(rootURI: Uri): Option[Uri] = HttpHelper.resolveFromUriSilently(rootURI, rawUrl)
  def toEmbeddedResource(rootURI: Uri): Option[EmbeddedResource]
}
// A stylesheet url; becomes a CssResource once resolved
case class CssRawResource(rawUrl: String) extends RawResource {
  def toEmbeddedResource(rootURI: Uri): Option[EmbeddedResource] = uri(rootURI).map(CssResource)
}
// Any other embedded resource url (images, scripts, etc.); becomes a RegularResource once resolved
case class RegularRawResource(rawUrl: String) extends RawResource {
  def toEmbeddedResource(rootURI: Uri): Option[EmbeddedResource] = uri(rootURI).map(RegularResource)
}
// All raw resource urls found in a page, plus the <base> href if the page declared one
case class HtmlResources(rawResources: Seq[RawResource], base: Option[String])
object HtmlParser extends StrictLogging {
  // Tag and attribute names are pre-converted to char arrays to match Lagarto's
  // char[]-based Tag#nameEquals / getAttributeValue API
  private val AppletTagName = "applet".toCharArray
  private val BaseTagName = "base".toCharArray
  private val BgsoundTagName = "bgsound".toCharArray
  private val BodyTagName = "body".toCharArray
  private val EmbedTagName = "embed".toCharArray
  private val ImgTagName = "img".toCharArray
  private val InputTagName = "input".toCharArray
  private val LinkTagName = "link".toCharArray
  private val ObjectTagName = "object".toCharArray
  private val StyleTagName = "style".toCharArray
  private val ArchiveAttribute = "archive".toCharArray
  private val BackgroundAttribute = "background".toCharArray
  private val CodeAttribute = "code".toCharArray
  private val CodeBaseAttribute = "codebase".toCharArray
  private val DataAttribute = "data".toCharArray
  private val HrefAttribute = "href".toCharArray
  private val IconAttributeName = "icon".toCharArray
  private val ShortcutIconAttributeName = "shortcut icon".toCharArray
  private val RelAttribute = "rel".toCharArray
  private val SrcAttribute = "src".toCharArray
  private val StyleAttribute = StyleTagName
  private val StylesheetAttributeName = "stylesheet".toCharArray
  // Logs a parser crash: the full page content at debug level, otherwise just the
  // error with a hint to enable debug logging to capture the offending HTML
  def logException(htmlContent: String, e: Throwable): Unit =
    if (logger.underlying.isDebugEnabled)
      logger.debug(s"""HTML parser crashed, there's a chance your page wasn't proper HTML:
>>>>>>>>>>>>>>>>>>>>>>>
$htmlContent
<<<<<<<<<<<<<<<<<<<<<<<""", e)
    else
      logger.error(s"HTML parser crashed: ${e.getMessage}, there's a chance your page wasn't proper HTML, enable debug on 'io.gatling.http.fetch' logger to get the HTML content", e)
}
class HtmlParser extends StrictLogging {

  import HtmlParser._

  // True while the visitor is inside a <style> tag, so its text is scanned for
  // CSS @import urls. NOTE(review): this mutable field makes an HtmlParser
  // instance stateful and unsafe for concurrent use — confirm instances aren't shared.
  var inStyle = false

  /**
   * Walks the HTML with a Lagarto visitor, collecting raw (unresolved) resource
   * urls plus the <base> href, if any.
   *
   * @param htmlContent the raw HTML to scan
   * @param userAgent   used to evaluate IE conditional comments; when None,
   *                    conditional-comment handling is expected to be disabled
   */
  private def parseHtml(htmlContent: String, userAgent: Option[UserAgent]): HtmlResources = {

    // defensive reset: a previous parse that crashed inside a <style> tag would
    // otherwise leave this flag stuck at true
    inStyle = false

    var base: Option[String] = None
    val rawResources = mutable.ArrayBuffer.empty[RawResource]
    val conditionalCommentsMatcher = new HtmlCCommentExpressionMatcher()
    val ieVersion = userAgent.map(_.version)

    val visitor = new EmptyTagVisitor {
      // head of the stack tells whether the current conditional-comment scope is hidden
      var inHiddenCommentStack = List(false)

      def addResource(tag: Tag, attributeName: Array[Char], factory: String => RawResource): Unit =
        Option(tag.getAttributeValue(attributeName)).foreach { url =>
          rawResources += factory(url.toString)
        }

      override def script(tag: Tag, body: CharSequence): Unit =
        if (!isInHiddenComment)
          addResource(tag, SrcAttribute, RegularRawResource)

      override def text(text: CharSequence): Unit =
        if (inStyle && !isInHiddenComment)
          rawResources ++= CssParser.extractStyleImportsUrls(text).map(CssRawResource)

      private def isInHiddenComment = inHiddenCommentStack.head

      override def condComment(expression: CharSequence, isStartingTag: Boolean, isHidden: Boolean, isHiddenEndTag: Boolean): Unit =
        ieVersion match {
          case Some(version) =>
            if (!isStartingTag) {
              inHiddenCommentStack = inHiddenCommentStack.tail
            } else {
              val commentValue = conditionalCommentsMatcher.`match`(version, expression.toString)
              inHiddenCommentStack = (!commentValue) :: inHiddenCommentStack
            }
          case None =>
            throw new IllegalStateException("condComment call while it should be disabled")
        }

      override def tag(tag: Tag): Unit = {

        def codeBase() = Option(tag.getAttributeValue(CodeBaseAttribute))

        def prependCodeBase(codeBase: CharSequence, url: String) =
          if (url.startsWith("http"))
            url
          // fix: was charAt(codeBase.length()), which is always out of bounds;
          // also guard against an empty codebase attribute
          else if (codeBase.length == 0 || codeBase.charAt(codeBase.length - 1) != '/')
            codeBase + "/" + url
          else
            codeBase + url

        def processTag(): Unit =
          tag.getType match {

            case TagType.START | TagType.SELF_CLOSING =>
              if (tag.isRawTag && tag.nameEquals(StyleTagName)) {
                inStyle = true

              } else if (tag.nameEquals(BaseTagName)) {
                base = Option(tag.getAttributeValue(HrefAttribute)).map(_.toString)

              } else if (tag.nameEquals(LinkTagName)) {
                Option(tag.getAttributeValue(RelAttribute)) match {
                  case Some(rel) if TagUtil.equalsToLowercase(rel, StylesheetAttributeName) =>
                    addResource(tag, HrefAttribute, CssRawResource)
                  case Some(rel) if TagUtil.equalsToLowercase(rel, IconAttributeName) || TagUtil.equalsToLowercase(rel, ShortcutIconAttributeName) =>
                    addResource(tag, HrefAttribute, RegularRawResource)
                  case None =>
                    logger.error("Malformed HTML: <link> tag without rel attribute")
                  case _ =>
                }

              } else if (tag.nameEquals(ImgTagName) ||
                tag.nameEquals(BgsoundTagName) ||
                tag.nameEquals(EmbedTagName) ||
                tag.nameEquals(InputTagName)) {
                addResource(tag, SrcAttribute, RegularRawResource)

              } else if (tag.nameEquals(BodyTagName)) {
                addResource(tag, BackgroundAttribute, RegularRawResource)

              } else if (tag.nameEquals(AppletTagName)) {
                // NOTE(review): an <applet> without a code attribute still NPEs here,
                // as before — assumed malformed input; confirm whether it needs guarding
                val code = tag.getAttributeValue(CodeAttribute).toString
                // fix: wrap the possibly-missing archive attribute in Option BEFORE
                // calling toString (previously NPE'd when the attribute was absent)
                val archives = Option(tag.getAttributeValue(ArchiveAttribute)).map(_.toString.split(",").map(_.trim).toList)
                val appletResources = archives.getOrElse(List(code)).iterator
                val appletResourcesUrls = codeBase() match {
                  case Some(cb) => appletResources.map(prependCodeBase(cb, _))
                  case _        => appletResources
                }
                rawResources ++= appletResourcesUrls.map(RegularRawResource)

              } else if (tag.nameEquals(ObjectTagName)) {
                Option(tag.getAttributeValue(DataAttribute)).foreach { data =>
                  val objectResourceUrl = codeBase() match {
                    case Some(cb) => prependCodeBase(cb, data.toString)
                    case _        => data.toString
                  }
                  rawResources += RegularRawResource(objectResourceUrl)
                }

              } else {
                // any other tag: inline style="..." attributes may reference images
                Option(tag.getAttributeValue(StyleAttribute)).foreach { style =>
                  val styleUrls = CssParser.extractInlineStyleImageUrls(style).map(RegularRawResource)
                  rawResources ++= styleUrls
                }
              }

            case TagType.END =>
              if (inStyle && tag.nameEquals(StyleTagName))
                inStyle = false

            case _ =>
          }

        if (!isInHiddenComment)
          processTag()
      }
    }

    try { Jodd.newLagartoParser(htmlContent, ieVersion).parse(visitor) }
    catch { case NonFatal(e) => logException(htmlContent, e) }

    HtmlResources(rawResources, base)
  }

  /**
   * Resolves a page's embedded resources against its base uri, filtering out
   * empty urls, fragment-only urls and data: uris, and de-duplicating.
   */
  def getEmbeddedResources(documentURI: Uri, htmlContent: String, userAgent: Option[UserAgent]): List[EmbeddedResource] = {
    val htmlResources = parseHtml(htmlContent, userAgent)
    // a <base href> overrides the document uri for relative resolution
    val rootURI = htmlResources.base.map(Uri.create(documentURI, _)).getOrElse(documentURI)

    htmlResources.rawResources
      .distinct
      .iterator
      .filterNot(res => res.rawUrl.isEmpty || res.rawUrl.charAt(0) == '#' || res.rawUrl.startsWith("data:"))
      .flatMap(_.toEmbeddedResource(rootURI))
      .toList
  }
}
| wiacekm/gatling | gatling-http/src/main/scala/io/gatling/http/fetch/HtmlParser.scala | Scala | apache-2.0 | 9,107 |
package org.openapitools.server.model
/**
 * Generated API model: the `_links` block of a GitHub repositories resource.
 *
 * @param self    link to this resource itself — presumably a HAL-style self link; verify against the API
 * @param `class` class-name discriminator reported by the remote API — TODO confirm exact semantics
 */
final case class GithubRepositorieslinks (
  self: Option[Link],
  `class`: Option[String]
)
| cliffano/swaggy-jenkins | clients/scala-akka-http-server/generated/src/main/scala/org/openapitools/server/model/GithubRepositorieslinks.scala | Scala | mit | 220 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.bin
import java.io.{ByteArrayOutputStream, OutputStream}
import java.nio.charset.StandardCharsets
import java.nio.{ByteBuffer, ByteOrder}
import java.util.Date
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.geomesa.utils.bin.BinaryEncodeCallback.{ByteArrayCallback, ByteStreamCallback}
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.ToValues
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.geotools.sft.SimpleFeatureSpec.ListAttributeSpec
import org.locationtech.geomesa.utils.geotools.sft.SimpleFeatureSpecParser
import org.locationtech.jts.geom.{Geometry, LineString, Point}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.collection.JavaConversions._
class BinaryOutputEncoder private (toValues: ToValues) {

  /** Encodes a single feature and returns the bin-encoded bytes. */
  def encode(f: SimpleFeature): Array[Byte] = {
    toValues(f, ByteArrayCallback)
    ByteArrayCallback.result
  }

  /** Encodes a single feature into the given callback. */
  def encode(f: SimpleFeature, callback: BinaryOutputCallback): Unit = toValues(f, callback)

  /**
   * Encodes the features to the output stream, optionally sorting records by
   * date first. Always closes the input iterator (but not the output stream).
   *
   * @param f    features to encode
   * @param os   destination stream
   * @param sort sort records chronologically before writing; buffers the whole
   *             result in memory
   * @return number of records written
   */
  def encode(f: CloseableIterator[SimpleFeature], os: OutputStream, sort: Boolean = false): Long = {
    if (sort) {
      val byteStream = new ByteArrayOutputStream
      val callback = new ByteStreamCallback(byteStream)
      try { f.foreach(toValues(_, callback)) } finally {
        f.close()
      }
      val count = callback.result
      if (count == 0L) { 0L } else { // fix: avoid division by zero on empty input
        val bytes = byteStream.toByteArray
        // records are fixed width, so record size = total bytes / record count
        val size = (bytes.length / count).toInt
        bytes.grouped(size).toSeq.sorted(BinaryOutputEncoder.DateOrdering).foreach(os.write)
        count
      }
    } else {
      val callback = new ByteStreamCallback(os)
      try { f.foreach(toValues(_, callback)) } finally {
        f.close()
      }
      callback.result
    }
  }
}
object BinaryOutputEncoder extends LazyLogging {
import AxisOrder._
import org.locationtech.geomesa.utils.geotools.Conversions._
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
  // Feature type used to return aggregated bin results: the raw encoded bytes plus a point geometry
  val BinEncodedSft: SimpleFeatureType = SimpleFeatureTypes.createType("bin", "bin:Bytes,*geom:Point:srid=4326")
  val BIN_ATTRIBUTE_INDEX = 0 // index of 'bin' attribute in BinEncodedSft
// compares the 4 bytes representing the date in a bin array
private val DateOrdering = new Ordering[Array[Byte]] {
override def compare(x: Array[Byte], y: Array[Byte]): Int = {
val compare1 = Ordering.Byte.compare(x(4), y(4))
if (compare1 != 0) { return compare1 }
val compare2 = Ordering.Byte.compare(x(5), y(5))
if (compare2 != 0) { return compare2 }
val compare3 = Ordering.Byte.compare(x(6), y(6))
if (compare3 != 0) { return compare3 }
Ordering.Byte.compare(x(7), y(7))
}
}
  // Selects which feature attributes (by index) feed each bin field. geomField and
  // dtgField fall back to the type's default geometry/date when None (see toValues);
  // a labelField switches output to the 24-byte extended record format.
  case class EncodingOptions(geomField: Option[Int],
                             dtgField: Option[Int],
                             trackIdField: Option[Int],
                             labelField: Option[Int] = None,
                             axisOrder: Option[AxisOrder] = None)
  // One decoded bin record; label is -1L when the record carried no label
  case class EncodedValues(trackId: Int, lat: Float, lon: Float, dtg: Long, label: Long)
  /**
   * BIN queries pack multiple records into each feature. To count the records, we have to count
   * the total bytes coming back, instead of the number of features.
   *
   * The final feature's byte array is trimmed in place so that exactly
   * `maxFeatures` records are returned in total.
   *
   * @param iter aggregated bin iter
   * @param maxFeatures max features
   * @param hasLabel bin results have labels (extended format) or not
   */
  class FeatureLimitingIterator(iter: CloseableIterator[SimpleFeature], maxFeatures: Int, hasLabel: Boolean)
      extends CloseableIterator[SimpleFeature] {
    // fixed record width: 24 bytes with a label, 16 without
    private val bytesPerHit = if (hasLabel) { 24 } else { 16 }
    // number of records emitted so far
    private var seen = 0L
    override def hasNext: Boolean = seen < maxFeatures && iter.hasNext
    override def next(): SimpleFeature = {
      if (hasNext) {
        val sf = iter.next()
        val bytes = sf.getAttribute(0).asInstanceOf[Array[Byte]]
        val count = bytes.length / bytesPerHit
        seen += count
        if (seen > maxFeatures) {
          // remove the extra aggregated features so that we hit our exact feature limit
          val trimmed = Array.ofDim[Byte]((count - (seen - maxFeatures).toInt) * bytesPerHit)
          System.arraycopy(bytes, 0, trimmed, 0, trimmed.length)
          sf.setAttribute(0, trimmed)
        }
        sf
      } else {
        Iterator.empty.next()
      }
    }
    override def close(): Unit = iter.close()
  }
  /** Builds an encoder for the given feature type and encoding options. */
  def apply(sft: SimpleFeatureType, options: EncodingOptions): BinaryOutputEncoder =
    new BinaryOutputEncoder(toValues(sft, options))
def convertToTrack(f: SimpleFeature, i: Int): Int = convertToTrack(f.getAttribute(i))
def convertToTrack(track: AnyRef): Int = if (track == null) { 0 } else { track.hashCode }
// TODO could use `.getDateAsLong` if we know we have a KryoBufferSimpleFeature
def convertToDate(f: SimpleFeature, i: Int): Long = convertToDate(f.getAttribute(i).asInstanceOf[Date])
def convertToDate(date: Date): Long = if (date == null) { 0L } else { date.getTime }
def convertToLabel(f: SimpleFeature, i: Int): Long = convertToLabel(f.getAttribute(i))
def convertToLabel(label: AnyRef): Long = label match {
case null => 0L
case n: Number => n.longValue()
case _ =>
import org.locationtech.geomesa.utils.conversions.ScalaImplicits.RichTraversableOnce
var sum = 0L
label.toString.getBytes(StandardCharsets.UTF_8).iterator.take(8).foreachIndex {
case (b, i) => sum += (b & 0xffL) << (8 * i)
}
sum
}
  /**
   * Decodes a byte array.
   *
   * Record layout (little-endian): 4-byte track id, 4-byte time in seconds,
   * 4-byte lat float, 4-byte lon float, and — for records longer than 16
   * bytes — an 8-byte label.
   *
   * @param encoded encoded byte array
   * @param callback callback for results
   */
  def decode(encoded: Array[Byte], callback: BinaryOutputCallback): Unit = {
    val buf = ByteBuffer.wrap(encoded).order(ByteOrder.LITTLE_ENDIAN)
    val trackId = buf.getInt
    // time is stored in seconds — convert back to epoch millis
    val time = buf.getInt * 1000L
    val lat = buf.getFloat
    val lon = buf.getFloat
    if (encoded.length > 16) {
      val label = buf.getLong
      callback(trackId, lat, lon, time, label)
    } else {
      callback(trackId, lat, lon, time)
    }
  }
  // Decodes into an EncodedValues holder; label is -1L when the record has no label
  def decode(encoded: Array[Byte]): EncodedValues = {
    var values: EncodedValues = null
    decode(encoded, new BinaryOutputCallback() {
      override def apply(trackId: Int, lat: Float, lon: Float, dtg: Long): Unit =
        values = EncodedValues(trackId, lat, lon, dtg, -1L)
      override def apply(trackId: Int, lat: Float, lon: Float, dtg: Long, label: Long): Unit =
        values = EncodedValues(trackId, lat, lon, dtg, label)
    })
    values
  }
  /**
   * Creates the function to map a simple feature to a bin-encoded buffer.
   *
   * Validates the requested field indices up front, then selects a ToValues
   * implementation based on geometry type (point vs linestring vs other),
   * date type (single Date vs List[Date] for linestrings), axis order, and
   * whether a label is requested.
   *
   * @param sft simple feature type
   * @param options encoding options
   * @return function encoding a feature into a BinaryOutputCallback
   */
  private def toValues(sft: SimpleFeatureType, options: EncodingOptions): ToValues = {
    // fall back to the feature type's default geometry/date when not specified
    val geomIndex = options.geomField.getOrElse(sft.getGeomIndex)
    if (geomIndex == -1) {
      throw new IllegalArgumentException(s"Invalid geometry field requested for feature type ${sft.getTypeName}")
    }
    val dtgIndex = options.dtgField.orElse(sft.getDtgIndex).getOrElse(-1)
    if (dtgIndex == -1) {
      throw new RuntimeException(s"Invalid date field requested for feature type ${sft.getTypeName}")
    }
    val isSingleDate = classOf[Date].isAssignableFrom(sft.getDescriptor(dtgIndex).getType.getBinding)
    val axisOrder = options.axisOrder.getOrElse(AxisOrder.LonLat)
    val (isPoint, isLineString) = {
      val binding = sft.getDescriptor(geomIndex).getType.getBinding
      (binding == classOf[Point], binding == classOf[LineString])
    }
    // noinspection ExistsEquals
    if (options.trackIdField.exists(_ == -1)) {
      throw new IllegalArgumentException(s"Invalid track field requested for feature type ${sft.getTypeName}")
    } else if (options.labelField.exists(_ == -1)) {
      throw new IllegalArgumentException(s"Invalid label field requested for feature type ${sft.getTypeName}")
    } else if (!isSingleDate) {
      // a non-Date dtg is only allowed for linestrings, where it must be a List[Date]
      if (isLineString) {
        val dtgField = sft.getDescriptor(dtgIndex).getLocalName
        val sftAttributes = SimpleFeatureSpecParser.parse(SimpleFeatureTypes.encodeType(sft)).attributes
        sftAttributes.find(_.name == dtgField).foreach { spec =>
          if (!spec.isInstanceOf[ListAttributeSpec] ||
              !classOf[Date].isAssignableFrom(spec.asInstanceOf[ListAttributeSpec].subClass)) {
            throw new RuntimeException(s"Invalid date field requested for feature type ${sft.getTypeName}")
          }
        }
      } else {
        throw new RuntimeException(s"Invalid date field requested for feature type ${sft.getTypeName}")
      }
    }
    // gets the track id from a feature; defaults to hashing the feature id
    val getTrackId: (SimpleFeature) => Int = options.trackIdField match {
      case None => (f) => f.getID.hashCode
      case Some(trackId) => convertToTrack(_, trackId)
    }
    // gets the label from a feature
    val getLabelOption: Option[(SimpleFeature) => Long] = options.labelField.map { labelIndex =>
      convertToLabel(_, labelIndex)
    }
    if (isLineString) {
      // for linestrings, we return each point - use an array so we get constant-time lookup
      // depending on srs requested and wfs versions, axis order can be flipped
      val getLineLatLon: (SimpleFeature) => Array[(Float, Float)] = axisOrder match {
        case LatLon => lineToXY(_, geomIndex)
        case LonLat => lineToYX(_, geomIndex)
      }
      if (isSingleDate) {
        getLabelOption match {
          case None => new ToValuesLines(getTrackId, getLineLatLon, dtgIndex)
          case Some(getLabel) => new ToValuesLinesLabels(getTrackId, getLineLatLon, getLabel, dtgIndex)
        }
      } else {
        // for line strings, we need an array of dates corresponding to the points in the line
        val getLineDtg: (SimpleFeature) => Array[Long] = dateArray(_, dtgIndex)
        getLabelOption match {
          case None => new ToValuesLinesDates(getTrackId, getLineLatLon, getLineDtg)
          case Some(getLabel) => new ToValuesLinesDatesLabels(getTrackId, getLineLatLon, getLineDtg, getLabel)
        }
      }
    } else {
      // get lat/lon as floats
      // depending on srs requested and wfs versions, axis order can be flipped
      val getLatLon: (SimpleFeature) => (Float, Float) = (isPoint, axisOrder) match {
        case (true, LatLon) => pointToXY(_, geomIndex)
        case (true, LonLat) => pointToYX(_, geomIndex)
        case (false, LatLon) => geomToXY(_, geomIndex)
        case (false, LonLat) => geomToYX(_, geomIndex)
      }
      getLabelOption match {
        case None => new ToValuesPoints(getTrackId, getLatLon, dtgIndex)
        case Some(getLabel) => new ToValuesPointsLabels(getTrackId, getLatLon, getLabel, dtgIndex)
      }
    }
  }
  // coordinate extraction: "XY" variants return (x, y), "YX" variants return (y, x),
  // supporting both axis orders (see AxisOrder)
  private def pointToXY(p: Point): (Float, Float) = (p.getX.toFloat, p.getY.toFloat)
  private def pointToYX(p: Point): (Float, Float) = (p.getY.toFloat, p.getX.toFloat)
  private def pointToXY(f: SimpleFeature, i: Int): (Float, Float) =
    pointToXY(f.getAttribute(i).asInstanceOf[Point])
  private def pointToYX(f: SimpleFeature, i: Int): (Float, Float) =
    pointToYX(f.getAttribute(i).asInstanceOf[Point])
  // non-point geometries are represented by their centroid
  private def geomToXY(f: SimpleFeature, i: Int): (Float, Float) =
    pointToXY(f.getAttribute(i).asInstanceOf[Geometry].safeCentroid())
  private def geomToYX(f: SimpleFeature, i: Int): (Float, Float) =
    pointToYX(f.getAttribute(i).asInstanceOf[Geometry].safeCentroid())
  // one coordinate pair per vertex of the line
  private def lineToXY(f: SimpleFeature, i: Int): Array[(Float, Float)] = {
    val line = f.getAttribute(i).asInstanceOf[LineString]
    Array.tabulate(line.getNumPoints)(i => pointToXY(line.getPointN(i)))
  }
  private def lineToYX(f: SimpleFeature, i: Int): Array[(Float, Float)] = {
    val line = f.getAttribute(i).asInstanceOf[LineString]
    Array.tabulate(line.getNumPoints)(i => pointToYX(line.getPointN(i)))
  }
  // epoch-millis per date in the list-typed attribute; empty when the attribute is null
  private def dateArray(f: SimpleFeature, i: Int): Array[Long] = {
    val dates = f.getAttribute(i).asInstanceOf[java.util.List[Date]]
    if (dates == null) { Array.empty } else { dates.map(_.getTime).toArray }
  }
  // Encodes one feature into the callback; implementations are specialized per
  // geometry type (point vs line), date layout, and presence of a label
  private trait ToValues {
    def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit
  }
  // point geometry, single date, no label
  private class ToValuesPoints(getTrackId: (SimpleFeature) => Int,
                               getLatLon: (SimpleFeature) => (Float, Float),
                               dtgIndex: Int) extends ToValues {
    override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
      val (lat, lon) = getLatLon(f)
      callback(getTrackId(f), lat, lon, convertToDate(f, dtgIndex))
    }
  }
  // point geometry, single date, with label
  private class ToValuesPointsLabels(getTrackId: (SimpleFeature) => Int,
                                     getLatLon: (SimpleFeature) => (Float, Float),
                                     getLabel: (SimpleFeature) => Long,
                                     dtgIndex: Int) extends ToValues {
    override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
      val (lat, lon) = getLatLon(f)
      callback(getTrackId(f), lat, lon, convertToDate(f, dtgIndex), getLabel(f))
    }
  }
  // linestring geometry, single shared date: emits one record per vertex
  private class ToValuesLines(getTrackId: (SimpleFeature) => Int,
                              getLatLon: (SimpleFeature) => Array[(Float, Float)],
                              dtgIndex: Int) extends ToValues {
    override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
      val trackId = getTrackId(f)
      val points = getLatLon(f)
      val date = convertToDate(f, dtgIndex)
      var i = 0
      while (i < points.length) {
        val (lat, lon) = points(i)
        callback(trackId, lat, lon, date)
        i += 1
      }
    }
  }
  // linestring geometry, single shared date and label: one record per vertex
  private class ToValuesLinesLabels(getTrackId: (SimpleFeature) => Int,
                                    getLatLon: (SimpleFeature) => Array[(Float, Float)],
                                    getLabel: (SimpleFeature) => Long,
                                    dtgIndex: Int) extends ToValues {
    override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
      val trackId = getTrackId(f)
      val points = getLatLon(f)
      val date = convertToDate(f, dtgIndex)
      val label = getLabel(f)
      var i = 0
      while (i < points.length) {
        val (lat, lon) = points(i)
        callback(trackId, lat, lon, date, label)
        i += 1
      }
    }
  }
  // linestring geometry with one date per vertex, no label
  private class ToValuesLinesDates(getTrackId: (SimpleFeature) => Int,
                                   getLatLon: (SimpleFeature) => Array[(Float, Float)],
                                   getLineDtg: (SimpleFeature) => Array[Long]) extends ToValues {
    override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
      val trackId = getTrackId(f)
      val points = getLatLon(f)
      val dates = getLineDtg(f)
      // tolerate mismatched lengths: warn and emit only the common prefix
      val size = if (points.length == dates.length) { points.length } else {
        logger.warn(s"Mismatched geometries and dates for simple feature $f: ${points.toList} ${dates.toList}")
        math.min(points.length, dates.length)
      }
      var i = 0
      while (i < size) {
        val (lat, lon) = points(i)
        callback(trackId, lat, lon, dates(i))
        i += 1
      }
    }
  }
private class ToValuesLinesDatesLabels(getTrackId: (SimpleFeature) => Int,
getLatLon: (SimpleFeature) => Array[(Float, Float)],
getLineDtg: (SimpleFeature) => Array[Long],
getLabel: (SimpleFeature) => Long) extends ToValues {
override def apply(f: SimpleFeature, callback: BinaryOutputCallback): Unit = {
val trackId = getTrackId(f)
val points = getLatLon(f)
val dates = getLineDtg(f)
val size = if (points.length == dates.length) { points.length } else {
logger.warn(s"Mismatched geometries and dates for simple feature $f: ${points.toList} ${dates.toList}")
math.min(points.length, dates.length)
}
val label = getLabel(f)
var i = 0
while (i < size) {
val (lat, lon) = points(i)
callback(trackId, lat, lon, dates(i), label)
i += 1
}
}
}
}
| locationtech/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/bin/BinaryOutputEncoder.scala | Scala | apache-2.0 | 16,755 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding
import org.specs._
import cascading.pipe.joiner._
import java.lang.reflect.InvocationTargetException
import scala.collection.mutable.Buffer
/**
 * Scalding job that computes an inner product of two sparse matrices given in
 * (row, col, value) coordinate form: block-joins the two inputs on the shared
 * middle coordinate, multiplies the matched values, and sums per (x1, x2) cell.
 */
class InnerProductJob(args: Args) extends Job(args) {
  // left/right replication factors for the block join (default 1 = plain join)
  val l = args.getOrElse("left", "1").toInt
  val r = args.getOrElse("right", "1").toInt
  // joiner selection flag; NOTE: any unrecognized value fails fast with a MatchError
  val j = args.getOrElse("joiner", "i") match {
    case "i" => new InnerJoin
    case "l" => new LeftJoin
    case "r" => new RightJoin
    case "o" => new OuterJoin
  }
  // both inputs are (row, col, value) triples of ints
  val in0 = Tsv("input0").read.mapTo((0, 1, 2) -> ('x1, 'y1, 's1)) { input: (Int, Int, Int) => input }
  val in1 = Tsv("input1").read.mapTo((0, 1, 2) -> ('x2, 'y2, 's2)) { input: (Int, Int, Int) => input }
  in0
    .blockJoinWithSmaller('y1 -> 'y2, in1, leftReplication = l, rightReplication = r, joiner = j)
    .map(('s1, 's2) -> 'score) { v: (Int, Int) =>
      v._1 * v._2
    }
    .groupBy('x1, 'x2) { _.sum[Double]('score) }
    .write(Tsv("output"))
}
/** Specs for [[InnerProductJob]], exercising block-join replication and joiner validation. */
class BlockJoinPipeTest extends Specification {
  noDetailedDiffs()
  "An InnerProductJob" should {
    // two small sparse matrices in (row, col, value) string form
    val in1 = List(("0", "0", "1"), ("0", "1", "1"), ("1", "0", "2"), ("2", "0", "4"))
    val in2 = List(("0", "1", "1"), ("1", "0", "2"), ("2", "4", "5"))
    // expected (x1, x2, score) cells, compared as a set since output order is unspecified
    val correctOutput = Set((0, 1, 2.0), (0, 0, 1.0), (1, 1, 4.0), (2, 1, 8.0))
    // Runs the job with the given replication factors / joiner flag and hands
    // the sink's buffer to the callback for assertions.
    def runJobWithArguments(left: Int = 1, right: Int = 1, joiner: String = "i")(callback: Buffer[(Int, Int, Double)] => Unit) {
      JobTest("com.twitter.scalding.InnerProductJob")
        .source(Tsv("input0"), in1)
        .source(Tsv("input1"), in2)
        .arg("left", left.toString)
        .arg("right", right.toString)
        .arg("joiner", joiner)
        .sink[(Int, Int, Double)](Tsv("output")) { outBuf =>
          callback(outBuf)
        }
        .run
        .finish
    }
    "correctly compute product with 1 left block and 1 right block" in {
      runJobWithArguments() { outBuf =>
        val unordered = outBuf.toSet
        unordered must_== correctOutput
      }
    }
    "correctly compute product with multiple left and right blocks" in {
      runJobWithArguments(left = 3, right = 7) { outBuf =>
        val unordered = outBuf.toSet
        unordered must_== correctOutput
      }
    }
    "correctly compute product with a valid LeftJoin" in {
      runJobWithArguments(right = 7, joiner = "l") { outBuf =>
        val unordered = outBuf.toSet
        unordered must_== correctOutput
      }
    }
    // blockJoinWithSmaller rejects these joiner/replication combinations;
    // the job construction failure surfaces as an InvocationTargetException
    "throw an exception when used with OuterJoin" in {
      runJobWithArguments(joiner = "o") { g => g } must throwA[InvocationTargetException]
    }
    "throw an exception when used with an invalid LeftJoin" in {
      runJobWithArguments(joiner = "l", left = 2) { g => g } must throwA[InvocationTargetException]
    }
    "throw an exception when used with an invalid RightJoin" in {
      runJobWithArguments(joiner = "r", right = 2) { g => g } must throwA[InvocationTargetException]
    }
  }
}
| zirpins/scalding | scalding-core/src/test/scala/com/twitter/scalding/BlockJoinTest.scala | Scala | apache-2.0 | 3,487 |
package com.calclavia.graph.matrix
import com.calclavia.graph.matrix.GaloisField.GF2
/**
* @author Calclavia
*/
/**
 * Boolean adjacency matrix over GF(2): entry (from, to) is true when there is
 * a directed edge from `from` to `to`.
 */
class AdjacencyMatrix[K](rows: Set[K], columns: Set[K]) extends SparseMatrix[K, Boolean](rows, columns)(new GF2(false)) {

	/** Square adjacency matrix over a single node set. */
	def this(rows: Set[K]) = this(rows, rows)

	/** True if an edge exists in either direction between the two nodes. */
	def isConnected(from: K, to: K): Boolean = this(from, to) || this(to, from)

	/** True only if edges exist in both directions between the two nodes. */
	def isBiConnected(from: K, to: K): Boolean = this(from, to) && this(to, from)

	/**
	 * Gets a set of nodes directed both from and to this node.
	 * @return Gets nodes directed both from and to this node.
	 */
	def getDirected(node: K) = getDirectedTo(node) | getDirectedFrom(node)

	/**
	 * Gets the set of nodes that have an edge directed towards the given node.
	 * Scans the column for the node, collecting the row key of every true entry.
	 * @param node - The node to check
	 * @return the set of source nodes with an edge into the given node
	 */
	def getDirectedTo(node: K): Set[K] = mat.collect { case ((k1, k2), v) if k2.equals(node) && v => k1 }.toSet

	/**
	 * Gets the set of nodes that the given node has an edge directed towards.
	 * Scans the row for the node, collecting the column key of every true entry.
	 * @param node - The node to check
	 * @return the set of target nodes the given node points to
	 */
	def getDirectedFrom(node: K): Set[K] = mat.collect { case ((k1, k2), v) if k1.equals(node) && v => k2 }.toSet
}
| halvors/Graph-Plugin | src/main/scala/com/calclavia/graph/matrix/AdjacencyMatrix.scala | Scala | lgpl-3.0 | 1,350 |
/** Copyright 2014 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.data.storage
import com.google.common.collect.ImmutableBiMap
import org.apache.spark.rdd.RDD
import scala.collection.JavaConversions._
/** Immutable Bi-directional Map
*
*/
class BiMap[K, V] private[prediction] (
  private val m: ImmutableBiMap[K, V],
  private val i: Option[BiMap[V, K]] = None
  ) extends Serializable {

  /** The inverse view of this bi-map; both views share the same entries. */
  val inverse: BiMap[V, K] = i.getOrElse(new BiMap(m.inverse, Some(this)))

  /** Looks up a key, returning None when it is absent. */
  def get(k: K): Option[V] = Option(m.get(k))

  def getOrElse(k: K, default: => V): V = get(k).getOrElse(default)

  def contains(k: K): Boolean = m.containsKey(k)

  /** Strict lookup; throws NoSuchElementException for a missing key. */
  def apply(k: K): V =
    if (m.containsKey(k)) m.get(k)
    else throw new java.util.NoSuchElementException(s"key not found: ${k}")

  /** Converts to a map.
    * @return a map of type immutable.Map[K, V]
    */
  def toMap: Map[K, V] = m.toMap

  /** Converts to a sequence.
    * @return a sequence containing all elements of this map
    */
  def toSeq: Seq[(K, V)] = m.toSeq

  def size: Int = m.size

  /** A new BiMap holding (up to) the first n entries. */
  def take(n: Int) = BiMap(m.toMap.take(n))

  override def toString = m.toString
}
object BiMap {

  def apply[K, V](x: Map[K, V]): BiMap[K, V] =
    new BiMap(ImmutableBiMap.copyOf[K, V](x))

  /** Builds a BiMap from each key to its zero-based position in iteration
    * order. Shared by all of the stringLong/stringInt factories below,
    * replacing four copies of the same builder loop.
    * NOTE: the keys must be distinct; duplicates make the builder throw.
    * @param keys the keys to index
    * @param toIndex converts the zero-based Int position to the value type
    */
  private def indexedBy[V](keys: TraversableOnce[String])(toIndex: Int => V): BiMap[String, V] = {
    val builder: ImmutableBiMap.Builder[String, V] = ImmutableBiMap.builder()
    keys.toIterator.zipWithIndex.foreach { case (k, i) =>
      builder.put(k, toIndex(i))
    }
    new BiMap(builder.build())
  }

  /** Create a BiMap[String, Long] from a set of String. The Long index starts
    * from 0.
    * @param keys a set of String
    * @return a String to Long BiMap
    */
  def stringLong(keys: Set[String]): BiMap[String, Long] = indexedBy(keys)(_.toLong)

  /** Create a BiMap[String, Long] from an array of String.
    * NOTE: the array cannot have duplicated elements.
    * The Long index starts from 0.
    * @param keys an array of String
    * @return a String to Long BiMap
    */
  def stringLong(keys: Array[String]): BiMap[String, Long] = indexedBy(keys)(_.toLong)

  /** Create a BiMap[String, Long] from RDD[String]. The Long index starts
    * from 0.
    * @param keys RDD of String
    * @return a String to Long BiMap
    */
  def stringLong(keys: RDD[String]): BiMap[String, Long] = {
    stringLong(keys.distinct.collect)
  }

  /** Create a BiMap[String, Int] from a set of String. The Int index starts
    * from 0.
    * @param keys a set of String
    * @return a String to Int BiMap
    */
  def stringInt(keys: Set[String]): BiMap[String, Int] = indexedBy(keys)(identity)

  /** Create a BiMap[String, Int] from an array of String.
    * NOTE: the array cannot have duplicated elements.
    * The Int index starts from 0.
    * @param keys an array of String
    * @return a String to Int BiMap
    */
  def stringInt(keys: Array[String]): BiMap[String, Int] = indexedBy(keys)(identity)

  /** Create a BiMap[String, Int] from RDD[String]. The Int index starts
    * from 0.
    * @param keys RDD of String
    * @return a String to Int BiMap
    */
  def stringInt(keys: RDD[String]): BiMap[String, Int] = {
    stringInt(keys.distinct.collect)
  }
}
| TheDataShed/PredictionIO | data/src/main/scala/storage/BiMap.scala | Scala | apache-2.0 | 4,187 |
/*
* Copyright 2019 ABSA Group Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package za.co.absa.spline.producer.rest.controller
import io.swagger.annotations.{Api, ApiOperation, ApiResponse, ApiResponses}
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.http.{HttpStatus, ResponseEntity}
import org.springframework.web.bind.annotation._
import za.co.absa.spline.producer.rest.HttpConstants.{Encoding, SplineHeaders}
import za.co.absa.spline.producer.rest.ProducerAPI
import za.co.absa.spline.producer.service.repo.ExecutionProducerRepository
import scala.concurrent.{ExecutionContext, Future}
@RestController
@Api(tags = Array("status"))
class StatusController @Autowired()(
  val repo: ExecutionProducerRepository) {

  import ExecutionContext.Implicits.global

  @RequestMapping(
    path = Array("/status"),
    method = Array(RequestMethod.HEAD))
  @ApiOperation(
    value = "Server health status",
    notes = "Check that producer is running and that the database is accessible and initialized")
  @ApiResponses(Array(
    new ApiResponse(code = 200, message = "Everything's working"),
    new ApiResponse(code = 503, message = "There is a problem")
  ))
  @deprecated("Use liveness probe instead", since = "0.7.0")
  def statusHead(): Future[_] = {
    // Translate DB health into an HTTP status and attach the producer API
    // version headers that clients use for capability negotiation.
    repo.isDatabaseOk.map { dbOk =>
      val status = if (dbOk) HttpStatus.OK else HttpStatus.SERVICE_UNAVAILABLE
      ResponseEntity
        .status(status)
        .header(SplineHeaders.ApiVersion, ProducerAPI.SupportedVersions.map(_.asString): _*)
        .header(SplineHeaders.ApiLTSVersion, ProducerAPI.LTSVersions.map(_.asString): _*)
        .header(SplineHeaders.AcceptRequestEncoding, Encoding.GZIP)
        .build()
    }
  }
}
| AbsaOSS/spline | producer-rest-core/src/main/scala/za/co/absa/spline/producer/rest/controller/StatusController.scala | Scala | apache-2.0 | 2,259 |
package org.coursera.naptime.courier
import org.junit.Test
import org.scalatest.junit.AssertionsForJUnit
class CourierSerializerTest extends AssertionsForJUnit {

  import CourierTestFixtures._

  @Test
  def testRecordTemplates(): Unit = {
    // read -> write -> read must be lossless for record templates
    val parsed = CourierSerializer.read[TypedDefinitionRecord](typedDefinitionJson)
    val serialized = CourierSerializer.write(parsed)
    assert(
      CourierSerializer.read[TypedDefinitionRecord](serialized) ===
        CourierSerializer.read[TypedDefinitionRecord](typedDefinitionJson))
  }

  @Test
  def testUnionTemplates(): Unit = {
    // read -> write -> read must be lossless for union templates
    val parsed = CourierSerializer.readUnion[MockTyperefUnion](mockTyperefUnionJson)
    val serialized = CourierSerializer.writeUnion(parsed)
    assert(
      CourierSerializer.readUnion[MockTyperefUnion](serialized) ===
        CourierSerializer.readUnion[MockTyperefUnion](mockTyperefUnionJson))
  }
}
| vkuo-coursera/naptime | naptime-models/src/test/scala/org/coursera/naptime/courier/CourierSerializerTest.scala | Scala | apache-2.0 | 891 |
package me.yingrui.segment.core
import me.yingrui.segment.conf.SegmentConfiguration
import me.yingrui.segment.dict.{DictionaryService, IWord}
import me.yingrui.segment.graph._
import me.yingrui.segment.pinyin.WordToPinyinClassfierFactory
/**
 * Maximum-probability word segmenter: builds a word lattice over the input
 * sentence and picks the segmentation on the shortest (most probable) path.
 * Long inputs are segmented section-by-section to bound the graph size.
 * Optionally annotates results with POS tags, concepts and pinyin.
 */
class MPSegment(config: SegmentConfiguration, dictionaryService: DictionaryService) {

  // shortest-path solver over the word graph; (re)created per segmentation
  private var dijk: IShortestPath = null
  private var graph: IGraph = null
  private var posTagging: IPOSRecognizer = null
  private val withPinyin: Boolean = config.isWithPinyin()
  private val conceptRecognizer: IConceptRecognizer = new SimpleConceptRecognizer()
  // sectioning state used when segmenting very long sentences piecewise
  private var lastSection: Boolean = false
  private var lastSectionStr: String = ""

  initialize()

  private def initialize() {
    initializePOSTagging()
  }

  // Creates a fresh graph and shortest-path implementation sized for the input;
  // uses bigram-weighted Dijkstra when a bigram model is configured.
  private def initializeGraph(size: Int) {
    graph = new Graph(size)
    if (config.get("segment.bigram") != "") {
      dijk = new BigramDijkstra(size, WordBigram(config.get("segment.bigram")))
    } else {
      dijk = new DijkstraImpl(size)
    }
  }

  private def initializePOSTagging() {
    posTagging = new POSRecognizer()
  }

  /**
   * Segments a sentence using the maximum-probability path.
   * Returns null for null/empty input. Sentences shorter than 1023 chars are
   * segmented in one pass; longer ones are processed in ~1000-char sections
   * whose results are appended together.
   * @param sentence the text to segment
   * @param withPOS  whether to also run POS tagging and concept recognition
   */
  def segmentMP(sentence: String, withPOS: Boolean): SegmentResult = {
    if (sentence == null || sentence.length() < 1) {
      return null
    }
    lastSectionStr = ""
    val totalLength = sentence.length()
    var result: SegmentResult = null
    if (totalLength < 1023) {
      result = segment(sentence, withPOS, false)
    } else {
      lastSection = false
      result = new SegmentResult(0)
      var startIndex = 0
      while (startIndex < totalLength) {
        val section = getSection(sentence, startIndex)
        startIndex += section.length()
        lastSection = startIndex == totalLength
        val sectionResult = segment(section, withPOS, true)
        result.append(sectionResult)
        // if the section was cut at a word boundary before its end, re-process
        // the tail that was not consumed (tracked in lastSectionStr)
        if (!lastSection && lastSectionStr.length() > 0) {
          startIndex -= lastSectionStr.length()
        }
      }
    }
    result
  }

  // Populates the word lattice for the given sentence.
  private def buildGraph(sen: String, startPos: Int) {
    val builder = new GraphBuilder(graph, config, dictionaryService)
    builder.buildGraph(sen, startPos)
  }

  // Converts a shortest path (vertex indices) into a SegmentResult; vertex i
  // corresponds to character position i - 1 in the sentence.
  private def buildSegmentResult(path: Path): SegmentResult = {
    val length = path.getLength()
    val wordStartAts = new Array[Int](length)
    val wordEndAts = new Array[Int](length)
    val wordNames = new Array[String](length)
    val domainTypes = new Array[Int](length)
    if (length < 1) {
      return null
    }
    val segmentResult = new SegmentResult(length)
    for (index <- 0 until length) {
      wordStartAts(index) = path.iget(index) - 1
      wordEndAts(index) = path.iget(index + 1) - 1
      val word = graph.getEdgeObject(path.iget(index), path.iget(index + 1))
      wordNames(index) = word.getWordName()
      domainTypes(index) = word.getDomainType()
    }
    segmentResult.setWords(wordNames)
    segmentResult.setDomainTypes(domainTypes)
    segmentResult.setWordStartAts(wordStartAts)
    segmentResult.setWordEndAts(wordEndAts)
    segmentResult
  }

  // Takes the next section (at most 1000 chars) starting at startIndex.
  private def getSection(sentence: String, startIndex: Int): String = {
    var sectionedSentence: String = null
    if (sentence.length() - startIndex >= 1000) {
      sectionedSentence = sentence.substring(startIndex, startIndex + 1000)
    } else {
      sectionedSentence = sentence.substring(startIndex)
    }
    sectionedSentence
  }

  // Finds a safe vertex near the end of the section to stop at, so a word is
  // not cut in half; the remainder is stashed in lastSectionStr for re-processing.
  private def lookupStopVertex(sentence: String): Int = {
    val length = sentence.length()
    lastSectionStr = ""
    var endVertex = -2
    if (!lastSection) {
      // search the last 20 characters for a word-boundary vertex
      endVertex = graph.getStopVertex(length - 20, length)
      if (endVertex > 1 && endVertex > length - 20 && endVertex < length) {
        lastSectionStr = sentence.substring(endVertex - 1)
      } else {
        lastSectionStr = ""
        endVertex = length + 1
      }
    } else {
      endVertex = length + 1
    }
    endVertex
  }

  // Builds the lattice and runs the shortest-path search; for sectioned input
  // the path may stop early at a safe word boundary.
  private def getShortestPathToStopVertex(sentence: String, sectionSegment: Boolean): Path = {
    buildGraph(sentence, 0)
    val sentenceLength = sentence.length()
    dijk.setGraph(graph)
    var p: Path = null
    if (!sectionSegment) {
      p = dijk.getShortestPath(1, sentenceLength + 1)
    } else {
      val stopVertex = lookupStopVertex(sentence)
      if (stopVertex > 1) {
        p = dijk.getShortestPath(1, stopVertex)
      } else {
        p = dijk.getShortestPath(1, sentenceLength + 1)
      }
    }
    p
  }

  // Core single-pass segmentation; optionally adds pinyin, POS and concepts.
  private def segment(sentence: String, withPOS: Boolean, sectionSegment: Boolean): SegmentResult = {
    initializeGraph(sentence.length + 2)
    val path = getShortestPathToStopVertex(sentence, sectionSegment)
    val result = buildSegmentResult(path)
    if (withPinyin) {
      WordToPinyinClassfierFactory().getClassifier().classify(result)
    }
    if (withPOS) {
      result.setPOSArray(posTagging.findPOS(path, graph))
      setConcepts(result, path)
    }
    result
  }

  // Runs concept recognition over the chosen path's words and POS tags.
  private def setConcepts(result: SegmentResult, path: Path) {
    val length = path.getLength()
    if (length == 0) {
      return
    }
    val words = new Array[IWord](length)
    val posArray = new Array[Int](length)
    for (index <- 0 until length) {
      words(index) = graph.getEdgeObject(path.iget(index), path.iget(index + 1))
      posArray(index) = result.getPOS(index)
    }
    conceptRecognizer.reset()
    conceptRecognizer.setPosArray(posArray)
    conceptRecognizer.setWordArray(words)
    result.setConcepts(conceptRecognizer.getConcepts())
  }
}
| yingrui/mahjong | lib-segment/src/main/scala/me/yingrui/segment/core/MPSegment.scala | Scala | gpl-3.0 | 5,606 |
package net.tyler.sopwith
/** Game-wide tuning constants for the Sopwith clone. */
object Configuration {
  // Logging tag used throughout the game
  val LOG = "Sopwith"
  // Presumably skips the splash screen when true -- TODO confirm
  val NO_SPLASH = false
  // Splash screen display time, in milliseconds
  val SPLASH_SCREEN_MS = 2000
  // Virtual viewport size (units unspecified here -- likely pixels/world units)
  val GAME_WIDTH = 480
  val GAME_HEIGHT = 320
  // Plane sprite dimensions, in the same units as the viewport
  val PLANE_WIDTH = 99
  val PLANE_HEIGHT = 51
  // Upper bound on the plane's speed (units per unit time -- TODO confirm)
  val MAX_PLANE_VELOCITY = 20f
  // Vertical acceleration applied to dropped bombs; negative = downward
  val BOMB_ACCELERATION = -9.8f
  // Collision radius for bombs
  val BOMB_RADIUS = 5f
  // Number of bombs the plane starts with
  val INIT_BOMBS = 5
} | DaveTCode/SopwithLibgdx | SopwithCoreProject/src/net/tyler/sopwith/Configuration.scala | Scala | mit | 352 |
package antonkulaga.projects.switch
import rx.core.{Rx, Var}
/**
 * Initial conditions for the toggle-switch model, exposed as reactive
 * variables for the LacI and TetR mRNA and protein levels.
 */
trait ToggleSwitchInit {
  // starting mRNA levels, default 0.0
  lazy val lacI_mRNA_start = Var(0.0)
  lazy val tetR_mRNA_start = Var(0.0)
  // starting protein levels, default 0.0
  lazy val lacI_start = Var(0.0)
  lazy val tetR_start = Var(0.0)
  // reactive vector of all four starting values, in the order
  // [lacI mRNA, tetR mRNA, lacI, tetR]; recomputed when any input changes
  lazy val initialConditions = Rx{ Array(lacI_mRNA_start(), tetR_mRNA_start(), lacI_start(), tetR_start()) }
}
| antonkulaga/personal | app/shared/src/main/scala/antonkulaga/projects/switch/ToggleSwitchInit.scala | Scala | mpl-2.0 | 342 |
package app.services.cache
/** Shared EhCache-backed cache for LDAP connections ("ldap-connection" cache region). */
object LDAPConnectionCache {
  val cache = new EhCacheProvider("ldap-connection")
}
| YoshinoriN/Credentiam | src/app/services/cache/LDAPConnectionCache.scala | Scala | apache-2.0 | 114 |
package org.jetbrains.plugins.scala
package codeInspection
package collections
import com.intellij.testFramework.EditorTestUtil
/**
 * Tests for [[FindAndMapToApplyInspection]]: a `map.find(...).map(_._2)` chain
 * should be highlighted and replaced with a direct `map(key)` apply call.
 *
 * mattfowler
 * 4/28/16
 */
class FindAndMapToApplyTest extends OperationsOnCollectionInspectionTest {

  import EditorTestUtil.{SELECTION_END_TAG => END, SELECTION_START_TAG => START}

  override protected val classOfInspection: Class[FindAndMapToApplyInspection] =
    classOf[FindAndMapToApplyInspection]

  override protected val hint: String =
    "Replace find and map with apply"

  // inline receiver: the whole chain collapses to Map()(1)
  def test_inline_map() {
    val selected = s"Map().${START}find(_ == 1).map(_._2)$END"
    checkTextHasError(selected)
    val text = "Map().find(_ == 1).map(_._2)"
    val result = "Map()(1)"
    testQuickFix(text, result, hint)
  }

  // receiver bound to a val: the chain collapses to m("5")
  def test_with_map_as_val() = {
    val selected =
      s"""val m = Map("k" -> "5", "v" -> "6")
        m.${START}find(_ == "5").map(_._2)$END"""
    checkTextHasError(selected)
    val text =
      s"""val m = Map("k" -> "5", "v" -> "6")
         m.find(_ == "5").map(_._2)""".stripMargin
    val result =
      s"""val m = Map("k" -> "5", "v" -> "6")
         m("5")""".stripMargin
    testQuickFix(text, result, hint)
  }
}
| ilinum/intellij-scala | test/org/jetbrains/plugins/scala/codeInspection/collections/FindAndMapToApplyTest.scala | Scala | apache-2.0 | 1,203 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.protocol.v5.client
import java.nio.charset.Charset
import akka.util.{ByteString, Timeout}
import org.apache.toree.communication.ZMQMessage
import org.apache.toree.kernel.protocol.v5._
import org.apache.toree.kernel.protocol.v5.content.ExecuteRequest
import org.apache.toree.utils.LogLike
import play.api.data.validation.ValidationError
import play.api.libs.json.{JsPath, Json, Reads}
import scala.concurrent.duration._
/**
 * Conversions between ZeroMQ wire messages and kernel protocol messages,
 * plus small helpers for building/parsing protocol content.
 */
object Utilities extends LogLike {
  //
  // NOTE: This is brought in to remove feature warnings regarding the use of
  // implicit conversions regarding the following:
  //
  // 1. ByteStringToString
  // 2. ZMQMessageToKernelMessage
  //
  import scala.language.implicitConversions

  // stable session id shared by all messages produced by this client instance
  private val sessionId: UUID = java.util.UUID.randomUUID().toString

  /**
   * This timeout needs to be defined for the Akka asks to timeout
   */
  implicit val timeout = Timeout(21474835.seconds) // Maximum delay

  /** Decodes a ZeroMQ frame as UTF-8 text. */
  implicit def ByteStringToString(byteString : ByteString) : String = {
    new String(byteString.toArray, Charset.forName("UTF-8"))
  }

  // NOTE(review): encodes with the platform default charset while the decoder
  // above is explicitly UTF-8 -- presumably both should be UTF-8; confirm.
  implicit def StringToByteString(string : String) : ByteString = {
    ByteString(string.getBytes)
  }

  /**
   * Reconstructs a kernel message from ZeroMQ frames. Wire layout (relative
   * to the "<IDS|MSG>" delimiter frame): identities precede it, then
   * +1 signature, +2 header, +3 parent header, +4 metadata, +5 content.
   */
  implicit def ZMQMessageToKernelMessage(message: ZMQMessage): KernelMessage = {
    val delimiterIndex: Int =
      message.frames.indexOf(ByteString("<IDS|MSG>".getBytes))
    // TODO Handle the case where there is no delimiter
    val ids: Seq[String] =
      message.frames.take(delimiterIndex).map(
        (byteString : ByteString) => { new String(byteString.toArray) }
      )
    val header = Json.parse(message.frames(delimiterIndex + 2)).as[Header]
    val parentHeader = Json.parse(message.frames(delimiterIndex + 3)).validate[ParentHeader].fold[ParentHeader](
      // TODO: Investigate better solution than setting parentHeader to null for {}
      (invalid: Seq[(JsPath, Seq[ValidationError])]) => null, //HeaderBuilder.empty,
      (valid: ParentHeader) => valid
    )
    val metadata = Json.parse(message.frames(delimiterIndex + 4)).as[Metadata]
    KMBuilder().withIds(ids.toList)
      .withSignature(message.frame(delimiterIndex + 1))
      .withHeader(header)
      .withParentHeader(parentHeader)
      .withMetadata(metadata)
      .withContentString(message.frame(delimiterIndex + 5)).build(false)
  }

  /** Serializes a kernel message into ZeroMQ frames (inverse of the above). */
  implicit def KernelMessageToZMQMessage(kernelMessage : KernelMessage) : ZMQMessage = {
    val frames: scala.collection.mutable.ListBuffer[ByteString] = scala.collection.mutable.ListBuffer()
    kernelMessage.ids.map((id : String) => frames += id )
    frames += "<IDS|MSG>"
    frames += kernelMessage.signature
    frames += Json.toJson(kernelMessage.header).toString()
    frames += Json.toJson(kernelMessage.parentHeader).toString()
    frames += Json.toJson(kernelMessage.metadata).toString
    frames += kernelMessage.contentString
    ZMQMessage(frames : _*)
  }

  /** Parses json as T and invokes the handler; logs and drops invalid input. */
  def parseAndHandle[T](json: String, reads: Reads[T], handler: T => Unit) : Unit = {
    Json.parse(json).validate[T](reads).fold(
      (invalid: Seq[(JsPath, Seq[ValidationError])]) =>
        logger.error(s"Could not parse JSON, ${json}"),
      (content: T) => handler(content)
    )
  }

  def getSessionId = sessionId

  /** Wraps an ExecuteRequest in a fresh kernel message for this session. */
  def toKernelMessage(message: ExecuteRequest): KernelMessage = {
    // construct a kernel message whose content is an ExecuteRequest
    val id = java.util.UUID.randomUUID().toString
    val header = Header(
      id, "spark", sessionId, MessageType.Incoming.ExecuteRequest.toString, "5.0")
    KMBuilder().withIds(Seq[String]()).withSignature("").withHeader(header)
      .withParentHeader(HeaderBuilder.empty).withContentString(message).build
  }
}
| asorianostratio/incubator-toree | client/src/main/scala/org/apache/toree/kernel/protocol/v5/client/Utilities.scala | Scala | apache-2.0 | 4,563 |
package epic
package preprocess
import java.io.InputStream
import java.net.URL
import de.l3s.boilerpipe.extractors.ArticleExtractor
import epic.slab.Slab
import epic.trees.Span
import org.apache.tika.Tika
import org.apache.tika.io.TikaInputStream
import org.apache.tika.metadata.Metadata
import org.apache.tika.parser.html.BoilerpipeContentHandler
import org.apache.tika.parser.{ParseContext, Parser}
import org.apache.tika.sax.ToTextContentHandler
import org.xml.sax._
/**
 * Extracts plain text from documents at a URL, using Apache Tika for parsing
 * and boilerpipe's ArticleExtractor to optionally strip boilerplate down to
 * the main article content.
 *
 * @author dlwh
 **/
object TextExtractor {

  /** Returns just the extracted text of the document at `url`. */
  def extractText(url: URL, extractMainContentOnly: Boolean = true) = loadSlab(url, extractMainContentOnly).content

  /**
   * Parses the document at `url` into a Slab whose content is the extracted
   * text, annotated with the source URL over its full span.
   * @param extractMainContentOnly when true, boilerpipe keeps only the main article text
   */
  def loadSlab(url: URL, extractMainContentOnly: Boolean = true) = {
    // block-level HTML tags: a newline is emitted around each so the extracted
    // text keeps paragraph / line structure
    val newLineTags = Set(
      "address",
      "blockquote",
      "div",
      "dl",
      "fieldset",
      "form",
      "h1",
      "h2",
      "h3",
      "h4",
      "h5",
      "h6",
      "hr",
      "noscript",
      "ol",
      "p",
      "pre",
      "table",
      "ul",
      "dd",
      "dt",
      "li",
      "tbody",
      "td",
      "tfoot",
      "th",
      "thead",
      "tr",
      // html5
      "article",
      "aside",
      "audio",
      "canvas",
      "figcaption",
      "figure",
      "header",
      "hgroup",
      "output",
      "section",
      "video"
    )
    // accumulates the visible text, inserting newlines around block elements
    val textHandler = new ToTextContentHandler() {
      override def ignorableWhitespace(ch: Array[Char], start: Int, length: Int): Unit = characters(ch, start, length)
      override def startElement(uri: String, localName: String, qName: String, attributes: Attributes): Unit = {
        super.startElement(uri, localName, qName, attributes)
        if (newLineTags(qName.toLowerCase)) {
          ignorableWhitespace(Array('\\n'), 0, 1)
        }
      }
      override def endElement(uri: String, localName: String, qName: String): Unit = {
        super.endElement(uri, localName, qName)
        if (newLineTags(qName.toLowerCase)) {
          ignorableWhitespace(Array('\\n'), 0, 1)
        }
      }
    }
    // optionally wrap with boilerpipe to drop navigation/boilerplate
    val handler = if(extractMainContentOnly) {
      new BoilerpipeContentHandler(textHandler, ArticleExtractor.getInstance()) {
        // stupid handler doesn't pass whitespace
        /*
        override def ignorableWhitespace(ch: Array[Char], start: Int, length: Int): Unit = {
          try {
            characters(ch, start, length)
          } catch {
            case ex: ArrayIndexOutOfBoundsException =>
          }
        }
        */
        setIncludeMarkup(true)
      }
    } else {
      textHandler
    }
    val tk = new Tika()
    val parser = tk.getParser
    val metadata: Metadata = new Metadata
    val stream: InputStream = TikaInputStream.get(url, metadata)
    try {
      val context: ParseContext = new ParseContext
      context.set(classOf[Parser], parser)
      parser.parse(stream, handler, metadata, context)
    } finally {
      stream.close()
    }
    val content = textHandler.toString.trim
    Slab(content).addLayer(Span(0, content.length) -> epic.slab.Source(url))
  }

  /* TODO: I'd like to be able to keep the XHTML formatting in the text, but right now that looks like it's going to
   cause problems with the way slabs work. (Namely, we'll get discontiguous blocks of text, even in the middle of words.
  * Uses boilerpipe to extract the content from an XHTML document
  * @return
  case class Content(labels: Set[String] = Set.empty)
  def loadSlab(url: URL):StringSlab[Content] = {
    val originalxhtml = extractXHTML(url)
    val doc = new BoilerpipeSAXInput(new InputSource(new StringReader(originalxhtml.toString))).getTextDocument
    ArticleExtractor.getInstance().process(doc)
    val textElements = doc.getTextBlocks.asScala.collect { case block if block.isContent =>
      block.getContainedTextElements
    }.foldLeft(new java.util.BitSet()) {(a,b) => a |= b; a}
    val sb = new StringBuilder()
    val contents = new ArrayBuffer[(Span, Content)]()
    val handler = new DefaultHandler {
      var index = 0
      override def startElement(uri: String, localName: String, qName: String, attributes: Attributes): Unit = {
        sb.append('<')
        sb.append(qName)
        for(i <- 0 until attributes.getLength) {
          val attr: String = attributes.getQName(i)
          val value: String = attributes.getValue(i)
          sb.append(' ')
          sb.append(attr)
          sb.append("=\\"")
          sb.append(StringEscapeUtils.escapeXml11(value))
          sb.append("\\"")
        }
        sb.append('>')
      }
      override def endElement(uri: String, localName: String, qName: String): Unit = {
        sb.append("</")
        sb.append(qName)
        sb.append('>')
      }
      override def ignorableWhitespace(ch: Array[Char], start: Int, length: Int): Unit = {
        sb.appendAll(ch, start, length)
      }
      override def characters(ch: Array[Char], start: Int, length: Int): Unit = {
        index += 1
        val s = sb.length
        val value = new String(ch, start, length)
        sb.append(StringEscapeUtils.escapeXml11(value))
        if(textElements(index)) {
          contents.append(Span(s, sb.length) -> Content(getLabelsForTextElement(doc, index)))
        }
      }
    }
    val parser = new AbstractSAXParser(new HTMLConfiguration) {setContentHandler(handler)}
    parser.parse(new InputSource(new StringReader(originalxhtml.toString())))
    val content = sb.toString
    Slab(content)++contents
  }
  private def getLabelsForTextElement(doc: TextDocument, index: Int): Set[String] = {
    doc.getTextBlocks.asScala.find(_.getContainedTextElements.get(index)).map(b => Option(b.getLabels).map(_.asScala).iterator.flatten.toSet).getOrElse(Set.empty)
  }
  */

  /** Parses the document at `url` and returns it as an XHTML element tree. */
  def extractXHTML(url: URL) = {
    val metadata = new Metadata()
    val stream: InputStream = TikaInputStream.get(url, metadata)
    val loader = new Loader()
    new Tika().getParser.parse(stream, loader, metadata, new ParseContext)
    loader.value
  }

  // NOTE(review): thin convenience wrapper around boilerpipe; consider a
  // descriptive name -- kept as-is for source compatibility.
  def foo(url: URL)= {
    ArticleExtractor.INSTANCE.getText(url)
  }

  import org.xml.sax._
  import org.xml.sax.helpers.DefaultHandler
  import scala.xml._
  import scala.xml.factory.XMLLoader

  /** SAX handler that builds a scala.xml Elem from Tika's parse events. */
  class Loader extends DefaultHandler with XMLLoader[Elem] {
    val newAdapter = adapter

    // the root element of the parsed document
    def value = newAdapter.rootElem.asInstanceOf[Elem]

    override def characters( ch: Array[Char], start: Int, length: Int) {
      newAdapter.characters(ch, start, length)
    }

    override def endDocument() {
      newAdapter.endDocument()
      // the pdf parser sends two end documents...
      if(newAdapter.scopeStack.nonEmpty)
        newAdapter.scopeStack.pop()
    }

    override def endElement(uri: String, localName: String, qName: String) {
      newAdapter.endElement(uri, localName, qName)
    }

    override def processingInstruction(target: String, data: String) {
      newAdapter.processingInstruction(target, data)
    }

    override def startDocument() {
      newAdapter.scopeStack push TopScope
      newAdapter.startDocument()
    }

    override def startElement(uri: String, localName: String, qName: String, atts: Attributes) {
      newAdapter.startElement(uri, localName, qName, atts)
    }

    // treat ignorable whitespace as ordinary character data
    override def ignorableWhitespace(ch: Array[Char], start: Int, length: Int): Unit = {
      characters(ch, start, length)
    }
  }
}
| bitemyapp/epic | src/main/scala/epic/preprocess/TextExtractor.scala | Scala | apache-2.0 | 7,389 |
package io.bst.index
import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import com.sksamuel.elastic4s.ElasticClient
import com.sksamuel.elastic4s.ElasticDsl._
import io.bst.content.ContentFixture
import io.bst.ext.ElasticSearch
import io.bst.model.Protocol.IndexedContent.Operation
import io.bst.model.Protocol.{IndexedContent, IndexContent}
import io.bst.user.User
import java.util.UUID
import org.scalatest.{BeforeAndAfterAll, Matchers, FlatSpecLike}
import scala.concurrent.blocking
import scala.concurrent.duration._
/**
 * Integration spec for the Indexer actor: verifies that indexing a content
 * item against a local ElasticSearch client creates a new index entry, or
 * updates an existing one, and that the reported operation matches.
 */
class IndexerSpec extends TestKit(ActorSystem("IndexerSpec"))
  with ImplicitSender
  with FlatSpecLike
  with Matchers
  with BeforeAndAfterAll {

  /**
   * Loan fixture: runs `testCode` with a local ElasticClient and an
   * ElasticSearch wrapper for a random user, then deletes the per-test index
   * and shuts the client down, even if the test fails.
   */
  def withElasticSearch(testCode: (ElasticClient, ElasticSearch) => Any): Unit = {
    val client = ElasticClient.local
    val es = ElasticSearch(client, User(UUID.randomUUID(), "foobar", "[email protected]"))
    try {
      testCode(client, es)
    } finally {
      // Clean up the index created for this test before releasing the client.
      client.sync.execute {
        deleteIndex(es.uid)
      }
      blocking {
        client.shutdown
      }
    }
  }

  /**
   * Loan fixture providing a fresh anonymous ContentFixture.
   * Fix: removed a bare `try` with no catch/finally around the call — it was
   * a no-op construct.
   */
  def withContentFixture(testCode: ContentFixture => Any): Unit =
    testCode(new ContentFixture {})

  override def afterAll(): Unit = {
    system.shutdown()
    // Bound the wait so a wedged actor system cannot hang the test run.
    system.awaitTermination(10.seconds)
  }

  "An indexer" should "create an index entry for a new content item" in withElasticSearch {
    (client, es) => withContentFixture {
      (contentFixture) => {
        val indexer = system.actorOf(Indexer.props(es))
        indexer ! IndexContent(contentFixture.provider, contentFixture.content)

        val ic = expectMsgType[IndexedContent]
        ic.content shouldBe contentFixture.content
        ic.operation shouldBe Operation.Created

        val getResponse = client.sync.execute { get id ic.content.id from es.indexName fields "excerpt"}
        getResponse.isExists shouldBe true
        getResponse.getField("excerpt").getValue shouldBe contentFixture.content.excerpt
      }
    }
  }

  it should "update an index entry for an existing content item" in withElasticSearch {
    (client, es) => withContentFixture {
      (contentFixture) => {
        val indexer = system.actorOf(Indexer.props(es))
        indexer ! IndexContent(contentFixture.provider, contentFixture.content)
        expectMsgType[IndexedContent]

        // Re-index the same item with changed content: must be an update.
        val changedContent = contentFixture.content.copy(excerpt = "Changed Content")
        indexer ! IndexContent(contentFixture.provider, changedContent)

        val ic = expectMsgType[IndexedContent]
        ic.content shouldBe changedContent
        ic.operation shouldBe Operation.Updated

        val getResponse = client.sync.execute { get id ic.content.id from es.indexName fields "excerpt"}
        getResponse.isExists shouldBe true
        getResponse.getField("excerpt").getValue shouldBe "Changed Content"
      }
    }
  }
}
| bst-cave/core | src/test/scala/io/bst/index/IndexerSpec.scala | Scala | apache-2.0 | 2,941 |
package cbt
import java.time._
/**
* This represents a logger with namespaces that can be enabled or disabled as needed. The
* namespaces are defined using {{enabledLoggers}}. Possible values are defined in the subobject
* "names".
*
* We can replace this with something more sophisticated eventually.
*/
case class Logger(enabledLoggers: Set[String]) {

  // Convenience constructor: a comma-separated namespace list, or None for
  // no logging at all.
  def this(enabledLoggers: Option[String]) =
    this( enabledLoggers.fold(Set.empty[String])(_.split(",").toSet) )

  // Reference point for the elapsed-seconds prefix on every log line.
  val start = LocalTime.now()

  /** Writes `msg` to stderr, prefixed with elapsed seconds (right-aligned to 6 chars) and the namespace. */
  def log(name: String, msg: => String) = {
    val elapsed = (Duration.between(start, LocalTime.now()).toMillis.toDouble / 1000).toString
    System.err.println( f"[$elapsed%6s][$name] $msg" )
  }

  /** Renders a method invocation as `method( args )` for trace output. */
  def showInvocation(method: String, args: Any) = method ++ "( " ++ args.toString ++ " )"

  final def stage1(msg: => String) = logGuarded("stage1", msg)
  final def stage2(msg: => String) = logGuarded("stage2", msg)
  final def loop(msg: => String) = logGuarded("loop", msg)
  final def task(msg: => String) = logGuarded("task", msg)
  final def composition(msg: => String) = logGuarded("composition", msg)
  final def resolver(msg: => String) = logGuarded("resolver", msg)
  final def lib(msg: => String) = logGuarded("lib", msg)
  final def test(msg: => String) = logGuarded("test", msg)

  // A namespace is active when enabled explicitly or via the "all" wildcard.
  private def logGuarded(name: String, msg: => String) =
    if (enabledLoggers(name) || enabledLoggers("all")) log(name, msg)
}
| tobias-johansson/cbt | stage1/logger.scala | Scala | bsd-2-clause | 1,769 |
package coursier.bootstrap.launcher.credentials
import utest._
import scala.jdk.CollectionConverters._
import scala.compat.java8.OptionConverters._
import java.io.File
object FileCredentialsParseTests extends TestSuite {

  val tests = Tests {
    test {
      // Locate the test credentials file on the classpath; fail loudly when
      // the resource is missing so the error is not a confusing NPE later.
      val credFilePath = Option(getClass.getResource("/bootstrap-credentials.properties")) match {
        case Some(url) => new File(url.toURI).getAbsolutePath
        case None      => throw new Exception("bootstrap-credentials.properties resource not found")
      }

      assert(new File(credFilePath).exists())

      // Parse the file and order entries by username for a stable comparison.
      val parsed = new FileCredentials(credFilePath)
        .get()
        .asScala
        .sortBy(_.getUsernameOpt.asScala.getOrElse(""))

      val expected = Seq(
        new DirectCredentials("127.0.0.1", "secure", "sEcUrE", "secure realm")
          .withOptional(true)
          .withHttpsOnly(true),
        new DirectCredentials("127.0.0.1", "simple", "SiMpLe", "simple realm")
          .withOptional(true)
          .withHttpsOnly(false)
      )

      assert(parsed == expected)
    }
  }

}
| alexarchambault/coursier | modules/bootstrap-launcher/src/test/scala/coursier/bootstrap/launcher/credentials/FileCredentialsParseTests.scala | Scala | apache-2.0 | 1,102 |
/*
Copyright 2012 Georgia Tech Research Institute
Author: [email protected]
This file is part of org.gtri.util.iteratee library.
org.gtri.util.iteratee library is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
org.gtri.util.iteratee library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with org.gtri.util.iteratee library. If not, see <http://www.gnu.org/licenses/>.
*/
package org.gtri.util.iteratee.impl.test
import org.gtri.util.iteratee.api._
import org.gtri.util.iteratee.impl.iteratees._
/**
 * Test-fixture iteratee that sums the Ints it consumes and yields the running
 * total at end of input. When the accumulator equals exactly 11 before the
 * next item is consumed, it fails instead — presumably to let tests exercise
 * the failure path.
 */
class TestIntegerBuilder extends Iteratee[Int, Int] {

  // `acc` carries the running sum; the sentinel value 11 forces a Failure so
  // tests can observe the failure path.
  case class Cont(acc: Int) extends SingleItemCont[Int, Int] {
    def apply(item: Int) =
      if (acc != 11) Result(next = Cont(acc + item))
      else Failure()

    def endOfInput() = Success(acc)
  }

  def initialState = Cont(0)
}
| gtri-iead/org.gtri.util.iteratee | impl/src/main/scala/org/gtri/util/iteratee/impl/test/TestIntBuilder.scala | Scala | gpl-3.0 | 1,522 |