| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses 1 value | stringclasses 15 values | int64 5-1M |
package se.lu.nateko.cp.data.formats
import scala.util.matching.Regex
class ColumnsMeta(val columns: Seq[ColumnMeta]) {
val plainCols: Map[String, ValueFormat] = columns
.collect { case PlainColumn(format, title, _) => title -> format }
.toMap
private val regexCols = columns.collect { case rc: RegexColumn => rc }
val hasAnyRegexCols: Boolean = regexCols.nonEmpty
val hasOptionalColumns: Boolean = columns.exists(_.isOptional)
def matchColumn(colTitle: String): Option[ValueFormat] = plainCols.get(colTitle).orElse {
regexCols.find(_.matches(colTitle)).map(_.format)
}
def matchesColumn(colTitle: String): Boolean =
plainCols.contains(colTitle) || regexCols.exists(_.matches(colTitle))
def findMissingColumns(actualColumns: Seq[String]): Seq[ColumnMeta] =
columns.filter(c => !c.isOptional && !actualColumns.exists(c.matches))
}
sealed trait ColumnMeta {
def format: ValueFormat
def matches(colTitle: String): Boolean
def isOptional: Boolean
}
case class PlainColumn(format: ValueFormat, title: String, isOptional: Boolean) extends ColumnMeta {
def matches(colTitle: String): Boolean = title == colTitle
override def toString = title
}
case class RegexColumn(format: ValueFormat, regex: Regex, isOptional: Boolean) extends ColumnMeta {
def matches(colTitle: String): Boolean = regex.findFirstIn(colTitle).isDefined
override def toString = regex.toString
}
case class ColumnsMetaWithTsCol(colsMeta: ColumnsMeta, timeStampColumn: String)
| ICOS-Carbon-Portal/data | src/main/scala/se/lu/nateko/cp/data/formats/ColumnsMeta.scala | Scala | gpl-3.0 | 1,472 |
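A brief usage sketch for the ColumnsMeta API above. `FloatValue` stands in for one of the `ValueFormat` cases, which are defined elsewhere in this package, so treat that name as an assumption.

// Sketch only: FloatValue is assumed to be a ValueFormat case defined elsewhere in the package.
val columnsMeta = new ColumnsMeta(Seq(
  PlainColumn(FloatValue, "TIMESTAMP", isOptional = false),
  RegexColumn(FloatValue, "co2_.*".r, isOptional = true)
))

columnsMeta.matchesColumn("TIMESTAMP")          // true, exact title match
columnsMeta.matchColumn("co2_flag")             // Some(FloatValue), matched by the regex column
columnsMeta.findMissingColumns(Seq("co2_flag")) // Seq(TIMESTAMP): the required plain column is absent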
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.v2
import scala.collection.JavaConverters._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.encoders.{ExpressionEncoder, RowEncoder}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.LeafExecNode
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.types.StructType
case class DataSourceV2ScanExec(
fullOutput: Array[AttributeReference],
@transient reader: DataSourceV2Reader,
// TODO: these 3 parameters are only used to determine the equality of the scan node, however,
// the reader also has this information, and ideally we can just rely on the equality of the
// reader. The only concern is that the reader implementation is outside of Spark and we have no
// control over it.
readSchema: StructType,
@transient filters: ExpressionSet,
hashPartitionKeys: Seq[String]) extends LeafExecNode {
def output: Seq[Attribute] = readSchema.map(_.name).map { name =>
fullOutput.find(_.name == name).get
}
override def references: AttributeSet = AttributeSet.empty
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
override protected def doExecute(): RDD[InternalRow] = {
val readTasks: java.util.List[ReadTask[UnsafeRow]] = reader match {
case r: SupportsScanUnsafeRow => r.createUnsafeRowReadTasks()
case _ =>
reader.createReadTasks().asScala.map {
new RowToUnsafeRowReadTask(_, reader.readSchema()): ReadTask[UnsafeRow]
}.asJava
}
val inputRDD = new DataSourceRDD(sparkContext, readTasks)
.asInstanceOf[RDD[InternalRow]]
val numOutputRows = longMetric("numOutputRows")
inputRDD.map { r =>
numOutputRows += 1
r
}
}
}
class RowToUnsafeRowReadTask(rowReadTask: ReadTask[Row], schema: StructType)
extends ReadTask[UnsafeRow] {
override def preferredLocations: Array[String] = rowReadTask.preferredLocations
override def createReader: DataReader[UnsafeRow] = {
new RowToUnsafeDataReader(rowReadTask.createReader, RowEncoder.apply(schema))
}
}
class RowToUnsafeDataReader(rowReader: DataReader[Row], encoder: ExpressionEncoder[Row])
extends DataReader[UnsafeRow] {
override def next: Boolean = rowReader.next
override def get: UnsafeRow = encoder.toRow(rowReader.get).asInstanceOf[UnsafeRow]
override def close(): Unit = rowReader.close()
}
| minixalpha/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/DataSourceV2ScanExec.scala | Scala | apache-2.0 | 3,431 |
package com.seanshubin.builder.domain
import scala.concurrent.{ExecutionContext, Future}
class ExecutionContextFutureRunner(notifyOfException: Throwable => Unit)
(implicit executionContext: ExecutionContext) extends FutureRunner {
override def runInFuture[T](block: => T): Future[T] = {
Future {
try {
block
} catch {
case ex: Throwable =>
notifyOfException(ex)
throw ex
}
}
}
}
| SeanShubin/builder | domain/src/main/scala/com/seanshubin/builder/domain/ExecutionContextFutureRunner.scala | Scala | unlicense | 481 |
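A short usage sketch for the runner above. The `FutureRunner` trait is not shown in this file, so its assumed shape is noted in a comment; the console-printing notifier is purely illustrative.

import scala.concurrent.ExecutionContext.Implicits.global

// Assumed shape of the trait implemented above (defined elsewhere in the project):
// trait FutureRunner { def runInFuture[T](block: => T): Future[T] }

val runner = new ExecutionContextFutureRunner(ex => Console.err.println(s"background failure: ${ex.getMessage}"))

runner.runInFuture(2 + 2)              // completes with 4
runner.runInFuture(sys.error("boom"))  // the notifier fires, and the returned Future fails with the same exception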
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon) and Zenexity
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactivemongo.utils
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.implicitConversions
object `package` {
/** Concatenates two arrays - fast way. */
def concat[T](a1: Array[T], a2: Array[T])(implicit m: Manifest[T]): Array[T] = {
var i, j = 0
val result = new Array[T](a1.length + a2.length)
while (i < a1.length) {
result(i) = a1(i)
i = i + 1
}
while (j < a2.length) {
result(i + j) = a2(j)
j = j + 1
}
result
}
/** Makes an option of the value matching the condition. */
def option[T](cond: => Boolean, value: => T): Option[T] = (if (cond) Some(value) else None)
}
case class LazyLogger(logger: org.apache.logging.log4j.Logger) {
def trace(s: => String) { if (logger.isTraceEnabled) logger.trace(s) }
def trace(s: => String, e: => Throwable) { if (logger.isTraceEnabled) logger.trace(s, e) }
def debug(s: => String) { if (logger.isDebugEnabled) logger.debug(s) }
def debug(s: => String, e: => Throwable) { if (logger.isDebugEnabled) logger.debug(s, e) }
def info(s: => String) { if (logger.isInfoEnabled) logger.info(s) }
def info(s: => String, e: => Throwable) { if (logger.isInfoEnabled) logger.info(s, e) }
def warn(s: => String) { if (logger.isWarnEnabled) logger.warn(s) }
def warn(s: => String, e: => Throwable) { if (logger.isWarnEnabled) logger.warn(s, e) }
def error(s: => String) { if (logger.isErrorEnabled) logger.error(s) }
def error(s: => String, e: => Throwable) { if (logger.isErrorEnabled) logger.error(s, e) }
}
object LazyLogger {
def apply(logger: String): LazyLogger = LazyLogger(org.apache.logging.log4j.LogManager.getLogger(logger))
}
case class EitherMappableFuture[A](future: Future[A]) {
def mapEither[E <: Throwable, B](f: A => Either[E, B])(implicit ec: ExecutionContext) = {
future.flatMap(
f(_) match {
case Left(e) => Future.failed(e)
case Right(b) => Future.successful(b)
})
}
}
object EitherMappableFuture {
implicit def futureToEitherMappable[A](future: Future[A]): EitherMappableFuture[A] = EitherMappableFuture(future)
}
object ExtendedFutures {
import akka.actor.{ ActorSystem, Scheduler }
// better way to do this?
def DelayedFuture(millis: Long, system: ActorSystem): Future[Unit] = {
implicit val ec = system.dispatcher
val promise = Promise[Unit]()
system.scheduler.scheduleOnce(Duration.apply(millis, "millis"))(promise.success(()))
promise.future
}
}
| reactific/ReactiveMongo | driver/src/main/scala/utils.scala | Scala | apache-2.0 | 3,116 |
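The package object and LazyLogger above can be exercised as in the sketch below; the logger name is illustrative, and log4j is assumed to be on the classpath, as the imports already suggest.

import reactivemongo.utils._

val logger = LazyLogger("reactivemongo.example")
logger.debug("only built if debug is enabled: " + (1 to 3).mkString(","))  // the message argument is by-name

val merged = concat(Array(1, 2), Array(3, 4))   // Array(1, 2, 3, 4)
option(merged.length > 3, merged.last)          // Some(4)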
/*
Copyright 2015 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.serialization
import org.scalacheck.Arbitrary
import org.scalacheck.Properties
import org.scalacheck.Prop
import org.scalacheck.Prop.forAll
import org.scalacheck.Gen
import org.scalacheck.Prop._
import JavaStreamEnrichments._
import java.io._
import scala.util.{ Try, Success }
object LawTester {
def apply[T: Arbitrary](base: String, laws: Iterable[Law[T]]): Properties =
new LawTester(implicitly[Arbitrary[T]].arbitrary, base, laws) {}
}
abstract class LawTester[T](g: Gen[T], base: String, laws: Iterable[Law[T]]) extends Properties(base) {
laws.foreach {
case Law1(name, fn) => property(name) = forAll(g)(fn)
case Law2(name, fn) => property(name) = forAll(g, g)(fn)
case Law3(name, fn) => property(name) = forAll(g, g, g)(fn)
}
}
object SerializationProperties extends Properties("SerializationProperties") {
import OrderedSerialization.{ resultFrom, CompareFailure, readThenCompare }
implicit val intOrderedSerialization: OrderedSerialization[Int] = new OrderedSerialization[Int] {
def read(in: InputStream) = Try(Reader.read[Int](in))
def write(o: OutputStream, t: Int) = Try(Writer.write[Int](o, t))
def hash(t: Int) = t.hashCode
def compare(a: Int, b: Int) = java.lang.Integer.compare(a, b)
def compareBinary(a: InputStream, b: InputStream) =
readThenCompare(a, b)(this)
val staticSize = Some(4)
def dynamicSize(i: Int) = staticSize
}
implicit val stringOrdSer: OrderedSerialization[String] = new StringOrderedSerialization
class IntWrapperClass(val x: Int)
implicit val myIntWrapperOrdSer: OrderedSerialization[IntWrapperClass] =
OrderedSerialization.viaTransform[IntWrapperClass, Int](_.x, new IntWrapperClass(_))
class IntTryWrapperClass(val x: Int)
implicit val myTryIntWrapperOrdSer: OrderedSerialization[IntTryWrapperClass] =
OrderedSerialization.viaTryTransform[IntTryWrapperClass, Int](_.x, { x: Int => Success(new IntTryWrapperClass(x)) })
implicit val arbIntWrapperClass: Arbitrary[IntWrapperClass] =
Arbitrary(implicitly[Arbitrary[Int]].arbitrary.map(new IntWrapperClass(_)))
implicit val arbIntTryWrapperClass: Arbitrary[IntTryWrapperClass] =
Arbitrary(implicitly[Arbitrary[Int]].arbitrary.map(new IntTryWrapperClass(_)))
implicit def tuple[A: OrderedSerialization, B: OrderedSerialization]: OrderedSerialization[(A, B)] =
new OrderedSerialization2[A, B](implicitly, implicitly)
def serializeSequenceCompare[T: OrderedSerialization](g: Gen[T]): Prop = forAll(Gen.listOf(g)) { list =>
// make sure the list is even in size:
val pairList = (if (list.size % 2 == 1) list.tail else list).grouped(2)
val baos1 = new ByteArrayOutputStream
val baos2 = new ByteArrayOutputStream
pairList.foreach {
case Seq(a, b) =>
Serialization.write(baos1, a)
Serialization.write(baos2, b)
case _ => sys.error("unreachable")
}
// now the compares must match:
val in1 = baos1.toInputStream
val in2 = baos2.toInputStream
pairList.forall {
case Seq(a, b) =>
OrderedSerialization.compareBinary[T](in1, in2) ==
OrderedSerialization.resultFrom(OrderedSerialization.compare(a, b))
case _ => sys.error("unreachable")
}
}
def serializeSequenceCompare[T: OrderedSerialization: Arbitrary]: Prop =
serializeSequenceCompare[T](implicitly[Arbitrary[T]].arbitrary)
def serializeSequenceEquiv[T: Serialization](g: Gen[T]): Prop = forAll(Gen.listOf(g)) { list =>
// make sure the list is even in size:
val pairList = (if (list.size % 2 == 1) list.tail else list).grouped(2)
val baos1 = new ByteArrayOutputStream
val baos2 = new ByteArrayOutputStream
pairList.foreach {
case Seq(a, b) =>
Serialization.write(baos1, a)
Serialization.write(baos2, b)
case _ => sys.error("unreachable")
}
// now the compares must match:
val in1 = baos1.toInputStream
val in2 = baos2.toInputStream
pairList.forall {
case Seq(a, b) =>
val rta = Serialization.read[T](in1).get
val rtb = Serialization.read[T](in2).get
Serialization.equiv(a, rta) && Serialization.equiv(b, rtb)
case _ => sys.error("unreachable")
}
}
def serializeSequenceEquiv[T: Serialization: Arbitrary]: Prop =
serializeSequenceEquiv[T](implicitly[Arbitrary[T]].arbitrary)
property("sequences compare well [Int]") = serializeSequenceCompare[Int]
property("sequences equiv well [Int]") = serializeSequenceEquiv[Int]
property("sequences compare well [(Int, Int)]") = serializeSequenceCompare[(Int, Int)]
property("sequences equiv well [(Int, Int)]") = serializeSequenceEquiv[(Int, Int)]
property("sequences compare well [String]") = serializeSequenceCompare[String]
property("sequences equiv well [String]") = serializeSequenceEquiv[String]
property("sequences compare well [(String, String)]") = serializeSequenceCompare[(String, String)]
property("sequences equiv well [(String, String)]") = serializeSequenceEquiv[(String, String)]
property("sequences compare well [IntWrapperClass]") = serializeSequenceCompare[IntWrapperClass]
property("sequences compare well [IntTryWrapperClass]") = serializeSequenceCompare[IntTryWrapperClass]
property("sequences equiv well [IntWrapperClass]") = serializeSequenceEquiv[IntWrapperClass]
property("sequences equiv well [IntTryWrapperClass]") = serializeSequenceEquiv[IntTryWrapperClass]
// Test the independent, non-sequenced, laws as well
include(LawTester("Int Ordered", OrderedSerialization.allLaws[Int]))
include(LawTester("(Int, Int) Ordered", OrderedSerialization.allLaws[(Int, Int)]))
include(LawTester("String Ordered", OrderedSerialization.allLaws[String]))
include(LawTester("(String, Int) Ordered", OrderedSerialization.allLaws[(String, Int)]))
include(LawTester("(Int, String) Ordered", OrderedSerialization.allLaws[(Int, String)]))
include(LawTester("(String, String) Ordered", OrderedSerialization.allLaws[(String, String)]))
include(LawTester("IntWrapperClass Ordered", OrderedSerialization.allLaws[IntWrapperClass]))
include(LawTester("IntTryWrapperClass Ordered", OrderedSerialization.allLaws[IntTryWrapperClass]))
}
| tglstory/scalding | scalding-serialization/src/test/scala/com/twitter/scalding/serialization/SerializationProperties.scala | Scala | apache-2.0 | 6,775 |
/*
* This file is part of the Linux Variability Modeling Tools (LVAT).
*
* Copyright (C) 2010 Steven She <[email protected]>
*
* LVAT is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* LVAT is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with LVAT. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
package gsd.linux
/**
* Methods for analyzing the hierarchy of a concrete Kconfig model.
*
* @author Steven She ([email protected])
*/
object Hierarchy {
type CParentMap = Map[CConfig, CConfig]
type AParentMap = Map[CSymbol, CConfig]
type HierarchyMap = Map[CSymbol, CSymbol]
/**
* Creates a map containing only concrete features: Configs and not Menus
* or Choices.
*/
def mkConfigMap(k: ConcreteKConfig): CParentMap =
(mkParentMap(k) collect {
case x@(_:CConfig,_) => x
}).asInstanceOf[Map[CConfig, CConfig]]
/**
* A map from any feature (config, menu and choices) to its closest Config.
* Features that have no config in its ancestors are not present in the
* returned map.
*
* This map contains all features - configs, menus and choices.
*
* FIXME If nodes.
*/
def mkParentMap(k: ConcreteKConfig): AParentMap = {
def _mkTuples(par: Option[CConfig])(curr: CSymbol): List[(CSymbol, CConfig)] =
par match {
case None => curr match {
case c:CConfig => curr.children.flatMap(_mkTuples(Some(c)))
case _ => curr.children.flatMap(_mkTuples(None))
}
//A parent exists - curr's ancestors contain a config.
case Some(p) => curr match {
case c: CConfig =>
(c, p) :: c.children.flatMap(_mkTuples(Some(c)))
case _ =>
(curr, p) :: curr.children.flatMap(_mkTuples(par))
}
}
Map() ++ _mkTuples(None)(k.root)
}
/**
* A map from a feature to its immediate parent.
*/
def mkHierarchyMap(k: ConcreteKConfig): HierarchyMap = {
def _mkTuples(p: CSymbol)(c: CSymbol): List[(CSymbol, CSymbol)] =
(c, p) :: c.children.flatMap(_mkTuples(c))
Map() ++ k.root.children.flatMap(_mkTuples(k.root))
}
def toStringMap(in: Map[CConfig, CConfig], features: Iterable[CConfig], root: String) =
features map
{ f => f -> in.get(f) } map
{
case (x, Some(y)) => x.name -> y.name
case (x, None) => x.name -> root
} toMap
} | leutheus/linux-variability-analysis-tools | src/main/scala/gsd/linux/Hierarchy.scala | Scala | lgpl-3.0 | 2,890 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qsu
import slamdata.Predef._
import quasar.api.resource.ResourcePath
import quasar.common.effect.NameGenerator
import quasar.common.JoinType
import quasar.contrib.scalaz.MonadReader_
import quasar.ejson.EJson
import quasar.fp._
import quasar.contrib.iota._
import quasar.fp.ski.κ
import quasar.qscript.{
construction,
educatedToTotal,
Filter,
Hole,
HoleF,
LeftSide,
RightSide,
LeftShift,
JoinSide,
Map,
MonadPlannerErr,
QCE,
QScriptEducated,
Read,
Reduce,
ReduceFuncs,
ReduceIndexF,
ShiftType,
Sort,
SrcHole,
Subset,
ThetaJoin,
Union,
Unreferenced
}
import quasar.qscript.PlannerError.InternalError
import quasar.qscript.provenance.JoinKey
import quasar.qsu.{QScriptUniform => QSU}
import quasar.qsu.QSUGraph.QSUPattern
import quasar.qsu.ReifyIdentities.ResearchedQSU
import matryoshka.{Corecursive, BirecursiveT, CoalgebraM, Recursive, ShowT}
import matryoshka.data._
import matryoshka.patterns.CoEnv
import scalaz.{~>, -\/, \/-, \/, Const, Monad, NonEmptyList, NaturalTransformation, ReaderT}
import scalaz.Scalaz._
import iotaz.CopK
final class Graduate[T[_[_]]: BirecursiveT: ShowT] private () extends QSUTTypes[T] {
type QSE[A] = QScriptEducated[A]
def apply[F[_]: Monad: MonadPlannerErr: NameGenerator](rqsu: ResearchedQSU[T]): F[T[QSE]] = {
type G[A] = ReaderT[F, References, A]
val grad = graduateƒ[G, QSE](None)(NaturalTransformation.refl[QSE])
Corecursive[T[QSE], QSE]
.anaM[G, QSUGraph](rqsu.graph)(grad)
.run(rqsu.refs)
}
////
private type QSU[A] = QScriptUniform[A]
private type RefsR[F[_]] = MonadReader_[F, References]
// We can't use final here due to SI-4440 - it results in a warning
private case class SrcMerge[A, B](src: A, lval: B, rval: B)
private val func = construction.Func[T]
private def mergeSources[F[_]: Monad: MonadPlannerErr: NameGenerator: RefsR](
left: QSUGraph,
right: QSUGraph): F[SrcMerge[QSUGraph, FreeQS]] = {
val lvert = left.vertices
val rvert = right.vertices
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def lub(
lefts: Set[Symbol],
rights: Set[Symbol],
visited: Set[Symbol]): Set[Symbol] = {
if (lefts.isEmpty || rights.isEmpty) {
Set()
} else {
val lnodes = lefts.map(lvert)
val rnodes = rights.map(rvert)
val lexp = lnodes.flatMap(_.foldLeft(Set[Symbol]())(_ + _))
val rexp = rnodes.flatMap(_.foldLeft(Set[Symbol]())(_ + _))
val check: Set[Symbol] =
(lexp intersect visited) union
(rexp intersect visited) union
(lexp intersect rexp)
if (!check.isEmpty)
check
else
lub(lexp, rexp, visited.union(lexp).union(rexp))
}
}
val source: Set[Symbol] = if (left.root === right.root)
Set(left.root)
else
lub(Set(left.root), Set(right.root), Set(left.root, right.root))
// we merge the vertices in the result, just in case the graphs are
// additively applied to a common root
val mergedVertices: QSUVerts[T] = left.vertices ++ right.vertices
source.headOption match {
case hole @ Some(root) =>
for {
lGrad <- graduateCoEnv[F](hole, left)
rGrad <- graduateCoEnv[F](hole, right)
} yield SrcMerge(QSUGraph[T](root, mergedVertices), lGrad, rGrad)
case None =>
for {
lGrad <- graduateCoEnv[F](None, left)
rGrad <- graduateCoEnv[F](None, right)
name <- NameGenerator[F].prefixedName("merged")
} yield {
val root: Symbol = Symbol(name)
val newVertices: QSUVerts[T] =
mergedVertices + (root -> QSU.Unreferenced[T, Symbol]())
SrcMerge(QSUGraph[T](root, newVertices), lGrad, rGrad)
}
}
}
private def educate[F[_]: Monad: MonadPlannerErr: NameGenerator: RefsR]
(pattern: QSUPattern[T, QSUGraph])
: F[QSE[QSUGraph]] = pattern match {
case QSUPattern(name, qsu) =>
val MR = MonadReader_[F, References]
def holeAs(sym: Symbol): Hole => Symbol =
κ(sym)
def resolveAccess[A, B](fa: FreeMapA[A])(ex: A => Access[B] \/ B)(f: B => Symbol)
: F[FreeMapA[B]] =
MR.asks(_.resolveAccess[A, B](name, fa)(f)(ex))
def eqCond(lroot: Symbol, rroot: Symbol): JoinKey[IdAccess] => F[JoinFunc] = {
case JoinKey(l, r) =>
for {
lside <- resolveAccess(func.Hole as Access.id(l, lroot))(_.left)(κ(lroot))
rside <- resolveAccess(func.Hole as Access.id(r, rroot))(_.left)(κ(rroot))
} yield func.Eq(lside >> func.LeftSide, rside >> func.RightSide)
}
qsu match {
case QSU.Read(path, idStatus) =>
CopK.Inject[Const[Read[ResourcePath], ?], QSE].inj(
Const(Read(ResourcePath.leaf(path), idStatus))).point[F]
case QSU.Map(source, fm) =>
QCE(Map[T, QSUGraph](source, fm)).point[F]
case QSU.QSFilter(source, fm) =>
QCE(Filter[T, QSUGraph](source, fm)).point[F]
case QSU.QSReduce(source, buckets, reducers, repair) =>
buckets traverse (resolveAccess(_)(_.left)(holeAs(source.root))) map { bs =>
QCE(Reduce[T, QSUGraph](source, bs, reducers, repair))
}
case QSU.LeftShift(source, struct, idStatus, onUndefined, repair, rot) =>
for {
// Access.value is already resolved, from ReifyIdentities.
// this would be nicer with a tri-state Access type.
resolvedRepair <-
resolveAccess(repair) {
case QSU.ShiftTarget.AccessLeftTarget(access) => access.map[JoinSide](_ => LeftSide).left
case QSU.ShiftTarget.LeftTarget => (LeftSide: JoinSide).right
case QSU.ShiftTarget.RightTarget => (RightSide: JoinSide).right
}(κ(source.root))
shiftType = rot match {
case QSU.Rotation.FlattenArray | QSU.Rotation.ShiftArray =>
ShiftType.Array
case QSU.Rotation.FlattenMap | QSU.Rotation.ShiftMap =>
ShiftType.Map
}
} yield QCE(LeftShift[T, QSUGraph](source, struct, idStatus, shiftType, onUndefined, resolvedRepair))
case QSU.QSSort(source, buckets, order) =>
buckets traverse (resolveAccess(_)(_.left)(holeAs(source.root))) map { bs =>
QCE(Sort[T, QSUGraph](source, bs, order))
}
case QSU.Union(left, right) =>
mergeSources[F](left, right) map {
case SrcMerge(source, lBranch, rBranch) =>
QCE(Union[T, QSUGraph](source, lBranch, rBranch))
}
case QSU.Subset(from, op, count) =>
mergeSources[F](from, count) map {
case SrcMerge(source, fromBranch, countBranch) =>
QCE(Subset[T, QSUGraph](source, fromBranch, op, countBranch))
}
// TODO distinct should be its own node in qscript proper
case QSU.Distinct(source) =>
resolveAccess(HoleF map (Access.value(_)))(_.left)(holeAs(source.root)) map { fm =>
QCE(Reduce[T, QSUGraph](
source,
// Bucket by the value
List(fm),
// Emit the input verbatim as it may include identities.
List(ReduceFuncs.Arbitrary(HoleF)),
ReduceIndexF(\/-(0))))
}
case QSU.Unreferenced() =>
QCE(Unreferenced[T, QSUGraph]()).point[F]
case QSU.QSAutoJoin(left, right, joinKeys, combiner) =>
val condition = joinKeys.keys.toNel.fold(func.Constant[JoinSide](EJson.bool(true)).point[F]) { jks =>
val mkEq = eqCond(left.root, right.root)
val mkIsect =
(_: NonEmptyList[JoinKey[IdAccess]])
.foldMapRight1(mkEq)((l, r) => (mkEq(l) |@| r)(func.Or(_, _)))
val mkConj =
(_: NonEmptyList[NonEmptyList[JoinKey[IdAccess]]])
.foldMapRight1(mkIsect)((l, r) => (mkIsect(l) |@| r)(func.And(_, _)))
jks.foldMapRight1(mkConj)((l, r) => (mkConj(l) |@| r)(func.Or(_, _)))
}
(mergeSources[F](left, right) |@| condition) {
case (SrcMerge(source, lBranch, rBranch), cond) =>
CopK.Inject[ThetaJoin, QSE].inj(
ThetaJoin[T, QSUGraph](source, lBranch, rBranch, cond, JoinType.Inner, combiner))
}
case QSU.ThetaJoin(left, right, condition, joinType, combiner) =>
mergeSources[F](left, right) map {
case SrcMerge(source, lBranch, rBranch) =>
CopK.Inject[ThetaJoin, QSE].inj(
ThetaJoin[T, QSUGraph](source, lBranch, rBranch, condition, joinType, combiner))
}
case qsu =>
MonadPlannerErr[F].raiseError(
InternalError(s"Found an unexpected LP-ish $qsu.", None)) // TODO use Show to print
}
}
private def graduateƒ[F[_]: Monad: MonadPlannerErr: NameGenerator: RefsR, G[_]](
halt: Option[(Symbol, F[G[QSUGraph]])])(
lift: QSE ~> G)
: CoalgebraM[F, G, QSUGraph] = graph => {
val pattern: QSUPattern[T, QSUGraph] =
Recursive[QSUGraph, QSUPattern[T, ?]].project(graph)
def default: F[G[QSUGraph]] = educate[F](pattern).map(lift)
halt match {
case Some((name, output)) =>
pattern match {
case QSUPattern(`name`, _) => output
case _ => default
}
case None => default
}
}
private def graduateCoEnv[F[_]: Monad: MonadPlannerErr: NameGenerator: RefsR]
(hole: Option[Symbol], graph: QSUGraph)
: F[FreeQS] = {
type CoEnvTotal[A] = CoEnv[Hole, QScriptTotal, A]
val halt: Option[(Symbol, F[CoEnvTotal[QSUGraph]])] =
hole.map((_, CoEnv.coEnv[Hole, QScriptTotal, QSUGraph](-\/(SrcHole)).point[F]))
val lift: QSE ~> CoEnvTotal =
educatedToTotal[T].inject andThen PrismNT.coEnv[QScriptTotal, Hole].reverseGet
Corecursive[FreeQS, CoEnvTotal].anaM[F, QSUGraph](graph)(
graduateƒ[F, CoEnvTotal](halt)(lift))
}
}
object Graduate {
def apply[
T[_[_]]: BirecursiveT: ShowT,
F[_]: Monad: MonadPlannerErr: NameGenerator]
(rqsu: ResearchedQSU[T])
: F[T[QScriptEducated[T, ?]]] =
taggedInternalError("Graduate", new Graduate[T].apply[F](rqsu))
}
| slamdata/slamengine | qsu/src/main/scala/quasar/qsu/Graduate.scala | Scala | apache-2.0 | 10,961 |
package sjs.react.bootstrap
import japgolly.scalajs.react._
/**
* Created by alberto on 18/02/15.
*/
trait BoostrapMixinProps {
def bsClass: String
def bsStyle: String
def bsSize: String
def getBsClassSet: Map[String, Boolean] ={
var classes= Map.empty[String, Boolean]
if(bsClass.nonEmpty ){
classes += (bsClass -> true)
val prefix= bsClass+"-"
if(bsSize.nonEmpty){
classes += (prefix+bsSize -> true)
}
if(bsStyle.nonEmpty){
classes += (prefix+bsStyle -> true)
}
}
classes
}
}
object BootStrapFunctionUtils {
def createChainedFunction(one: (ReactEvent) => Unit, two: (ReactEvent) => Unit): (ReactEvent) => Unit = {
  // Return the single non-null function, or a function that invokes both in order.
  if (one == null && two == null) return null
  if (two == null) return one
  if (one == null) return two
  val f: (ReactEvent) => Unit = {
    (event) => {
      one(event)
      two(event)
    }
  }
  f
}
def createChainedFunction(one: (Any, String, String) => Unit, two: (Any, String, String) => Unit): (Any, String, String) => Unit = {
  // Return the single non-null function, or a function that invokes both in order.
  if (one == null && two == null) return null
  if (two == null) return one
  if (one == null) return two
  val f: (Any, String, String) => Unit = {
    (key, k1, k2) => {
      one(key, k1, k2)
      two(key, k1, k2)
    }
  }
  f
}
def joinClasses(className: String, classes: Map[String, Boolean]): String = {
  // Keep only the enabled class names and join them with spaces.
  (className + " " + classes.filter(_._2).keys.mkString(" ")).trim
}
def joinClasses(className:String, className2:String):String={
s"$className $className2".trim
}
}
| aparo/scalajs-react-extra | react-bootstrap/src/main/scala/sjs/react/bootstrap/BoostrapMixin.scala | Scala | apache-2.0 | 1,540 |
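A sketch of the mixin in use. ButtonProps is a hypothetical props case class, and the joinClasses call assumes the corrected version above that joins only the enabled class names.

// Hypothetical props type, only here to exercise the mixin.
case class ButtonProps(bsClass: String = "btn",
                       bsStyle: String = "primary",
                       bsSize: String = "lg") extends BoostrapMixinProps

val props = ButtonProps()
props.getBsClassSet
// Map("btn" -> true, "btn-lg" -> true, "btn-primary" -> true)

BootStrapFunctionUtils.joinClasses("extra", props.getBsClassSet)
// "extra btn btn-lg btn-primary"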
// Lastly, Scala provides a convenience for applying a Function
trait Function1[T, R] {
def apply(x: T): R
}
val f: Int => Boolean = _ > 2
f.apply(3) // true
f(3) // true
| agconti/scala-school | 04-functions-as-values/slides/slide064.scala | Scala | mit | 180 |
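The same apply convenience works for any user-defined object with an apply method; Doubler below is a made-up example.

object Doubler {
  def apply(x: Int): Int = x * 2
}

Doubler(21)       // 42, sugar for Doubler.apply(21)
Doubler.apply(21) // 42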
package com.twitter.finagle.http.codec
import com.twitter.finagle.Service
import com.twitter.finagle.http._
import com.twitter.finagle.http.exp.{GenSerialServerDispatcher, StreamTransport}
import com.twitter.finagle.stats.{StatsReceiver, RollupStatsReceiver}
import com.twitter.logging.Logger
import com.twitter.util.{Future, Promise, Throwables}
private[http] object HttpServerDispatcher {
val handleHttp10: PartialFunction[Throwable, Response] = {
case _ => Response(Version.Http10, Status.InternalServerError)
}
val handleHttp11: PartialFunction[Throwable, Response] = {
case _ => Response(Version.Http11, Status.InternalServerError)
}
}
private[finagle] class HttpServerDispatcher(
trans: StreamTransport[Response, Request],
underlying: Service[Request, Response],
stats: StatsReceiver)
extends GenSerialServerDispatcher[Request, Response, Response, Request](trans) {
import HttpServerDispatcher._
private[this] val failureReceiver =
new RollupStatsReceiver(stats.scope("stream")).scope("failures")
// Response conformance (length headers, etc) is performed by the `ResponseConformanceFilter`
private[this] val service = ResponseConformanceFilter.andThen(underlying)
trans.onClose.ensure {
service.close()
}
protected def dispatch(req: Request): Future[Response] = {
val handleFn = req.version match {
case Version.Http10 => handleHttp10
case _ => handleHttp11
}
service(req).handle(handleFn)
}
protected def handle(rep: Response): Future[Unit] = {
setKeepAlive(rep, !isClosing)
if (rep.isChunked) {
val p = new Promise[Unit]
val f = trans.write(rep)
f.proxyTo(p)
// This awkwardness is unfortunate but necessary for now as you may be
// interrupted in the middle of a write, or when there otherwise isn’t
// an outstanding read (e.g. read-write race).
f.onFailure { t =>
Logger.get(this.getClass.getName).debug(t, "Failed mid-stream. Terminating stream, closing connection")
failureReceiver.counter(Throwables.mkString(t): _*).incr()
rep.reader.discard()
}
p.setInterruptHandler { case intr =>
rep.reader.discard()
f.raise(intr)
}
p
} else {
trans.write(rep)
}
}
/**
* Set the Connection header as appropriate. This will NOT clobber a 'Connection: close' header,
* allowing services to gracefully close the connection through the Connection header mechanism.
*/
private def setKeepAlive(rep: Response, keepAlive: Boolean): Unit = {
val connectionHeaders = rep.headerMap.getAll(Fields.Connection)
if (connectionHeaders.isEmpty || !connectionHeaders.exists("close".equalsIgnoreCase(_))) {
rep.version match {
case Version.Http10 if keepAlive =>
rep.headerMap.set(Fields.Connection, "keep-alive")
case Version.Http11 if (!keepAlive) =>
// The connection header may contain additional information, so add
// rather than set.
rep.headerMap.add(Fields.Connection, "close")
case _ =>
}
}
}
}
| koshelev/finagle | finagle-http/src/main/scala/com/twitter/finagle/http/codec/HttpServerDispatcher.scala | Scala | apache-2.0 | 3,137 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sumologic.sumobot.plugins
import akka.actor.{ActorSystem, Props}
import akka.event.Logging
import com.typesafe.config.ConfigException
import scala.util.{Failure, Success, Try}
trait PluginCollection {
protected def addPlugin(name: String, props: Props)(implicit system: ActorSystem): Unit = {
lazy val log = Logging.getLogger(system, this)
val property = s"plugins.$name.enabled"
Try(system.settings.config.getBoolean(property)) match {
case Success(true) =>
system.actorOf(props, name)
case Success(false) =>
log.debug(s"Plugin $name is disabled.")
case Failure(_: ConfigException.Missing) =>
log.debug(s"Could not find $property. Enabling plugin by default.")
system.actorOf(props, name)
case Failure(other) =>
throw other
}
}
def setup(implicit system: ActorSystem): Unit
}
| SumoLogic/sumobot | src/main/scala/com/sumologic/sumobot/plugins/PluginCollection.scala | Scala | apache-2.0 | 1,687 |
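One way a concrete collection might plug into the trait above; EchoPlugin is a hypothetical actor used only to give addPlugin something to instantiate.

import akka.actor.{Actor, ActorSystem, Props}

// Hypothetical plugin actor.
class EchoPlugin extends Actor {
  def receive = { case msg => sender() ! msg }
}

object MyPlugins extends PluginCollection {
  def setup(implicit system: ActorSystem): Unit = {
    // Instantiated unless plugins.echo.enabled is set to false in the config.
    addPlugin("echo", Props(classOf[EchoPlugin]))
  }
}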
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.sources.tsextractors
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.expressions.{Expression, PlannerExpression, ResolvedFieldReference}
/**
* Extracts the timestamp of a StreamRecord into a rowtime attribute.
*
* Note: This extractor only works for StreamTableSources.
*/
final class StreamRecordTimestamp extends TimestampExtractor {
/** No argument fields required. */
override def getArgumentFields: Array[String] = Array()
/** No validation required. */
@throws[ValidationException]
override def validateArgumentFields(physicalFieldTypes: Array[TypeInformation[_]]): Unit = { }
/**
* Returns an [[Expression]] that extracts the timestamp of a StreamRecord.
*/
override def getExpression(fieldAccesses: Array[ResolvedFieldReference]): PlannerExpression = {
org.apache.flink.table.expressions.StreamRecordTimestamp()
}
override def equals(obj: Any): Boolean = obj match {
case _: StreamRecordTimestamp => true
case _ => false
}
override def hashCode(): Int = {
classOf[StreamRecordTimestamp].hashCode()
}
}
object StreamRecordTimestamp {
val INSTANCE: StreamRecordTimestamp = new StreamRecordTimestamp
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/sources/tsextractors/StreamRecordTimestamp.scala | Scala | apache-2.0 | 2,107 |
package org.jetbrains.plugins.scala
package lang
package parameterInfo
package functionParameterInfo
import _root_.scala.collection.mutable.ArrayBuffer
import _root_.scala.util.Sorting
import com.intellij.codeInsight.hint.{HintUtil, ShowParameterInfoContext}
import com.intellij.lang.parameterInfo.ParameterInfoUIContext
import com.intellij.openapi.fileEditor.{OpenFileDescriptor, FileEditorManager}
import com.intellij.openapi.vfs.{CharsetToolkit, LocalFileSystem}
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.PsiElement
import java.awt.Color
import java.io.File
import java.lang.String
import lexer.ScalaTokenTypes
import psi.api.ScalaFile
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.util.io.FileUtil
import base.ScalaLightPlatformCodeInsightTestCaseAdapter
/**
* User: Alexander Podkhalyuzin
* Date: 02.03.2009
*/
abstract class FunctionParameterInfoTestBase extends ScalaLightPlatformCodeInsightTestCaseAdapter {
val caretMarker = "/*caret*/"
protected def folderPath = baseRootPath() + "parameterInfo/functionParameterInfo/"
protected def doTest() {
import _root_.junit.framework.Assert._
val filePath = folderPath + getTestName(false) + ".scala"
val file = LocalFileSystem.getInstance.findFileByPath(filePath.replace(File.separatorChar, '/'))
assert(file != null, "file " + filePath + " not found")
val fileText = StringUtil.convertLineSeparators(FileUtil.loadFile(new File(file.getCanonicalPath), CharsetToolkit.UTF8))
configureFromFileTextAdapter(getTestName(false) + ".scala", fileText)
val scalaFile = getFileAdapter.asInstanceOf[ScalaFile]
val offset = fileText.indexOf(caretMarker)
assert(offset != -1, "Not specified caret marker in test case. Use /*caret*/ in scala file for this.")
val fileEditorManager = FileEditorManager.getInstance(getProjectAdapter)
val editor = fileEditorManager.openTextEditor(new OpenFileDescriptor(getProjectAdapter, file, offset), false)
val context = new ShowParameterInfoContext(editor, getProjectAdapter, scalaFile, offset, -1)
val handler = new ScalaFunctionParameterInfoHandler
val leafElement = scalaFile.findElementAt(offset)
val element = PsiTreeUtil.getParentOfType(leafElement, handler.getArgumentListClass)
handler.findElementForParameterInfo(context)
val items = new ArrayBuffer[String]
for (item <- context.getItemsToShow) {
val uiContext = new ParameterInfoUIContext {
def getDefaultParameterColor: Color = HintUtil.INFORMATION_COLOR
def setupUIComponentPresentation(text: String, highlightStartOffset: Int, highlightEndOffset: Int,
isDisabled: Boolean, strikeout: Boolean, isDisabledBeforeHighlight: Boolean,
background: Color): String = {
items.append(text)
text
}
def isUIComponentEnabled: Boolean = false
def getCurrentParameterIndex: Int = 0
def getParameterOwner: PsiElement = element
def setUIComponentEnabled(enabled: Boolean) {}
}
handler.updateUI(item, uiContext)
}
val itemsArray = items.toArray
Sorting.quickSort[String](itemsArray)
val res = new StringBuilder("")
for (item <- itemsArray) res.append(item).append("\n")
if (res.length > 0) res.replace(res.length - 1, res.length, "")
val lastPsi = scalaFile.findElementAt(scalaFile.getText.length - 1)
val text = lastPsi.getText
val output = lastPsi.getNode.getElementType match {
case ScalaTokenTypes.tLINE_COMMENT => text.substring(2).trim
case ScalaTokenTypes.tBLOCK_COMMENT | ScalaTokenTypes.tDOC_COMMENT =>
text.substring(2, text.length - 2).trim
case _ => assertTrue("Test result must be in last comment statement.", false)
}
assertEquals(output, res.toString())
}
}
| consulo/consulo-scala | test/org/jetbrains/plugins/scala/lang/parameterInfo/functionParameterInfo/FunctionParameterInfoTestBase.scala | Scala | apache-2.0 | 3,880 |
package com.seanshubin.http.values.domain
object RegexUtil {
def optional(pattern: String): String = {
nonCapture(pattern) + "?"
}
def capture(pattern: String): String = {
s"($pattern)"
}
def nonCapture(pattern: String): String = {
s"(?:$pattern)"
}
}
| SeanShubin/http-values | domain/src/main/scala/com/seanshubin/http/values/domain/RegexUtil.scala | Scala | unlicense | 279 |
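A small sketch of combining the helpers above into a pattern; the path format is made up for illustration.

// "GET /users" optionally followed by "/<id>", with the id captured when present.
val userPath = ("GET /users" + RegexUtil.optional("/" + RegexUtil.capture("[0-9]+"))).r
// userPath.pattern.pattern == "GET /users(?:/([0-9]+))?"

userPath.findFirstMatchIn("GET /users/42").flatMap(m => Option(m.group(1))) // Some("42")
userPath.findFirstMatchIn("GET /users").flatMap(m => Option(m.group(1)))    // None: the optional group did not participate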
package com.dbaneman.leveldb.internal
import java.util.UUID
import org.iq80.leveldb.{DBIterator, WriteBatch}
/**
* Created by dan on 9/23/14.
*/
object ServerState {
var writeBatches: Map[UUID, WriteBatch] = Map()
var iterators: Map[UUID, DBIterator] = Map()
}
| dbaneman/leveldb-server4j | src/main/scala/com/dbaneman/leveldb/internal/ServerState.scala | Scala | apache-2.0 | 270 |
package net.fwbrasil.activate.statement
import net.fwbrasil.activate.util.ManifestUtil._
import net.fwbrasil.activate.util.Reflection._
import net.fwbrasil.activate.util.Reflection.materializeJodaInstant
import net.fwbrasil.activate.util.Reflection.set
import net.fwbrasil.activate.util.Reflection.get
import net.fwbrasil.activate.util.uuid.UUIDUtil.generateUUID
import net.fwbrasil.activate.ActivateContext
import net.fwbrasil.radon.transaction.TransactionContext
import scala.collection.mutable.{ Map => MutableMap }
import net.fwbrasil.activate.entity.Var
import net.fwbrasil.activate.entity.EntityValue
import net.fwbrasil.activate.entity.BaseEntity
import net.fwbrasil.activate.entity.EntityHelper
import net.fwbrasil.activate.entity.IdVar
import net.fwbrasil.activate.util.uuid.UUIDUtil
import org.joda.time.base.AbstractInstant
import java.util.Date
import scala.collection.mutable.Stack
import net.fwbrasil.activate.entity.EntityPropertyMetadata
object StatementMocks {
val entityMockCache = MutableMap[Class[_ <: BaseEntity], BaseEntity]()
def funcToVarName[E <: BaseEntity: Manifest](func: E => _) = {
func(StatementMocks.mockEntity(erasureOf[E]))
StatementMocks.lastFakeVarCalled.get.name
}
var _lastFakeVarCalled =
new ThreadLocal[Stack[FakeVar[_]]] {
override def initialValue = Stack()
}
def lastFakeVarCalled = {
val last = _lastFakeVarCalled.get.headOption
clearFakeVarCalled
last
}
def fakeVarCalledStack = {
val stack = _lastFakeVarCalled.get.toSeq
clearFakeVarCalled
stack
}
def clearFakeVarCalled =
_lastFakeVarCalled.set(Stack())
class FakeVar[P](metadata: EntityPropertyMetadata, outerEntity: BaseEntity, val _outerEntityClass: Class[BaseEntity], val originVar: FakeVar[_])
extends Var[P](metadata, outerEntity, false) {
override def outerEntityClass = _outerEntityClass
override def get: Option[P] = {
val value =
if (classOf[BaseEntity].isAssignableFrom(valueClass))
mockEntity(valueClass.asInstanceOf[Class[BaseEntity]], this)
else
mock(valueClass)
_lastFakeVarCalled.get.push(this)
Option(value.asInstanceOf[P])
}
override def put(value: Option[P]): Unit = {
throw new IllegalStateException("You can't alter vars in a predicate!")
}
override def toString =
name
}
def mock(clazz: Class[_]) = {
clazz.getName match {
case "char" => 'M'
case "int" => 0
case "long" => 0l
case "float" => 0f
case "double" => 0d
case "boolean" => false
case "java.util.Calendar" => java.util.Calendar.getInstance
case "java.lang.String" => "mock"
case "[B" => Array[Byte]()
case other =>
null
}
}
def mockEntity[E <: BaseEntity](clazz: Class[E]): E =
entityMockCache.getOrElseUpdate(clazz, mockEntityWithoutCache[E](clazz)).asInstanceOf[E]
def mockEntityWithoutCache[E <: BaseEntity](clazz: Class[E]): E =
mockEntity[E](clazz, null).asInstanceOf[E]
def mockEntity[E <: BaseEntity](entityClass: Class[E], originVar: FakeVar[_]): E = {
val concreteClass = EntityHelper.concreteClasses(entityClass).headOption.getOrElse {
throw new IllegalStateException(
"Can't find a concrete class for " + entityClass + ".\n" +
"Maybe the context isn't initialized or you must override acceptEntity on your context.\n" +
"The context definition must be declared in a base package of the entities packages or " +
"the entitiesPackages method must be overridden on your ActivateContext to specify which packages" +
" should be scanned for entities.")
}
val entity = newInstance(concreteClass)
val entityMetadata = entity.entityMetadata
val context = ActivateContext.contextFor(entityClass)
for (propertyMetadata <- entityMetadata.propertiesMetadata)
context.transactional(context.transient) {
val ref = new FakeVar[Any](propertyMetadata, entity, entityClass.asInstanceOf[Class[BaseEntity]], originVar)
val field = propertyMetadata.varField
field.set(entity, ref)
}
entity.buildVarsMap
entity.asInstanceOf[E]
}
def mockVar =
newInstanceUnitialized(classOf[FakeVar[_]])
} | avramirez/activate | activate-core/src/main/scala/net/fwbrasil/activate/statement/StatementMocks.scala | Scala | lgpl-2.1 | 4,758 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes
import org.apache.calcite.rel.{RelFieldCollation, RelNode}
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeFieldImpl}
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rel.core.Window.Group
import org.apache.calcite.rel.core.Window
import org.apache.calcite.rex.{RexInputRef}
import org.apache.flink.table.runtime.aggregate.AggregateUtil._
import org.apache.flink.table.functions.{ProcTimeType, RowTimeType}
import scala.collection.JavaConverters._
trait OverAggregate {
private[flink] def partitionToString(inputType: RelDataType, partition: Array[Int]): String = {
val inFields = inputType.getFieldNames.asScala
partition.map( inFields(_) ).mkString(", ")
}
private[flink] def orderingToString(
inputType: RelDataType,
orderFields: java.util.List[RelFieldCollation]): String = {
val inFields = inputType.getFieldList.asScala
val orderingString = orderFields.asScala.map {
x => inFields(x.getFieldIndex).getValue
}.mkString(", ")
orderingString
}
private[flink] def windowRange(
logicWindow: Window,
overWindow: Group,
input: RelNode): String = {
if (overWindow.lowerBound.isPreceding && !overWindow.lowerBound.isUnbounded) {
s"BETWEEN ${getLowerBoundary(logicWindow, overWindow, input)} PRECEDING " +
s"AND ${overWindow.upperBound}"
} else {
s"BETWEEN ${overWindow.lowerBound} AND ${overWindow.upperBound}"
}
}
private[flink] def aggregationToString(
inputType: RelDataType,
rowType: RelDataType,
namedAggregates: Seq[CalcitePair[AggregateCall, String]]): String = {
val inFields = inputType.getFieldList.asScala.map {
x =>
x.asInstanceOf[RelDataTypeFieldImpl].getType
match {
case proceTime: ProcTimeType => "PROCTIME"
case rowTime: RowTimeType => "ROWTIME"
case _ => x.asInstanceOf[RelDataTypeFieldImpl].getName
}
}
val outFields = rowType.getFieldList.asScala.map {
x =>
x.asInstanceOf[RelDataTypeFieldImpl].getType
match {
case proceTime: ProcTimeType => "PROCTIME"
case rowTime: RowTimeType => "ROWTIME"
case _ => x.asInstanceOf[RelDataTypeFieldImpl].getName
}
}
val aggStrings = namedAggregates.map(_.getKey).map(
a => s"${a.getAggregation}(${
if (a.getArgList.size() > 0) {
inFields(a.getArgList.get(0))
} else {
"*"
}
})")
(inFields ++ aggStrings).zip(outFields).map {
case (f, o) => if (f == o) {
f
} else {
s"$f AS $o"
}
}.mkString(", ")
}
private[flink] def getLowerBoundary(
logicWindow: Window,
overWindow: Group,
input: RelNode): Long = {
val ref: RexInputRef = overWindow.lowerBound.getOffset.asInstanceOf[RexInputRef]
val lowerBoundIndex = input.getRowType.getFieldCount - ref.getIndex;
val lowerBound = logicWindow.constants.get(lowerBoundIndex).getValue2
lowerBound match {
case x: java.math.BigDecimal => x.asInstanceOf[java.math.BigDecimal].longValue()
case _ => lowerBound.asInstanceOf[Long]
}
}
}
| DieBauer/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/OverAggregate.scala | Scala | apache-2.0 | 4,023 |
///////////////////////////////////////////////////////////////////////////////
// Copyright (C) 2010 Travis Brown, The University of Texas at Austin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
package opennlp.textgrounder.app
import java.io._
import opennlp.textgrounder.eval._
import opennlp.textgrounder.resolver._
import opennlp.textgrounder.topo.gaz._
import opennlp.textgrounder.text._
import opennlp.textgrounder.text.io._
import opennlp.textgrounder.text.prep._
import opennlp.textgrounder.util.Constants
object ReprocessTrApp {
def main(args: Array[String]) {
val tokenizer = new OpenNLPTokenizer
val recognizer = new OpenNLPRecognizer
val gazetteer = new InMemoryGazetteer
gazetteer.load(new WorldReader(new File(
Constants.getGazetteersDir() + File.separator + "dataen-fixed.txt.gz"
)))
val corpus = Corpus.createStreamCorpus
val source = new TrXMLDirSource(new File(args(0)), tokenizer)
val stripped = new ToponymRemover(source)
corpus.addSource(new ToponymAnnotator(stripped, recognizer, gazetteer))
val writer = new CorpusXMLWriter(corpus)
writer.write(new File(args(1)))
}
}
| tectronics/textgrounder | src/main/scala/opennlp/textgrounder/app/ReprocessTrApp.scala | Scala | apache-2.0 | 1,755 |
/*
* Copyright 2012 Tumblr Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.redis
import java.nio.ByteBuffer
import com.twitter.conversions.time.intToTimeableNumber
import com.twitter.util.Await.{ready, result}
import com.twitter.zipkin.common.{Annotation, AnnotationType, BinaryAnnotation, Endpoint, Span}
import com.twitter.zipkin.storage.IndexedTraceId
class RedisIndexSpec extends RedisSpecification {
val redisIndex = new RedisIndex(_client, Some(7.days))
val ep = Endpoint(123, 123, "service")
def binaryAnnotation(key: String, value: String) =
BinaryAnnotation(
key,
ByteBuffer.wrap(value.getBytes),
AnnotationType.String,
Some(ep)
)
val spanId = 456
val ann1 = Annotation(1, "cs", Some(ep))
val ann2 = Annotation(2, "sr", None)
val ann3 = Annotation(2, "custom", Some(ep))
val ann4 = Annotation(2, "custom", Some(ep))
val span1 = Span(123, "methodcall", spanId, None, List(ann1, ann3),
List(binaryAnnotation("BAH", "BEH")))
val span2 = Span(123, "methodcall", spanId, None, List(ann2),
List(binaryAnnotation("BAH2", "BEH2")))
val span3 = Span(123, "methodcall", spanId, None, List(ann2, ann3, ann4),
List(binaryAnnotation("BAH2", "BEH2")))
val spanEmptySpanName = Span(123, "", spanId, None, List(ann1, ann2), List())
val spanEmptyServiceName = Span(123, "spanname", spanId, None, List(), List())
val mergedSpan = Span(123, "methodcall", spanId, None,
List(ann1, ann2), List(binaryAnnotation("BAH2", "BEH2")))
test("index and get span names") {
ready(redisIndex.index(span1))
result(redisIndex.getSpanNames("service")) should be (Set(span1.name))
}
test("index and get service names") {
ready(redisIndex.index(span1))
result(redisIndex.getServiceNames) should be (Set(span1.serviceNames.head))
}
test("getTraceIdsByName") {
ready(redisIndex.index(span1))
val endTs = ann3.timestamp + 1
result(redisIndex.getTraceIdsByName("service", None, endTs, 1)).map(_.traceId) should
be(Seq(span1.traceId))
result(redisIndex.getTraceIdsByName("service", Some("methodcall"), endTs, 1)).map(_.traceId) should
be(Seq(span1.traceId))
}
test("getTraceIdsByAnnotation") {
ready(redisIndex.index(span1))
// fetch by time based annotation, find trace
val endTs = ann3.timestamp + 1
result(redisIndex.getTraceIdsByAnnotation("service", "custom", None, endTs, 1)).map(_.traceId) should
be (Seq(span1.traceId))
// should not find any traces since the core annotation doesn't exist in index
result(redisIndex.getTraceIdsByAnnotation("service", "cs", None, 0, 1)) should be (empty)
// should find traces by the key and value annotation
result(redisIndex.getTraceIdsByAnnotation("service", "BAH", Some(ByteBuffer.wrap("BEH".getBytes)), endTs, 1)) should
be (Seq(IndexedTraceId(span1.traceId, span1.lastAnnotation.get.timestamp)))
}
test("not index empty service name") {
ready(redisIndex.index(spanEmptyServiceName))
result(redisIndex.getServiceNames) should be (empty)
}
test("not index empty span name ") {
ready(redisIndex.index(spanEmptySpanName))
result(redisIndex.getSpanNames(spanEmptySpanName.name)) should be (empty)
}
}
| jstanier/zipkin | zipkin-redis/src/test/scala/com/twitter/zipkin/storage/redis/RedisIndexSpec.scala | Scala | apache-2.0 | 3,796 |
package me.lignum.lambdacraft.computer
class Memory(private val size: Int, private val cd: ComplaintDesk) {
private val memoryBlock: Array[Byte] = new Array[Byte](size)
def getSize = size
def isValidAddress(address: Short): Boolean = address < size && address >= 0
def get(address: Short): Byte =
if (isValidAddress(address))
memoryBlock(address)
else {
cd.error(ErrorCode.InvalidAddress)
0
}
def set(address: Short, value: Byte) =
if (isValidAddress(address))
memoryBlock(address) = value
else
cd.error(ErrorCode.InvalidAddress)
// Array.map would build a new array; fill the backing array in place instead.
def clear(): Unit = java.util.Arrays.fill(memoryBlock, 0.toByte)
}
| Lignumm/LambdaCraft | src/main/scala/me/lignum/lambdacraft/computer/Memory.scala | Scala | mit | 638 |
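A usage sketch for the Memory class above. ComplaintDesk and ErrorCode live elsewhere in this package, so the shapes noted in the comment are assumptions.

// Assumed shapes (defined elsewhere in the package):
//   object ErrorCode { val InvalidAddress = ... }
//   class ComplaintDesk { def error(code: ErrorCode): Unit = ... }

val desk = new ComplaintDesk
val mem  = new Memory(256, desk)

mem.set(16, 42)  // literal Int arguments narrow to Short/Byte because they are constants
mem.get(16)      // 42
mem.get(512)     // out of range: desk.error(ErrorCode.InvalidAddress) fires and 0 is returned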
package org.scalameter
package reporting
import java.awt.{BasicStroke, Color}
import java.io._
import java.text.DateFormat.{MEDIUM, getDateTimeInstance}
import java.util.Date
import org.jfree.chart.labels.{ItemLabelAnchor, ItemLabelPosition, StandardCategoryItemLabelGenerator}
import org.jfree.chart.renderer.category.BarRenderer
import org.jfree.chart.renderer.xy.{DeviationRenderer, XYLineAndShapeRenderer}
import org.jfree.chart.{LegendItem, LegendItemCollection}
import org.jfree.data.category.DefaultCategoryDataset
import org.jfree.ui.TextAnchor
import org.scalameter.Key.reports._
import org.scalameter.utils.Statistics._
import org.scalameter.utils.Tree
import scala.Numeric.Implicits._
import scala.collection._
import scala.math.Pi
import scalax.chart.Chart
import scalax.chart.api._
case class ChartReporter[T: Numeric](drawer: ChartReporter.ChartFactory,
fileNamePrefix: String = "", wdt: Int = 1600, hgt: Int = 1200) extends Reporter[T] {
/** Does nothing, the charts are generated only at the end. */
override final def report(result: CurveData[T], persistor: Persistor): Unit = ()
def report(result: Tree[CurveData[T]], persistor: Persistor) = {
for ((ctx, curves) <- result.scopes if curves.nonEmpty) {
val scopename = ctx.scope
val histories = curves.map(c => persistor.load[T](c.context))
val chart = drawer.createChart(scopename, curves, histories)
val dir = result.context(resultDir)
new File(dir).mkdirs()
val chartfile = s"$dir/$fileNamePrefix$scopename.png"
chart.saveAsPNG(chartfile, (wdt,hgt))
}
true
}
}
object ChartReporter {
import Key._
trait ChartFactory {
/** Generates a chart for the given curve data, with the given history.
*
* @param scopename name of the chart
* @param cs a list of curves that should appear on the chart
* @param histories previous chart data for the same set of curves
* @param colors specifies the colors assigned to the the first `colors.size` curves from `cs`.
* The rest of the curves are assigned some default set of colors.
*/
def createChart[T: Numeric](scopename: String, cs: Seq[CurveData[T]],
histories: Seq[History[T]], colors: Seq[Color] = Seq()): Chart
}
object ChartFactory {
case class XYLine() extends ChartFactory {
def createChart[T: Numeric](scopename: String, cs: Seq[CurveData[T]],
histories: Seq[History[T]], colors: Seq[Color] = Seq()): Chart = {
val dataset = for ((curve, idx) <- cs.zipWithIndex) yield {
val seriesName = curve.context.goe(dsl.curve, idx.toString)
val seriesData = for {
measurement <- curve.measurements
x = measurement.params.axisData.head._2.asInstanceOf[Int]
y = measurement.value
} yield x -> y
seriesName -> seriesData
}
val chart = XYLineChart(dataset)
chart.title = scopename
chart.plot.domain.axis.label = cs.head.measurements.head.params.axisData.head._1.fullName
chart.plot.range.axis.label = "value"
chart.plot.setBackgroundPaint(new java.awt.Color(180, 180, 180))
chart.antiAlias = true
val renderer = new XYLineAndShapeRenderer()
for (i <- cs.indices) renderer.setSeriesShapesVisible(i, true)
chart.plot.setRenderer(renderer)
chart
}
}
case class ConfidenceIntervals(showLatestCi: Boolean,
showHistoryCi: Boolean, t: RegressionReporter.Tester) extends ChartFactory {
private def ciFor[T: Numeric](curve: CurveData[T], values: Seq[T]) = if (showLatestCi) {
t.confidenceInterval(curve.context, values)
} else {
(0D, 0D)
}
def createChart[T: Numeric](scopename: String, cs: Seq[CurveData[T]],
histories: Seq[History[T]], colors: Seq[Color] = Seq()): Chart = {
def createDataset = {
val dataset = new YIntervalSeriesCollection
for ((curve, history) <- cs zip histories) {
if (history.results.isEmpty) {
val series = new YIntervalSeries(curve.context(dsl.curve))
for (measurement <- curve.measurements) {
val (yLow,yHigh) = ciFor(curve, measurement.complete)
series.add(measurement.params.axisData.head._2.asInstanceOf[Int],
measurement.value.toDouble(), yLow, yHigh)
}
dataset.addSeries(series)
} else {
val newestSeries = new YIntervalSeries(curve.context(dsl.curve))
val historySeries = new YIntervalSeries(curve.context(dsl.curve))
for ((measurement, measurementIndex) <- curve.measurements.zipWithIndex) {
val x = measurement.params.axisData.head._2.asInstanceOf[Int]
/* Fetch, for each corresponding curve in history, the measurements that were at the same position (same size for instance)
on x-axis, and make a list of them */
val previousMeasurements = for {
pastResult <- history.results
correspondingCurveInHistory = pastResult._3
} yield correspondingCurveInHistory.measurements(measurementIndex)
// We then take all observations that gave the value measurement (by calling complete) of each point, and concat them
val previousMeasurementsObservations = previousMeasurements flatMap(m => m.complete)
val (yLowThis,yHighThis) = ciFor(curve, previousMeasurementsObservations)
val (yLowNewest,yHighNewest) = ciFor(curve, measurement.complete)
val meanForThisPoint = mean(previousMeasurementsObservations.map(_.toDouble()))
// Params : x - the x-value, y - the y-value, yLow - the lower bound of the y-interval, yHigh - the upper bound of the y-interval.
historySeries.add(x, meanForThisPoint, yLowThis, yHighThis)
newestSeries.add(x, measurement.value.toDouble(), yLowNewest, yHighNewest)
}
dataset.addSeries(historySeries)
dataset.addSeries(newestSeries)
}
}
dataset
}
/* We may want to call other methods from the JFreeChart API, as there are a
lot of them related to appearance in class DeviationRenderer and in its parent classes */
def paintCurves(renderer: DeviationRenderer) {
for((color, i) <- colors.zipWithIndex) {
renderer.setSeriesStroke(i, new BasicStroke(3F, BasicStroke.CAP_ROUND, BasicStroke.JOIN_ROUND))
renderer.setSeriesPaint(i, color)
renderer.setSeriesFillPaint(i, color)
}
renderer.setAlpha(0.25F)
}
val dataset = createDataset
val chartName = scopename
val xAxisName = cs.head.measurements.head.params.axisData.head._1.fullName
// instantiate a DeviationRenderer (lines, shapes)
val renderer = new DeviationRenderer(true, true)
paintCurves(renderer)
val chart = XYLineChart(dataset)
chart.title = chartName
chart.plot.domain.axis.label = xAxisName
chart.plot.range.axis.label = "value"
chart.plot.setBackgroundPaint(new java.awt.Color(200, 200, 200))
chart.plot.setRenderer(renderer)
        // There are many other configurable appearance options!
chart.antiAlias = true
chart
}
}
/** Returns the data to dataset converter. */
private implicit def MyToCategoryDatasetConverter[T: Numeric]: ToCategoryDataset[Seq[(String,(String, T))]] =
ToCategoryDataset { coll =>
coll.foldLeft(new DefaultCategoryDataset) { case (dataset,(series,(category,value))) =>
dataset.addValue(value.toDouble(), series, category)
dataset
}
}
case class TrendHistogram() extends ChartFactory {
def createChart[T: Numeric](scopename: String, cs: Seq[CurveData[T]],
histories: Seq[History[T]], colors: Seq[Color] = Seq()): Chart = {
val now = new Date
val df = getDateTimeInstance(MEDIUM, MEDIUM)
        /*
         * A History contains the previous curves for a curve in cs. For instance, if there are three dates (categories) on the chart and
         * three curves for the most recent measurements (so cs has size 3), then histories will have length 3 too (one History per curve),
         * and each History in histories will contain 2 entries, because 2 of the 3 dates (categories) come from past runs.
         *
         * cs and histories will always have the same length here.
         *
         * case class History(results: Seq[History.Entry], ...)
         * type Entry = (Date, Context, CurveData)
         * def curves = results.map(_._3)
         * def dates = results.map(_._1)
         */
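        /*
         * Illustrative sketch (added for clarity, not from the original source):
         * with cs = Seq(curveA, curveB) and histories = Seq(histA, histB), where histA.results
         * has 2 entries, `data` yields one (series, (category, value)) tuple per measurement of
         * curveA at each of the 3 categories (2 past dates + `now`), and likewise for curveB.
         */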
val data = for {
(c, history) <- cs zip histories
curves = history.curves :+ c
dates = history.dates :+ now
categories = dates map df.format
(curve, category) <- curves zip categories
measurement <- curve.measurements
curveName = curve.context(dsl.curve)
measurementParams = (for(p <- measurement.params.axisData) yield (s"""${p._1.fullName} : ${p._2}""")).mkString("[", ", ", "]")
series = s"""$curveName $measurementParams"""
} yield (series,(category,measurement.value))
val chart = BarChart(data)
chart.title = scopename
chart.plot.domain.axis.label = "Date"
chart.plot.range.axis.label = "Value"
val plot = chart.plot
val renderer: BarRenderer = plot.getRenderer.asInstanceOf[BarRenderer]
renderer.setDrawBarOutline(false)
        renderer.setItemMargin(0D) // so that there is no space between bars of the same category
// old version of paintCurves, does not allow custom colors
/*
def paintCurves = {
var seriesIndex = 0
for(curve <- cs) {
val seriesPaint = renderer.lookupSeriesPaint(seriesIndex)
val numberOfMeasurements = curve.measurements.size
for (i <- (0 until numberOfMeasurements)) {
renderer.setSeriesPaint(seriesIndex + i, seriesPaint)
}
seriesIndex += numberOfMeasurements
}
}*/
/*
* new version. If there are not enough colors specified, the rest are default colors assigned by JFreeChart
*/
def paintCurves() = {
def loop(numbersOfMeasurements: Seq[Int], colors: Seq[Color], seriesIndex: Int): Unit = (numbersOfMeasurements, colors) match {
case (Nil, _) => // do nothing
case (hn :: tn, Nil) =>
for (i <- (0 until hn)) {
val seriesPaint = renderer.lookupSeriesPaint(seriesIndex)
renderer.setSeriesPaint(seriesIndex + i, seriesPaint)
}
loop(tn, Nil, seriesIndex + hn)
case (hn :: tn, hc :: tc) =>
for (i <- (0 until hn)) {
renderer.setSeriesPaint(seriesIndex + i, hc)
}
loop(tn, tc, seriesIndex + hn)
}
val numbersOfMeasurementsPerCurve = cs map (c => c.measurements.size)
loop(numbersOfMeasurementsPerCurve, colors, 0)
}
def setChartLegend() = {
var seriesIndex = 0
val legendItems = new LegendItemCollection
for((curve, curveIndex) <- cs.zipWithIndex) {
val curveName = curve.context.goe(dsl.curve, "Curve " + curveIndex.toString)
val seriesPaint = renderer.lookupSeriesPaint(seriesIndex)
val numberOfMeasurements = curve.measurements.size
legendItems.add(new LegendItem(curveName, seriesPaint))
seriesIndex += numberOfMeasurements
}
plot.setFixedLegendItems(legendItems)
}
paintCurves()
setChartLegend()
class LabelGenerator extends StandardCategoryItemLabelGenerator {
val serialVersionUID = -7553175765030937177L
override def generateLabel(categorydataset: CategoryDataset, i: Int, j: Int) = {
val rowKey = categorydataset.getRowKey(i).toString
rowKey.substring(rowKey.indexOf("["))
}
}
renderer.setBaseItemLabelGenerator(new LabelGenerator)
renderer.setBaseItemLabelsVisible(true)
// ItemLabelPosition params : 1. item label anchor, 2. text anchor, 3. rotation anchor, 4. rotation angle
val itemLabelPosition = new ItemLabelPosition(ItemLabelAnchor.INSIDE12, TextAnchor.CENTER_RIGHT, TextAnchor.CENTER_RIGHT, -Pi / 2)
renderer.setBasePositiveItemLabelPosition(itemLabelPosition)
val altItemLabelPosition = new ItemLabelPosition(ItemLabelAnchor.OUTSIDE12, TextAnchor.CENTER_LEFT, TextAnchor.CENTER_LEFT, -Pi / 2)
renderer.setPositiveItemLabelPositionFallback(altItemLabelPosition)
plot.setBackgroundPaint(new java.awt.Color(200, 200, 200))
plot.setDomainGridlinePaint(Color.white)
plot.setRangeGridlinePaint(Color.white)
chart.backgroundPaint = Color.white
chart.antiAlias = true
chart
}
}
case class NormalHistogram() extends ChartFactory {
def createChart[T: Numeric](scopename: String, cs: Seq[CurveData[T]],
histories: Seq[History[T]], colors: Seq[Color] = Seq()): Chart = {
val now = new Date
val df = getDateTimeInstance(MEDIUM, MEDIUM)
val data = for {
(c, history) <- cs zip histories
curves = history.curves :+ c
dates = history.dates :+ now
formattedDates = dates map df.format
(curve, formattedDate) <- curves zip formattedDates
measurement <- curve.measurements
curveName = curve.context(dsl.curve)
measurementParams = (for(p <- measurement.params.axisData) yield (s"""${p._1.fullName} : ${p._2}""")).mkString("[", ", ", "]")
series = s"""$curveName#$formattedDate"""
} yield (series,(measurementParams,measurement.value))
val chart = BarChart(data)
chart.title = scopename
chart.plot.domain.axis.label = "Parameters"
chart.plot.range.axis.label = "Value"
val plot = chart.plot
val renderer: BarRenderer = plot.getRenderer.asInstanceOf[BarRenderer]
renderer.setDrawBarOutline(false)
        renderer.setItemMargin(0D) // so that there is no space between bars of the same category
/*def paintCurves = {
var seriesIndex = 0
for ((curve, history) <- cs zip histories) {
val seriesPaint = renderer.lookupSeriesPaint(seriesIndex)
for (i <- (0 to history.results.size)) {
renderer.setSeriesPaint(seriesIndex + i, seriesPaint)
}
seriesIndex += (history.results.size + 1)
}
}*/
def paintCurves() = {
def loop(numbersOfEntries: Seq[Int], colors: Seq[Color], seriesIndex: Int): Unit = (numbersOfEntries, colors) match {
case (Nil, _) => // do nothing
case (hn :: tn, Nil) =>
for (i <- (0 until hn)) {
val seriesPaint = renderer.lookupSeriesPaint(seriesIndex)
renderer.setSeriesPaint(seriesIndex + i, seriesPaint)
}
loop(tn, Nil, seriesIndex + hn)
case (hn :: tn, hc :: tc) =>
for (i <- (0 until hn)) {
renderer.setSeriesPaint(seriesIndex + i, hc)
}
loop(tn, tc, seriesIndex + hn)
}
val numbersOfEntries = histories map (h => h.results.size + 1)
loop(numbersOfEntries, colors, 0)
}
def setChartLegend() = {
var seriesIndex = 0
var curveIndex = 0
val legendItems = new LegendItemCollection
for ((curve, history) <- cs zip histories) {
val curveName = curve.context.goe(dsl.curve, "Curve " + curveIndex.toString)
val seriesPaint = renderer.lookupSeriesPaint(seriesIndex)
legendItems.add(new LegendItem(curveName, seriesPaint))
seriesIndex += (history.results.size + 1)
curveIndex += 1
}
plot.setFixedLegendItems(legendItems)
}
paintCurves()
setChartLegend()
class LabelGenerator extends StandardCategoryItemLabelGenerator {
val serialVersionUID = -7553175765030937177L
override def generateLabel(categorydataset: CategoryDataset, i: Int, j: Int) = {
val rowKey = categorydataset.getRowKey(i).toString
rowKey.substring(rowKey.indexOf("#") + 1)
}
}
renderer.setBaseItemLabelGenerator(new LabelGenerator)
renderer.setBaseItemLabelsVisible(true)
// ItemLabelPosition params : 1. item label anchor, 2. text anchor, 3. rotation anchor, 4. rotation angle
val itemLabelPosition = new ItemLabelPosition(ItemLabelAnchor.INSIDE12, TextAnchor.CENTER_RIGHT, TextAnchor.CENTER_RIGHT, -Pi / 2)
renderer.setBasePositiveItemLabelPosition(itemLabelPosition)
val altItemLabelPosition = new ItemLabelPosition(ItemLabelAnchor.OUTSIDE12, TextAnchor.CENTER_LEFT, TextAnchor.CENTER_LEFT, -Pi / 2)
renderer.setPositiveItemLabelPositionFallback(altItemLabelPosition)
plot.setBackgroundPaint(new java.awt.Color(200, 200, 200))
plot.setDomainGridlinePaint(Color.white)
plot.setRangeGridlinePaint(Color.white)
chart.backgroundPaint = Color.white
chart.antiAlias = true
chart
}
}
}
}
| kjanosz/scalameter | src/main/scala/org/scalameter/reporting/ChartReporter.scala | Scala | bsd-3-clause | 17,766 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.fixture
import org.ensime.api._
import org.ensime.indexer.{ EnsimeVFS, SourceResolver }
trait SourceResolverFixture {
def withSourceResolver(testCode: SourceResolver => Any): Any
def withSourceResolver(testCode: (EnsimeConfig, SourceResolver) => Any): Any
}
trait IsolatedSourceResolverFixture extends SourceResolverFixture
with IsolatedEnsimeConfigFixture {
override def withSourceResolver(testCode: SourceResolver => Any): Any = withEnsimeConfig { config =>
implicit val vfs = EnsimeVFS()
try {
testCode(new SourceResolver(config))
} finally {
vfs.close()
}
}
override def withSourceResolver(testCode: (EnsimeConfig, SourceResolver) => Any): Any = withEnsimeConfig { config =>
implicit val vfs = EnsimeVFS()
try {
testCode(config, new SourceResolver(config))
} finally {
vfs.close()
}
}
}
trait SharedSourceResolverFixture extends SourceResolverFixture
with SharedEnsimeConfigFixture {
this: SharedEnsimeVFSFixture =>
private[fixture] var _resolver: SourceResolver = _
override def beforeAll(): Unit = {
super.beforeAll()
_resolver = new SourceResolver(_config)
}
override def withSourceResolver(testCode: SourceResolver => Any): Any = testCode(_resolver)
override def withSourceResolver(testCode: (EnsimeConfig, SourceResolver) => Any): Any = {
testCode(_config, _resolver)
}
}
| j-mckitrick/ensime-sbt | src/sbt-test/ensime-sbt/ensime-server/core/src/it/scala/org/ensime/fixture/SourceResolverFixture.scala | Scala | apache-2.0 | 1,538 |
package org.gensokyo.elasticsearchclient
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.scalatest._
/**
* Created by liusen on 15-12-29.
*/
class ClientSpec extends FlatSpec with Matchers with OptionValues with Inside with Inspectors {
def createIndex(index: String): Unit = {
ElasticSearchClient.deleteIndex(index).sync
ElasticSearchClient.createIndex(name = index).sync.tee.asString.contains("acknowledged") shouldBe true
}
def index(index: String, `type`: String, id: String, data: Option[String] = None): Unit = {
ElasticSearchClient.index(
id = Some(id),
index = index, `type` = `type`,
data = data.getOrElse( s"""{"id":"$id"}"""), refresh = true
).sync.tee.asString.contains("\"_version\"") shouldBe true
}
def deleteIndex(index: String): Unit = {
ElasticSearchClient.deleteIndex(index).sync.tee.asString.contains("acknowledged") shouldBe true
}
"ElasticSearchClient" should "fail usefully" in {
ElasticSearchClient.verifyIndex("foobarbaz").sync.tee.status.intValue shouldEqual 404
}
it should "create and delete indexes" in {
createIndex("foo")
ElasticSearchClient.verifyIndex("foo").sync.tee.asString
deleteIndex("foo")
}
// "create and delete aliases" should {
// createIndex("foo")
// ElasticSearchClient.createAlias(actions = """{ "add": { "index": "foo", "alias": "foo-write" }}""").sync.tee.asString.contains("acknowledged") shouldBe true
// ElasticSearchClient.getAliases(index = Some("foo")).sync.tee.asString.contains("foo-write") shouldBe true
// ElasticSearchClient.deleteAlias(index = "foo", alias = "foo-write").sync.tee.asString.contains("acknowledged") shouldBe true
// deleteIndex("foo")
// }
// "put, get, and delete warmer" should {
// createIndex("trogdor")
// Thread.sleep(100) //ES needs some time to make the index first
// ElasticSearchClient.putWarmer(index = "trogdor", name = "fum", body = """{"query": {"match_all":{}}}""").sync.tee.asString.contains("acknowledged") shouldBe true
// ElasticSearchClient.getWarmers("trogdor", "fu*").sync.tee.asString.contains("fum") shouldBe true
// ElasticSearchClient.deleteWarmer("trogdor", "fum").sync.tee.asString.contains("acknowledged") shouldBe true
// ElasticSearchClient.getWarmers("trogdor", "fu*").sync.tee.asString should not contain("fum")
// deleteIndex("trogdor")
// }
it should "index, fetch, and delete a document" in {
ElasticSearchClient.index(
id = Some("foo"),
index = "foo", `type` = "foo",
data = "{\"foo\":\"bar₡\"}", refresh = true
).sync.tee.asString.contains("\"_version\"") shouldBe true
ElasticSearchClient.get("foo", "foo", "foo").sync.tee.asString.contains("\"bar₡\"") shouldBe true
ElasticSearchClient.delete("foo", "foo", "foo").sync.tee.asString.contains("\"found\"") shouldBe true
deleteIndex("foo")
}
"get multiple documents" should "with index and type" in {
index(index = "foo", `type` = "bar", id = "1")
index(index = "foo", `type` = "bar", id = "2")
val txt = ElasticSearchClient.mget(index = Some("foo"), `type` = Some("bar"), query =
"""
|{
| "ids" : ["1", "2"]
|}
""".stripMargin).sync.tee.asString
(parse(txt) \\ "docs")(0) \\ "found" \\ classOf[JBool] shouldBe List(true)
(parse(txt) \\ "docs")(1) \\ "found" \\ classOf[JBool] shouldBe List(true)
deleteIndex("foo")
}
"get multiple documents" should "with index, type and some specified fields using source uri param" in {
index(index = "foo", `type` = "bar", id = "1",
data = Some("{\"name\":\"Jon Snow\", \"age\":18, \"address\":\"Winterfell\"}"))
index(index = "foo", `type` = "bar", id = "2",
data = Some("{\"name\":\"Arya Stark\", \"age\":14, \"address\":\"Winterfell\"}"))
val txt = ElasticSearchClient.mget(index = Some("foo"), `type` = Some("bar"), query =
"""
|{
| "ids" : ["1", "2"]
|}
""".stripMargin, uriParameters = List("name ", "", " address")).sync.tee.asString
(parse(txt) \ "docs")(0) \ "_source" \ "name" \\ classOf[JString] shouldBe List("Jon Snow")
(parse(txt) \ "docs")(0) \ "_source" \ "address" \\ classOf[JString] shouldBe List("Winterfell")
(parse(txt) \ "docs")(1) \ "_source" \ "name" \\ classOf[JString] shouldBe List("Arya Stark")
(parse(txt) \ "docs")(1) \ "_source" \ "address" \\ classOf[JString] shouldBe List("Winterfell")
((parse(txt) \ "docs")(1) \ "_source" \ "age").toOption.isEmpty shouldBe true
deleteIndex("foo")
}
"get multiple documents" should "with index" in {
index(index = "foo", `type` = "bar1", id = "1")
index(index = "foo", `type` = "bar2", id = "2")
val txt = ElasticSearchClient.mget(index = Some("foo"), `type` = None, query =
"""{
| "docs" : [
| {
| "_type" : "bar1",
| "_id" : "1"
| },
| {
| "_type" : "bar2",
| "_id" : "2"
| }
| ]
|}
""".stripMargin).sync.tee.asString
(parse(txt) \ "docs")(0) \ "found" \\ classOf[JBool] shouldBe List(true)
(parse(txt) \ "docs")(1) \ "found" \\ classOf[JBool] shouldBe List(true)
deleteIndex("foo")
}
"get multiple documents" should "without index and type" in {
index(index = "foo1", `type` = "bar1", id = "1")
index(index = "foo2", `type` = "bar2", id = "2")
val txt = ElasticSearchClient.mget(index = None, `type` = None, query =
"""{
| "docs" : [
| {
| "_index" : "foo1",
| "_type" : "bar1",
| "_id" : "1"
| },
| {
| "_index" : "foo2",
| "_type" : "bar2",
| "_id" : "2"
| }
| ]
|}
""".stripMargin).sync.tee.asString
(parse(txt) \ "docs")(0) \ "found" \\ classOf[JBool] shouldBe List(true)
(parse(txt) \ "docs")(1) \ "found" \\ classOf[JBool] shouldBe List(true)
deleteIndex("foo1")
deleteIndex("foo2")
}
it should "search for a document" in {
index(index = "foo", `type` = "foo", id = "foo2", data = Some("{\"foo\":\"bar\"}"))
ElasticSearchClient.search("foo", "{\"query\": { \"match_all\": {} } }").sync.tee.asString.contains("\"foo2\"") shouldBe true
ElasticSearchClient.count(Seq("foo"), Seq("foo"), "{\"query\": { \"match_all\": {} }").sync.tee.asString.contains("\"count\"") shouldBe true
ElasticSearchClient.delete("foo", "foo", "foo2").sync.tee.asString.contains("\"found\"") shouldBe true
deleteIndex("foo")
}
it should "search with search_type and scroll parameters" in {
index(index = "foo", `type` = "bar", id = "bar1", data = Some("{\"abc\":\"def\"}"))
ElasticSearchClient.search("foo", "{\"query\": { \"match_all\": {} } }", Some("bar"),
scroll = Some("1m")).sync.tee.asString.contains("\"bar1\"") shouldBe true
ElasticSearchClient.search("foo", "{\"query\": { \"match_all\": {} } }", Some("bar"),
scroll = Some("1m"), searchType = Some("scan")).sync.tee.asString.contains("\"_scroll_id\"") shouldBe true
deleteIndex("foo")
}
it should "scroll" in {
    implicit val formats: Formats = DefaultFormats
    def extractScrollId(responseBody: String): Option[String] = (parse(responseBody) \\ "_scroll_id").extractOpt[String]
def extractResultIds(responseBody: String): Option[List[String]] = Option((parse(responseBody) \ "hits" \ "hits" \ "_id" \\ classOf[JString]))
index(index = "foo", `type` = "bar", id = "bar1", data = Some("{\"abc\":\"def1\"}"))
index(index = "foo", `type` = "bar", id = "bar2", data = Some("{\"abc\":\"def2\"}"))
index(index = "foo", `type` = "bar", id = "bar3", data = Some("{\"abc\":\"def3\"}"))
val firstSearchResponse = ElasticSearchClient.search("foo",
"""{"query": { "match_all": {} }, "size": 2 }""", Some("bar"), None, Some("1m")).sync.tee.asString
val fistScrollIdOption = extractScrollId(firstSearchResponse)
fistScrollIdOption.isDefined shouldBe true
val firstResultIds = extractResultIds(firstSearchResponse)
firstResultIds shouldEqual Some(List("bar1", "bar2"))
val secondSearchResponse = ElasticSearchClient.scroll("1m", fistScrollIdOption.get).sync.tee.asString
val secondScrollIdOption = extractScrollId(secondSearchResponse)
secondScrollIdOption.isDefined shouldBe true
val secondResultIds = extractResultIds(secondSearchResponse)
secondResultIds shouldEqual Some(List("bar3"))
deleteIndex("foo")
}
"multi-search" should "with index and type" in {
index(index = "foo", `type` = "bar", id = "1", data = Some( """{"name": "Fred Smith"}"""))
index(index = "foo", `type` = "bar", id = "2", data = Some( """{"name": "Mary Jones"}"""))
val txt = ElasticSearchClient.msearch(index = Some("foo"), `type` = Some("bar"), query =
"""
|{}
|{"query" : {"match" : {"name": "Fred"}}}
|{}
|{"query" : {"match" : {"name": "Jones"}}}
""".stripMargin).sync.tee.asString
(parse(txt) \ "responses")(0) \ "hits" \ "total" \\ classOf[JInt] shouldBe List(1)
(parse(txt) \ "responses")(1) \ "hits" \ "total" \\ classOf[JInt] shouldBe List(1)
deleteIndex("foo")
}
"multi-search" should "with index" in {
index(index = "foo", `type` = "bar1", id = "1", data = Some( """{"name": "Fred Smith"}"""))
index(index = "foo", `type` = "bar2", id = "2", data = Some( """{"name": "Mary Jones"}"""))
val txt = ElasticSearchClient.msearch(index = Some("foo"), query =
"""
|{"type": "bar1"}
|{"query" : {"match" : {"name": "Fred"}}}
|{"type": "bar2"}
|{"query" : {"match" : {"name": "Jones"}}}
""".stripMargin).sync.tee.asString
(parse(txt) \ "responses")(0) \ "hits" \ "total" \\ classOf[JInt] shouldBe List(1)
(parse(txt) \ "responses")(1) \ "hits" \ "total" \\ classOf[JInt] shouldBe List(1)
deleteIndex("foo")
}
"multi-search" should "without index or type" in {
index(index = "foo1", `type` = "bar", id = "1", data = Some( """{"name": "Fred Smith"}"""))
index(index = "foo2", `type` = "bar", id = "2", data = Some( """{"name": "Mary Jones"}"""))
val txt = ElasticSearchClient.msearch(index = Some("foo"), query =
"""
|{"index": "foo1"}
|{"query" : {"match" : {"name": "Fred"}}}
|{"index": "foo2"}
|{"query" : {"match" : {"name": "Jones"}}}
""".stripMargin).sync.tee.asString
(parse(txt) \ "responses")(0) \ "hits" \ "total" \\ classOf[JInt] shouldBe List(1)
(parse(txt) \ "responses")(1) \ "hits" \ "total" \\ classOf[JInt] shouldBe List(1)
deleteIndex("foo1")
deleteIndex("foo2")
}
it should "delete a document by query" in {
index(index = "foo", `type` = "foo", id = "foo2", data = Some("{\"foo\":\"bar\"}"))
ElasticSearchClient.count(Seq("foo"), Seq("foo"), "{\"query\": { \"match_all\": {} } }").sync.tee.asString.contains("\"count\":1") shouldBe true
ElasticSearchClient.deleteByQuery(Seq("foo"), Seq.empty[String], """{ "query": { "match_all" : { } } }""").sync.tee.asString.contains("\"successful\"") shouldBe true
ElasticSearchClient.count(Seq("foo"), Seq("foo"), "{\"query\": { \"match_all\": {} }").sync.tee.asString.contains("\"count\":0") shouldBe true
deleteIndex("foo")
}
it should "get settings" in {
ElasticSearchClient.deleteIndex("replicas3").sync
ElasticSearchClient.createIndex(name = "replicas3",
settings = Some( """{"settings": {"numr_of_shards" : 1, "numr_of_replicas": 3}}""")
).sync.tee.asString.contains("acknowledged") shouldBe true
    // The tests start a single-node cluster and so the index can never turn green. Hence we only wait for "yellow".
ElasticSearchClient.health(List("replicas3"), waitForStatus = Some("yellow"), timeoutInterval = Some("5s")).sync.tee.asString
val txt = ElasticSearchClient.getSettings(List("replicas3")).sync.tee.asString
parse(txt) \ "replicas3" \ "settings" \ "index" \ "numr_of_replicas" \\ classOf[JString] shouldBe List("3")
ElasticSearchClient.deleteIndex("replicas3").sync.tee.asString.contains("acknowledged") shouldBe true
}
it should "properly manipulate mappings" in {
createIndex("foo")
ElasticSearchClient.putMapping(Seq("foo"), "foo", """{"foo": { "properties": { "message": { "type": "string", "store": true } } } }""").sync.tee.asString.contains("acknowledged") shouldBe true
ElasticSearchClient.verifyType("foo", "foo").sync.tee.asString
ElasticSearchClient.getMapping(Seq("foo"), Seq("foo")).sync.tee.asString.contains("store") shouldBe true
ElasticSearchClient.putMapping(Seq("foo"), "foo",
"""{"foo": { "properties": { "message": { "type": "integer", "store": true } } } }""",
ignoreConflicts = false).sync.tee.asString.contains("MergeMappingException") shouldBe true
ElasticSearchClient.putMapping(Seq("foo"), "foo",
"""{"foo": { "properties": { "message": { "type": "integer", "store": true } } } }""",
ignoreConflicts = true).sync.tee.asString.contains("acknowledged") shouldBe true
deleteIndex("foo")
}
it should "properly update settings" in {
createIndex("foo")
createIndex("bar")
ElasticSearchClient.putSettings(Seq("foo", "bar"), """{"index.blocks.read": true}""").sync.tee.asString.contains("acknowledged") shouldBe true
val txt = ElasticSearchClient.getSettings(Seq("foo", "bar")).sync.tee.asString
parse(txt) \ "foo" \ "settings" \ "index" \ "blocks" \ "read" \\ classOf[JString] shouldBe List("true")
parse(txt) \ "bar" \ "settings" \ "index" \ "blocks" \ "read" \\ classOf[JString] shouldBe List("true")
deleteIndex("foo")
deleteIndex("bar")
}
it should "suggest completions" in {
createIndex("music")
ElasticSearchClient.putMapping(Seq("music"), "song",
"""{
| "song" : {
| "properties" : {
| "name" : { "type" : "string" },
| "suggest" : { "type" : "completion",
| "index_analyzer" : "simple",
| "search_analyzer" : "simple",
| "payloads" : true
| }
| }
| }
|}
""".stripMargin).sync.tee.asString
index("music", "song", "1",
Some(
"""{
| "name" : "Nevermind",
| "suggest" : {
| "input": [ "Nevermind", "Nirvana" ],
| "output": "Nirvana - Nevermind",
| "payload" : { "artistId" : 2321 },
| "weight" : 34
| }
|}
""".stripMargin))
ElasticSearchClient.suggest("music",
"""{
| "song-suggest" : {
| "text" : "n",
| "completion" : {
| "field" : "suggest"
| }
| }
|}
""".stripMargin).sync.tee.asString.contains("Nirvana - Nevermind") shouldBe true
deleteIndex("music")
}
it should "validate and explain queries" in {
createIndex("foo")
index(index = "foo", `type` = "foo", id = "foo2", data = Some("{\"foo\":\"bar\"}"))
ElasticSearchClient.validate(index = "foo", query = "{\"query\": { \"match_all\": {} }").sync.tee.asString.contains("\"valid\"") shouldBe true
ElasticSearchClient.explain(index = "foo", `type` = "foo", id = "foo2", query = "{\"query\": { \"term\": { \"foo\":\"bar\"} } }").sync.tee.asString.contains("explanation") shouldBe true
deleteIndex("foo")
}
it should "handle health checking" in {
ElasticSearchClient.health().sync.tee.asString.contains("number_of_nodes") shouldBe true
ElasticSearchClient.health(level = Some("indices"), timeoutInterval = Some("5")).sync.tee.asString.contains("number_of_nodes") shouldBe true
}
it should "handle stats checking" in {
createIndex("foo")
createIndex("bar")
val res = ElasticSearchClient.stats().sync.tee.asString
res.contains("index") shouldBe true
res.contains("number_of_shards") shouldBe true
res.contains("number_of_replicas") shouldBe true
val fooRes = ElasticSearchClient.stats(indices = Seq("foo")).sync.tee.asString
fooRes.contains("settings") shouldBe true
fooRes.contains("number_of_shards") shouldBe true
fooRes.contains("foo") shouldBe true
    fooRes should not include ("bar")
val barRes = ElasticSearchClient.stats(indices = Seq("bar")).sync.tee.asString
barRes.contains("settings") shouldBe true
barRes.contains("number_of_shards") shouldBe true
barRes.contains("bar") shouldBe true
    barRes should not include ("foo")
deleteIndex("foo")
deleteIndex("bar")
}
it should "handle refresh" in {
createIndex("test")
val res = ElasticSearchClient.refresh("test").sync.tee.asString
res.contains("\"successful\"") shouldBe true
deleteIndex("test")
}
it should "handle bulk requests" in {
val res = ElasticSearchClient.bulk(data =
"""{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
{ "field1" : "value1" }
{ "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
{ "create" : { "_index" : "test", "_type" : "type1", "_id" : "3" } }
{ "field1" : "value3" }
{ "update" : {"_id" : "1", "_type" : "type1", "_index" : "index1"} }
{ "doc" : {"field2" : "value2"} }""").sync.tee.asString
res.contains("\"status\":201") shouldBe true
deleteIndex("test")
}
}
| srzyhead/ElasticSearchScalaClient | src/test/java/org/gensokyo/elasticsearchclient/ClientSpec.scala | Scala | mit | 17,503 |
package org.bowlerframework.examples.squeryl
import org.squeryl.Schema
/**
* Created by IntelliJ IDEA.
* User: wfaler
* Date: 20/02/2011
* Time: 19:56
* To change this template use File | Settings | File Templates.
*/
object ApplicationSchema extends Schema{
val people = table[Person]("people")
} | rkpandey/Bowler | examples/src/main/scala/org/bowlerframework/examples/squeryl/ApplicationSchema.scala | Scala | bsd-3-clause | 308 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box.{Calculated, CtBoxIdentifier, CtOptionalInteger}
import uk.gov.hmrc.ct.computations.calculations.SummaryLossesArisingThisPeriodCalculator
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class B122(value: Option[Int]) extends CtBoxIdentifier("Trading losses arising") with CtOptionalInteger
object B122 extends Calculated[B122, ComputationsBoxRetriever] with SummaryLossesArisingThisPeriodCalculator{
override def calculate(fieldValueRetriever: ComputationsBoxRetriever): B122 = {
summaryTradingLossesArisingCalculation(cp118 = fieldValueRetriever.cp118())
}
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B122.scala | Scala | apache-2.0 | 1,262 |
package at.logic.gapt.examples.tip.prod
import at.logic.gapt.expr._
import at.logic.gapt.formats.ClasspathInputFile
import at.logic.gapt.formats.tip.TipSmtParser
import at.logic.gapt.proofs.gaptic.{ Lemma, TacticsProof, _ }
import at.logic.gapt.proofs.{ Ant, Sequent }
object prop_32 extends TacticsProof {
val bench = TipSmtParser.fixupAndParse( ClasspathInputFile( "tip/prod/prop_32.smt2", getClass ) )
ctx = bench.ctx
val sequent = bench.toSequent.zipWithIndex.map {
case ( f, Ant( i ) ) => s"h$i" -> f
case ( f, _ ) => "goal" -> f
}
val theory = sequent.antecedent ++: Sequent()
val append_nil_left_id = hof"!xs append(xs,nil) = xs"
val append_nil_left_id_proof = Lemma( theory :+
( "append_nil_left_id" -> append_nil_left_id ) ) {
allR; induction( hov"xs:list" )
//- BC
rewrite.many ltr "h5" in "append_nil_left_id"; refl
//- IC
rewrite.many ltr "h6" in "append_nil_left_id";
rewrite.many ltr "IHxs_0" in "append_nil_left_id"; refl
}
val append_comm = hof"!xs!ys!zs append(xs,append(ys,zs)) = append(append(xs,ys),zs)"
val append_comm_proof = Lemma( theory :+
( "append_comm" -> append_comm ) ) {
allR; induction( hov"xs:list" )
//- BC
allR; allR;
rewrite.many ltr "h5" in "append_comm"; refl
//- IC
allR; allR;
rewrite.many ltr "h6" in "append_comm"
rewrite.many ltr "IHxs_0" in "append_comm"; refl
}
val rot_gen = ( append_nil_left_id & append_comm ) -->
hof"!xs!ys rotate(length(xs), append(xs,ys)) = append(ys, xs)"
val rot_gen_proof = Lemma( theory :+ ( "rot_gen" -> rot_gen ) ) {
impR; andL; allR; induction( hov"xs:list" )
//- BC
escargot
//- IC
allR
rewrite.many ltr "h4" in "rot_gen_1"
rewrite.many ltr "h6" in "rot_gen_1"
escargot
}
val lemma = append_nil_left_id & append_comm & rot_gen
val lemma_proof = Lemma( theory :+ ( "lemma" -> lemma ) ) {
andR; andR;
insert( append_nil_left_id_proof )
insert( append_comm_proof )
insert( rot_gen_proof )
}
val proof = Lemma( sequent ) {
cut( "lemma", lemma )
insert( lemma_proof )
escargot
}
} | gebner/gapt | examples/tip/prod/prop_32.scala | Scala | gpl-3.0 | 2,143 |
package scavlink.task.service
import java.security.SecureRandom
import org.parboiled.common.Base64
import spray.caching.LruCache
import spray.routing.authentication.{BasicUserContext, UserPass}
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
case class Token(token: String) {
override def toString = token
}
object Token {
private val random = new SecureRandom
private val base64 = Base64.custom()
def generate(): Token = {
val bytes = new Array[Byte](24)
random.nextBytes(bytes)
new Token(base64.encodeToString(bytes, false))
}
def dummy(): Token = new Token("01234567890123456789012345678901")
}
/**
* Interface to a token store that plays nice with spray's authentication directives.
* @author Nick Rossi
*/
trait TokenStore {
def checkUserPass(up: Option[UserPass])(implicit ec: ExecutionContext) = check(up.map(u => new Token(u.pass)))
def checkString(token: Option[String])(implicit ec: ExecutionContext) = check(token.map(Token.apply))
def check(token: Option[Token])(implicit ec: ExecutionContext): Future[Option[BasicUserContext]]
def addUserPass(up: UserPass)(implicit ec: ExecutionContext): Token = addUser(BasicUserContext(up.user))
def addUser(user: BasicUserContext)(implicit ec: ExecutionContext): Token
}
/**
* Basic in-memory token store using spray LruCache.
* Tokens are reaped if unused after the configured duration.
* @author Nick Rossi
*/
class MemoryTokenStore(idleTimeout: FiniteDuration) extends TokenStore {
private val tokens = LruCache[BasicUserContext](timeToIdle = idleTimeout)
  def check(token: Option[Token])(implicit ec: ExecutionContext): Future[Option[BasicUserContext]] =
    token.flatMap(tokens.get) match {
      case Some(future) => future.map(Option.apply)
      case None => Future.successful(None)
    }
def addUser(user: BasicUserContext)(implicit ec: ExecutionContext): Token = {
    val token = Token.generate()
tokens(token) { user }
token
}
}
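// Minimal usage sketch (added for illustration; not part of the original source —
// the object and user names below are hypothetical and the 30-minute timeout is arbitrary).
object TokenStoreUsageExample {
  import scala.concurrent.ExecutionContext.Implicits.global

  def demo(): Unit = {
    val store = new MemoryTokenStore(30.minutes)
    // Issue a token for a user authenticated by some other means.
    val token = store.addUserPass(UserPass("alice", "secret"))
    // Later requests present the token string; `check` resolves it back to the user
    // until the idle timeout expires it.
    store.checkString(Some(token.token)).foreach {
      case Some(user) => println(s"authenticated as ${user.username}")
      case None       => println("token unknown or expired")
    }
  }
}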
| nickolasrossi/scavlink | src/main/scala/scavlink/task/service/TokenStore.scala | Scala | mit | 2,004 |
package dit4c.scheduler.service
import org.specs2.concurrent.ExecutionEnv
import org.specs2.mutable.Specification
import org.specs2.ScalaCheck
import dit4c.scheduler.ScalaCheckHelpers
import akka.http.scaladsl.Http
import akka.actor._
import akka.testkit.TestProbe
import akka.stream.scaladsl._
import akka.http.scaladsl.model.ws.Message
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.testkit.WSProbe
import akka.stream.ActorMaterializer
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Random
import scala.concurrent.Promise
import akka.util.ByteString
import akka.http.scaladsl.model.Uri
import org.specs2.scalacheck.Parameters
import dit4c.scheduler.domain.Instance
import dit4c.protobuf.scheduler.outbound.OutboundMessage
import akka.http.scaladsl.model.StatusCodes
import org.bouncycastle.openpgp.PGPPublicKeyRing
import akka.http.scaladsl.model.headers.HttpCookiePair
import akka.http.scaladsl.model.headers.HttpCookie
import akka.http.scaladsl.server.directives.Credentials
import pdi.jwt.JwtJson
import pdi.jwt.algorithms.JwtAsymetricAlgorithm
import scala.util.Try
import java.security.interfaces.RSAPrivateKey
import pdi.jwt.JwtAlgorithm
import play.api.libs.json.Json
import dit4c.scheduler.domain.Cluster
class PortalMessageBridgeSpec(implicit ee: ExecutionEnv)
extends Specification
with ScalaCheck with ScalaCheckHelpers {
implicit val system = ActorSystem("PortalMessageBridgeSpec")
implicit val materializer = ActorMaterializer()
implicit val params = Parameters(minTestsOk = 5)
import ScalaCheckHelpers._
"PortalMessageBridgeSpec" >> {
"connection behaviour" >> {
"connects to a websocket server" >> {
newWithProbes { (wsProbe: WSProbe, _: TestProbe, _: ActorRef) =>
wsProbe.sendMessage("Hello")
done
}
}
"terminates on server complete" >> {
newWithProbes { (wsProbe: WSProbe, parentProbe: TestProbe, msgBridge: ActorRef) =>
parentProbe.watch(msgBridge)
wsProbe.sendMessage("Hello")
wsProbe.sendCompletion()
parentProbe.expectTerminated(msgBridge)
done
}
}
}
"incoming message handling" >> {
"StartInstance" >> prop({ (msgId: String, instanceId: String, clusterId: String, imageUrl: Uri) =>
import dit4c.protobuf.scheduler.{inbound => pb}
import dit4c.scheduler.service.{ClusterManager => cam}
import dit4c.scheduler.domain.{RktClusterManager => ram}
val msg = pb.InboundMessage(randomMsgId,
pb.InboundMessage.Payload.StartInstance(
pb.StartInstance(randomInstanceId, clusterId, imageUrl.toString)))
newWithProbes { (wsProbe: WSProbe, parentProbe: TestProbe, msgBridge: ActorRef) =>
wsProbe.sendMessage(ByteString(msg.toByteArray))
parentProbe.expectMsgPF(5.seconds) {
case cam.ClusterCommand(clusterId, cmd: ram.StartInstance) =>
// success
done
}
}
})
"DiscardInstance" >> prop({ (msgId: String, instanceId: String, clusterId: String) =>
import dit4c.protobuf.scheduler.{inbound => pb}
import dit4c.scheduler.service.{ClusterManager => cam}
import dit4c.scheduler.domain.{RktClusterManager => ram}
import dit4c.scheduler.domain.{instance => i}
val msg = pb.InboundMessage(randomMsgId,
pb.InboundMessage.Payload.DiscardInstance(
pb.DiscardInstance(randomInstanceId, clusterId)))
newWithProbes { (wsProbe: WSProbe, parentProbe: TestProbe, msgBridge: ActorRef) =>
wsProbe.sendMessage(ByteString(msg.toByteArray))
parentProbe.expectMsgPF(5.seconds) {
case cam.ClusterCommand(clusterId, ram.InstanceEnvelope(instanceId, Instance.Discard)) =>
// Success
done
}
}
})
}
"outgoing message handling" >> {
"InstanceStateUpdate" >> prop({ (msgId: String, instanceId: String, imageUrl: Uri, portalUri: Uri) =>
import dit4c.protobuf.scheduler.{outbound => pb}
import dit4c.scheduler.service.{ClusterManager => cam}
import dit4c.scheduler.domain.{RktClusterManager => ram}
val dummyLocalImageId = "sha512-"+Stream.fill(64)("0").mkString
val msgs: List[Instance.StatusReport] =
Instance.StatusReport(Instance.WaitingForImage, Instance.StartData(
instanceId, imageUrl.toString, None, portalUri.toString, None)) ::
Instance.StatusReport(Instance.Starting, Instance.StartData(
instanceId, imageUrl.toString,
Some(dummyLocalImageId), portalUri.toString, None)) ::
Instance.StatusReport(Instance.Running, Instance.StartData(
instanceId, imageUrl.toString,
Some(dummyLocalImageId), portalUri.toString, None)) ::
Instance.StatusReport(Instance.Stopping, Instance.StartData(
instanceId, imageUrl.toString,
Some(dummyLocalImageId), portalUri.toString, None)) ::
Instance.StatusReport(Instance.Exited, Instance.StartData(
instanceId, imageUrl.toString,
Some(dummyLocalImageId), portalUri.toString, None)) ::
Instance.StatusReport(Instance.Saving, Instance.StartData(
instanceId, imageUrl.toString,
Some(dummyLocalImageId), portalUri.toString, None)) ::
Instance.StatusReport(Instance.Saved, Instance.StartData(
instanceId, imageUrl.toString,
Some(dummyLocalImageId), portalUri.toString, None)) ::
Instance.StatusReport(Instance.Uploading, Instance.StartData(
instanceId, imageUrl.toString,
Some(dummyLocalImageId), portalUri.toString, None)) ::
Instance.StatusReport(Instance.Uploaded, Instance.StartData(
instanceId, imageUrl.toString,
Some(dummyLocalImageId), portalUri.toString, None)) ::
Instance.StatusReport(Instance.Discarded, Instance.StartData(
instanceId, imageUrl.toString,
Some(dummyLocalImageId), portalUri.toString, None)) ::
Instance.StatusReport(Instance.Errored, Instance.ErrorData(
instanceId, List("A bunch", "of errors", "occurred"))) ::
Nil
newWithProbes { (wsProbe: WSProbe, parentProbe: TestProbe, msgBridge: ActorRef) =>
msgs must contain(beLike[Instance.StatusReport] {
case msg =>
parentProbe.send(msgBridge, msg)
val wsMsg = wsProbe.expectMessage()
val om = OutboundMessage.parseFrom(wsMsg.asBinaryMessage.getStrictData.toArray)
(om.messageId must {
haveLength[String](32) and
beMatching("[0-9a-f]+")
}) and
(om.payload.instanceStateUpdate.isDefined must beTrue)
}).foreach
}
})
}
}
def randomMsgId = Random.alphanumeric.take(20).mkString
def randomInstanceId = Random.alphanumeric.take(20).mkString
def newWithProbes[A](f: (WSProbe, TestProbe, ActorRef) => A): A = {
import dit4c.common.KeyHelpers._
val parentProbe = TestProbe()
val keyManagerProbe = TestProbe()
val wsProbe = WSProbe()
val closePromise = Promise[Unit]()
val secretKeyRing =
parseArmoredSecretKeyRing({
import scala.sys.process._
this.getClass.getResource("unit_test_scheduler_keys.asc").cat.!!
}).right.get
val publicKeyInfo = KeyManager.PublicKeyInfo(
PGPFingerprint("28D6BE5749FA9CD2972E3F8BAD0C695EF46AFF94"),
secretKeyRing.toPublicKeyRing.armored)
val route = Route.seal {
// Source which never emits an element, and terminates on closePromise success
val closeSource = Source.maybe[Message]
.mapMaterializedValue { p => closePromise.future.foreach { _ => p.success(None) } }
import akka.http.scaladsl.server.Directives._
logRequest("websocket-server") {
path("ws") {
post {
fileUpload("keys") {
case (metadata, byteSource) =>
val f = byteSource.runFold(ByteString.empty)(_ ++ _)
.map(_.decodeString("utf8"))
.map(parseArmoredPublicKeyRing)
onSuccess(f) {
case Right(pkr: PGPPublicKeyRing) =>
val fingerprint = pkr.getPublicKey.fingerprint
val nonce = Random.alphanumeric.take(20).mkString
val wrappedKeys = JwtJson.encode(
Json.obj("keys" -> pkr.armored))
val cookie = HttpCookie.fromPair(HttpCookiePair("keys", wrappedKeys))
setCookie(cookie) {
redirect(s"/ws/$fingerprint", StatusCodes.SeeOther)
}
case Left(msg) =>
complete(StatusCodes.BadRequest, msg)
}
}
}
} ~
path("ws" / publicKeyInfo.keyFingerprint.string) {
cookie("keys") { cookiePair =>
authenticateOAuth2[String]("", {
case Credentials.Missing => None
case Credentials.Provided(token) =>
val claim = Json.parse(JwtJson.decode(cookiePair.value).get.content)
val keys = claim.\\("keys").as[String]
val pkr = parseArmoredPublicKeyRing(keys).right.get
pkr.authenticationKeys.view
.flatMap(k => k.asJavaPublicKey.map((k.fingerprint, _)))
.flatMap { case (fingerprint, key) =>
JwtJson.decode(token, key).toOption.map(_ => fingerprint)
}
.headOption
.map(_.string)
}) { authenticationKeyId =>
handleWebSocketMessages(wsProbe.flow.merge(closeSource, true))
}
}
}
}
}
val binding = Await.result(Http().bindAndHandle(Route.handlerFlow(route), "localhost", 0), 2.seconds)
try {
val msgBridgeRef = parentProbe.childActorOf(
Props(classOf[PortalMessageBridge],
keyManagerProbe.ref,
s"http://localhost:${binding.localAddress.getPort}/ws"),
"portal-message-bridge")
keyManagerProbe.expectMsg(KeyManager.GetPublicKeyInfo)
keyManagerProbe.reply(publicKeyInfo)
val claim = keyManagerProbe.expectMsgType[KeyManager.SignJwtClaim].claim
val tokens =
secretKeyRing.authenticationKeys
.flatMap { pk =>
Try(secretKeyRing.getSecretKey(pk.getFingerprint)).toOption
}
.flatMap(_.asJavaPrivateKey)
.map {
case k: RSAPrivateKey =>
JwtJson.encode(claim, k, JwtAlgorithm.RS512)
}
keyManagerProbe.reply(KeyManager.SignedJwtTokens(tokens))
parentProbe.expectMsg(ClusterManager.GetClusters)
parentProbe.reply(Cluster.Active("default", "Default Cluster", true))
wsProbe.expectMessage // Expect cluster state update
// Run block
f(wsProbe, parentProbe, msgBridgeRef)
} finally {
closePromise.success(()) // Close websocket server-side if still open
binding.unbind
}
}
} | dit4c/dit4c | dit4c-scheduler/src/test/scala/dit4c/scheduler/service/PortalMessageBridgeSpec.scala | Scala | mit | 11,259 |
package at.forsyte.apalache.tla.types
import at.forsyte.apalache.tla.lir.TestingPredefs
import org.junit.runner.RunWith
import org.scalatest.{BeforeAndAfter, FunSuite}
import org.scalatest.junit.JUnitRunner
@RunWith( classOf[JUnitRunner] )
class TestTypeReduction extends FunSuite with TestingPredefs with BeforeAndAfter {
var gen = new SmtVarGenerator
var tr = new TypeReduction( gen )
before {
gen = new SmtVarGenerator
tr = new TypeReduction( gen )
}
test( "Test nesting" ) {
val tau = FunT( IntT, SetT( IntT ) )
val m = Map.empty[TypeVar, SmtTypeVariable]
val rr = tr( tau, m )
assert( rr.t == fun( int, set( int ) ) )
}
test("Test tuples"){
val tau = SetT( FunT( TupT( IntT, StrT ), SetT( IntT ) ) )
val m = Map.empty[TypeVar, SmtTypeVariable]
val rr = tr(tau, m)
val idx = SmtIntVariable( 0 )
assert( rr.t == set( fun( tup( idx ), set( int ) ) ) )
assert( rr.phi.contains( hasIndex( idx, 0, int ) ) )
assert( rr.phi.contains( hasIndex( idx, 1, str ) ) )
}
}
| konnov/dach | tla-types/src/test/scala/at/forsyte/apalache/tla/types/TestTypeReduction.scala | Scala | apache-2.0 | 1,039 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.docgen.refcard
import org.neo4j.cypher.QueryStatisticsTestSupport
import org.neo4j.cypher.docgen.RefcardTest
import org.neo4j.cypher.internal.compiler.v2_3.executionplan.InternalExecutionResult
class CollectionsTest extends RefcardTest with QueryStatisticsTestSupport {
val graphDescription = List("A KNOWS B")
val title = "Collections"
val css = "general c2-2 c3-2 c4-3 c5-2 c6-4"
override val linkId = "syntax-collections"
override def assert(name: String, result: InternalExecutionResult) {
name match {
case "returns-two" =>
assertStats(result, nodesCreated = 0)
assert(result.toList.size === 2)
case "returns-one" =>
assertStats(result, nodesCreated = 0)
assert(result.toList.size === 1)
case "returns-none" =>
assertStats(result, nodesCreated = 0)
assert(result.toList.size === 0)
}
}
override def parameters(name: String): Map[String, Any] =
name match {
case "parameters=name" =>
Map("value" -> "Bob")
case "parameters=names" =>
Map("names" -> List("A", "B"))
case "parameters=coll" =>
Map("coll" -> List(1, 2, 3))
case "parameters=range" =>
Map("firstNum" -> 1, "lastNum" -> 10, "step" -> 2)
case "parameters=subscript" =>
Map("startIdx" -> 1, "endIdx" -> -1, "idx" -> 0)
case "" =>
Map()
}
override val properties: Map[String, Map[String, Any]] = Map(
"A" -> Map("name" -> "Alice", "coll" -> Array(1, 2, 3), "age" -> 30),
"B" -> Map("name" -> "Bob", "coll" -> Array(1, 2, 3), "age" -> 40))
def text = """
###assertion=returns-one
RETURN
["a", "b", "c"] AS coll
###
Literal collections are declared in square brackets.
###assertion=returns-one parameters=coll
RETURN
size({coll}) AS len, {coll}[0] AS value
###
Collections can be passed in as parameters.
###assertion=returns-one parameters=range
RETURN
range({firstNum}, {lastNum}, {step}) AS coll
###
Range creates a collection of numbers (+step+ is optional). Other functions returning collections are:
+labels+, +nodes+, +relationships+, +rels+, +filter+, +extract+.
###assertion=returns-one
//
MATCH (a)-[r:KNOWS*]->()
RETURN r AS rels
###
Relationship identifiers of a variable length path contain a collection of relationships.
###assertion=returns-two
MATCH (matchedNode)
RETURN matchedNode.coll[0] AS value,
size(matchedNode.coll) AS len
###
Properties can be arrays/collections of strings, numbers or booleans.
###assertion=returns-one parameters=subscript
WITH [1, 2, 3] AS coll
RETURN
coll[{idx}] AS value,
coll[{startIdx}..{endIdx}] AS slice
###
Collection elements can be accessed with +idx+ subscripts in square brackets.
Invalid indexes return +NULL+.
Slices can be retrieved with intervals from +startIdx+ to +endIdx+, each of which can be omitted or negative.
Out of range elements are ignored.
###assertion=returns-one parameters=names
//
UNWIND {names} AS name
MATCH (n {name: name})
RETURN avg(n.age)
###
With +UNWIND+, you can transform any collection back into individual rows.
The example matches all names from a list of names.
"""
}
| HuangLS/neo4j | manual/cypher/refcard-tests/src/test/scala/org/neo4j/cypher/docgen/refcard/CollectionsTest.scala | Scala | apache-2.0 | 3,972 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression
import scala.collection.JavaConverters._
import scala.util.Random
import org.dmg.pmml.{OpType, PMML}
import org.dmg.pmml.regression.{RegressionModel => PMMLRegressionModel}
import org.apache.spark.ml.feature.Instance
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{DenseVector, Vector, Vectors}
import org.apache.spark.ml.param.{ParamMap, ParamsSuite}
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.util.LinearDataGenerator
import org.apache.spark.sql.{DataFrame, Row}
class LinearRegressionSuite extends MLTest with DefaultReadWriteTest with PMMLReadWriteTest {
import testImplicits._
private val seed: Int = 42
@transient var datasetWithDenseFeature: DataFrame = _
@transient var datasetWithStrongNoise: DataFrame = _
@transient var datasetWithDenseFeatureWithoutIntercept: DataFrame = _
@transient var datasetWithSparseFeature: DataFrame = _
@transient var datasetWithWeight: DataFrame = _
@transient var datasetWithWeightConstantLabel: DataFrame = _
@transient var datasetWithWeightZeroLabel: DataFrame = _
@transient var datasetWithOutlier: DataFrame = _
override def beforeAll(): Unit = {
super.beforeAll()
datasetWithDenseFeature = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, eps = 0.1), 2).map(_.asML).toDF()
datasetWithStrongNoise = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 100, seed, eps = 5.0), 2).map(_.asML).toDF()
/*
datasetWithDenseFeatureWithoutIntercept is not needed for correctness testing
       but is useful for illustrating training a model without an intercept
*/
datasetWithDenseFeatureWithoutIntercept = sc.parallelize(
LinearDataGenerator.generateLinearInput(
intercept = 0.0, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 10000, seed, eps = 0.1), 2).map(_.asML).toDF()
val r = new Random(seed)
    // When the feature size is larger than 4096, the L-BFGS optimizer (rather than the
    // normal-equation solver) is chosen as the solver of linear regression in the case of "auto" mode.
val featureSize = 4100
datasetWithSparseFeature = sc.parallelize(LinearDataGenerator.generateLinearInput(
intercept = 0.0, weights = Seq.fill(featureSize)(r.nextDouble()).toArray,
xMean = Seq.fill(featureSize)(r.nextDouble()).toArray,
xVariance = Seq.fill(featureSize)(r.nextDouble()).toArray, nPoints = 200,
seed, eps = 0.1, sparsity = 0.7), 2).map(_.asML).toDF()
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b <- c(17, 19, 23, 29)
w <- c(1, 2, 3, 4)
df <- as.data.frame(cbind(A, b))
*/
datasetWithWeight = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
/*
R code:
A <- matrix(c(0, 1, 2, 3, 5, 7, 11, 13), 4, 2)
b.const <- c(17, 17, 17, 17)
w <- c(1, 2, 3, 4)
df.const.label <- as.data.frame(cbind(A, b.const))
*/
datasetWithWeightConstantLabel = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(17.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(17.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(17.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
datasetWithWeightZeroLabel = sc.parallelize(Seq(
Instance(0.0, 1.0, Vectors.dense(0.0, 5.0).toSparse),
Instance(0.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(0.0, 3.0, Vectors.dense(2.0, 11.0)),
Instance(0.0, 4.0, Vectors.dense(3.0, 13.0))
), 2).toDF()
datasetWithOutlier = {
val inlierData = LinearDataGenerator.generateLinearInput(
intercept = 6.3, weights = Array(4.7, 7.2), xMean = Array(0.9, -1.3),
xVariance = Array(0.7, 1.2), nPoints = 900, seed, eps = 0.1)
val outlierData = LinearDataGenerator.generateLinearInput(
intercept = -2.1, weights = Array(0.6, -1.2), xMean = Array(0.9, -1.3),
xVariance = Array(1.5, 0.8), nPoints = 100, seed, eps = 0.1)
sc.parallelize(inlierData ++ outlierData, 2).map(_.asML).toDF()
}
}
/**
* Enable the ignored test to export the dataset into CSV format,
* so we can validate the training accuracy compared with R's glmnet package.
*/
ignore("export test data into CSV format") {
datasetWithDenseFeature.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithDenseFeature")
datasetWithDenseFeatureWithoutIntercept.rdd.map {
case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile(
"target/tmp/LinearRegressionSuite/datasetWithDenseFeatureWithoutIntercept")
datasetWithSparseFeature.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithSparseFeature")
datasetWithOutlier.rdd.map { case Row(label: Double, features: Vector) =>
label + "," + features.toArray.mkString(",")
}.repartition(1).saveAsTextFile("target/tmp/LinearRegressionSuite/datasetWithOutlier")
}
test("params") {
ParamsSuite.checkParams(new LinearRegression)
val model = new LinearRegressionModel("linearReg", Vectors.dense(0.0), 0.0)
ParamsSuite.checkParams(model)
}
test("linear regression: default params") {
val lir = new LinearRegression
assert(lir.getLabelCol === "label")
assert(lir.getFeaturesCol === "features")
assert(lir.getPredictionCol === "prediction")
assert(lir.getRegParam === 0.0)
assert(lir.getElasticNetParam === 0.0)
assert(lir.getFitIntercept)
assert(lir.getStandardization)
assert(lir.getSolver === "auto")
assert(lir.getLoss === "squaredError")
assert(lir.getEpsilon === 1.35)
val model = lir.fit(datasetWithDenseFeature)
MLTestingUtils.checkCopyAndUids(lir, model)
assert(model.hasSummary)
val copiedModel = model.copy(ParamMap.empty)
assert(copiedModel.hasSummary)
model.setSummary(None)
assert(!model.hasSummary)
model.transform(datasetWithDenseFeature)
.select("label", "prediction")
.collect()
assert(model.getFeaturesCol === "features")
assert(model.getPredictionCol === "prediction")
assert(model.intercept !== 0.0)
assert(model.scale === 1.0)
assert(model.hasParent)
val numFeatures = datasetWithDenseFeature.select("features").first().getAs[Vector](0).size
assert(model.numFeatures === numFeatures)
}
test("linear regression: can transform data with LinearRegressionModel") {
withClue("training related params like loss are only validated during fitting phase") {
val original = new LinearRegression().fit(datasetWithDenseFeature)
val deserialized = new LinearRegressionModel(uid = original.uid,
coefficients = original.coefficients,
intercept = original.intercept)
val output = deserialized.transform(datasetWithDenseFeature)
assert(output.collect().size > 0) // simple assertion to ensure no exception thrown
}
}
test("linear regression: illegal params") {
withClue("LinearRegression with huber loss only supports L2 regularization") {
intercept[IllegalArgumentException] {
new LinearRegression().setLoss("huber").setElasticNetParam(0.5)
.fit(datasetWithDenseFeature)
}
}
withClue("LinearRegression with huber loss doesn't support normal solver") {
intercept[IllegalArgumentException] {
new LinearRegression().setLoss("huber").setSolver("normal").fit(datasetWithDenseFeature)
}
}
}
test("linear regression handles singular matrices") {
    // check both constant columns with intercept (zero std) and collinear features
val singularDataConstantColumn = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(1.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(1.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(1.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(1.0, 13.0))
), 2).toDF()
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setFitIntercept(true)
val model = trainer.fit(singularDataConstantColumn)
// to make it clear that WLS did not solve analytically
intercept[UnsupportedOperationException] {
model.summary.coefficientStandardErrors
}
assert(model.summary.objectiveHistory !== Array(0.0))
}
val singularDataCollinearFeatures = sc.parallelize(Seq(
Instance(17.0, 1.0, Vectors.dense(10.0, 5.0).toSparse),
Instance(19.0, 2.0, Vectors.dense(14.0, 7.0)),
Instance(23.0, 3.0, Vectors.dense(22.0, 11.0)),
Instance(29.0, 4.0, Vectors.dense(26.0, 13.0))
), 2).toDF()
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setFitIntercept(true)
val model = trainer.fit(singularDataCollinearFeatures)
intercept[UnsupportedOperationException] {
model.summary.coefficientStandardErrors
}
assert(model.summary.objectiveHistory !== Array(0.0))
}
}
test("linear regression with intercept without regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = new LinearRegression().setSolver(solver)
      // Without regularization, the result should be the same regardless of standardization
val trainer2 = (new LinearRegression).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
Using the following R code to load the data and train the model using glmnet package.
library("glmnet")
data <- read.csv("path", header=FALSE, stringsAsFactors=FALSE)
features <- as.matrix(data.frame(as.numeric(data$V2), as.numeric(data$V3)))
label <- as.numeric(data$V1)
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0, lambda = 0))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.298698
as.numeric.data.V2. 4.700706
as.numeric.data.V3. 7.199082
*/
val interceptR = 6.298698
val coefficientsR = Vectors.dense(4.700706, 7.199082)
assert(model1.intercept ~== interceptR relTol 1E-3)
assert(model1.coefficients ~= coefficientsR relTol 1E-3)
assert(model2.intercept ~== interceptR relTol 1E-3)
assert(model2.coefficients ~= coefficientsR relTol 1E-3)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept without regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setFitIntercept(false).setSolver(solver)
// Without regularization the results should be the same
val trainer2 = (new LinearRegression).setFitIntercept(false).setStandardization(false)
.setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val modelWithoutIntercept1 = trainer1.fit(datasetWithDenseFeatureWithoutIntercept)
val model2 = trainer2.fit(datasetWithDenseFeature)
val modelWithoutIntercept2 = trainer2.fit(datasetWithDenseFeatureWithoutIntercept)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0, lambda = 0,
intercept = FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.973403
as.numeric.data.V3. 5.284370
*/
val coefficientsR = Vectors.dense(6.973403, 5.284370)
assert(model1.intercept ~== 0 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR relTol 1E-2)
assert(model2.intercept ~== 0 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR relTol 1E-2)
/*
Then again with the data with no intercept:
> coefficientsWithoutIntercept
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data3.V2. 4.70011
as.numeric.data3.V3. 7.19943
*/
val coefficientsWithoutInterceptR = Vectors.dense(4.70011, 7.19943)
assert(modelWithoutIntercept1.intercept ~== 0 absTol 1E-3)
assert(modelWithoutIntercept1.coefficients ~= coefficientsWithoutInterceptR relTol 1E-3)
assert(modelWithoutIntercept2.intercept ~== 0 absTol 1E-3)
assert(modelWithoutIntercept2.coefficients ~= coefficientsWithoutInterceptR relTol 1E-3)
}
}
test("linear regression with intercept with L1 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setSolver(solver).setStandardization(false)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian",
alpha = 1.0, lambda = 0.57 ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.242284
as.numeric.d1.V2. 4.019605
as.numeric.d1.V3. 6.679538
*/
val interceptR1 = 6.242284
val coefficientsR1 = Vectors.dense(4.019605, 6.679538)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.416948
as.numeric.data.V2. 3.893869
as.numeric.data.V3. 6.724286
*/
val interceptR2 = 6.416948
val coefficientsR2 = Vectors.dense(3.893869, 6.724286)
assert(model2.intercept ~== interceptR2 relTol 1E-3)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-3)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept with L1 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(1.0).setRegParam(0.57)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, intercept=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.272927
as.numeric.data.V3. 4.782604
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(6.272927, 4.782604)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 1.0,
lambda = 0.57, intercept=FALSE, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.data.V2. 6.207817
as.numeric.data.V3. 4.775780
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(6.207817, 4.775780)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression with intercept with L2 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.260103
as.numeric.d1.V2. 3.725522
as.numeric.d1.V3. 5.711203
*/
val interceptR1 = 5.260103
val coefficientsR1 = Vectors.dense(3.725522, 5.711203)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.790885
as.numeric.d1.V2. 3.432373
as.numeric.d1.V3. 5.919196
*/
val interceptR2 = 5.790885
val coefficientsR2 = Vectors.dense(3.432373, 5.919196)
assert(model2.intercept ~== interceptR2 relTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept with L2 regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.0).setRegParam(2.3)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
intercept = FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.493430
as.numeric.d1.V3. 4.223082
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(5.493430, 4.223082)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.0, lambda = 2.3,
intercept = FALSE, standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.244324
as.numeric.d1.V3. 4.203106
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(5.244324, 4.203106)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression with intercept with ElasticNet regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6 ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 5.689855
as.numeric.d1.V2. 3.661181
as.numeric.d1.V3. 6.000274
*/
val interceptR1 = 5.689855
val coefficientsR1 = Vectors.dense(3.661181, 6.000274)
assert(model1.intercept ~== interceptR1 relTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
      coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3, lambda = 1.6,
        standardize=FALSE))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) 6.113890
as.numeric.d1.V2. 3.407021
as.numeric.d1.V3. 6.152512
*/
val interceptR2 = 6.113890
val coefficientsR2 = Vectors.dense(3.407021, 6.152512)
assert(model2.intercept ~== interceptR2 relTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("linear regression without intercept with ElasticNet regularization") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer1 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setFitIntercept(false).setSolver(solver)
val trainer2 = (new LinearRegression).setElasticNetParam(0.3).setRegParam(1.6)
.setFitIntercept(false).setStandardization(false).setSolver(solver)
val model1 = trainer1.fit(datasetWithDenseFeature)
val model2 = trainer2.fit(datasetWithDenseFeature)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6, intercept=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.643748
as.numeric.d1.V3. 4.331519
*/
val interceptR1 = 0.0
val coefficientsR1 = Vectors.dense(5.643748, 4.331519)
assert(model1.intercept ~== interceptR1 absTol 1E-2)
assert(model1.coefficients ~= coefficientsR1 relTol 1E-2)
/*
coefficients <- coef(glmnet(features, label, family="gaussian", alpha = 0.3,
lambda = 1.6, intercept=FALSE, standardize=FALSE ))
> coefficients
3 x 1 sparse Matrix of class "dgCMatrix"
s0
(Intercept) .
as.numeric.d1.V2. 5.455902
as.numeric.d1.V3. 4.312266
*/
val interceptR2 = 0.0
val coefficientsR2 = Vectors.dense(5.455902, 4.312266)
assert(model2.intercept ~== interceptR2 absTol 1E-2)
assert(model2.coefficients ~= coefficientsR2 relTol 1E-2)
testTransformer[(Double, Vector)](datasetWithDenseFeature, model1,
"features", "prediction") {
case Row(features: DenseVector, prediction1: Double) =>
val prediction2 =
features(0) * model1.coefficients(0) + features(1) * model1.coefficients(1) +
model1.intercept
assert(prediction1 ~== prediction2 relTol 1E-5)
}
}
}
test("prediction on single instance") {
val trainer = new LinearRegression
val model = trainer.fit(datasetWithDenseFeature)
testPredictionModelSinglePrediction(model, datasetWithDenseFeature)
}
test("LinearRegression on blocks") {
for (dataset <- Seq(datasetWithDenseFeature, datasetWithStrongNoise,
datasetWithDenseFeatureWithoutIntercept, datasetWithSparseFeature, datasetWithWeight,
datasetWithWeightConstantLabel, datasetWithWeightZeroLabel, datasetWithOutlier);
fitIntercept <- Seq(true, false);
loss <- Seq("squaredError", "huber")) {
val lir = new LinearRegression()
.setFitIntercept(fitIntercept)
.setLoss(loss)
.setMaxIter(3)
val model = lir.fit(dataset)
Seq(4, 16, 64).foreach { blockSize =>
val model2 = lir.setBlockSize(blockSize).fit(dataset)
assert(model.intercept ~== model2.intercept relTol 1e-9)
assert(model.coefficients ~== model2.coefficients relTol 1e-9)
assert(model.scale ~== model2.scale relTol 1e-9)
}
}
}
test("linear regression model with constant label") {
/*
R code:
for (formula in c(b.const ~ . -1, b.const ~ .)) {
model <- lm(formula, data=df.const.label, weights=w)
print(as.vector(coef(model)))
}
[1] -9.221298 3.394343
[1] 17 0 0
*/
val expected = Seq(
Vectors.dense(0.0, -9.221298, 3.394343),
Vectors.dense(17.0, 0.0, 0.0))
Seq("auto", "l-bfgs", "normal").foreach { solver =>
var idx = 0
for (fitIntercept <- Seq(false, true)) {
val model1 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setPredictionCol("myPrediction")
.setSolver(solver)
.fit(datasetWithWeightConstantLabel)
val actual1 = Vectors.dense(model1.intercept, model1.coefficients(0),
model1.coefficients(1))
assert(actual1 ~== expected(idx) absTol 1e-4)
// Schema of summary.predictions should be a superset of the input dataset
assert((datasetWithWeightConstantLabel.schema.fieldNames.toSet + model1.getPredictionCol)
.subsetOf(model1.summary.predictions.schema.fieldNames.toSet))
val model2 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setPredictionCol("myPrediction")
.setSolver(solver)
.fit(datasetWithWeightZeroLabel)
val actual2 = Vectors.dense(model2.intercept, model2.coefficients(0),
model2.coefficients(1))
assert(actual2 ~== Vectors.dense(0.0, 0.0, 0.0) absTol 1e-4)
// Schema of summary.predictions should be a superset of the input dataset
assert((datasetWithWeightZeroLabel.schema.fieldNames.toSet + model2.getPredictionCol)
.subsetOf(model2.summary.predictions.schema.fieldNames.toSet))
idx += 1
}
}
}
test("regularized linear regression through origin with constant label") {
    // The problem is ill-defined if fitIntercept is false and regParam is non-zero.
// An exception is thrown in this case.
Seq("auto", "l-bfgs", "normal").foreach { solver =>
for (standardization <- Seq(false, true)) {
val model = new LinearRegression().setFitIntercept(false)
.setRegParam(0.1).setStandardization(standardization).setSolver(solver)
intercept[IllegalArgumentException] {
model.fit(datasetWithWeightConstantLabel)
}
}
}
}
test("linear regression with l-bfgs when training is not needed") {
    // When the label is constant, the l-bfgs solver returns results without training.
    // There are two possibilities: if the label is non-zero but constant and fitIntercept is
    // true, then the model returns yMean as the intercept without training. If the label is
    // all zeros, then all coefficients are zero regardless of fitIntercept, so no training is
    // needed either.
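    // For example, datasetWithWeightConstantLabel has a constant label of 17.0 (see the R
    // output in the constant-label test above), so with fitIntercept=true the returned model
    // is intercept = 17.0 with zero coefficients - the second `expected` vector in that test.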
for (fitIntercept <- Seq(false, true)) {
for (standardization <- Seq(false, true)) {
val model1 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setWeightCol("weight")
.setSolver("l-bfgs")
.fit(datasetWithWeightConstantLabel)
if (fitIntercept) {
assert(model1.summary.objectiveHistory(0) ~== 0.0 absTol 1e-4)
}
val model2 = new LinearRegression()
.setFitIntercept(fitIntercept)
.setWeightCol("weight")
.setSolver("l-bfgs")
.fit(datasetWithWeightZeroLabel)
assert(model2.summary.objectiveHistory(0) ~== 0.0 absTol 1e-4)
}
}
}
test("linear regression model training summary") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver).setPredictionCol("myPrediction")
val model = trainer.fit(datasetWithDenseFeature)
val trainerNoPredictionCol = trainer.setPredictionCol("")
val modelNoPredictionCol = trainerNoPredictionCol.fit(datasetWithDenseFeature)
// Training results for the model should be available
assert(model.hasSummary)
assert(modelNoPredictionCol.hasSummary)
// Schema should be a superset of the input dataset
assert((datasetWithDenseFeature.schema.fieldNames.toSet + model.getPredictionCol).subsetOf(
model.summary.predictions.schema.fieldNames.toSet))
// Validate that we re-insert a prediction column for evaluation
val modelNoPredictionColFieldNames
= modelNoPredictionCol.summary.predictions.schema.fieldNames
assert(datasetWithDenseFeature.schema.fieldNames.toSet.subsetOf(
modelNoPredictionColFieldNames.toSet))
assert(modelNoPredictionColFieldNames.exists(s => s.startsWith("prediction_")))
// Residuals in [[LinearRegressionResults]] should equal those manually computed
datasetWithDenseFeature.select("features", "label")
.rdd
.map { case Row(features: DenseVector, label: Double) =>
val prediction =
features(0) * model.coefficients(0) + features(1) * model.coefficients(1) +
model.intercept
label - prediction
}
.zip(model.summary.residuals.rdd.map(_.getDouble(0)))
.collect()
.foreach { case (manualResidual: Double, resultResidual: Double) =>
assert(manualResidual ~== resultResidual relTol 1E-5)
}
/*
# Use the following R code to generate model training results.
# path/part-00000 is the file generated by running LinearDataGenerator.generateLinearInput
# as described before the beforeAll() method.
d1 <- read.csv("path/part-00000", header=FALSE, stringsAsFactors=FALSE)
fit <- glm(V1 ~ V2 + V3, data = d1, family = "gaussian")
      f1 <- data.frame(as.numeric(d1$V2), as.numeric(d1$V3))
      names(f1)[1] = c("V2")
      names(f1)[2] = c("V3")
predictions <- predict(fit, newdata=f1)
l1 <- as.numeric(d1$V1)
residuals <- l1 - predictions
> mean(residuals^2) # MSE
[1] 0.00985449
> mean(abs(residuals)) # MAD
[1] 0.07961668
> cor(predictions, l1)^2 # r^2
[1] 0.9998737
> summary(fit)
Call:
glm(formula = V1 ~ V2 + V3, family = "gaussian", data = d1)
Deviance Residuals:
Min 1Q Median 3Q Max
-0.47082 -0.06797 0.00002 0.06725 0.34635
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 6.3022157 0.0018600 3388 <2e-16 ***
V2 4.6982442 0.0011805 3980 <2e-16 ***
V3 7.1994344 0.0009044 7961 <2e-16 ***
# R code for r2adj
lm_fit <- lm(V1 ~ V2 + V3, data = d1)
summary(lm_fit)$adj.r.squared
[1] 0.9998736
---
....
*/
assert(model.summary.meanSquaredError ~== 0.00985449 relTol 1E-4)
assert(model.summary.meanAbsoluteError ~== 0.07961668 relTol 1E-4)
assert(model.summary.r2 ~== 0.9998737 relTol 1E-4)
assert(model.summary.r2adj ~== 0.9998736 relTol 1E-4)
// Normal solver uses "WeightedLeastSquares". If no regularization is applied or only L2
// regularization is applied, this algorithm uses a direct solver and does not generate an
// objective history because it does not run through iterations.
if (solver == "l-bfgs") {
// Objective function should be monotonically decreasing for linear regression
assert(
model.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
} else {
        // This confirms that the normal solver was used here.
assert(model.summary.objectiveHistory.length == 1)
assert(model.summary.objectiveHistory(0) == 0.0)
val devianceResidualsR = Array(-0.47082, 0.34635)
val seCoefR = Array(0.0011805, 0.0009044, 0.0018600)
val tValsR = Array(3980, 7961, 3388)
val pValsR = Array(0, 0, 0)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-4) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-4) }
model.summary.tValues.map(_.round).zip(tValsR).foreach{ x => assert(x._1 === x._2) }
model.summary.pValues.map(_.round).zip(pValsR).foreach{ x => assert(x._1 === x._2) }
}
}
}
test("linear regression model testset evaluation summary") {
Seq("auto", "l-bfgs", "normal").foreach { solver =>
val trainer = new LinearRegression().setSolver(solver)
val model = trainer.fit(datasetWithDenseFeature)
// Evaluating on training dataset should yield results summary equal to training summary
val testSummary = model.evaluate(datasetWithDenseFeature)
assert(model.summary.meanSquaredError ~== testSummary.meanSquaredError relTol 1E-5)
assert(model.summary.r2 ~== testSummary.r2 relTol 1E-5)
      assert(model.summary.residuals.select("residuals").collect()
        .zip(testSummary.residuals.select("residuals").collect())
        .forall { case (Row(r1: Double), Row(r2: Double)) => r1 ~== r2 relTol 1E-5 })
}
}
test("linear regression with weighted samples") {
val sqlContext = spark.sqlContext
import sqlContext.implicits._
val numClasses = 0
def modelEquals(m1: LinearRegressionModel, m2: LinearRegressionModel): Unit = {
assert(m1.coefficients ~== m2.coefficients relTol 0.01)
assert(m1.intercept ~== m2.intercept relTol 0.01)
}
val testParams = Seq(
// (elasticNetParam, regParam, fitIntercept, standardization)
(0.0, 0.21, true, true),
(0.0, 0.21, true, false),
(0.0, 0.21, false, false),
(1.0, 0.21, true, true)
)
// For squaredError loss
for (solver <- Seq("auto", "l-bfgs", "normal");
(elasticNetParam, regParam, fitIntercept, standardization) <- testParams) {
val estimator = new LinearRegression()
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setRegParam(regParam)
.setElasticNetParam(elasticNetParam)
.setSolver(solver)
.setMaxIter(1)
MLTestingUtils.testArbitrarilyScaledWeights[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, modelEquals)
MLTestingUtils.testOutliersWithSmallWeights[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, numClasses, modelEquals,
outlierRatio = 3)
MLTestingUtils.testOversamplingVsWeighting[LinearRegressionModel, LinearRegression](
datasetWithStrongNoise.as[LabeledPoint], estimator, modelEquals, seed)
}
// For huber loss
for ((_, regParam, fitIntercept, standardization) <- testParams) {
val estimator = new LinearRegression()
.setLoss("huber")
.setFitIntercept(fitIntercept)
.setStandardization(standardization)
.setRegParam(regParam)
.setMaxIter(1)
MLTestingUtils.testArbitrarilyScaledWeights[LinearRegressionModel, LinearRegression](
datasetWithOutlier.as[LabeledPoint], estimator, modelEquals)
MLTestingUtils.testOutliersWithSmallWeights[LinearRegressionModel, LinearRegression](
datasetWithOutlier.as[LabeledPoint], estimator, numClasses, modelEquals,
outlierRatio = 3)
MLTestingUtils.testOversamplingVsWeighting[LinearRegressionModel, LinearRegression](
datasetWithOutlier.as[LabeledPoint], estimator, modelEquals, seed)
}
}
test("linear regression model with l-bfgs with big feature datasets") {
val trainer = new LinearRegression().setSolver("auto")
val model = trainer.fit(datasetWithSparseFeature)
// Training results for the model should be available
assert(model.hasSummary)
    // When L-BFGS is used as the optimizer, the objective history is available.
assert(
model.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
}
test("linear regression summary with weighted samples and intercept by normal solver") {
/*
R code:
model <- glm(formula = "b ~ .", data = df, weights = w)
summary(model)
Call:
glm(formula = "b ~ .", data = df, weights = w)
Deviance Residuals:
1 2 3 4
1.920 -1.358 -1.109 0.960
Coefficients:
Estimate Std. Error t value Pr(>|t|)
(Intercept) 18.080 9.608 1.882 0.311
V1 6.080 5.556 1.094 0.471
V2 -0.600 1.960 -0.306 0.811
(Dispersion parameter for gaussian family taken to be 7.68)
Null deviance: 202.00 on 3 degrees of freedom
Residual deviance: 7.68 on 1 degrees of freedom
AIC: 18.783
Number of Fisher Scoring iterations: 2
*/
val model = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(6.080, -0.600))
val interceptR = 18.080
val devianceResidualsR = Array(-1.358, 1.920)
val seCoefR = Array(5.556, 1.960, 9.608)
val tValsR = Array(1.094, -0.306, 1.882)
val pValsR = Array(0.471, 0.811, 0.311)
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept ~== interceptR absTol 1E-3)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
val modelWithL1 = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.setRegParam(0.5)
.setElasticNetParam(1.0)
.fit(datasetWithWeight)
assert(modelWithL1.summary.objectiveHistory !== Array(0.0))
assert(
modelWithL1.summary
.objectiveHistory
.sliding(2)
.forall(x => x(0) >= x(1)))
}
test("linear regression summary with weighted samples and w/o intercept by normal solver") {
/*
R code:
model <- glm(formula = "b ~ . -1", data = df, weights = w)
summary(model)
Call:
glm(formula = "b ~ . -1", data = df, weights = w)
Deviance Residuals:
1 2 3 4
1.950 2.344 -4.600 2.103
Coefficients:
Estimate Std. Error t value Pr(>|t|)
V1 -3.7271 2.9032 -1.284 0.3279
V2 3.0100 0.6022 4.998 0.0378 *
---
Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
(Dispersion parameter for gaussian family taken to be 17.4376)
Null deviance: 5962.000 on 4 degrees of freedom
Residual deviance: 34.875 on 2 degrees of freedom
AIC: 22.835
Number of Fisher Scoring iterations: 2
*/
val model = new LinearRegression()
.setWeightCol("weight")
.setSolver("normal")
.setFitIntercept(false)
.fit(datasetWithWeight)
val coefficientsR = Vectors.dense(Array(-3.7271, 3.0100))
val interceptR = 0.0
val devianceResidualsR = Array(-4.600, 2.344)
val seCoefR = Array(2.9032, 0.6022)
val tValsR = Array(-1.284, 4.998)
val pValsR = Array(0.3279, 0.0378)
assert(model.coefficients ~== coefficientsR absTol 1E-3)
assert(model.intercept === interceptR)
model.summary.devianceResiduals.zip(devianceResidualsR).foreach { x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.coefficientStandardErrors.zip(seCoefR).foreach{ x =>
assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.tValues.zip(tValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
model.summary.pValues.zip(pValsR).foreach{ x => assert(x._1 ~== x._2 absTol 1E-3) }
}
test("read/write") {
def checkModelData(model: LinearRegressionModel, model2: LinearRegressionModel): Unit = {
assert(model.intercept === model2.intercept)
assert(model.coefficients === model2.coefficients)
}
val lr = new LinearRegression()
testEstimatorAndModelReadWrite(lr, datasetWithWeight, LinearRegressionSuite.allParamSettings,
LinearRegressionSuite.allParamSettings, checkModelData)
}
test("pmml export") {
val lr = new LinearRegression()
val model = lr.fit(datasetWithWeight)
def checkModel(pmml: PMML): Unit = {
val dd = pmml.getDataDictionary
assert(dd.getNumberOfFields === 3)
val fields = dd.getDataFields.asScala
assert(fields(0).getName().toString === "field_0")
assert(fields(0).getOpType() == OpType.CONTINUOUS)
val pmmlRegressionModel = pmml.getModels().get(0).asInstanceOf[PMMLRegressionModel]
val pmmlPredictors = pmmlRegressionModel.getRegressionTables.get(0).getNumericPredictors
val pmmlWeights = pmmlPredictors.asScala.map(_.getCoefficient()).toList
assert(pmmlWeights(0) ~== model.coefficients(0) relTol 1E-3)
assert(pmmlWeights(1) ~== model.coefficients(1) relTol 1E-3)
}
testPMMLWrite(sc, model, checkModel)
}
test("should support all NumericType labels and weights, and not support other types") {
for (solver <- Seq("auto", "l-bfgs", "normal")) {
val lr = new LinearRegression().setMaxIter(1).setSolver(solver)
MLTestingUtils.checkNumericTypes[LinearRegressionModel, LinearRegression](
lr, spark, isClassification = false) { (expected, actual) =>
assert(expected.intercept === actual.intercept)
assert(expected.coefficients === actual.coefficients)
}
}
}
test("linear regression (huber loss) with intercept without regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
Using the following Python code to load the data and train the model using
scikit-learn package.
import pandas as pd
import numpy as np
from sklearn.linear_model import HuberRegressor
df = pd.read_csv("path", header = None)
X = df[df.columns[1:3]]
y = np.array(df[df.columns[0]])
huber = HuberRegressor(fit_intercept=True, alpha=0.0, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 4.68998007, 7.19429011])
>>> huber.intercept_
6.3002404351083037
>>> huber.scale_
0.077810159205220747
*/
val coefficientsPy = Vectors.dense(4.68998007, 7.19429011)
val interceptPy = 6.30024044
val scalePy = 0.07781016
assert(model1.coefficients ~= coefficientsPy relTol 1E-3)
assert(model1.intercept ~== interceptPy relTol 1E-3)
assert(model1.scale ~== scalePy relTol 1E-3)
    // Without regularization, models with or without standardization converge to the same solution.
assert(model2.coefficients ~= coefficientsPy relTol 1E-3)
assert(model2.intercept ~== interceptPy relTol 1E-3)
assert(model2.scale ~== scalePy relTol 1E-3)
}
test("linear regression (huber loss) without intercept without regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
huber = HuberRegressor(fit_intercept=False, alpha=0.0, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 6.71756703, 5.08873222])
>>> huber.intercept_
0.0
>>> huber.scale_
2.5560209922722317
*/
val coefficientsPy = Vectors.dense(6.71756703, 5.08873222)
val interceptPy = 0.0
val scalePy = 2.55602099
assert(model1.coefficients ~= coefficientsPy relTol 1E-3)
assert(model1.intercept === interceptPy)
assert(model1.scale ~== scalePy relTol 1E-3)
    // Without regularization, models with or without standardization converge to the same solution.
assert(model2.coefficients ~= coefficientsPy relTol 1E-3)
assert(model2.intercept === interceptPy)
assert(model2.scale ~== scalePy relTol 1E-3)
}
test("linear regression (huber loss) with intercept with L2 regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setRegParam(0.21).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(true).setRegParam(0.21).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
Since scikit-learn HuberRegressor does not support standardization,
we do it manually out of the estimator.
xStd = np.std(X, axis=0)
scaledX = X / xStd
huber = HuberRegressor(fit_intercept=True, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(scaledX, y)
>>> np.array(huber.coef_ / xStd)
array([ 1.97732633, 3.38816722])
>>> huber.intercept_
3.7527581430531227
>>> huber.scale_
3.787363673371801
*/
val coefficientsPy1 = Vectors.dense(1.97732633, 3.38816722)
val interceptPy1 = 3.75275814
val scalePy1 = 3.78736367
assert(model1.coefficients ~= coefficientsPy1 relTol 1E-2)
assert(model1.intercept ~== interceptPy1 relTol 1E-2)
assert(model1.scale ~== scalePy1 relTol 1E-2)
/*
huber = HuberRegressor(fit_intercept=True, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 1.73346444, 3.63746999])
>>> huber.intercept_
4.3017134790781739
>>> huber.scale_
3.6472742809286793
*/
val coefficientsPy2 = Vectors.dense(1.73346444, 3.63746999)
val interceptPy2 = 4.30171347
val scalePy2 = 3.64727428
assert(model2.coefficients ~= coefficientsPy2 relTol 1E-3)
assert(model2.intercept ~== interceptPy2 relTol 1E-3)
assert(model2.scale ~== scalePy2 relTol 1E-3)
}
test("linear regression (huber loss) without intercept with L2 regularization") {
val trainer1 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setRegParam(0.21).setStandardization(true)
val trainer2 = (new LinearRegression).setLoss("huber")
.setFitIntercept(false).setRegParam(0.21).setStandardization(false)
val model1 = trainer1.fit(datasetWithOutlier)
val model2 = trainer2.fit(datasetWithOutlier)
/*
Since scikit-learn HuberRegressor does not support standardization,
we do it manually out of the estimator.
xStd = np.std(X, axis=0)
scaledX = X / xStd
huber = HuberRegressor(fit_intercept=False, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(scaledX, y)
>>> np.array(huber.coef_ / xStd)
array([ 2.59679008, 2.26973102])
>>> huber.intercept_
0.0
>>> huber.scale_
4.5766311924091791
*/
val coefficientsPy1 = Vectors.dense(2.59679008, 2.26973102)
val interceptPy1 = 0.0
val scalePy1 = 4.57663119
assert(model1.coefficients ~= coefficientsPy1 relTol 1E-2)
assert(model1.intercept === interceptPy1)
assert(model1.scale ~== scalePy1 relTol 1E-2)
/*
huber = HuberRegressor(fit_intercept=False, alpha=210, max_iter=100, epsilon=1.35)
huber.fit(X, y)
>>> huber.coef_
array([ 2.28423908, 2.25196887])
>>> huber.intercept_
0.0
>>> huber.scale_
4.5979643506051753
*/
val coefficientsPy2 = Vectors.dense(2.28423908, 2.25196887)
val interceptPy2 = 0.0
val scalePy2 = 4.59796435
assert(model2.coefficients ~= coefficientsPy2 relTol 1E-3)
assert(model2.intercept === interceptPy2)
assert(model2.scale ~== scalePy2 relTol 1E-3)
}
test("huber loss model match squared error for large epsilon") {
val trainer1 = new LinearRegression().setLoss("huber").setEpsilon(1E5)
val model1 = trainer1.fit(datasetWithOutlier)
val trainer2 = new LinearRegression()
val model2 = trainer2.fit(datasetWithOutlier)
assert(model1.coefficients ~== model2.coefficients relTol 1E-3)
assert(model1.intercept ~== model2.intercept relTol 1E-3)
}
}
object LinearRegressionSuite {
/**
* Mapping from all Params to valid settings which differ from the defaults.
* This is useful for tests which need to exercise all Params, such as save/load.
* This excludes input columns to simplify some tests.
*/
val allParamSettings: Map[String, Any] = Map(
"predictionCol" -> "myPrediction",
"regParam" -> 0.01,
"elasticNetParam" -> 0.1,
"maxIter" -> 2, // intentionally small
"fitIntercept" -> true,
"tol" -> 0.8,
"standardization" -> false,
"solver" -> "l-bfgs"
)
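  // Exercised by the "read/write" test above through testEstimatorAndModelReadWrite, which sets
  // each of these params before round-tripping the estimator and the fitted model to disk.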
}
| zuotingbing/spark | mllib/src/test/scala/org/apache/spark/ml/regression/LinearRegressionSuite.scala | Scala | apache-2.0 | 53,542 |
/** MACHINE-GENERATED FROM AVRO SCHEMA. DO NOT EDIT DIRECTLY */
package example.idl.model
import org.apache.avro.Schema
import org.oedura.scavro.{AvroMetadata, AvroReader, AvroSerializeable}
import example.idl.{Card => JCard, Suit => JSuit}
sealed trait EnumProtocol extends AvroSerializeable with Product with Serializable
final object Suit extends Enumeration with EnumProtocol {
type Suit = Value
val SPADES, DIAMONDS, CLUBS, HEARTS = Value
}
final case class Card(suit: Suit.Value, number: Int) extends AvroSerializeable with EnumProtocol {
type J = JCard
override def toAvro: JCard = {
new JCard(suit match {
case Suit.SPADES => JSuit.SPADES
case Suit.DIAMONDS => JSuit.DIAMONDS
case Suit.CLUBS => JSuit.CLUBS
case Suit.HEARTS => JSuit.HEARTS
}, number)
}
}
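// A sketch of the intended round trip (assuming the generated Java classes are on the classpath):
//   Card.metadata.fromAvro(Card(Suit.HEARTS, 7).toAvro) == Card(Suit.HEARTS, 7)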
final object Card {
implicit def reader = new AvroReader[Card] {
override type J = JCard
}
implicit val metadata: AvroMetadata[Card, JCard] = new AvroMetadata[Card, JCard] {
override val avroClass: Class[JCard] = classOf[JCard]
override val schema: Schema = JCard.getClassSchema()
override val fromAvro: (JCard) => Card = {
(j: JCard) => Card(j.getSuit match {
case JSuit.SPADES => Suit.SPADES
case JSuit.DIAMONDS => Suit.DIAMONDS
case JSuit.CLUBS => Suit.CLUBS
case JSuit.HEARTS => Suit.HEARTS
}, j.getNumber.toInt)
}
}
} | julianpeeters/avrohugger | avrohugger-core/src/test/expected/specific/example/idl/EnumProtocol.scala | Scala | apache-2.0 | 1,406 |
package blog2
import skinny.orm._
import scalikejdbc._
import org.joda.time._
case class PostTag(
id: Long,
tagId: Int,
postId: Int,
createdAt: DateTime
)
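// Join-table mapper for the posts <-> tags association on the "blog2" connection pool; each
// column of posts_tags is bound to the matching constructor field by the ResultName-based extract.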
object PostTag extends SkinnyJoinTable[PostTag] {
override val connectionPoolName = Symbol("blog2")
override val tableName = "posts_tags"
override val defaultAlias = createAlias("pt")
override def extract(rs: WrappedResultSet, rn: ResultName[PostTag]): PostTag = new PostTag(
id = rs.get(rn.id),
tagId = rs.get(rn.tagId),
postId = rs.get(rn.postId),
createdAt = rs.get(rn.createdAt)
)
}
| skinny-framework/skinny-framework | orm/src/test/scala/blog2/PostTag.scala | Scala | mit | 603 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the Adres entity.
*/
class AdresGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""
val httpConf = http
.baseURL(baseURL)
.inferHtmlResources()
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connection("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"X-CSRF-TOKEN" -> "${csrf_token}"
)
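  // Scenario sketch: obtain a CSRF token from an unauthenticated /api/account call, authenticate
  // as admin, then repeatedly list, create, read and delete Adres entities through the REST API.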
val scn = scenario("Test the Adres entity")
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*); [P,p]ath=/").saveAs("csrf_token")))
.pause(10)
.exec(http("Authentication")
.post("/api/authentication")
.headers(headers_http_authenticated)
.formParam("j_username", "admin")
.formParam("j_password", "admin")
.formParam("remember-me", "true")
.formParam("submit", "Login"))
.pause(1)
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*); [P,p]ath=/").saveAs("csrf_token")))
.pause(10)
.repeat(2) {
exec(http("Get all adress")
.get("/api/adress")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10 seconds, 20 seconds)
.exec(http("Create new adres")
.post("/api/adress")
.headers(headers_http_authenticated)
.body(StringBody("""{"id":null, "straat":"SAMPLE_TEXT", "huisnummer":"SAMPLE_TEXT", "busnummer":"SAMPLE_TEXT", "postcode":"SAMPLE_TEXT", "gemeente":"SAMPLE_TEXT", "landISO3":"SAMPLE_TEXT"}""")).asJSON
.check(status.is(201))
.check(headerRegex("Location", "(.*)").saveAs("new_adres_url")))
.pause(10)
.repeat(5) {
exec(http("Get created adres")
.get("${new_adres_url}")
.headers(headers_http_authenticated))
.pause(10)
}
.exec(http("Delete created adres")
.delete("${new_adres_url}")
.headers(headers_http_authenticated))
.pause(10)
}
val users = scenario("Users").exec(scn)
setUp(
users.inject(rampUsers(100) over (1 minutes))
).protocols(httpConf)
}
| timaar/Tiimspot | src/test/gatling/simulations/AdresGatlingTest.scala | Scala | apache-2.0 | 3,397 |
package org.clulab.learning
import java.util.zip.GZIPInputStream
import java.io.{BufferedInputStream, FileInputStream, FileOutputStream, FileWriter, ObjectInputStream, ObjectOutputStream, PrintWriter}
import org.slf4j.LoggerFactory
import scala.collection.mutable.{ArrayBuffer, ListBuffer}
import scala.io.{BufferedSource, Source}
import org.clulab.struct.Counter
import org.clulab.struct.Lexicon
import org.clulab.utils.Files
import org.clulab.utils.Serializer
/**
* Parent class for all datasets used for ranking problems
* User: mihais
* Date: 4/23/13
* Last Modified: Fix compiler issue: import scala.io.Source.
*/
trait RankingDataset[F] {
var featureLexicon = new Lexicon[F]
/**
* We follow the same convention for labels as svm_rank:
* - Labels take positive integers as values
* - Higher values indicate better rank, i.e., closer to the top
* - You can have equal values in a group. This means that those datums have equal rank
*/
val labels = new ArrayBuffer[Array[Int]]
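  // For example, a query with three datums ranked best to worst could be labeled Array(3, 2, 1);
  // Array(2, 2, 1) would mark the first two datums of that query as tied.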
def += (queryDatums:Iterable[Datum[Int, F]])
def numFeatures = featureLexicon.size
def size = labels.size
def datumSize = {
var count = 0
labels.foreach(q => count += q.length)
count
}
def querySize(queryOffset:Int) = labels(queryOffset).length
def featuresCounter(queryOffset:Int, datumOffset:Int):Counter[Int]
def mkQueryDatums(queryOffset:Int):Array[Datum[Int, F]]
def shuffle():(RankingDataset[F], Array[Int])
def bootstrapSample(n:Int):(RankingDataset[F])
}
class BVFRankingDataset[F] extends RankingDataset[F] {
val features = new ArrayBuffer[Array[Array[Int]]]
def += (queryDatums:Iterable[Datum[Int, F]]) {
val fvs = queryToArray(queryDatums)
labels += fvs.map(fv => fv._1)
features += fvs.map(fv => fv._2)
}
private def queryToArray(queryDatums:Iterable[Datum[Int, F]]):Array[(Int, Array[Int])] = {
val b = new ListBuffer[(Int, Array[Int])]
for(d <- queryDatums) {
d match {
case bd:BVFDatum[Int, F] => {
b += new Tuple2[Int, Array[Int]](bd.label, featuresToArray(bd.features))
}
case _ => throw new RuntimeException("ERROR: you cannot add a non BVFDatum to a BVFRankingDataset!")
}
}
b.toArray
}
private def featuresToArray(fs:Iterable[F]):Array[Int] = {
val fb = new ListBuffer[Int]
for(f <- fs) fb += featureLexicon.add(f)
fb.toList.sorted.toArray
}
def featuresCounter(queryOffset:Int, datumOffset:Int):Counter[Int] = {
val c = new Counter[Int]
features(queryOffset)(datumOffset).foreach(f => c.incrementCount(f))
c
}
def mkQueryDatums(queryOffset:Int):Array[Datum[Int, F]] = {
val datums = new ArrayBuffer[Datum[Int, F]]
for(i <- 0 until querySize(queryOffset)) {
val feats = new ListBuffer[F]
features(queryOffset)(i).foreach(f => feats += featureLexicon.get(f))
datums += new BVFDatum[Int, F](labels(queryOffset)(i), feats.toList)
}
datums.toArray
}
def shuffle():(RankingDataset[F], Array[Int]) = {
val datasetShuffled = new BVFRankingDataset[F]
// Step 1: Create new order map
    val indices = new ArrayBuffer[Int]()
    for (i <- 0 until labels.size) indices.append(i)
    val orderMap = util.Random.shuffle(indices).toArray
// Step 2: Copy member variables
// Step 2A: Copy over feature lexicon
datasetShuffled.featureLexicon = this.featureLexicon
// Step 2B: Copy over labels and features, in shuffled order
for (i <- 0 until orderMap.size) {
datasetShuffled.labels.append( this.labels(orderMap(i)) )
datasetShuffled.features.append ( this.features(orderMap(i)) )
}
// Return shuffled dataset, and reordering information
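    // (orderMap(i) is the index in the original dataset of the i-th query of the shuffled copy.)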
(datasetShuffled, orderMap)
}
override def bootstrapSample(n:Int):RankingDataset[F] = {
    // n - number of bootstrap samples to draw from the original dataset
val datasetBootstrapped = new BVFRankingDataset[F]
val datasetSize:Int = this.size
// Step 1: Create new order map
    val orderMap = new Array[Int](n)
for (i <- 0 until n) orderMap(i) = util.Random.nextInt(datasetSize)
// Step 2: Copy member variables
    // Step 2A: Copy over feature lexicon, to maintain feature indices from the parent dataset
datasetBootstrapped.featureLexicon = this.featureLexicon
// Step 2B: Copy over labels, features, and values, in shuffled order
for (i <- 0 until n) {
datasetBootstrapped.labels.append( this.labels(orderMap(i)) )
datasetBootstrapped.features.append ( this.features(orderMap(i)) )
}
// Return bootstrapped dataset
datasetBootstrapped
}
}
class RVFRankingDataset[F] extends BVFRankingDataset[F] with FeatureTraversable[F, Double] {
val values = new ArrayBuffer[Array[Array[Double]]]
override def += (queryDatums:Iterable[Datum[Int, F]]) {
val fvs = queryToArray(queryDatums)
labels += fvs.map(fv => fv._1)
features += fvs.map(fv => fv._2)
values += fvs.map(fv => fv._3)
}
private def queryToArray(queryDatums:Iterable[Datum[Int, F]]):Array[(Int, Array[Int], Array[Double])] = {
val b = new ListBuffer[(Int, Array[Int], Array[Double])]
for(d <- queryDatums) {
d match {
case rd:RVFDatum[Int, F] => {
val fvs = featuresCounterToArray(d.featuresCounter)
b += new Tuple3[Int, Array[Int], Array[Double]](
rd.label,
fvs.map(fv => fv._1),
fvs.map(fv => fv._2))
}
case _ => throw new RuntimeException("ERROR: you cannot add a non RVFDatum to a RVFRankingDataset!")
}
}
b.toArray
}
protected def featuresCounterToArray(fs:Counter[F]):Array[(Int, Double)] = {
val fb = new ListBuffer[(Int, Double)]
for(f <- fs.keySet) {
fb += new Tuple2[Int, Double](featureLexicon.add(f), fs.getCount(f))
}
fb.sortBy(_._1).toArray
}
override def featuresCounter(queryOffset:Int, datumOffset:Int):Counter[Int] = {
val c = new Counter[Int]
val fs = features(queryOffset)(datumOffset)
val vs = values(queryOffset)(datumOffset)
for(i <- 0 until numFeatures) c.incrementCount(i, 0.0f) // Include each feature in the counter keyset
for(i <- 0 until fs.length) {
c.incrementCount(fs(i), vs(i))
}
c
}
override def mkQueryDatums(queryOffset:Int):Array[Datum[Int, F]] = {
val datums = new ArrayBuffer[Datum[Int, F]]
for(i <- 0 until querySize(queryOffset)) {
val feats = new Counter[F]
val fs = features(queryOffset)(i)
val vs = values(queryOffset)(i)
for(j <- 0 until fs.length) {
feats.incrementCount(featureLexicon.get(fs(j)), vs(j))
}
datums += new RVFDatum[Int, F](labels(queryOffset)(i), feats)
}
datums.toArray
}
override def shuffle():(RankingDataset[F], Array[Int]) = {
val datasetShuffled = new RVFRankingDataset[F]
// Step 1: Create new order map
    val indices = new ArrayBuffer[Int]()
    for (i <- 0 until labels.size) indices.append(i)
    val orderMap = util.Random.shuffle(indices).toArray
// Step 2: Copy member variables
// Step 2A: Copy over feature lexicon
datasetShuffled.featureLexicon = this.featureLexicon
// Step 2B: Copy over labels, features, and values, in shuffled order
for (i <- 0 until orderMap.size) {
datasetShuffled.labels.append( this.labels(orderMap(i)) )
datasetShuffled.features.append ( this.features(orderMap(i)) )
datasetShuffled.values.append ( this.values(orderMap(i)) )
}
// Return shuffled dataset, and reordering information
(datasetShuffled, orderMap)
}
override def bootstrapSample(n:Int):RankingDataset[F] = {
    // n - number of bootstrap samples to draw from the original dataset
val datasetBootstrapped = new RVFRankingDataset[F]
val datasetSize:Int = this.size
// Step 1: Create new order map
    val orderMap = new Array[Int](n)
for (i <- 0 until n) orderMap(i) = util.Random.nextInt(datasetSize)
// Step 2: Copy member variables
    // Step 2A: Copy over feature lexicon, to maintain feature indices from the parent dataset
datasetBootstrapped.featureLexicon = this.featureLexicon
// Step 2B: Copy over labels, features, and values, in shuffled order
for (i <- 0 until n) {
datasetBootstrapped.labels.append( this.labels(orderMap(i)) )
datasetBootstrapped.features.append ( this.features(orderMap(i)) )
datasetBootstrapped.values.append ( this.values(orderMap(i)) )
}
// Return bootstrapped dataset
datasetBootstrapped
}
def saveTo[F](fileName:String) {
Serializer.save(this, fileName)
}
def featureUpdater: FeatureUpdater[F, Double] = new FeatureUpdater[F, Double] {
def foreach[U](fn: ((F, Double)) => U): Unit = {
for(i <- 0 until RVFRankingDataset.this.size) // group
for(j <- 0 until features(i).size) // datum
for (k <- 0 until features(i)(j).size) { // feature
val fi = features(i)(j)(k)
val v = values(i)(j)(k)
val f = featureLexicon.get(fi)
fn((f, v))
}
}
def updateAll(fn: ((F, Double)) => Double): Unit = {
for(i <- 0 until RVFRankingDataset.this.size) // group
for(j <- 0 until features(i).size) // datum
for (k <- 0 until features(i)(j).size) { // feature
val fi = features(i)(j)(k)
val v = values(i)(j)(k)
val f = featureLexicon.get(fi)
values(i)(j)(k) = fn((f, v))
}
}
}
}
object RVFRankingDataset {
val logger = LoggerFactory.getLogger(classOf[RVFRankingDataset[String]])
def mkDatasetFromSvmRankResource(path: String): RVFRankingDataset[String] = {
val stream = getClass.getClassLoader.getResourceAsStream(path)
val source = if (path endsWith ".gz") {
Source.fromInputStream(new GZIPInputStream(stream))
} else {
Source.fromInputStream(stream)
}
mkDatasetFromSvmRankFormat(source)
}
/** reads dataset from a file */
def mkDatasetFromSvmRankFormat(filename: String): RVFRankingDataset[String] = {
val source = if (filename endsWith ".gz") {
val stream = Files.newGZIPInputStream(filename)
Source.fromInputStream(stream)
} else {
Source.fromFile(filename)
}
mkDatasetFromSvmRankFormat(source)
}
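  // Expected svm_rank input, one datum per line (text after '#' is stripped as a comment):
  //   3 qid:1 1:0.53 7:1.0   # best candidate for query 1
  //   1 qid:1 2:0.13 5:2.0
  //   2 qid:2 1:0.17 3:0.5
  // Consecutive lines sharing the same qid form one query block.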
def mkDatasetFromSvmRankFormat(source: BufferedSource): RVFRankingDataset[String] = {
val dataset = new RVFRankingDataset[String]
var crtQid = ""
var crtBlock:ArrayBuffer[Datum[Int, String]] = null
var blockCount = 0
var datumCount = 0
for(line <- source.getLines()) {
// strip comments following #
val pound = line.indexOf("#")
var content = line
if(pound > 0) {
content = line.substring(0, pound)
}
//logger.debug("Parsing line: " + content)
      val bits = content.split("\\s+")
val label = bits(0).toInt // we support ONLY integer labels
assert(bits(1).startsWith("qid:") && bits(1).length > 4)
val qid = bits(1).substring(4)
val features = new Counter[String]
for(i <- 2 until bits.length) {
val fbits = bits(i).split(":")
if(fbits.length != 2) {
throw new RuntimeException("ERROR: invalid feature format: " + bits(i))
}
val f = fbits(0)
val v = fbits(1).toDouble
features.incrementCount(f, v)
}
val datum = new RVFDatum[Int, String](label, features)
datumCount += 1
if(qid == crtQid) {
// append to current block
crtBlock += datum
} else {
// store the crt block in the dataset
assert(crtBlock == null || crtBlock.size > 0)
if(crtBlock != null) {
dataset += crtBlock
blockCount += 1
}
// start a new one
crtBlock = new ArrayBuffer[Datum[Int, String]]()
crtQid = qid
// append the crt datum
crtBlock += datum
}
}
if(crtBlock.size > 0) {
dataset += crtBlock
blockCount += 1
}
logger.debug(s"Loaded $blockCount blocks with $datumCount datums.")
dataset
}
def mkDatumsFromSvmRankResource(path: String): Iterable[Iterable[Datum[Int, String]]] = {
val stream = getClass.getClassLoader.getResourceAsStream(path)
val source = if (path endsWith ".gz") {
Source.fromInputStream(new GZIPInputStream(stream))
} else {
Source.fromInputStream(stream)
}
mkDatumsFromSvmRankFormat(source)
}
/** reads dataset from a file */
def mkDatumsFromSvmRankFormat(filename: String): Iterable[Iterable[Datum[Int, String]]] = {
val source = if (filename endsWith ".gz") {
val stream = Files.newGZIPInputStream(filename)
Source.fromInputStream(stream)
} else {
Source.fromFile(filename)
}
mkDatumsFromSvmRankFormat(source)
}
def mkDatumsFromSvmRankFormat(source: BufferedSource): Iterable[Iterable[Datum[Int, String]]] = {
val queries = new ArrayBuffer[Iterable[Datum[Int, String]]]()
var crtQid = ""
var crtBlock:ArrayBuffer[Datum[Int, String]] = null
var blockCount = 0
var datumCount = 0
for(line <- source.getLines()) {
// strip comments following #
val pound = line.indexOf("#")
var content = line
if(pound > 0) {
content = line.substring(0, pound)
}
//logger.debug("Parsing line: " + content)
      val bits = content.split("\\s+")
val label = bits(0).toInt // we support ONLY integer labels
assert(bits(1).startsWith("qid:") && bits(1).length > 4)
val qid = bits(1).substring(4)
val features = new Counter[String]
for(i <- 2 until bits.length) {
val fbits = bits(i).split(":")
if(fbits.length != 2) {
throw new RuntimeException("ERROR: invalid feature format: " + bits(i))
}
val f = fbits(0)
val v = fbits(1).toDouble
features.incrementCount(f, v)
}
val datum = new RVFDatum[Int, String](label, features)
datumCount += 1
if(qid == crtQid) {
// append to current block
crtBlock += datum
} else {
// store the crt block in the dataset
assert(crtBlock == null || crtBlock.size > 0)
if(crtBlock != null) {
queries += crtBlock
blockCount += 1
}
// start a new one
crtBlock = new ArrayBuffer[Datum[Int, String]]()
crtQid = qid
// append the crt datum
crtBlock += datum
}
}
if(crtBlock.size > 0) {
queries += crtBlock
blockCount += 1
}
logger.debug(s"Loaded $blockCount blocks with $datumCount datums.")
queries
}
def saveToSvmRankFormat( queries:Iterable[Iterable[Datum[Int, String]]],
featureLexicon:Lexicon[String],
fn:String) {
var qid = 0
val os = new PrintWriter(new FileWriter(fn))
for(query <- queries) {
qid += 1
for(datum <- query) {
os.print(datum.label)
os.print(s" qid:$qid")
val fs = new ListBuffer[(Int, Double)]
val c = datum.featuresCounter
for(k <- c.keySet) {
val fi = featureLexicon.get(k)
if(fi.isDefined) {
// logger.debug(s"Feature [$k] converted to index ${fi.get + 1}")
fs += new Tuple2(fi.get + 1, c.getCount(k))
}
}
val fss = fs.toList.sortBy(_._1)
for(t <- fss) {
os.print(s" ${t._1}:${t._2}")
}
os.println()
}
}
os.close()
}
def loadFrom[F](fileName:String):RVFRankingDataset[F] = {
Serializer.load(fileName)
}
}
class RVFKRankingDataset[F] extends RVFRankingDataset[F] {
/** Contains the String representation for each datum, on which the kernel is built */
val kernels = new ArrayBuffer[Array[String]]
override def += (queryDatums:Iterable[Datum[Int, F]]) {
val fvsk = queryToArray(queryDatums)
labels += fvsk.map(fv => fv._1)
features += fvsk.map(fv => fv._2)
values += fvsk.map(fv => fv._3)
kernels += fvsk.map(fv => fv._4)
}
private def queryToArray(queryDatums:Iterable[Datum[Int, F]]):Array[(Int, Array[Int], Array[Double], String)] = {
val b = new ListBuffer[(Int, Array[Int], Array[Double], String)]
for(d <- queryDatums) {
d match {
case rd:RVFKDatum[Int, F] => {
val fvs = featuresCounterToArray(d.featuresCounter)
b += new Tuple4[Int, Array[Int], Array[Double], String](
rd.label,
fvs.map(fv => fv._1),
fvs.map(fv => fv._2),
rd.kernel)
}
case _ => throw new RuntimeException("ERROR: you cannot add a non RVFKDatum to a RVFKRankingDataset!")
}
}
b.toArray
}
override def mkQueryDatums(queryOffset:Int):Array[Datum[Int, F]] = {
val datums = new ArrayBuffer[Datum[Int, F]]
for(i <- 0 until querySize(queryOffset)) {
val feats = new Counter[F]
val fs = features(queryOffset)(i)
val vs = values(queryOffset)(i)
for(j <- 0 until fs.length) {
feats.incrementCount(featureLexicon.get(fs(j)), vs(j))
}
val k = kernels(queryOffset)(i)
datums += new RVFKDatum[Int, F](labels(queryOffset)(i), feats, k)
}
datums.toArray
}
override def shuffle():(RankingDataset[F], Array[Int]) = {
val datasetShuffled = new RVFKRankingDataset[F]
// Step 1: Create new order map
    val indices = new ArrayBuffer[Int]()
    for (i <- 0 until labels.size) indices.append(i)
    val orderMap = util.Random.shuffle(indices).toArray
// Step 2: Copy member variables
// Step 2A: Copy over feature lexicon
datasetShuffled.featureLexicon = this.featureLexicon
// Step 2B: Copy over labels, features, and values, in shuffled order
for (i <- 0 until orderMap.size) {
datasetShuffled.labels.append( this.labels(orderMap(i)) )
datasetShuffled.features.append ( this.features(orderMap(i)) )
datasetShuffled.values.append ( this.values(orderMap(i)) )
datasetShuffled.kernels.append ( this.kernels(orderMap(i)) )
}
// Return shuffled dataset, and reordering information
(datasetShuffled, orderMap)
}
override def bootstrapSample(n:Int):RankingDataset[F] = {
    // n - number of bootstrap samples to draw from original dataset
val datasetBootstrapped = new RVFKRankingDataset[F]
val datasetSize:Int = this.size
// Step 1: Create new order map
var orderMap = new Array[Int](n)
for (i <- 0 until n) orderMap(i) = util.Random.nextInt(datasetSize)
// Step 2: Copy member variables
    // Step 2A: Copy over feature lexicon, to maintain feature indices from parent dataset
datasetBootstrapped.featureLexicon = this.featureLexicon
// Step 2B: Copy over labels, features, and values, in shuffled order
for (i <- 0 until n) {
datasetBootstrapped.labels.append( this.labels(orderMap(i)) )
datasetBootstrapped.features.append ( this.features(orderMap(i)) )
datasetBootstrapped.values.append ( this.values(orderMap(i)) )
datasetBootstrapped.kernels.append( this.kernels(orderMap(i)) )
}
// Return bootstrapped dataset
datasetBootstrapped
}
}
| sistanlp/processors | main/src/main/scala/org/clulab/learning/RankingDataset.scala | Scala | apache-2.0 | 19,196 |
import scala.collection.JavaConverters._
import org.infinispan.manager.DefaultCacheManager
import org.infinispan.distexec.mapreduce.{Collator, Collector, Mapper, MapReduceTask, Reducer}
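// Distributed word count over an Infinispan cache: WordCountMapper emits (word, 1) pairs,
// WordCountReducer sums the counts per word, and WordCountCollator sorts the resulting
// (word, count) pairs by descending count (ties broken alphabetically, case-insensitive).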
object MapReduceExample {
def main(args: Array[String]): Unit = {
val manager = new DefaultCacheManager("infinispan.xml")
val cache = manager.getCache[String, String]()
cache.put("1", "Hello world here I am")
cache.put("2", "Infinispan rules the world")
cache.put("3", "JUDCon is in Boston")
cache.put("4", "JBoss World is in Boston as well")
cache.put("12","JBoss Application Server")
cache.put("15", "Hello world")
cache.put("14", "Infinispan community")
cache.put("15", "Hello world")
cache.put("111", "Infinispan open source")
cache.put("112", "Boston is close to Toronto")
cache.put("113", "Toronto is a capital of Ontario")
cache.put("114", "JUDCon is cool")
cache.put("211", "JBoss World is awesome")
cache.put("212", "JBoss rules")
cache.put("213", "JBoss division of RedHat ")
cache.put("214", "RedHat community")
val task = new MapReduceTask[String, String, String, Int](cache, true, true)
val ranking =
task.mappedWith(new WordCountMapper)
.reducedWith(new WordCountReducer)
.execute(new WordCountCollator)
//.execute
/*
println("----- [MapReduceResult] START -----")
map.asScala.foreach { case (k, v) => println(s"Key[$k] = $v") }
println("----- [MapReduceResult] END -----")
*/
ranking.foreach(println)
cache.stop()
manager.stop()
}
}
@SerialVersionUID(1L)
class WordCountMapper extends Mapper[String, String, String, Int] {
def map(key: String, value: String, c: Collector[String, Int]): Unit = {
println(s"[Mapper] input key => [$key], input value => [$value]")
"""\s+""".r.split(value).foreach(c.emit(_, 1))
}
}
@SerialVersionUID(1L)
class WordCountReducer extends Reducer[String, Int] {
def reduce(key: String, iter: java.util.Iterator[Int]): Int = {
println(s"[Reducer] input key => [$key]")
iter.asScala.sum
}
}
class WordCountCollator extends Collator[String, Int, List[(String, Int)]] {
def collate(reducedResults: java.util.Map[String, Int]): List[(String, Int)] =
reducedResults
.asScala
.toSeq
.sortWith {
case (kv1, kv2) =>
kv1._2.compareTo(kv2._2) match {
case 0 => kv1._1.toLowerCase < kv2._1.toLowerCase
case n if n > 0 => true
case n if n < 0 => false
}
}
.toList
}
| kazuhira-r/infinispan-examples | infinispan-mapreduce/src/main/scala/MapReduceExample.scala | Scala | mit | 2,566 |
package scratchpad.wordcountmonoid
import scratchpad.monoid.Monoid
object Main{
def main(args: Array[String]): Unit = {
println(WordCountMonoid.countWords("this is a kitty cat inside the computer"))
}
}
object WordCountMonoid {
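  // Word-count monoid: a Stub is a run of characters containing no complete word boundary,
  // while Part(lStub, words, rStub) records the complete words counted so far plus the
  // partial words dangling on either end; `op` merges adjacent segments and counts one
  // extra word whenever a non-empty fragment sits at the join.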
sealed trait WC
case class Stub(chars: String) extends WC
case class Part(lStub: String, words: Int, rStub: String) extends WC
val wcMonoid: Monoid[WC] = new Monoid[WC] {
override def op(a1: WC, a2: WC): WC = {
(a1, a2) match {
case (Stub(a), Stub(b)) => Stub(a + b)
case (Stub(a), Part(lStubB, lWordsB, rStubB)) => Part(a + lStubB, lWordsB, rStubB)
case (Part(lStubA, lWordsA, rStubA), Stub(b)) => Part(lStubA, lWordsA, rStubA + b)
case (Part(lStubA, lWordsA, rStubA), Part(lStubB, lWordsB, rStubB)) => {
if (rStubA.length > 0 || lStubB.length > 0) {
Part(lStubA, lWordsA + lWordsB + 1, rStubB)
} else {
Part(lStubA, lWordsA + lWordsB, rStubB)
}
}
}
}
override def zero: WC = {
Stub("")
}
}
def countWords(str: String): Int = {
val result: WC = Monoid.foldMapV(str.toCharArray, wcMonoid)((c: Char) => {
if (c == ' ') {
Part("", 0, "")
} else {
Stub(c + "")
}
})
result match {
case Stub(i) if i.length > 0 => 1
case Stub(_) => 0
case Part(l, w, r) => {
w + (if (l.length > 0) 1 else 0) + (if (r.length > 0) 1 else 0)
}
}
}
}
| waxmittmann/fpinscala | answers/src/main/scala/scratchpad/done/WordCountMonoid.scala | Scala | mit | 1,494 |
package models.conservation.events
import models.conservation.events.ConservationExtras.{
DescriptionAttributeValue,
ExtraAttributes
}
import play.api.libs.json.{JsObject, Json, Reads, Writes}
object ConservationExtras {
/**
* ExtraAttributes are used to provide additional fields to a specific
* {{{Conservation}}} type. Each {{{ExtraAttributes}}} implementation can have
* one or more attribute fields. And they can contain a type specific set of
* allowed values. These values are implemented as ADT's in the companion
* object for the specific {{{ExtraAttributes}}} implementation.
*
* The allowed values of the ADT typed attributes need to be provided as part
* of the response, when the client fetches the list of {{{ConservationType}}}s.
*/
sealed trait ExtraAttributes
trait ExtraAttributesOps {
val typeName: String
val allValues: Seq[DescriptionAttributeValue] = Seq.empty
}
trait DescriptionAttributeValue {
val id: Int
val enLabel: String
val noLabel: String
}
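  // Hypothetical sketch (not part of the production model; all names below are invented):
  // a concrete ExtraAttributes implementation with one ADT-valued attribute, following the
  // pattern described in the scaladoc above.
  //
  //   case class IlluminationAttrs(level: Option[LightLevel]) extends ExtraAttributes
  //
  //   case object IlluminationAttrs extends ExtraAttributesOps {
  //     val typeName = "illumination"
  //     override val allValues = Seq(Dark, Dim)
  //   }
  //
  //   sealed trait LightLevel extends DescriptionAttributeValue
  //   case object Dark extends LightLevel { val id = 1; val enLabel = "dark"; val noLabel = "mørk" }
  //   case object Dim extends LightLevel { val id = 2; val enLabel = "dim"; val noLabel = "dempet" }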
}
object ExtraAttributes {
private val discriminator = "type"
}
| MUSIT-Norway/musit | service_backend/app/models/conservation/events/ConservationExtras.scala | Scala | gpl-2.0 | 1,109 |
package com.eharmony.aloha.semantics.compiled.plugin
import com.eharmony.aloha.reflect.RefInfo
import com.eharmony.aloha.semantics.compiled.CompiledSemanticsPlugin
/**
* Provides a way to adapt a [[CompiledSemanticsPlugin]] to a new one with a
* different type parameter.
* @author deaktator
*/
trait MorphableCompiledSemanticsPlugin {
/**
* Attempt to create a new [[CompiledSemanticsPlugin]] with type parameter[B]
* @param ri reflection information that may be necessary to determine whether to create
* the [[CompiledSemanticsPlugin]] that was requested.
* @tparam B input type for the new [[CompiledSemanticsPlugin]] instance that might be created.
* A [[MorphableCompiledSemanticsPlugin]] instance may choose not allow morphing
   *           A [[MorphableCompiledSemanticsPlugin]] instance may choose not to allow morphing
* @return
*/
def morph[B](implicit ri: RefInfo[B]): Option[CompiledSemanticsPlugin[B]]
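  // Illustrative (hypothetical) use: for `plugin: CompiledSemanticsPlugin[A] with
  // MorphableCompiledSemanticsPlugin`, `plugin.morph[B]` returns Some(pluginForB) only when
  // the implementation supports input type B, and None otherwise.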
}
| eHarmony/aloha | aloha-core/src/main/scala/com/eharmony/aloha/semantics/compiled/plugin/MorphableCompiledSemanticsPlugin.scala | Scala | mit | 955 |
package org.infinispan.spark.test
import org.infinispan.client.hotrod.RemoteCache
import org.infinispan.spark.domain.Runner
import org.scalatest.{BeforeAndAfterAll, Suite}
import scala.util.Random
/**
* Trait to be mixed-in by tests requiring a cache populated with [[org.infinispan.spark.domain.Runner]] objects.
*
* @author gustavonalle
*/
trait RunnersCache extends BeforeAndAfterAll {
this: Suite with RemoteTest =>
protected def getNumEntries: Int
override protected def beforeAll(): Unit = {
val random = new Random(System.currentTimeMillis())
val MinFinishTime = 3600
val MaxFinishTime = 4500
val MinAge = 15
val MaxAge = 60
val cacheRunners = getTargetCache.asInstanceOf[RemoteCache[Int,Runner]]
(1 to getNumEntries).par.foreach { i =>
val name = "Runner " + i
      val finished = i % 2 == 0
      val finishTime = MinFinishTime + random.nextInt((MaxFinishTime - MinFinishTime) + 1)
val age = Integer.valueOf(i * (MaxAge - MinAge) / getNumEntries + MinAge)
val runner = new Runner(name, finished, finishTime, age)
cacheRunners.put(i, runner)
}
super.beforeAll()
}
override protected def afterAll(): Unit = {
super.afterAll()
}
}
| rnowling/infinispan-spark | src/test/scala/org/infinispan/spark/test/RunnersCache.scala | Scala | apache-2.0 | 1,274 |
package ru.wordmetrix.dreamcrammer
import scala.annotation.tailrec
import scala.util.Random
import java.io._
import android.content.Intent
import android.media.{MediaPlayer, AudioManager}
import android.app.{IntentService, Service}
import android.widget.Toast
import android.os.{Binder, IBinder, AsyncTask, Handler, HandlerThread, Process, Message}
import android.support.v4.content.LocalBroadcastManager
import ru.wordmetrix.dreamcrammer.db._
import ru.wordmetrix._
class TaskBinder(val service : TaskService) extends Binder {
def getService() : TaskService = {
return service
}
}
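/**
 * Background worker that executes a single Task (fetching track audio, IPA pronunciation or
 * pictures for a word, or recomputing picture MD5s), publishing Progress updates via
 * LocalBroadcastManager and reporting success or failure back to the owning TaskService.
 */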
class TaskEvaluator(val taskservice : TaskService) extends AsyncTask[AnyRef, Progress, (Task, Boolean)] {
override
def onProgressUpdate(progress : Progress* ) : Unit = progress.map(x => LocalBroadcastManager.getInstance(taskservice).sendBroadcast(new Intent(Task.Message.Progress.toString) {
putExtra("progress", x.asInstanceOf[Progress])
}))
override
def doInBackground(tasks : AnyRef*) : (Task,Boolean) = {
val task = tasks(0).asInstanceOf[Task]
log("status: %s", task.status)
log("kind: %s", task.kind)
onProgressUpdate(new Progress(task.id.get, 100, 10))
task match {
case Task(_,_,_,status, Task.Kind.Track, field, arg1,arg2,arg3) => {
val word = new Word(field)(taskservice.db)
log("Start download a track for %s",word.value)
try {
word.value.split("\\\\s+").sortBy(-_.size).toStream.map(x=>{
log("Try ... %s",x)
TrackCommon.get(x)
}).flatten.headOption match {
case Some(x) => word.setTrack(SomeData(x))
case None => word.setTrack(NoData)
}
(task,true)
} catch {
case x : Throwable => { log("Task %s failed", x, task); (task,false) }
}
/* TrackCommon.get(word.value.replace(" ","%20")) match {
case Some(x) => log("Track has been gotten", word.setTrack(x))
case None => (for (value <- word.value.split("\\\\s+")) yield {
log("Try ... %s",value)
TrackCommon.get(value)
}).flatten match {
case xs if (xs.size > 0) => word.setTrack(xs.reduce(_++_))
case xs if (xs.size == 0) => word.setNoTrack(true)
}
}*/
}
case Task(_,_,_,status, Task.Kind.Pronunciation, field, arg1,arg2,arg3) => {
val word = new Word(field)(taskservice.db)
log("Start download a pronunciation for %s",word.value)
try {
PronunciationWiktionary(word.value) orElse PronunciationDictionary(word.value) match {
case Some(x) => word.setIPA(SomeData(x))
case None => (for (value <- word.value.split("\\\\s+")) yield {
PronunciationWiktionary(value) orElse PronunciationDictionary(value)
}).flatten match {
case xs if (xs.size > 0) => word.setIPA(SomeData(xs.mkString(" ")))
case xs if (xs.size == 0) => word.setIPA(NoData)
}
}
(task,true)
} catch {
case x : Throwable => { log("Task %s failed", x, task); (task,false) }
}
}
case Task(Some(id),_,_,status, Task.Kind.Picture, field, arg1,arg2,arg3) => {
val word = new Word(field)(taskservice.db)
log("Start download a picture for %s",word.value)
val md5s = word.pictures.map(_.md5).toSet
val size = md5s.size + 3
try {
for (
(picture,n) <- ImgGoogle.get(word.value,size).flatten.zipWithIndex
if !isCancelled()
) {
//isCancelled()
onProgressUpdate(new Progress(id, size, n))
if (! md5s.contains(MD5(picture))) {
log("add picture")
word.addPicture(picture)
} else {
log("picture already exists (task)")
}
}
(task,true)
} catch {
case x : Throwable => { log("Task %s failed", x, task); (task,false) }
}
}
case Task(_,_,_,status, Task.Kind.MD5, field, arg1, ag2, arg3) => {
//taskservice.db.update("UPDATE picture SET picture_md5=''")
for ( picture <- taskservice.db.query("select picture_id, picture_body from picture where picture_md5 = '' or picture_md5 is null", x => new Picture(x.columnInt(0))(taskservice.db) {
override val body = x.columnBlob(1)
override val bodyOption = Some(body)
})) {
log("Picture %d md5 is: %s", picture.id, picture.md5)
}
(task,true)
}
case Task(_,_,_,status,_,field,arg1,arg2,arg3) => {
log("This feature hasn't implemented yet, task: %s", task)
(task,true)
}
}
}
override
def onPostExecute(result : (Task, Boolean)) = result match {
case (task, true) => taskservice.taskFinished(task); onProgressUpdate(new Progress(task.id.get, 100, 100))
case (task, false) => taskservice.taskAborted(task)
}
}
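/**
 * Bound/started Service that keeps a persistent queue (TaskSchedule) of download tasks and
 * runs a bounded number of TaskEvaluator workers concurrently. Progress and status changes
 * are broadcast locally; running, failed and postponed tasks found at startup are re-queued
 * in onCreate.
 */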
class TaskService extends Service
{
var lock : AnyRef = new Object
var callbacks = List[Int => Unit]()
val maxthread = 5
var nthreads = maxthread - 2
var task2thread = Map[Int, TaskEvaluator]()
val handler : Handler = new Handler();
val binder : IBinder = new TaskBinder(this)
def tasks() = schedule.items()
def delete() = {
schedule.delete()
LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(Task.Message.Progress.toString) {
putExtra("progress", new TaskAdopted(0))
})
}
def tasks(field : Int) = schedule.itemsByField(field)
def task(id : Int) = schedule.get(id)
def top(id : Int) = topOption(id).get
def topOption(id : Int) = schedule.topOption(id)
def suspend(task : Task) = task.id.map(id => {
log("try to suspend")
if (task2thread contains id) {
log("suspension")
task2thread(id).cancel(true)
taskAborted(task)
task2thread = task2thread - id
}
})
def abort(task : Task) = task.id.map(id => {
suspend(task)
schedule.delete(task)
LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(Task.Message.Progress.toString) {
putExtra("progress", new TaskAborted(task.id.get))
})
})
def resume(task : Task) = task.id.map(id => if (!task2thread.contains(task.id.get)) {
log("Threads : %s", nthreads)
setStatus(task, Task.Status.Running)
val te = new TaskEvaluator(this)
te.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR, task)
task2thread = task2thread + ( task.id.get -> te )
nthreads = nthreads - 1
})
override
def onBind(intent : Intent) : IBinder = binder
def seen(callback : Int => Unit) : Unit = {
callbacks = callback :: callbacks
println("callbacks :: " + callbacks)
}
lazy val db : DB = new SQLiteAndroid(this, "taylor.db", true)
lazy val schedule = new TaskSchedule()(db)
override
def onCreate() : Unit = {
Toast.makeText(this, "Service has started", Toast.LENGTH_SHORT).show()
for (
task <- (schedule.items(Task.Status.Running).take(maxthread) ++ schedule.items(Task.Status.Failed).take(maxthread) ++ schedule.items(Task.Status.Postponed)).take(maxthread)
) taskStart(task)
}
override
def onStartCommand(intent : Intent, flags : Int, startId : Int) : Int = {
Toast.makeText(this, "Command has come", Toast.LENGTH_SHORT).show()
Option(intent.getSerializableExtra("task").asInstanceOf[Task]).map( task =>
schedule.add(task) match {
case Some(task) => {
log("Scheduled task is %s", task)
LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(Task.Message.Progress.toString) {
putExtra("progress", new TaskAdopted(task.id.get))
})
taskStart(task)
log("Task started")
}
case None => {
log("Task already scheduled")
}
}
)
intent.getIntExtra("command",0) match {
case 1 => {
log("Reload failed tasks")
schedule.items(Task.Status.Failed).map(taskStart)
log("Tasks reloaded")
}
case x => {
log("Command already scheduled")
println(x)
}
}
Service.START_STICKY
}
override
def onDestroy() : Unit = {
Toast.makeText(this, "Service has done", Toast.LENGTH_SHORT).show();
}
def setStatus(task : Task, status : Task.Status.Value) = {
LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(Task.Message.Progress.toString) {
putExtra("progress", new ChangeStatus(task.id.get,status))
})
schedule.setStatus(task, status)
}
def taskStart(task : Task) = nthreads match {
case x if (x > 0) => {
log("Threads : %s", nthreads)
setStatus(task, Task.Status.Running)
val te = new TaskEvaluator(this)
task2thread = task2thread + (task.id.get -> te)
te.executeOnExecutor(AsyncTask.THREAD_POOL_EXECUTOR, task)
nthreads = nthreads - 1
}
case _ => {
log("Too little threads")
setStatus(task, Task.Status.Postponed)
}
}
def taskFinished(task : Task) = {
setStatus(task, Task.Status.Finished)
log("task %s finished", task)
task2thread = task2thread - task.id.get
LocalBroadcastManager.getInstance(this).sendBroadcast(new Intent(Task.Message.Reload.toString) {
putExtra("kind", task.kind.id)
putExtra("id", task.field)
})
taskNext()
}
def taskNext() = {
nthreads = nthreads + 1
schedule.headOption match {
case Some(task) => taskStart(task)
case None => {}
}
}
def taskAborted(task : Task) = {
setStatus(task, Task.Status.Failed)
log("task %s failed", task)
taskNext()
}
}
| electricmind/dreamcrammer | src/main/ru/wordmetrix/dreamcrammer/taskservice.scala | Scala | apache-2.0 | 10,956 |
package controllers.billing
import scalaz._
import Scalaz._
import scalaz.NonEmptyList._
import scalaz.Validation._
import io.megam.auth.funnel._
import io.megam.auth.funnel.FunnelErrors._
import models.billing._
import play.api.mvc._
import controllers.stack.Results
import net.liftweb.json._
import net.liftweb.json.JsonParser._
/**
* @author rajthilak
*
*/
object Billedhistories extends Controller with controllers.stack.APIAuthElement {
/**
* Create a new billing history for the user.
**/
def post = StackAction(parse.tolerantText) { implicit request =>
(Validation.fromTryCatchThrowable[Result,Throwable] {
reqFunneled match {
case Success(succ) => {
val freq = succ.getOrElse(throw new Error("Invalid header."))
val email = freq.maybeEmail.getOrElse(throw new Error("Email not found (or) invalid."))
val clientAPIBody = freq.clientAPIBody.getOrElse(throw new Error("Body not found (or) invalid."))
models.billing.Billedhistories.create(email, clientAPIBody) match {
case Success(succ) =>
Status(CREATED)(
FunnelResponse(CREATED, "Billing history record created successfully.", "Megam::Billedhistories").toJson(true))
case Failure(err) =>
val rn: FunnelResponse = new HttpReturningError(err)
Status(rn.code)(rn.toJson(true))
}
}
case Failure(err) => {
val rn: FunnelResponse = new HttpReturningError(err)
Status(rn.code)(rn.toJson(true))
}
}
}).fold(succ = { a: Result => a }, fail = { t: Throwable => Status(BAD_REQUEST)(t.getMessage) })
}
/**
   * GET: findByEmail: List all the billing histories per email
* Email grabbed from header.
* Output: JSON (BillingHistoriesResult)
*/
def list = StackAction(parse.tolerantText) { implicit request =>
(Validation.fromTryCatchThrowable[Result,Throwable] {
reqFunneled match {
case Success(succ) => {
val freq = succ.getOrElse(throw new Error("Invalid header."))
val email = freq.maybeEmail.getOrElse(throw new Error("Email not found (or) invalid."))
models.billing.Billedhistories.findByEmail(email) match {
case Success(succ) =>
Ok(Results.resultset(models.Constants.BILLEDHISTORIESCOLLECTIONCLAZ, compactRender(Extraction.decompose(succ))))
case Failure(err) =>
val rn: FunnelResponse = new HttpReturningError(err)
Status(rn.code)(rn.toJson(true))
}
}
case Failure(err) => {
val rn: FunnelResponse = new HttpReturningError(err)
Status(rn.code)(rn.toJson(true))
}
}
}).fold(succ = { a: Result => a }, fail = { t: Throwable => Status(BAD_REQUEST)(t.getMessage) })
}
}
| indykish/vertice_gateway | app/controllers/billing/Billedhistories.scala | Scala | mit | 2,838 |
package mesosphere.marathon.core.launchqueue.impl
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.core.base.{ Clock, ShutdownHooks }
import mesosphere.marathon.core.launchqueue.{ LaunchQueueConfig, LaunchQueueModule }
import mesosphere.marathon.core.leadership.AlwaysElectedLeadershipModule
import mesosphere.marathon.core.matcher.DummyOfferMatcherManager
import mesosphere.marathon.core.task.bus.TaskBusModule
import mesosphere.marathon.integration.setup.WaitTestSupport
import mesosphere.marathon.state.{ AppRepository, PathId }
import mesosphere.marathon.tasks.TaskFactory.CreatedTask
import mesosphere.marathon.tasks.{ TaskFactory, TaskIdUtil, TaskTracker }
import mesosphere.marathon.{ MarathonSpec, MarathonTestHelper }
import mesosphere.util.state.PersistentEntity
import org.apache.mesos.Protos.TaskID
import org.mockito.Mockito
import org.mockito.Mockito.{ when => call, _ }
import org.scalatest.{ BeforeAndAfter, GivenWhenThen }
import scala.concurrent.{ Future, Await }
import scala.concurrent.duration._
class LaunchQueueModuleTest extends MarathonSpec with BeforeAndAfter with GivenWhenThen {
test("empty queue returns no results") {
When("querying queue")
val apps = taskQueue.list
Then("no apps are returned")
assert(apps.isEmpty)
}
test("An added queue item is returned in list") {
Given("a task queue with one item")
call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
taskQueue.add(app)
When("querying its contents")
val list = taskQueue.list
Then("we get back the added app")
assert(list.size == 1)
assert(list.head.app == app)
assert(list.head.tasksLeftToLaunch == 1)
assert(list.head.tasksLaunchedOrRunning == 0)
assert(list.head.taskLaunchesInFlight == 0)
verify(taskTracker).get(app.id)
}
test("An added queue item is reflected via count") {
Given("a task queue with one item")
call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
taskQueue.add(app)
When("querying its count")
val count = taskQueue.count(app.id)
Then("we get a count == 1")
assert(count == 1)
verify(taskTracker).get(app.id)
}
test("A purged queue item has a count of 0") {
Given("a task queue with one item which is purged")
call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
taskQueue.add(app)
taskQueue.purge(app.id)
When("querying its count")
val count = taskQueue.count(app.id)
Then("we get a count == 0")
assert(count == 0)
verify(taskTracker).get(app.id)
}
test("A re-added queue item has a count of 1") {
Given("a task queue with one item which is purged")
call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
taskQueue.add(app)
taskQueue.purge(app.id)
taskQueue.add(app)
When("querying its count")
val count = taskQueue.count(app.id)
Then("we get a count == 1")
assert(count == 1)
verify(taskTracker, times(2)).get(app.id)
}
test("adding a queue item registers new offer matcher") {
Given("An empty task tracker")
call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
When("Adding an app to the taskQueue")
taskQueue.add(app)
Then("A new offer matcher gets registered")
WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
offerMatcherManager.offerMatchers.size == 1
}
verify(taskTracker).get(app.id)
}
test("purging a queue item UNregisters offer matcher") {
Given("An app in the queue")
call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
taskQueue.add(app)
When("The app is purged")
taskQueue.purge(app.id)
Then("No offer matchers remain registered")
assert(offerMatcherManager.offerMatchers.isEmpty)
verify(taskTracker).get(app.id)
}
test("an offer gets unsuccessfully matched against an item in the queue") {
val offer = MarathonTestHelper.makeBasicOffer().build()
Given("An app in the queue")
call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
taskQueue.add(app)
WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
offerMatcherManager.offerMatchers.size == 1
}
When("we ask for matching an offer")
call(taskFactory.newTask(app, offer, Set.empty[MarathonTask])).thenReturn(None)
val matchFuture = offerMatcherManager.offerMatchers.head.matchOffer(clock.now() + 3.seconds, offer)
val matchedTasks = Await.result(matchFuture, 3.seconds)
Then("the offer gets passed to the task factory and respects the answer")
verify(taskFactory).newTask(app, offer, Set.empty[MarathonTask])
assert(matchedTasks.offerId == offer.getId)
assert(matchedTasks.tasks == Seq.empty)
verify(taskTracker).get(app.id)
}
test("an offer gets successfully matched against an item in the queue") {
val offer = MarathonTestHelper.makeBasicOffer().build()
val taskId: TaskID = TaskIdUtil.newTaskId(app.id)
val mesosTask = MarathonTestHelper.makeOneCPUTask("").setTaskId(taskId).build()
val marathonTask = MarathonTask.newBuilder().setId(taskId.getValue).build()
val createdTask = CreatedTask(mesosTask, marathonTask)
Given("An app in the queue")
call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
call(taskFactory.newTask(app, offer, Set.empty[MarathonTask])).thenReturn(Some(createdTask))
call(taskTracker.store(app.id, marathonTask)).thenReturn(Future.successful(mock[PersistentEntity]))
taskQueue.add(app)
WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
offerMatcherManager.offerMatchers.size == 1
}
When("we ask for matching an offer")
val matchFuture = offerMatcherManager.offerMatchers.head.matchOffer(clock.now() + 3.seconds, offer)
val matchedTasks = Await.result(matchFuture, 3.seconds)
Then("the offer gets passed to the task factory and respects the answer")
verify(taskFactory).newTask(app, offer, Set.empty[MarathonTask])
assert(matchedTasks.offerId == offer.getId)
assert(matchedTasks.tasks.map(_.taskInfo) == Seq(mesosTask))
verify(taskTracker).get(app.id)
verify(taskTracker).created(app.id, marathonTask)
verify(taskTracker).store(app.id, marathonTask)
}
test("an offer gets successfully matched against an item in the queue BUT storing fails") {
val offer = MarathonTestHelper.makeBasicOffer().build()
val taskId: TaskID = TaskIdUtil.newTaskId(app.id)
val mesosTask = MarathonTestHelper.makeOneCPUTask("").setTaskId(taskId).build()
val marathonTask = MarathonTask.newBuilder().setId(taskId.getValue).build()
val createdTask = CreatedTask(mesosTask, marathonTask)
Given("An app in the queue")
call(taskTracker.get(app.id)).thenReturn(Set.empty[MarathonTask])
call(taskFactory.newTask(app, offer, Set.empty[MarathonTask])).thenReturn(Some(createdTask))
call(taskTracker.store(app.id, marathonTask)).thenReturn(Future.failed(new RuntimeException("storing failed")))
taskQueue.add(app)
WaitTestSupport.waitUntil("registered as offer matcher", 1.second) {
offerMatcherManager.offerMatchers.size == 1
}
When("we ask for matching an offer ")
val matchFuture = offerMatcherManager.offerMatchers.head.matchOffer(clock.now() + 3.seconds, offer)
val matchedTasks = Await.result(matchFuture, 3.seconds)
Then("the offer gets passed to the task factory but not included in the answer")
verify(taskFactory).newTask(app, offer, Set.empty[MarathonTask])
assert(matchedTasks.offerId == offer.getId)
assert(matchedTasks.tasks.isEmpty)
verify(taskTracker).get(app.id)
verify(taskTracker).created(app.id, marathonTask)
verify(taskTracker).store(app.id, marathonTask)
verify(taskTracker, Mockito.timeout(1000)).terminated(app.id, marathonTask.getId)
}
private[this] val app = MarathonTestHelper.makeBasicApp().copy(id = PathId("/app"))
private[this] var shutdownHooks: ShutdownHooks = _
private[this] var clock: Clock = _
private[this] var taskBusModule: TaskBusModule = _
private[this] var offerMatcherManager: DummyOfferMatcherManager = _
private[this] var appRepository: AppRepository = _
private[this] var taskTracker: TaskTracker = _
private[this] var taskFactory: TaskFactory = _
private[this] var module: LaunchQueueModule = _
private[this] def taskQueue = module.taskQueue
before {
shutdownHooks = ShutdownHooks()
clock = Clock()
taskBusModule = new TaskBusModule()
offerMatcherManager = new DummyOfferMatcherManager()
taskTracker = mock[TaskTracker]("taskTracker")
taskFactory = mock[TaskFactory]("taskFactory")
appRepository = mock[AppRepository]("appRepository")
val config: LaunchQueueConfig = new LaunchQueueConfig {}
config.afterInit()
module = new LaunchQueueModule(
config,
AlwaysElectedLeadershipModule(shutdownHooks),
clock,
subOfferMatcherManager = offerMatcherManager,
taskStatusObservables = taskBusModule.taskStatusObservables,
maybeOfferReviver = None,
appRepository,
taskTracker,
taskFactory
)
}
after {
verifyNoMoreInteractions(appRepository)
verifyNoMoreInteractions(taskTracker)
verifyNoMoreInteractions(taskFactory)
shutdownHooks.shutdown()
}
}
| Yhgenomics/marathon | src/test/scala/mesosphere/marathon/core/launchqueue/impl/LaunchQueueModuleTest.scala | Scala | apache-2.0 | 9,358 |
package sectery.producers
import java.awt.Font
import java.awt.Graphics2D
import java.awt.RenderingHints
import java.awt.image.BufferedImage
import java.io.IOException
import sectery.Db
import sectery.Http
import sectery.Producer
import sectery.Response
import sectery.Rx
import sectery.Tx
import zio.Clock
import zio.ZIO
object Ascii extends Producer:
private val width = 80
private val height = 24
private val base = 16
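  // The text is drawn on an 80x24-pixel canvas with the baseline at y = `base`; any pixel
  // left at the black background colour (-16777216, i.e. 0xFF000000) is rendered as a space.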
private val ascii = """^@ascii\\s+(.+)$""".r
override def help(): Iterable[Info] =
Some(Info("@ascii", "@ascii <text>"))
override def apply(
m: Rx
): ZIO[Db.Db with Http.Http, Throwable, Iterable[Tx]] =
m match
case Rx(c, _, ascii(text)) =>
ZIO.succeed(
ascii(text).map { line => Tx(c, line) }
)
case _ =>
ZIO.succeed(None)
private def ascii(text: String): Seq[String] =
val i = new BufferedImage(width, height, BufferedImage.TYPE_INT_RGB)
val g = i.getGraphics().asInstanceOf[Graphics2D]
g.setFont(new Font(Font.MONOSPACED, Font.PLAIN, 12))
g.drawString(text, 0, base)
(0 until height)
.map { y =>
(0 until width)
.map { x =>
if i.getRGB(x, y) == -16777216 then " " else "#"
}
.mkString
.replaceAll("\\\\s*$", "")
}
.dropWhile(_.trim().length == 0)
.reverse
.dropWhile(_.trim().length == 0)
.reverse
| earldouglas/sectery | modules/producers/src/main/scala/sectery/producers/Ascii.scala | Scala | mit | 1,419 |
package org.example
object Foo extends App {
class Bar {
val x = 1
}
class Foo extends Bar {
def testMethod(i: Int, s: String) = {
i + s.length
}
}
val map = Map[String, Int]()
val foo = new Foo()
println("Hello, " + foo.x)
println(foo.testMethod(7, "seven"))
}
// for SearchServiceSpec
case class CaseClassWithCamelCaseName()
case class Bloo()
case object Blue
| eddsteel/ensime | testing/simple/src/main/scala/org/example/Foo.scala | Scala | gpl-3.0 | 400 |
package uScheduler.security
import java.security.{KeyFactory, PublicKey, Signature}
import java.security.spec.X509EncodedKeySpec
import java.net.{NetworkInterface, InetAddress}
import sun.misc.{BASE64Encoder, BASE64Decoder}
object LicenseServiceProvider {
/**
* getRequestCode.
   * Returns the computer's MAC address as a Base64-encoded string.
*/
def getRequestCode : String = {
val network = NetworkInterface.getByInetAddress(InetAddress.getLocalHost())
val mac = network.getHardwareAddress()
return (new BASE64Encoder()).encode(mac)
}
/**
* getPublicKey.
   * Returns the public key (PubKey) of the asymmetric crypto scheme, which is hard-coded into the LicenseServiceProvider.
   * This PubKey belongs to a private key (PrvKey) with which the MAC address is to be signed.
   * The PubKey can be derived from the PrvKey, but not the other way around; for a given PubKey exactly one PrvKey exists.
*/
private def getPublicKey : PublicKey = {
var encKey = (new BASE64Decoder()).decodeBuffer("MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE/Z+F53UsEql1ssu35TzUw7SIr/FMwbSaF662D08c6ezVPhXHsAkyBXd1PAgorJlbMVdWynPVR+Ymge7HjFI+wg==")
var keySpec = new X509EncodedKeySpec(encKey)
var keyFactory = KeyFactory.getInstance("EC")
return keyFactory.generatePublic(keySpec)
}
/**
* verify.
   * Checks whether the given license was created with the PrvKey that belongs to the PubKey given here. Only the author
   * of the software knows the PrvKey that belongs to this PubKey.
*/
def verify(lic : Array[Byte]) : Boolean = {
var signer = Signature.getInstance("SHA1withECDSA");
signer.initVerify(getPublicKey);
signer.update(getRequestCode.getBytes);
return (signer.verify(lic));
}
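  // Illustrative flow (hypothetical variable names): the user sends getRequestCode to the
  // vendor, who signs it offline with the matching private key; the returned signature bytes
  // are the license checked here, e.g.
  //   val ok = LicenseServiceProvider.verify(licenseBytes) // licenseBytes obtained out of band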
} | LarsHadidi/uScheduler | src/main/scala/uScheduler/security/LicenseServiceProvider.scala | Scala | apache-2.0 | 1,795 |
package io.estatico.effect.laws.imports
trait LawTypes {
type IsEq[A] = cats.laws.IsEq[A]
val IsEq = cats.laws.IsEq
type Eq[A] = cats.Eq[A]
val Eq = cats.Eq
}
| estatico/scala-effect | laws/src/main/scala/io/estatico/effect/laws/imports/LawTypes.scala | Scala | apache-2.0 | 170 |
import sbt._
import Keys._
import org.scalatra.sbt._
import com.typesafe.sbteclipse.plugin.EclipsePlugin.EclipseKeys
import play.twirl.sbt.SbtTwirl
import play.twirl.sbt.Import.TwirlKeys._
import sbtassembly._
import sbtassembly.AssemblyKeys._
object MyBuild extends Build {
val Organization = "gitbucket"
val Name = "gitbucket"
val Version = "3.6.0"
val ScalaVersion = "2.11.6"
val ScalatraVersion = "2.3.1"
lazy val project = Project (
"gitbucket",
file(".")
)
.settings(ScalatraPlugin.scalatraWithJRebel: _*)
.settings(
test in assembly := {},
assemblyMergeStrategy in assembly := {
case PathList("META-INF", xs @ _*) =>
(xs map {_.toLowerCase}) match {
case ("manifest.mf" :: Nil) => MergeStrategy.discard
case _ => MergeStrategy.discard
}
case x => MergeStrategy.first
}
)
.settings(
sourcesInBase := false,
organization := Organization,
name := Name,
version := Version,
scalaVersion := ScalaVersion,
resolvers ++= Seq(
Classpaths.typesafeReleases,
"amateras-repo" at "http://amateras.sourceforge.jp/mvn/"
),
scalacOptions := Seq("-deprecation", "-language:postfixOps"),
libraryDependencies ++= Seq(
"org.eclipse.jgit" % "org.eclipse.jgit.http.server" % "3.4.2.201412180340-r",
"org.eclipse.jgit" % "org.eclipse.jgit.archive" % "3.4.2.201412180340-r",
"org.scalatra" %% "scalatra" % ScalatraVersion,
"org.scalatra" %% "scalatra-specs2" % ScalatraVersion % "test",
"org.scalatra" %% "scalatra-json" % ScalatraVersion,
"org.json4s" %% "json4s-jackson" % "3.2.11",
"jp.sf.amateras" %% "scalatra-forms" % "0.1.0",
"commons-io" % "commons-io" % "2.4",
"org.pegdown" % "pegdown" % "1.5.0",
"org.apache.commons" % "commons-compress" % "1.9",
"org.apache.commons" % "commons-email" % "1.3.3",
"org.apache.httpcomponents" % "httpclient" % "4.3.6",
"org.apache.sshd" % "apache-sshd" % "0.11.0",
"com.typesafe.slick" %% "slick" % "2.1.0",
"com.novell.ldap" % "jldap" % "2009-10-07",
"com.h2database" % "h2" % "1.4.180",
// "ch.qos.logback" % "logback-classic" % "1.0.13" % "runtime",
"org.eclipse.jetty" % "jetty-webapp" % "8.1.16.v20140903" % "container;provided",
"org.eclipse.jetty.orbit" % "javax.servlet" % "3.0.0.v201112011016" % "container;provided;test" artifacts Artifact("javax.servlet", "jar", "jar"),
"junit" % "junit" % "4.12" % "test",
"com.mchange" % "c3p0" % "0.9.5",
"com.typesafe" % "config" % "1.2.1",
"com.typesafe.akka" %% "akka-actor" % "2.3.10",
"com.enragedginger" %% "akka-quartz-scheduler" % "1.3.0-akka-2.3.x" exclude("c3p0","c3p0")
),
play.twirl.sbt.Import.TwirlKeys.templateImports += "gitbucket.core._",
EclipseKeys.withSource := true,
javacOptions in compile ++= Seq("-target", "7", "-source", "7"),
testOptions in Test += Tests.Argument(TestFrameworks.Specs2, "junitxml", "console"),
javaOptions in Test += "-Dgitbucket.home=target/gitbucket_home_for_test",
testOptions in Test += Tests.Setup( () => new java.io.File("target/gitbucket_home_for_test").mkdir() ),
fork in Test := true,
packageOptions += Package.MainClass("JettyLauncher")
).enablePlugins(SbtTwirl)
}
| intermezzo-fr/gitbucket | project/build.scala | Scala | apache-2.0 | 3,312 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.tools.data.utils.akka.stats
import akka.actor.{ActorRef, Cancellable}
import akka.stream._
import akka.stream.stage._
import cmwell.tools.data.ingester.Ingester._
import cmwell.tools.data.utils.akka.stats.IngesterStats.IngestStats
import cmwell.tools.data.utils.logging.DataToolsLogging
import cmwell.tools.data.utils.text.Files.toHumanReadable
import nl.grons.metrics4.scala.InstrumentedBuilder
import org.apache.commons.lang3.time.DurationFormatUtils
import scala.concurrent.duration._
object IngesterStats {
case class IngestStats(label: Option[String] = None,
ingestedBytes: Long,
ingestedInfotons: Long,
failedInfotons: Long)
def apply(isStderr: Boolean = false,
initDelay: FiniteDuration = 1.second,
interval: FiniteDuration = 1.second,
reporter: Option[ActorRef] = None,
label: Option[String] = None) = new IngesterStats(isStderr, initDelay, interval, reporter, label)
}
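/**
 * Flow-shaped graph stage that aggregates ingest statistics (bytes, infotons, failures) from
 * incoming IngestEvents and periodically reports them, printing to stderr when `isStderr` is
 * set and/or sending an IngestStats message to the optional reporter actor.
 */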
class IngesterStats(isStderr: Boolean,
initDelay: FiniteDuration = 1.second,
interval: FiniteDuration = 1.second,
reporter: Option[ActorRef] = None,
label: Option[String] = None)
extends GraphStage[FlowShape[IngestEvent, IngestEvent]]
with DataToolsLogging {
val in = Inlet[IngestEvent]("ingest-stats.in")
val out = Outlet[IngestEvent]("ingest-stats.out")
override val shape = FlowShape.of(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = {
new GraphStageLogic(shape) with InstrumentedBuilder {
val start = System.currentTimeMillis()
val metricRegistry = new com.codahale.metrics.MetricRegistry()
var totalIngestedBytes = metrics.meter("total-ingested-bytes")
var totalIngestedInfotons = metrics.meter("total-ingested-infotons")
var totalFailedInfotons = metrics.meter("total-failed-infotons")
var ingestedBytesInWindow = 0L
var lastTime = start
var nextPrint = 0L
var lastMessageSize = 0
val windowSizeMillis = 1000
var eventPoller: Option[Cancellable] = None
val name = label.fold("")(name => s"[$name]")
private var asyncCB: AsyncCallback[Unit] = _
val formatter = java.text.NumberFormat.getNumberInstance
override def preStart(): Unit = {
asyncCB = getAsyncCallback { _ =>
displayStats()
resetStats()
}
eventPoller = Some(materializer.schedulePeriodically(initDelay, interval, new Runnable() {
def run() = asyncCB.invoke(())
}))
pull(in)
}
def resetStats() = {
ingestedBytesInWindow = 0
}
def displayStats() = {
try {
val now = System.currentTimeMillis()
// print output message
val message =
s"[ingested: ${toHumanReadable(totalIngestedBytes.count)}] " +
s"[ingested infotons: ${formatter.format(totalIngestedInfotons.count)} " +
s"${formatter.format(totalIngestedInfotons.oneMinuteRate)}/sec] " +
s"[failed infotons: ${formatter.format(totalFailedInfotons.count)}] ".padTo(25, ' ') +
s"[rate=${toHumanReadable(totalIngestedBytes.oneMinuteRate)}/sec ".padTo(20, ' ') +
s"average rate=${toHumanReadable(totalIngestedBytes.meanRate)}/sec] ".padTo(30, ' ') +
s"[${DurationFormatUtils.formatDurationWords(now - start, true, true)}] "
if (isStderr) System.err.print("\\r" * lastMessageSize + message)
reporter.foreach {
_ ! IngestStats(
label = label,
ingestedBytes = totalIngestedBytes.count,
ingestedInfotons = totalIngestedInfotons.count,
failedInfotons = totalFailedInfotons.count
)
}
logger.debug(s"$name $message")
lastMessageSize = message.size
} catch {
        case x: Throwable => logger.error(s"error: $x", x)
}
}
def aggregateStats(ingestEvent: IngestEvent) = ingestEvent match {
case IngestSuccessEvent(sizeInBytes, numInfotons) =>
totalIngestedBytes.mark(sizeInBytes)
totalIngestedInfotons.mark(numInfotons)
ingestedBytesInWindow += sizeInBytes
case IngestFailEvent(numInfotons) =>
totalFailedInfotons.mark(numInfotons)
}
setHandler(
in,
new InHandler {
override def onPush(): Unit = {
val element = grab(in)
aggregateStats(element)
pull(in)
}
override def onUpstreamFailure(ex: Throwable): Unit = {
failStage(ex)
eventPoller.foreach(_.cancel())
}
override def onUpstreamFinish(): Unit = {
val now = System.currentTimeMillis()
val message =
s"ingested: ${toHumanReadable(totalIngestedBytes.count)} " +
s"ingested infotons: ${formatter.format(totalIngestedInfotons.count)}".padTo(30, ' ') +
s"failed infotons: ${formatter.format(totalFailedInfotons.count)}".padTo(25, ' ') +
s" average rate=${totalIngestedBytes.meanRate}/sec".padTo(30, ' ') +
s"[${DurationFormatUtils.formatDurationWords(now - start, true, true)}] "
System.err.println("")
System.err.println(message)
completeStage()
eventPoller.foreach(_.cancel())
}
}
)
setHandler(out, new OutHandler {
override def onPull(): Unit = {
if (!hasBeenPulled(in)) pull(in)
}
})
}
}
}
| hochgi/CM-Well | server/cmwell-data-tools/src/main/scala/cmwell/tools/data/utils/akka/stats/IngesterStats.scala | Scala | apache-2.0 | 6,366 |
/* sbt -- Simple Build Tool
* Copyright 2011 Mark Harrah
*/
package sbt
import java.io.File
import java.net.URI
import Def.{ displayFull, ScopedKey, ScopeLocal, Setting }
import Attributed.data
import BuildPaths.outputDirectory
import Scope.GlobalScope
import BuildStreams.Streams
import Path._
final class BuildStructure(val units: Map[URI, LoadedBuildUnit], val root: URI, val settings: Seq[Setting[_]], val data: Settings[Scope], val index: StructureIndex, val streams: State => Streams, val delegates: Scope => Seq[Scope], val scopeLocal: ScopeLocal) {
val rootProject: URI => String = Load getRootProject units
def allProjects: Seq[ResolvedProject] = units.values.flatMap(_.defined.values).toSeq
def allProjects(build: URI): Seq[ResolvedProject] = units.get(build).toList.flatMap(_.defined.values)
def allProjectRefs: Seq[ProjectRef] = units.toSeq flatMap { case (build, unit) => refs(build, unit.defined.values.toSeq) }
def allProjectRefs(build: URI): Seq[ProjectRef] = refs(build, allProjects(build))
val extra: BuildUtil[ResolvedProject] = BuildUtil(root, units, index.keyIndex, data)
private[this] def refs(build: URI, projects: Seq[ResolvedProject]): Seq[ProjectRef] = projects.map { p => ProjectRef(build, p.id) }
}
// information that is not original, but can be reconstructed from the rest of BuildStructure
final class StructureIndex(
val keyMap: Map[String, AttributeKey[_]],
val taskToKey: Map[Task[_], ScopedKey[Task[_]]],
val triggers: Triggers[Task],
val keyIndex: KeyIndex,
val aggregateKeyIndex: KeyIndex)
/**
* A resolved build unit. (`ResolvedBuildUnit` would be a better name to distinguish it from the loaded, but unresolved `BuildUnit`.)
* @param unit The loaded, but unresolved [[BuildUnit]] this was resolved from.
* @param defined The definitive map from project IDs to resolved projects.
* These projects have had [[Reference]]s resolved and [[AutoPlugin]]s evaluated.
* @param rootProjects The list of project IDs for the projects considered roots of this build.
* The first root project is used as the default in several situations where a project is not otherwise selected.
*/
final class LoadedBuildUnit(val unit: BuildUnit, val defined: Map[String, ResolvedProject], val rootProjects: Seq[String], val buildSettings: Seq[Setting[_]]) extends BuildUnitBase {
assert(!rootProjects.isEmpty, "No root projects defined for build unit " + unit)
/**
* The project to use as the default when one is not otherwise selected.
* [[LocalRootProject]] resolves to this from within the same build.
*/
val root = rootProjects.head
/** The base directory of the build unit (not the build definition).*/
def localBase = unit.localBase
/**
* The classpath to use when compiling against this build unit's publicly visible code.
* It includes build definition and plugin classes, but not classes for .sbt file statements and expressions.
*/
def classpath: Seq[File] = unit.definitions.target ++ unit.plugins.classpath
/**
* The class loader to use for this build unit's publicly visible code.
* It includes build definition and plugin classes, but not classes for .sbt file statements and expressions.
*/
def loader = unit.definitions.loader
/** The imports to use for .sbt files, `consoleProject` and other contexts that use code from the build definition. */
def imports = BuildUtil.getImports(unit)
override def toString = unit.toString
}
// TODO: figure out how to deprecate and drop buildNames
/**
* The built and loaded build definition, including loaded but unresolved [[Project]]s, for a build unit (for a single URI).
*
* @param base The base directory of the build definition, typically `<build base>/project/`.
* @param loader The ClassLoader containing all classes and plugins for the build definition project.
* Note that this does not include classes for .sbt files.
* @param builds The list of [[Build]]s for the build unit.
* In addition to auto-discovered [[Build]]s, this includes any auto-generated default [[Build]]s.
* @param projects The list of all [[Project]]s from all [[Build]]s.
* These projects have not yet been resolved, but they have had auto-plugins applied.
* In particular, each [[Project]]'s `autoPlugins` field is populated according to their configured `plugins`
* and their `settings` and `configurations` updated as appropriate.
* @param buildNames No longer used and will be deprecated once feasible.
*/
final class LoadedDefinitions(val base: File, val target: Seq[File], val loader: ClassLoader, val builds: Seq[Build], val projects: Seq[Project], val buildNames: Seq[String])
/** Auto-detected top-level modules (as in `object X`) of type `T` paired with their source names. */
final class DetectedModules[T](val modules: Seq[(String, T)]) {
/**
* The source names of the modules. This is "X" in `object X`, as opposed to the implementation class name "X$".
* The names are returned in a stable order such that `names zip values` pairs a name with the actual module.
*/
def names: Seq[String] = modules.map(_._1)
/**
* The singleton value of the module.
* The values are returned in a stable order such that `names zip values` pairs a name with the actual module.
*/
def values: Seq[T] = modules.map(_._2)
}
/** Auto-detected auto plugin. */
case class DetectedAutoPlugin(val name: String, val value: AutoPlugin, val hasAutoImport: Boolean)
/**
* Auto-discovered modules for the build definition project. These include modules defined in build definition sources
* as well as modules in binary dependencies.
*
* @param builds The [[Build]]s detected in the build definition. This does not include the default [[Build]] that sbt creates if none is defined.
*/
final class DetectedPlugins(val plugins: DetectedModules[Plugin], val autoPlugins: Seq[DetectedAutoPlugin], val builds: DetectedModules[Build]) {
/** Sequence of import expressions for the build definition. This includes the names of the [[Plugin]], [[Build]], and [[AutoImport]] modules, but not the [[AutoPlugin]] modules. */
lazy val imports: Seq[String] = BuildUtil.getImports(plugins.names ++ builds.names) ++
BuildUtil.importAllRoot(autoImports(autoPluginAutoImports)) ++
BuildUtil.importAll(autoImports(topLevelAutoPluginAutoImports)) ++
BuildUtil.importNamesRoot(autoPlugins.map(_.name).filter(nonTopLevelPlugin))
private[this] lazy val (autoPluginAutoImports, topLevelAutoPluginAutoImports) =
autoPlugins.flatMap {
case DetectedAutoPlugin(name, ap, hasAutoImport) =>
if (hasAutoImport) Some(name)
else None
}.partition(nonTopLevelPlugin)
/** A function to select the right [[AutoPlugin]]s from [[autoPlugins]] for a [[Project]]. */
lazy val deducePlugins: (Plugins, Logger) => Seq[AutoPlugin] = Plugins.deducer(autoPlugins.toList map { _.value })
private[this] def autoImports(pluginNames: Seq[String]) = pluginNames.map(_ + ".autoImport")
private[this] def nonTopLevelPlugin(name: String) = name.contains('.')
}
/**
* The built and loaded build definition project.
* @param base The base directory for the build definition project (not the base of the project itself).
* @param pluginData Evaluated tasks/settings from the build definition for later use.
* This is necessary because the build definition project is discarded.
* @param loader The class loader for the build definition project, notably excluding classes used for .sbt files.
* @param detected Auto-detected modules in the build definition.
*/
final class LoadedPlugins(val base: File, val pluginData: PluginData, val loader: ClassLoader, val detected: DetectedPlugins) {
@deprecated("Use the primary constructor.", "0.13.2")
def this(base: File, pluginData: PluginData, loader: ClassLoader, plugins: Seq[Plugin], pluginNames: Seq[String]) =
this(base, pluginData, loader,
new DetectedPlugins(new DetectedModules(pluginNames zip plugins), Nil, new DetectedModules(Nil))
)
@deprecated("Use detected.plugins.values.", "0.13.2")
val plugins: Seq[Plugin] = detected.plugins.values
@deprecated("Use detected.plugins.names.", "0.13.2")
val pluginNames: Seq[String] = detected.plugins.names
def fullClasspath: Seq[Attributed[File]] = pluginData.classpath
def classpath = data(fullClasspath)
}
/**
* The loaded, but unresolved build unit.
* @param uri The uniquely identifying URI for the build.
* @param localBase The working location of the build on the filesystem.
* For local URIs, this is the same as `uri`, but for remote URIs, this is the local copy or workspace allocated for the build.
*/
final class BuildUnit(val uri: URI, val localBase: File, val definitions: LoadedDefinitions, val plugins: LoadedPlugins) {
override def toString = if (uri.getScheme == "file") localBase.toString else (uri + " (locally: " + localBase + ")")
}
final class LoadedBuild(val root: URI, val units: Map[URI, LoadedBuildUnit]) {
BuildUtil.checkCycles(units)
def allProjectRefs: Seq[(ProjectRef, ResolvedProject)] = for ((uri, unit) <- units.toSeq; (id, proj) <- unit.defined) yield ProjectRef(uri, id) -> proj
def extra(data: Settings[Scope])(keyIndex: KeyIndex): BuildUtil[ResolvedProject] = BuildUtil(root, units, keyIndex, data)
private[sbt] def autos = GroupedAutoPlugins(units)
}
final class PartBuild(val root: URI, val units: Map[URI, PartBuildUnit])
sealed trait BuildUnitBase { def rootProjects: Seq[String]; def buildSettings: Seq[Setting[_]] }
final class PartBuildUnit(val unit: BuildUnit, val defined: Map[String, Project], val rootProjects: Seq[String], val buildSettings: Seq[Setting[_]]) extends BuildUnitBase {
def resolve(f: Project => ResolvedProject): LoadedBuildUnit = new LoadedBuildUnit(unit, defined mapValues f toMap, rootProjects, buildSettings)
def resolveRefs(f: ProjectReference => ProjectRef): LoadedBuildUnit = resolve(_ resolve f)
}
object BuildStreams {
type Streams = std.Streams[ScopedKey[_]]
final val GlobalPath = "$global"
final val BuildUnitPath = "$build"
final val StreamsDirectory = "streams"
def mkStreams(units: Map[URI, LoadedBuildUnit], root: URI, data: Settings[Scope]): State => Streams = s =>
s get Keys.stateStreams getOrElse std.Streams(path(units, root, data), displayFull, LogManager.construct(data, s))
def path(units: Map[URI, LoadedBuildUnit], root: URI, data: Settings[Scope])(scoped: ScopedKey[_]): File =
resolvePath(projectPath(units, root, scoped, data), nonProjectPath(scoped))
def resolvePath(base: File, components: Seq[String]): File =
(base /: components)((b, p) => new File(b, p))
def pathComponent[T](axis: ScopeAxis[T], scoped: ScopedKey[_], label: String)(show: T => String): String =
axis match {
case Global => GlobalPath
case This => sys.error("Unresolved This reference for " + label + " in " + displayFull(scoped))
case Select(t) => show(t)
}
def nonProjectPath[T](scoped: ScopedKey[T]): Seq[String] =
{
val scope = scoped.scope
pathComponent(scope.config, scoped, "config")(_.name) ::
pathComponent(scope.task, scoped, "task")(_.label) ::
pathComponent(scope.extra, scoped, "extra")(showAMap) ::
scoped.key.label ::
Nil
}
def showAMap(a: AttributeMap): String =
a.entries.toSeq.sortBy(_.key.label).map { case AttributeEntry(key, value) => key.label + "=" + value.toString } mkString (" ")
def projectPath(units: Map[URI, LoadedBuildUnit], root: URI, scoped: ScopedKey[_], data: Settings[Scope]): File =
scoped.scope.project match {
case Global => refTarget(GlobalScope, units(root).localBase, data) / GlobalPath
case Select(br @ BuildRef(uri)) => refTarget(br, units(uri).localBase, data) / BuildUnitPath
case Select(pr @ ProjectRef(uri, id)) => refTarget(pr, units(uri).defined(id).base, data)
case Select(pr) => sys.error("Unresolved project reference (" + pr + ") in " + displayFull(scoped))
case This => sys.error("Unresolved project reference (This) in " + displayFull(scoped))
}
def refTarget(ref: ResolvedReference, fallbackBase: File, data: Settings[Scope]): File =
refTarget(GlobalScope.copy(project = Select(ref)), fallbackBase, data)
def refTarget(scope: Scope, fallbackBase: File, data: Settings[Scope]): File =
(Keys.target in scope get data getOrElse outputDirectory(fallbackBase).asFile) / StreamsDirectory
} | niktrop/sbt | main/src/main/scala/sbt/BuildStructure.scala | Scala | bsd-3-clause | 12,633 |
package org.apress.prospark
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.stat.Statistics
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.StreamingContext
object CorrelationApp {
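  // Streams space-separated sensor records from a socket, keeps only the "walking or running"
  // samples (activity id 4.0 or 5.0), builds labelled feature vectors from the selected
  // columns, and prints the Spearman and Pearson correlation matrices for every micro-batch.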
def main(args: Array[String]) {
if (args.length != 4) {
System.err.println(
"Usage: CorrelationApp <appname> <batchInterval> <hostname> <port>")
System.exit(1)
}
val Seq(appName, batchInterval, hostname, port) = args.toSeq
val conf = new SparkConf()
.setAppName(appName)
.setJars(SparkContext.jarOfClass(this.getClass).toSeq)
val ssc = new StreamingContext(conf, Seconds(batchInterval.toInt))
val substream = ssc.socketTextStream(hostname, port.toInt)
.filter(!_.contains("NaN"))
.map(_.split(" "))
.filter(f => f(1) != "0")
.map(f => f.map(f => f.toDouble))
val datastream = substream.map(f => Array(f(1).toDouble, f(2).toDouble, f(4).toDouble, f(5).toDouble, f(6).toDouble))
val walkingOrRunning = datastream.filter(f => f(0) == 4.0 || f(0) == 5.0).map(f => LabeledPoint(f(0), Vectors.dense(f.slice(1, 5))))
walkingOrRunning.map(f => f.features).foreachRDD(rdd => {
val corrSpearman = Statistics.corr(rdd, "spearman")
val corrPearson = Statistics.corr(rdd, "pearson")
println("Correlation Spearman: \\n" + corrSpearman)
println("Correlation Pearson: \\n" + corrPearson)
})
ssc.start()
ssc.awaitTermination()
}
} | ZubairNabi/prosparkstreaming | Chap9/src/main/scala/org/apress/prospark/L9-4Correlation.scala | Scala | apache-2.0 | 1,614 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.TensorModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
* Applies the Sigmoid function element-wise to the input Tensor,
* thus outputting a Tensor of the same dimension.
* Sigmoid is defined as: f(x) = 1 / (1 + exp(-x))
*/
@SerialVersionUID(6855417348268610044L)
class Sigmoid[@specialized(Float, Double) T: ClassTag](
implicit ev: TensorNumeric[T]) extends TensorModule[T] {
override def updateOutput(input: Tensor[T]): Tensor[T] = {
output.resizeAs(input)
output.map(input, (_, i) => ev.divide(ev.fromType[Int](1), ev.plus(ev.fromType[Int](1),
ev.exp(ev.negative(i)))))
output
}
override def updateGradInput(input: Tensor[T], gradOutput: Tensor[T]): Tensor[T] = {
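    // d(sigmoid)/dx = sigmoid(x) * (1 - sigmoid(x)), so gradInput = gradOutput * output * (1 - output), element-wise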
gradInput.resizeAs(input)
gradInput.copy(gradOutput)
gradInput.map(output, (g, z) => ev.times(ev.times(g, ev.minus(ev.fromType[Int](1), z)), z))
gradInput
}
}
object Sigmoid {
def apply[T: ClassTag]()
(implicit ev: TensorNumeric[T]) : Sigmoid[T] = {
new Sigmoid[T]()
}
}
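/**
 * Editor's sketch (not part of the original BigDL file): a forward/backward pass through Sigmoid
 * on a small tensor. The `Tensor[Float](2, 3)`, `rand()` and `fill` calls are assumed to be
 * available from the Tensor API imported above.
 */
object SigmoidUsageSketch {
  def run(): Unit = {
    val module = Sigmoid[Float]()
    val input = Tensor[Float](2, 3).rand()
    // forward applies f(x) = 1 / (1 + exp(-x)) element-wise
    val output = module.forward(input)
    // backward propagates gradOutput * f(x) * (1 - f(x))
    val gradInput = module.backward(input, Tensor[Float](2, 3).fill(1.0f))
    println(output)
    println(gradInput)
  }
}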
| 122689305/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/Sigmoid.scala | Scala | apache-2.0 | 1,804 |
//
// Copyright 2016 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package commbank.coppersmith
import scala.reflect.runtime.universe.TypeTag
import scalaz.syntax.std.boolean.ToBooleanOpsFromBoolean
import org.scalacheck.Prop.forAll
import org.specs2._
import Feature._, Value._, Type._
import Arbitraries._
import test.thrift.Customer
object GeneralFeatureSpec extends Specification with ScalaCheck { def is = s2"""
General Features - Test individual feature components
===========
Creating general feature metadata
must pass namespace through $metadataNamespace
must pass name through $metadataName
must pass description through $metadataDescription
must pass feature type through $metadataFeatureType
must pass range through $metadataRange
Generating general feature values
must use specified id as entity $valueEntity
must use specified name as name $valueName
must use value as defined $valueValue
"""
def general(
rfp: RangeFieldPair,
filter: Boolean,
namespace: Namespace = "",
name: Name = "",
desc: Description = "",
fType: Type = Nominal,
entity: Customer => EntityId = _.id
) = {
val feature: (Namespace, Name, Description, Type, Customer => EntityId) => Feature[Customer, Value] =
rfp match {
case StrRangeFieldPair(r, _) => general[Str](
fValue = c => filter.option(c.name),
range = r
)
case IntegralRangeFieldPair(r, _) => general[Integral](
fValue = c => filter.option(c.age),
range = r
)
case FloatingPointRangeFieldPair(r, _) => general[FloatingPoint](
fValue = c => filter.option(c.height),
range = r
)
}
feature.apply(namespace, name, desc, fType, entity)
}
def general[V <: Value : TypeTag](
fValue: Customer => Option[V],
range: Option[Feature.Value.Range[V]]
)(
namespace: Namespace,
name: Name,
desc: Description,
fType: Type,
entity: Customer => EntityId
) = Patterns.general[Customer, V](namespace, name, desc, fType, entity, fValue, range)
def metadataNamespace = forAll { (rfp: RangeFieldPair, filter: Boolean, namespace: Namespace) => {
val feature = general(rfp, filter, namespace = namespace)
feature.metadata.namespace must_== namespace
}}
def metadataName = forAll { (rfp: RangeFieldPair, filter: Boolean, name: Name) => {
val feature = general(rfp, filter, name = name)
feature.metadata.name must_== name
}}
def metadataDescription = forAll { (rfp: RangeFieldPair, filter: Boolean, desc: Description) => {
val feature = general(rfp, filter, desc = desc)
feature.metadata.description must_== desc
}}
def metadataFeatureType = forAll { (rfp: RangeFieldPair, filter: Boolean, fType: Type) => {
val feature = general(rfp, filter, fType = fType)
feature.metadata.featureType must_== fType
}}
def metadataRange = forAll { (rfp: RangeFieldPair, filter: Boolean) => {
val feature = general(rfp, filter)
feature.metadata.valueRange must_== rfp.range
}}
def valueEntity = forAll { (rfp: RangeFieldPair, c: Customer) => {
val feature = general(rfp, true, entity = _.id)
feature.generate(c) must beSome.like { case v => v.entity must_== c.id }
}}
def valueName = forAll {
(rfp: RangeFieldPair, ns: Namespace, name: String, fType: Type, c: Customer) => {
val feature = general(rfp, true, name = name)
feature.generate(c) must beSome.like { case v => v.name must_== name }
}
}
def valueValue = forAll { (rfp: RangeFieldPair, filter: Boolean, c: Customer) => {
val feature = general(rfp, filter)
val expectedValue = rfp match {
case _: StrRangeFieldPair => Str(Option(c.name))
case _: IntegralRangeFieldPair => Integral(Option(c.age))
case _: FloatingPointRangeFieldPair => FloatingPoint(Option(c.height))
}
val featureValue = feature.generate(c)
if (!filter) featureValue must beNone
else featureValue must beSome.like { case v => v.value must_== expectedValue }
}}
}
| CommBank/coppersmith | core/src/test/scala/commbank/coppersmith/GeneralFeatureSpec.scala | Scala | apache-2.0 | 4,799 |
/*
* Copyright (C) 2009-2011 Mathias Doenitz
* Inspired by a similar implementation by Nathan Hamblen
* (https://github.com/n8han/Databinder-Dispatch)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sbencoding
import java.util
import collection.immutable
/**
* The general type of a Bencoding AST node.
*/
sealed abstract class BcValue {
override def toString = toString((x) => new String(BencodingPrinter(x)))
def toString(printer: (BcValue => String)) = printer(this)
def toByteArray(printer: (BcValue => Array[Byte]) = BencodingPrinter) = printer(this)
def convertTo[T: BencodingReader]: T = bencodingReader[T].read(this)
/**
   * Returns `this` if this BcValue is a BcDict, otherwise throws a DeserializationException with the given error msg.
*/
def asBcDict(errorMsg: String = "Bencoding dict expected"): BcDict = deserializationError(errorMsg)
/**
   * Returns `this` if this BcValue is a BcDict, otherwise throws a DeserializationException.
*/
def asBcDict: BcDict = asBcDict()
}
/**
* A Bencoding object.
*/
case class BcDict private (fields: Map[String, BcValue]) extends BcValue {
override def asBcDict(errorMsg: String) = this
def getFields(fieldNames: String*): immutable.Seq[BcValue] = fieldNames.map(n => fields.getOrElse(n, BcNil))(collection.breakOut)
}
object BcDict {
def apply(members: BcField*) = new BcDict(Map(members.filterNot(_._2 eq BcNil): _*))
}
/**
* A Bencoding array.
*/
case class BcList(elements: Vector[BcValue]) extends BcValue with Iterable[BcValue] {
override def iterator: Iterator[BcValue] = elements.iterator
}
object BcList {
def apply(elements: BcValue*) = new BcList(elements.toVector)
}
/**
* A Bencoding string.
*/
case class BcString(value: Array[Byte]) extends BcValue {
override def equals(obj: scala.Any): Boolean = obj match {
case BcString(d) => util.Arrays.equals(this.value, d)
case _ => false
}
def sliding(size: Int): BcList = {
require(value.length % size == 0)
BcList(value.sliding(size, size).map(BcString(_)).toList: _*)
}
}
object BcString {
def apply(value: Symbol): BcString = BcString(value.name.getBytes("ascii"))
def apply(value: String, charset: String): BcString = BcString(value.getBytes(charset))
}
/**
* A Bencoding number.
*/
case class BcInt(value: Long) extends BcValue
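/**
 * Editor's sketch (not part of the original file): building a small Bencoding AST by hand.
 * It assumes the usual `BcField = (String, BcValue)` alias expected by `BcDict.apply` above.
 * Pairs whose value is BcNil are dropped by BcDict.apply, and getFields substitutes BcNil
 * for missing keys instead of throwing.
 */
object BcValueUsageSketch {
  val sample: BcDict = BcDict(
    "title" -> BcString("Programming in Scala", "UTF-8"),
    "year" -> BcInt(2008),
    "tags" -> BcList(BcString('scala), BcString('book))
  )
  val titleAndMissing: Seq[BcValue] = sample.getFields("title", "missing")
}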
case object BcNil extends BcValue | zhaoyao/sbencoding | src/main/scala/sbencoding/BcValue.scala | Scala | mit | 2,896 |
package org.bitcoins.core.serializers.transaction
import org.bitcoins.core.protocol.script.ScriptWitness
import org.bitcoins.core.protocol.transaction.TransactionWitness
import org.bitcoins.core.serializers.script.RawScriptWitnessParser
import scodec.bits.ByteVector
import scala.annotation.tailrec
/**
* Created by chris on 11/21/16.
* Serialization of
* [[org.bitcoins.core.protocol.transaction.TransactionWitness TransactionWitness]] as defined inside of
* [[https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki#specification BIP141]]
* [[https://github.com/bitcoin/bitcoin/blob/b4e4ba475a5679e09f279aaf2a83dcf93c632bdb/src/primitives/transaction.h#L232-L268]]
*/
sealed abstract class RawTransactionWitnessParser {
/**
* We can only tell how many [[org.bitcoins.core.protocol.script.ScriptWitness ScriptWitness]]
   * we have if we know the number of inputs the transaction has
*/
def read(bytes: ByteVector, numInputs: Int): TransactionWitness = {
@tailrec
def loop(
remainingBytes: ByteVector,
remainingInputs: Int,
accum: Vector[ScriptWitness]): Vector[ScriptWitness] = {
if (remainingInputs != 0) {
val w = RawScriptWitnessParser.read(remainingBytes)
val (_, newRemainingBytes) = remainingBytes.splitAt(w.bytes.size)
loop(newRemainingBytes, remainingInputs - 1, w +: accum)
} else accum.reverse
}
val witnesses = loop(bytes, numInputs, Vector.empty)
require(witnesses.size == numInputs)
TransactionWitness(witnesses)
}
def write(witness: TransactionWitness): ByteVector = {
witness.witnesses.foldLeft(ByteVector.empty)(_ ++ _.bytes)
}
}
object RawTransactionWitnessParser extends RawTransactionWitnessParser
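/**
 * Editor's sketch (not part of the original file): a write/read round trip. Serializing a
 * [[TransactionWitness]] and re-parsing it with the same witness count should reproduce the
 * original value.
 */
object RawTransactionWitnessParserSketch {
  def roundTrip(witness: TransactionWitness): TransactionWitness = {
    val serialized = RawTransactionWitnessParser.write(witness)
    RawTransactionWitnessParser.read(serialized, witness.witnesses.size)
  }
}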
| bitcoin-s/bitcoin-s-core | core/src/main/scala/org/bitcoins/core/serializers/transaction/RawTransactionWitnessParser.scala | Scala | mit | 1,755 |
package demo.components.elementalui
import chandu0101.macros.tojs.GhPagesMacros
import chandu0101.scalajs.react.components.elementalui._
import demo.components.CodeExample
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import scala.scalajs.js.`|`
object EuiMiscDemo {
val code = GhPagesMacros.exampleSource
// EXAMPLE:START
case class State(currentPage: Int = 1,
pageSize: Int = 25,
plural: String = "Potatoes",
singular: String = "Potato",
total: Int = 123,
limit: Int = 5
)
case class Backend($: BackendScope[Unit, State]) {
def handleClear(event: ReactEvent) =
$.modState(identity)
def handlePageSelect(page: Int) =
$.modState(_.copy(currentPage = page))
def handleCurrentPageChange(event: ReactEventI) = {
val value = event.target.value.toInt match {
case x if x < 0 => 0
case x => x
}
$.modState(_.copy(currentPage = value))
}
def handlePageSizeChange(event: ReactEventI) = {
val value = event.target.value.toInt match {
case x if x > 100 => 100
case x if x < 1 => 1
case x => x
}
$.modState(_.copy(pageSize = value))
}
def handleTotalChange(event: ReactEventI) = {
val value = event.target.value.toInt match {
case x if x > 1000 => 1000
case x if x < 1 => 1
case x => x
}
$.modState(_.copy(total = value))
}
def handleLimitChange(event: ReactEventI) = {
val value = event.target.value.toInt match {
case x if x < 1 => 1
case x => x
}
$.modState(_.copy(limit = value))
}
def handlePluralChange(event: ReactEventI) = {
$.modState(_.copy(plural = event.target.value))
}
def handleSingularChange(event: ReactEventI) =
$.modState(_.copy(singular = event.target.value))
def renderAlerts =
<.div(
Alert(`type` = AlertType.INFO)(<.strong("Info"), "Blah Blah Blah au au oeu oeau eouaoeu eou aouo u"),
Alert(`type` = AlertType.SUCCESS)(<.strong("Success"), "Blah Blah Blah au au oeu oeau eouaoeu eou aouo u"),
Alert(`type` = AlertType.WARNING)(<.strong("Warning"), "Blah Blah Blah au au oeu oeau eouaoeu eou aouo u"),
Alert(`type` = AlertType.DANGER)(<.strong("Error"), "Blah Blah Blah au au oeu oeau eouaoeu eou aouo u"))
def renderCards =
<.div(
Card()("Hello, this is a very simple card, but blah blah blah"),
Row()(
Col(xs = "1/2")(Card()("Use")),
Col(xs = "1/2")(Card()("Me"))),
Row()(
Col(xs = "1/3")(Card()("In")),
Col(xs = "1/3")(Card()("A")),
Col(xs = "1/3")(Card()("Grid"))))
def renderPagination(S: State) =
<.div(
Pagination(
currentPage = S.currentPage,
onPageSelect = handlePageSelect _,
pageSize = S.pageSize,
plural = S.plural,
singular = S.singular,
total = S.total,
limit = S.limit
)(),
InputGroup()(
InputGroupSection(grow = true)(
FormField(label = "Current Page")(
FormInput(
name = "currentPage",
`type` = "number",
value = S.currentPage: String | Int,
onChange = handleCurrentPageChange _,
placeholder = "Current Page")())
),
InputGroupSection(grow = true)(
FormField(label = "Page Size")(
FormInput(
name = "pageSize",
`type` = "number",
value = S.pageSize: String | Int,
onChange = handlePageSizeChange _,
placeholder = "Page Size")()
)
),
InputGroupSection(grow = true)(
FormField(label = "Plural")(
FormInput(
name = "plural",
              `type` = "text",
value = S.plural: String | Int,
onChange = handlePluralChange _,
              placeholder = "Plural")())),
        InputGroupSection(grow = true)(
FormField(label = "Singular")(
FormInput(
name = "singular",
              `type` = "text",
value = S.singular: String | Int,
onChange = handleSingularChange _,
              placeholder = "Singular")())),
        InputGroupSection(grow = true)(
FormField(label = "Total")(
FormInput(
name = "total",
`type` = "number",
value = S.total: String | Int,
onChange = handleTotalChange _,
              placeholder = "Total")())),
        InputGroupSection(grow = true)(
FormField(label = "Limit")(
FormInput(
name = "limit",
`type` = "number",
value = S.limit: String | Int,
onChange = handleLimitChange _,
placeholder = "Limit")()
)
)
)
)
def renderPills =
<.div(
Pill(label = "Create", `type` = PillType.success_inverted)(),
Pill(label = "First Pill", `type` = PillType.primary, onClear = handleClear _)(),
Pill(label = "Second Pill", `type` = PillType.primary, onClear = handleClear _)(),
Pill(label = "Third Pill", `type` = PillType.primary, onClear = handleClear _)(),
Pill(label = "Clear All")())
def render(S: State) =
CodeExample(code, "EuiMisc")(
<.div(
<.h1("Miscellaneous"),
<.h2("Alerts"),
renderAlerts,
<.h2("Cards"),
renderCards,
<.h2("Pagination"),
renderPagination(S),
<.h2("Pills"),
renderPills
)
)
}
val component = ReactComponentB[Unit]("EuiMiscDemo")
.initialState(State())
.renderBackend[Backend]
.buildU
// EXAMPLE:END
def apply() = component()
}
| elacin/scalajs-react-components | demo/src/main/scala/demo/components/elementalui/EuiMiscDemo.scala | Scala | apache-2.0 | 6,153 |
package org.kneelawk.simplecursemodpackdownloader.task
import scala.collection.mutable.HashSet
import scala.collection.mutable.Set
import java.util.concurrent.locks.ReentrantLock
import org.kneelawk.simplecursemodpackdownloader.util.LockUtil._
class TaskManifest {
private val tasks = new HashSet[Task]
private val tasksLock = new ReentrantLock
/** Add a task to the manifest.
*
*/
def addTask(task: Task) = lock(tasksLock)(tasks += task)
/** Add a task to the manifest.
*
*/
def +=(task: Task) = lock(tasksLock)(tasks += task)
/** Remove a task from the manifest.
*
*/
def removeTask(task: Task) = lock(tasksLock)(tasks -= task)
/** Remove a task from the manifest.
*
*/
def -=(task: Task) = lock(tasksLock)(tasks -= task)
/** Get a list of tasks in the manifest.
*
*/
def getTasks: Set[Task] = tasks
/** Get the lock in control of tasks.
*
*/
def getLock: ReentrantLock = tasksLock
/** Loop through every task in the manifest.
*
*/
def foreach[R](f: (Task) => R) = toList.foreach(f)
/** Remove all stopped tasks.
*
*/
def pruneTasks() {
for (elem <- toList) {
if (!elem.isAllive) removeTask(elem)
}
}
/** Converts this TaskManifest into an immutable list of tasks.
*
*/
def toList: List[Task] = lock(tasksLock)(tasks.toList)
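  // Editor's note (usage sketch, not part of the original file): callers typically register tasks
  // as they spawn and prune periodically, e.g.
  //   val manifest = new TaskManifest
  //   manifest += task            // register a running task
  //   manifest.pruneTasks()       // drop tasks whose isAllive is false
  //   manifest.foreach(println)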
} | Kneelawk/SimpleCurseModpackDownloader | src/main/scala/org/kneelawk/simplecursemodpackdownloader/task/TaskManifest.scala | Scala | mit | 1,375 |
package com.rxbytes.splitpal.ui.main.fragments.contacts
import android.database.Cursor
import com.rxbytes.splitpal.utils.commons.CommonUtils._
import contacts.PhoneContactColumns
import macroid.ContextWrapper
import scala.concurrent.Future
import scala.util.Try
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by pnagarjuna on 09/12/15.
*/
case class CompleteContact(contactId: Int,
displayName: String,
phoneNumber: String,
email: String,
emailData: String)
object ContactsFetcher {
case class ContactWithDisplayName(contactId: Int,
displayName: String,
hasPhoneNumber: Boolean)
case class ContactWithPhoneNumber(contactId: Int,
phoneNumber: String)
case class ContactWithNameAndPhone(contactId: Int,
displayName: String,
phoneNumber: String)
def queryContactsWithDisplayName(limit: Int, offset: Int)(implicit contextWrapper: ContextWrapper): Seq[ContactWithDisplayName] = {
val cursor = Option[Cursor](
contextWrapper.application.getContentResolver
.query(
PhoneContactColumns.CONTENT_URI,
Array(
PhoneContactColumns._ID,
PhoneContactColumns.DISPLAY_NAME,
PhoneContactColumns.HAS_PHONE_NUMBER
),
s"${PhoneContactColumns.HAS_PHONE_NUMBER} != ?",
Array("0"),
s" ${PhoneContactColumns._ID} ASC limit $limit offset $offset"
)
)
def conversionFunction: Cursor => ContactWithDisplayName = cursor => {
val id = cursor.getInt(cursor.getColumnIndex(PhoneContactColumns._ID))
val displayName = cursor.getString(cursor.getColumnIndex(PhoneContactColumns.DISPLAY_NAME))
val hasPhoneNumber = cursor.getString(cursor.getColumnIndex(PhoneContactColumns.HAS_PHONE_NUMBER))
ContactWithDisplayName(id, displayName, hasPhoneNumber.toInt > 0)
}
getListFromCursor(cursor, conversionFunction)
}
def queryContactsForPhoneNumber(contactsWithDisplayName: Seq[ContactWithDisplayName])
(implicit contextWrapper: ContextWrapper): Seq[ContactWithNameAndPhone] = {
contactsWithDisplayName.flatMap { contactWithDisplayName =>
val cursor = Option[Cursor](contextWrapper.application.getContentResolver.query(
PhoneContactColumns.PHONE_CONTENT_URI,
Array(
PhoneContactColumns.PHONE_CONTACT_ID,
PhoneContactColumns.NUMBER
),
s"${PhoneContactColumns.PHONE_CONTACT_ID} = ? and ${PhoneContactColumns.NUMBER} not null",
Array(contactWithDisplayName.contactId.toString),
null,
null
))
def conversionFunction: Cursor => ContactWithPhoneNumber = cursor => {
val id = cursor.getInt(cursor.getColumnIndex(PhoneContactColumns.PHONE_CONTACT_ID))
val number = cursor.getString(cursor.getColumnIndex(PhoneContactColumns.NUMBER))
ContactWithPhoneNumber(
id,
number.split("\\\\s+").map(_.trim).reduce(_ + _).replaceAll("[\\\\D]", "")
)
}
getListFromCursor(cursor, conversionFunction).map { contact =>
ContactWithNameAndPhone(contact.contactId, contactWithDisplayName.displayName, contact.phoneNumber)
}
}
}
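  // Builds the UI contact list: duplicates are collapsed by phone number via the intermediate Map
  // before the remaining entries are mapped to Contact values.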
def contacts(limit: Int, offset: Int)(implicit contextWrapper: ContextWrapper): Seq[Contact] = {
queryContactsForPhoneNumber(queryContactsWithDisplayName(limit, offset))
.map { contact => contact.phoneNumber -> contact }(scala.collection.breakOut).toMap
.map { pair => pair._2 }.toSeq
.map { contactWithNameAndPhone =>
Contact(1, None, contactWithNameAndPhone.phoneNumber, 100)
}
}
def contactsAsync(limit: Int, offset: Int)(implicit contextWrapper: ContextWrapper): Future[Seq[Contact]] =
tryToFuture(Try(contacts(limit, offset)))
}
| pamu/split-pal | src/main/scala/com/rxbytes/splitpal/ui/main/fragments/contacts/ContactsFetcher.scala | Scala | apache-2.0 | 4,065 |
/*
* (c) Copyright 2016 Hewlett Packard Enterprise Development LP
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cogx
/** The Cog library. All cog services can be accessed with a single import:
* {{{
* import cogx._
* }}}
*
* Update to the message below: this doesn't fix things, at least totally.
* If you're running into trouble and desperate for a temporary fix, add
* a dummy type declaration to force recompilation of this file.
*
* A bug in the IntelliJ IDEA editor forced a somewhat unconventional approach
* to creating this object. Rather than:
*
* package object cogx { }
*
* we use an alternate syntax that accomplishes the same thing:
*
* package cogx
* object `package` { }
*
* @author Greg Snider
*/
object `package` extends CogXInterface
trait CogXInterface
extends cogx.api.CogFunctionAPI
with cogx.api.ImplicitConversions
with cogx.compiler.gpu_operator.UserGPULibrary
with cogx.cogmath.algebra.complex.ComplexImplicits
{
//---------------------------------------------------------------------------
// Compiler / Runtime configuration parameters.
//---------------------------------------------------------------------------
val Cog = parameters.Cog
//---------------------------------------------------------------------------
// Geometry
//---------------------------------------------------------------------------
type Shape = cogx.cogmath.geometry.Shape
val Shape = cogx.cogmath.geometry.Shape
//---------------------------------------------------------------------------
// Element types
//---------------------------------------------------------------------------
type ElementType = cogx.platform.types.ElementTypes.ElementType
val Int8 = cogx.platform.types.ElementTypes.Int8
val Uint8 = cogx.platform.types.ElementTypes.Uint8
val Int16 = cogx.platform.types.ElementTypes.Int16
val Uint16 = cogx.platform.types.ElementTypes.Uint16
val Int32 = cogx.platform.types.ElementTypes.Int32
val Uint32 = cogx.platform.types.ElementTypes.Uint32
val Int64 = cogx.platform.types.ElementTypes.Int64
val Uint64 = cogx.platform.types.ElementTypes.Uint64
val Uint8Pixel = cogx.platform.types.ElementTypes.Uint8Pixel
val Float16 = cogx.platform.types.ElementTypes.Float16
val Float32 = cogx.platform.types.ElementTypes.Float32
val Float64 = cogx.platform.types.ElementTypes.Float64
val Complex16 = cogx.platform.types.ElementTypes.Complex16
val Complex32 = cogx.platform.types.ElementTypes.Complex32
val Complex64 = cogx.platform.types.ElementTypes.Complex64
//---------------------------------------------------------------------------
// Real linear algebra
//---------------------------------------------------------------------------
type Vector = cogx.cogmath.algebra.real.Vector
val Vector = cogx.cogmath.algebra.real.Vector
type Matrix = cogx.cogmath.algebra.real.Matrix
val Matrix = cogx.cogmath.algebra.real.Matrix
type Tensor3 = cogx.cogmath.algebra.real.Tensor3
val Logarithm = cogx.cogmath.algebra.real.Logarithm
type Logarithm = cogx.cogmath.algebra.real.Logarithm
//---------------------------------------------------------------------------
// Complex linear algebra
//---------------------------------------------------------------------------
type Complex = cogx.cogmath.algebra.complex.Complex
val Complex = cogx.cogmath.algebra.complex.Complex
type ComplexVector = cogx.cogmath.algebra.complex.ComplexVector
val ComplexVector = cogx.cogmath.algebra.complex.ComplexVector
type ComplexMatrix = cogx.cogmath.algebra.complex.ComplexMatrix
val ComplexMatrix = cogx.cogmath.algebra.complex.ComplexMatrix
type ComplexTensor3 = cogx.cogmath.algebra.complex.ComplexTensor3
//---------------------------------------------------------------------------
// FFT math
//---------------------------------------------------------------------------
val FFT2D = cogx.cogmath.fft.FFT2D
//---------------------------------------------------------------------------
// Fields
//---------------------------------------------------------------------------
type FieldType = cogx.platform.types.FieldType
type Field = cogx.compiler.parser.syntaxtree.Field
val Field = cogx.compiler.parser.syntaxtree.Field
type ScalarField = cogx.compiler.parser.syntaxtree.ScalarField
val ScalarField = cogx.compiler.parser.syntaxtree.ScalarField
type VectorField = cogx.compiler.parser.syntaxtree.VectorField
val VectorField = cogx.compiler.parser.syntaxtree.VectorField
type MatrixField = cogx.compiler.parser.syntaxtree.MatrixField
val MatrixField = cogx.compiler.parser.syntaxtree.MatrixField
type Pixel = cogx.platform.types.Pixel
type ColorField = cogx.compiler.parser.syntaxtree.ColorField
val ColorField = cogx.compiler.parser.syntaxtree.ColorField
type ComplexField = cogx.compiler.parser.syntaxtree.ComplexField
val ComplexField = cogx.compiler.parser.syntaxtree.ComplexField
type ComplexVectorField = cogx.compiler.parser.syntaxtree.ComplexVectorField
val ComplexVectorField = cogx.compiler.parser.syntaxtree.ComplexVectorField
//---------------------------------------------------------------------------
// Field access on the CPU
//---------------------------------------------------------------------------
type FieldReader = cogx.platform.cpumemory.readerwriter.FieldReader
type ScalarFieldReader = cogx.platform.cpumemory.readerwriter.ScalarFieldReader
type ScalarFieldWriter = cogx.platform.cpumemory.readerwriter.ScalarFieldWriter
type VectorFieldReader = cogx.platform.cpumemory.readerwriter.VectorFieldReader
type VectorFieldWriter = cogx.platform.cpumemory.readerwriter.VectorFieldWriter
type MatrixFieldReader = cogx.platform.cpumemory.readerwriter.MatrixFieldReader
type MatrixFieldWriter = cogx.platform.cpumemory.readerwriter.MatrixFieldWriter
type ComplexFieldReader = cogx.platform.cpumemory.readerwriter.ComplexFieldReader
type ComplexFieldWriter = cogx.platform.cpumemory.readerwriter.ComplexFieldWriter
type ComplexVectorFieldReader = cogx.platform.cpumemory.readerwriter.ComplexVectorFieldReader
type ComplexVectorFieldWriter = cogx.platform.cpumemory.readerwriter.ComplexVectorFieldWriter
type ColorFieldReader = cogx.platform.cpumemory.readerwriter.ColorFieldReader
type ColorFieldWriter = cogx.platform.cpumemory.readerwriter.ColorFieldWriter
type AbstractFieldMemory = cogx.platform.cpumemory.AbstractFieldMemory
type VectorFieldMemory = cogx.platform.cpumemory.VectorFieldMemory
type MatrixFieldMemory = cogx.platform.cpumemory.MatrixFieldMemory
type ComplexFieldMemory = cogx.platform.cpumemory.ComplexFieldMemory
type ImageMemory = cogx.platform.cpumemory.ColorFieldMemory
val FieldMemory = cogx.platform.cpumemory.FieldMemory
type Operator = cogx.compiler.cpu_operator.Operator
//---------------------------------------------------------------------------
// Options for handling field borders
//---------------------------------------------------------------------------
type BorderPolicy = cogx.platform.types.BorderPolicy
val BorderClamp = cogx.platform.types.BorderClamp
val BorderZero = cogx.platform.types.BorderZero
val BorderCyclic = cogx.platform.types.BorderCyclic
val BorderValid = cogx.platform.types.BorderValid
val BorderFull = cogx.platform.types.BorderFull
//---------------------------------------------------------------------------
// Options for convolution / crossCorrelation sampling policy
//---------------------------------------------------------------------------
type ConvolutionSamplingPolicy = cogx.platform.types.ConvolutionSamplingPolicy
val NoSamplingConvolution = cogx.platform.types.NoSamplingConvolution
type UpsampleInputConvolution = cogx.platform.types.UpsampleInputConvolution
val UpsampleInputConvolution = cogx.platform.types.UpsampleInputConvolution
type DownsampleOutputConvolution = cogx.platform.types.DownsampleOutputConvolution
val DownsampleOutputConvolution = cogx.platform.types.DownsampleOutputConvolution
//---------------------------------------------------------------------------
// Options for specifying how FFT should be used for convolution
//---------------------------------------------------------------------------
type ConvolutionFFTUsePolicy = cogx.platform.types.ConvolutionFFTUsePolicy
val UseFFTNever = cogx.platform.types.UseFFTNever
val UseFFTAlways = cogx.platform.types.UseFFTAlways
val UseFFTWhenBest = cogx.platform.types.UseFFTWhenBest
//---------------------------------------------------------------------------
// Options for specifying how SmallTensorMode should be used for convolution
//---------------------------------------------------------------------------
type ConvolutionSmallTensorUsePolicy = cogx.platform.types.ConvolutionSmallTensorUsePolicy
val UseSmallTensorNever = cogx.platform.types.UseSmallTensorNever
val UseSmallTensorAlways = cogx.platform.types.UseSmallTensorAlways
val UseSmallTensorWhenBest = cogx.platform.types.UseSmallTensorWhenBest
//---------------------------------------------------------------------------
// Options for specifying which technology is used for saving/restoring ComputeGraphs
//---------------------------------------------------------------------------
type CheckpointerType = cogx.platform.types.CheckpointerType
val Hdf5CheckpointerType = cogx.platform.types.Hdf5CheckpointerType
val JavaCheckpointerType = cogx.platform.types.JavaCheckpointerType
//---------------------------------------------------------------------------
// User GPU functions
//---------------------------------------------------------------------------
val GPUOperator = cogx.compiler.gpu_operator.GPUOperator
//---------------------------------------------------------------------------
// Input / Output
//---------------------------------------------------------------------------
type Sensor = cogx.compiler.parser.syntaxtree.Sensor
type UnpipelinedSensor = cogx.compiler.parser.syntaxtree.UnpipelinedSensor
type ColorSensor = cogx.compiler.parser.syntaxtree.ColorSensor
type UnpipelinedColorSensor = cogx.compiler.parser.syntaxtree.UnpipelinedColorSensor
type VectorSensor = cogx.compiler.parser.syntaxtree.VectorSensor
type UnpipelinedVectorSensor = cogx.compiler.parser.syntaxtree.UnpipelinedVectorSensor
type Actuator = cogx.compiler.parser.syntaxtree.Actuator
val Actuator = cogx.compiler.parser.syntaxtree.Actuator
type UnpipelinedActuator = cogx.compiler.parser.syntaxtree.UnpipelinedActuator
val UnpipelinedActuator = cogx.compiler.parser.syntaxtree.UnpipelinedActuator
type ColorActuator = cogx.compiler.parser.syntaxtree.ColorActuator
val ColorActuator = cogx.compiler.parser.syntaxtree.ColorActuator
type UnpipelinedColorActuator = cogx.compiler.parser.syntaxtree.UnpipelinedColorActuator
val UnpipelinedColorActuator = cogx.compiler.parser.syntaxtree.UnpipelinedColorActuator
type VectorActuator = cogx.compiler.parser.syntaxtree.VectorActuator
val VectorActuator = cogx.compiler.parser.syntaxtree.VectorActuator
type UnpipelinedVectorActuator = cogx.compiler.parser.syntaxtree.UnpipelinedVectorActuator
type ComputeGraph = cogx.runtime.ComputeGraph
val ComputeGraph = cogx.runtime.ComputeGraph
// Get a string describing the underlying hardware platform.
def platformDescription: String = cogx.platform.opencl.OpenCLPlatform.descriptor
//---------------------------------------------------------------------------
// Utilities
//---------------------------------------------------------------------------
type Random = cogx.utilities.Random
val Random = cogx.utilities.Random
}
| hpe-cct/cct-core | src/main/scala/cogx/package.scala | Scala | apache-2.0 | 12,322 |
package cn.gridx.scala.database.redis.basic
import java.util.Date
import com.redis._
import scala.collection.immutable.IndexedSeq
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
/**
* Created by tao on 7/14/16.
*/
object Test {
def main(args: Array[String]): Unit = {
val redisClient = new RedisClient("localhost", 6379)
println(redisClient.get("mykey"))
val clients = new RedisClientPool("localhost", 6379)
val numbers = Range(1, 2000).toList
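    // Editor's note: each future performs a separate get followed by set, which is not atomic, so
    // concurrent increments can be lost; Redis' atomic INCR (client.incr("kk")) avoids this.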
val tasks =
for (i <- Range(0, 10000)) yield Future {
clients.withClient {
client => {
val v = client.get("kk")
if (v.isEmpty)
client.set("kk", 1)
else {
println(i + " : " + v.get)
client.set("kk", v.get.toInt + 1)
}
}
}
}
Await.result(Future.sequence(tasks), 10 seconds)
    println("Done")
}
}
| TaoXiao/Scala | database/src/main/scala/cn/gridx/scala/database/redis/basic/Test.scala | Scala | apache-2.0 | 1,007 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.yarn
import java.io.File
import java.nio.charset.StandardCharsets.UTF_8
import java.util.regex.Matcher
import java.util.regex.Pattern
import scala.collection.mutable.HashMap
import scala.reflect.runtime._
import scala.util.Try
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapred.{Master, JobConf}
import org.apache.hadoop.security.Credentials
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.security.token.Token
import org.apache.hadoop.yarn.conf.YarnConfiguration
import org.apache.hadoop.yarn.api.ApplicationConstants
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment
import org.apache.hadoop.yarn.api.records.{ApplicationAccessType, ContainerId, Priority}
import org.apache.hadoop.yarn.util.ConverterUtils
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.launcher.YarnCommandBuilderUtils
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.util.Utils
/**
* Contains util methods to interact with Hadoop from spark.
*/
class YarnSparkHadoopUtil extends SparkHadoopUtil {
private var tokenRenewer: Option[ExecutorDelegationTokenUpdater] = None
override def transferCredentials(source: UserGroupInformation, dest: UserGroupInformation) {
dest.addCredentials(source.getCredentials())
}
// Note that all params which start with SPARK are propagated all the way through, so if in yarn
// mode, this MUST be set to true.
override def isYarnMode(): Boolean = { true }
// Return an appropriate (subclass) of Configuration. Creating a config initializes some Hadoop
  // subsystems. Always create a new config, don't reuse yarnConf.
override def newConfiguration(conf: SparkConf): Configuration =
new YarnConfiguration(super.newConfiguration(conf))
// Add any user credentials to the job conf which are necessary for running on a secure Hadoop
// cluster
override def addCredentials(conf: JobConf) {
val jobCreds = conf.getCredentials()
jobCreds.mergeAll(UserGroupInformation.getCurrentUser().getCredentials())
}
override def getCurrentUserCredentials(): Credentials = {
UserGroupInformation.getCurrentUser().getCredentials()
}
override def addCurrentUserCredentials(creds: Credentials) {
UserGroupInformation.getCurrentUser().addCredentials(creds)
}
override def addSecretKeyToUserCredentials(key: String, secret: String) {
val creds = new Credentials()
creds.addSecretKey(new Text(key), secret.getBytes(UTF_8))
addCurrentUserCredentials(creds)
}
override def getSecretKeyFromUserCredentials(key: String): Array[Byte] = {
val credentials = getCurrentUserCredentials()
if (credentials != null) credentials.getSecretKey(new Text(key)) else null
}
/**
* Get the list of namenodes the user may access.
*/
def getNameNodesToAccess(sparkConf: SparkConf): Set[Path] = {
sparkConf.get("spark.yarn.access.namenodes", "")
.split(",")
.map(_.trim())
.filter(!_.isEmpty)
.map(new Path(_))
.toSet
}
def getTokenRenewer(conf: Configuration): String = {
val delegTokenRenewer = Master.getMasterPrincipal(conf)
logDebug("delegation token renewer is: " + delegTokenRenewer)
if (delegTokenRenewer == null || delegTokenRenewer.length() == 0) {
val errorMessage = "Can't get Master Kerberos principal for use as renewer"
logError(errorMessage)
throw new SparkException(errorMessage)
}
delegTokenRenewer
}
/**
* Obtains tokens for the namenodes passed in and adds them to the credentials.
*/
def obtainTokensForNamenodes(
paths: Set[Path],
conf: Configuration,
creds: Credentials,
renewer: Option[String] = None
): Unit = {
if (UserGroupInformation.isSecurityEnabled()) {
val delegTokenRenewer = renewer.getOrElse(getTokenRenewer(conf))
paths.foreach { dst =>
val dstFs = dst.getFileSystem(conf)
logInfo("getting token for namenode: " + dst)
dstFs.addDelegationTokens(delegTokenRenewer, creds)
}
}
}
private[spark] override def startExecutorDelegationTokenRenewer(sparkConf: SparkConf): Unit = {
tokenRenewer = Some(new ExecutorDelegationTokenUpdater(sparkConf, conf))
tokenRenewer.get.updateCredentialsIfRequired()
}
private[spark] override def stopExecutorDelegationTokenRenewer(): Unit = {
tokenRenewer.foreach(_.stop())
}
private[spark] def getContainerId: ContainerId = {
val containerIdString = System.getenv(ApplicationConstants.Environment.CONTAINER_ID.name())
ConverterUtils.toContainerId(containerIdString)
}
/**
* Obtains token for the Hive metastore, using the current user as the principal.
* Some exceptions are caught and downgraded to a log message.
* @param conf hadoop configuration; the Hive configuration will be based on this
* @return a token, or `None` if there's no need for a token (no metastore URI or principal
* in the config), or if a binding exception was caught and downgraded.
*/
def obtainTokenForHiveMetastore(conf: Configuration): Option[Token[DelegationTokenIdentifier]] = {
try {
obtainTokenForHiveMetastoreInner(conf, UserGroupInformation.getCurrentUser().getUserName)
} catch {
case e: ClassNotFoundException =>
logInfo(s"Hive class not found $e")
logDebug("Hive class not found", e)
None
}
}
/**
* Inner routine to obtains token for the Hive metastore; exceptions are raised on any problem.
* @param conf hadoop configuration; the Hive configuration will be based on this.
* @param username the username of the principal requesting the delegating token.
* @return a delegation token
*/
private[yarn] def obtainTokenForHiveMetastoreInner(conf: Configuration,
username: String): Option[Token[DelegationTokenIdentifier]] = {
val mirror = universe.runtimeMirror(Utils.getContextOrSparkClassLoader)
// the hive configuration class is a subclass of Hadoop Configuration, so can be cast down
// to a Configuration and used without reflection
val hiveConfClass = mirror.classLoader.loadClass("org.apache.hadoop.hive.conf.HiveConf")
    // using the (Configuration, Class) constructor allows the current configuration to be included
// in the hive config.
val ctor = hiveConfClass.getDeclaredConstructor(classOf[Configuration],
classOf[Object].getClass)
val hiveConf = ctor.newInstance(conf, hiveConfClass).asInstanceOf[Configuration]
val metastoreUri = hiveConf.getTrimmed("hive.metastore.uris", "")
// Check for local metastore
if (metastoreUri.nonEmpty) {
require(username.nonEmpty, "Username undefined")
val principalKey = "hive.metastore.kerberos.principal"
val principal = hiveConf.getTrimmed(principalKey, "")
      require(principal.nonEmpty, s"Hive principal $principalKey undefined")
logDebug(s"Getting Hive delegation token for $username against $principal at $metastoreUri")
val hiveClass = mirror.classLoader.loadClass("org.apache.hadoop.hive.ql.metadata.Hive")
val closeCurrent = hiveClass.getMethod("closeCurrent")
try {
// get all the instance methods before invoking any
val getDelegationToken = hiveClass.getMethod("getDelegationToken",
classOf[String], classOf[String])
val getHive = hiveClass.getMethod("get", hiveConfClass)
// invoke
val hive = getHive.invoke(null, hiveConf)
val tokenStr = getDelegationToken.invoke(hive, username, principal).asInstanceOf[String]
val hive2Token = new Token[DelegationTokenIdentifier]()
hive2Token.decodeFromUrlString(tokenStr)
Some(hive2Token)
} finally {
Utils.tryLogNonFatalError {
closeCurrent.invoke(null)
}
}
} else {
logDebug("HiveMetaStore configured in localmode")
None
}
}
}
object YarnSparkHadoopUtil {
// Additional memory overhead
  // 10% was arrived at experimentally, in the interest of minimizing memory waste while covering
  // the common cases. Memory overhead tends to grow with container size.
val MEMORY_OVERHEAD_FACTOR = 0.10
val MEMORY_OVERHEAD_MIN = 384
val ANY_HOST = "*"
val DEFAULT_NUMBER_EXECUTORS = 2
// All RM requests are issued with same priority : we do not (yet) have any distinction between
// request types (like map/reduce in hadoop for example)
val RM_REQUEST_PRIORITY = Priority.newInstance(1)
def get: YarnSparkHadoopUtil = {
val yarnMode = java.lang.Boolean.valueOf(
System.getProperty("SPARK_YARN_MODE", System.getenv("SPARK_YARN_MODE")))
if (!yarnMode) {
throw new SparkException("YarnSparkHadoopUtil is not available in non-YARN mode!")
}
SparkHadoopUtil.get.asInstanceOf[YarnSparkHadoopUtil]
}
/**
* Add a path variable to the given environment map.
* If the map already contains this key, append the value to the existing value instead.
*/
def addPathToEnvironment(env: HashMap[String, String], key: String, value: String): Unit = {
val newValue = if (env.contains(key)) { env(key) + getClassPathSeparator + value } else value
env.put(key, newValue)
}
/**
* Set zero or more environment variables specified by the given input string.
* The input string is expected to take the form "KEY1=VAL1,KEY2=VAL2,KEY3=VAL3".
*/
def setEnvFromInputString(env: HashMap[String, String], inputString: String): Unit = {
if (inputString != null && inputString.length() > 0) {
val childEnvs = inputString.split(",")
val p = Pattern.compile(environmentVariableRegex)
for (cEnv <- childEnvs) {
val parts = cEnv.split("=") // split on '='
val m = p.matcher(parts(1))
val sb = new StringBuffer
while (m.find()) {
val variable = m.group(1)
var replace = ""
if (env.get(variable) != None) {
replace = env.get(variable).get
} else {
// if this key is not configured for the child .. get it from the env
replace = System.getenv(variable)
if (replace == null) {
              // the env key is not present anywhere .. simply set it
replace = ""
}
}
m.appendReplacement(sb, Matcher.quoteReplacement(replace))
}
m.appendTail(sb)
// This treats the environment variable as path variable delimited by `File.pathSeparator`
// This is kept for backward compatibility and consistency with Hadoop's behavior
addPathToEnvironment(env, parts(0), sb.toString)
}
}
}
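  // Example (editor's note): setEnvFromInputString(env, "SPARK_HOME=/opt/spark,EXTRA=$SPARK_HOME/extra")
  // sets SPARK_HOME to /opt/spark and expands $SPARK_HOME inside EXTRA from `env`, falling back to
  // the process environment when the variable is not in the map (Unix-style $VAR pattern shown).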
private val environmentVariableRegex: String = {
if (Utils.isWindows) {
"%([A-Za-z_][A-Za-z0-9_]*?)%"
} else {
"\\$([A-Za-z_][A-Za-z0-9_]*)"
}
}
/**
* The handler if an OOM Exception is thrown by the JVM must be configured on Windows
* differently: the 'taskkill' command should be used, whereas Unix-based systems use 'kill'.
*
* As the JVM interprets both %p and %%p as the same, we can use either of them. However,
   * some tests on Windows computers suggest that the JVM only accepts '%%p'.
*
* Furthermore, the behavior of the character '%' on the Windows command line differs from
* the behavior of '%' in a .cmd file: it gets interpreted as an incomplete environment
* variable. Windows .cmd files escape a '%' by '%%'. Thus, the correct way of writing
* '%%p' in an escaped way is '%%%%p'.
*
* @return The correct OOM Error handler JVM option, platform dependent.
*/
def getOutOfMemoryErrorArgument : String = {
if (Utils.isWindows) {
escapeForShell("-XX:OnOutOfMemoryError=taskkill /F /PID %%%%p")
} else {
"-XX:OnOutOfMemoryError='kill %p'"
}
}
/**
* Escapes a string for inclusion in a command line executed by Yarn. Yarn executes commands
* using either
*
* (Unix-based) `bash -c "command arg1 arg2"` and that means plain quoting doesn't really work.
* The argument is enclosed in single quotes and some key characters are escaped.
*
* (Windows-based) part of a .cmd file in which case windows escaping for each argument must be
* applied. Windows is quite lenient, however it is usually Java that causes trouble, needing to
* distinguish between arguments starting with '-' and class names. If arguments are surrounded
* by ' java takes the following string as is, hence an argument is mistakenly taken as a class
* name which happens to start with a '-'. The way to avoid this, is to surround nothing with
* a ', but instead with a ".
*
* @param arg A single argument.
* @return Argument quoted for execution via Yarn's generated shell script.
*/
def escapeForShell(arg: String): String = {
if (arg != null) {
if (Utils.isWindows) {
YarnCommandBuilderUtils.quoteForBatchScript(arg)
} else {
val escaped = new StringBuilder("'")
for (i <- 0 to arg.length() - 1) {
arg.charAt(i) match {
case '$' => escaped.append("\\$")
case '"' => escaped.append("\\\"")
case '\'' => escaped.append("'\\''")
case c => escaped.append(c)
}
}
escaped.append("'").toString()
}
} else {
arg
}
}
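  // Example (editor's note): on Unix, escapeForShell("""pass'wd$1""") returns 'pass'\''wd\$1',
  // i.e. the value is single-quoted with embedded ', " and $ escaped for Yarn's generated launch script.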
def getApplicationAclsForYarn(securityMgr: SecurityManager)
: Map[ApplicationAccessType, String] = {
Map[ApplicationAccessType, String] (
ApplicationAccessType.VIEW_APP -> securityMgr.getViewAcls,
ApplicationAccessType.MODIFY_APP -> securityMgr.getModifyAcls
)
}
/**
* Expand environment variable using Yarn API.
* If environment.$$() is implemented, return the result of it.
* Otherwise, return the result of environment.$()
* Note: $$() is added in Hadoop 2.4.
*/
private lazy val expandMethod =
Try(classOf[Environment].getMethod("$$"))
.getOrElse(classOf[Environment].getMethod("$"))
def expandEnvironment(environment: Environment): String =
expandMethod.invoke(environment).asInstanceOf[String]
/**
* Get class path separator using Yarn API.
* If ApplicationConstants.CLASS_PATH_SEPARATOR is implemented, return it.
* Otherwise, return File.pathSeparator
* Note: CLASS_PATH_SEPARATOR is added in Hadoop 2.4.
*/
private lazy val classPathSeparatorField =
Try(classOf[ApplicationConstants].getField("CLASS_PATH_SEPARATOR"))
.getOrElse(classOf[File].getField("pathSeparator"))
def getClassPathSeparator(): String = {
classPathSeparatorField.get(null).asInstanceOf[String]
}
/**
* Getting the initial target number of executors depends on whether dynamic allocation is
* enabled.
   * If not using dynamic allocation it gets the number of executors requested by the user.
*/
def getInitialTargetExecutorNumber(
conf: SparkConf,
numExecutors: Int = DEFAULT_NUMBER_EXECUTORS): Int = {
if (Utils.isDynamicAllocationEnabled(conf)) {
val minNumExecutors = conf.getInt("spark.dynamicAllocation.minExecutors", 0)
val initialNumExecutors =
conf.getInt("spark.dynamicAllocation.initialExecutors", minNumExecutors)
val maxNumExecutors = conf.getInt("spark.dynamicAllocation.maxExecutors", Int.MaxValue)
require(initialNumExecutors >= minNumExecutors && initialNumExecutors <= maxNumExecutors,
s"initial executor number $initialNumExecutors must between min executor number" +
s"$minNumExecutors and max executor number $maxNumExecutors")
initialNumExecutors
} else {
val targetNumExecutors =
sys.env.get("SPARK_EXECUTOR_INSTANCES").map(_.toInt).getOrElse(numExecutors)
// System property can override environment variable.
conf.getInt("spark.executor.instances", targetNumExecutors)
}
}
}
| chenc10/Spark-PAF | yarn/src/main/scala/org/apache/spark/deploy/yarn/YarnSparkHadoopUtil.scala | Scala | apache-2.0 | 16,823 |
package io.buoyant.k8s
import com.fasterxml.jackson.annotation.{JsonProperty, JsonSubTypes, JsonTypeInfo}
import com.fasterxml.jackson.core.`type`.TypeReference
import com.twitter.finagle.Service
import com.twitter.finagle.http.{Request, Response}
import com.twitter.io.Buf
import com.twitter.util.Future
import io.buoyant.k8s.{KubeObject => BaseObject}
import io.buoyant.test.Awaits
import org.scalatest.{FunSuite, OptionValues}
/**
* This test both exercises the versioning functionality in `io.buoyant.k8s`, and provides
* an example implementation. (See `namerd` for the current usage in the Buoyant suite of projects).
*/
class CustomResourceTest extends FunSuite with Awaits with OptionValues {
import CustomResourceTest._
object Fixtures {
val bookList = Buf.Utf8("""{"kind":"BookList","apiVersion":"buoyant.io/vTest","metadata":{"selfLink":"/apis/buoyant.io/vTest/namespaces/test/books"},"items":[{"metadata":{"name":"programming-in-scala","selfLink":"/apis/buoyant.io/vTest/namespaces/test/books/programming-in-scala"},"title":"Programming in Scala","author":"Martin Odersky"}]} """)
}
test("namespaced: get books list") {
import Fixtures._
val service = Service.mk[Request, Response] { req =>
assert(req.uri == s"/apis/buoyant.io/vTest/namespaces/test/books")
val rsp = Response()
rsp.version = req.version
rsp.setContentTypeJson()
rsp.headerMap("Transfer-Encoding") = "chunked"
rsp.writer.write(bookList) before rsp.writer.close()
Future.value(rsp)
}
val ns = Api(service).withNamespace("test")
val books = await(ns.books.get()).value
assert(books.items.length == 1)
val book = books.items.head
assert(book.title == "Programming in Scala")
}
}
object CustomResourceTest {
trait Object extends BaseObject
case class Book(
title: String,
author: String,
metadata: Option[ObjectMeta],
apiVersion: Option[String],
kind: Option[String]
) extends Object
implicit object BookDescriptor extends ObjectDescriptor[Book, BookWatch] {
def listName = "books"
def toWatch(o: Book) = BookWatch.Modified(o)
}
case class BookList(
@JsonProperty("items") items: Seq[Book],
kind: Option[String] = None,
metadata: Option[ObjectMeta] = None,
apiVersion: Option[String] = None
) extends KubeList[Book]
@JsonTypeInfo(use = JsonTypeInfo.Id.NAME, include = JsonTypeInfo.As.PROPERTY, property = "type")
@JsonSubTypes(Array(
new JsonSubTypes.Type(value = classOf[BookWatch.Added], name = "ADDED"),
new JsonSubTypes.Type(value = classOf[BookWatch.Modified], name = "MODIFIED"),
new JsonSubTypes.Type(value = classOf[BookWatch.Deleted], name = "DELETED"),
new JsonSubTypes.Type(value = classOf[BookWatch.Error], name = "ERROR")
))
sealed trait BookWatch extends Watch[Book]
object BookWatch {
case class Added(`object`: Book) extends BookWatch with Watch.Added[Book]
case class Modified(`object`: Book) extends BookWatch with Watch.Modified[Book]
case class Deleted(`object`: Book) extends BookWatch with Watch.Deleted[Book]
case class Error(
@JsonProperty(value = "object") status: Status
) extends BookWatch with Watch.Error[Book]
}
implicit private val bookTypeRef = new TypeReference[Book] {}
implicit private val bookListTypeRef = new TypeReference[BookList] {}
implicit private val bookWatchTypeRef = new TypeReference[BookWatch] {}
implicit private val booksWatchIsOrdered =
new ResourceVersionOrdering[Book, BookWatch]
case class Api(client: Client) extends CustomResourceVersion[Object] {
def owner = "buoyant.io"
def ownerVersion = "vTest"
override def withNamespace(ns: String) = new NsApi(client, ns)
def books = listResource[Book, BookWatch, BookList]()
}
class NsApi(client: Client, ns: String) extends NsCustomResourceVersion[Object](client, "buoyant.io", "vTest", ns) {
def books = listResource[Book, BookWatch, BookList]()
}
}
| linkerd/linkerd | k8s/src/test/scala/io/buoyant/k8s/CustomResourceTest.scala | Scala | apache-2.0 | 3,991 |
package com.mnubo.dbevolv.plugin
import com.typesafe.config.ConfigFactory
import sbt.Attributed._
import sbt.Keys._
import sbt._
import sbtassembly.AssemblyPlugin
import sbtassembly.AssemblyPlugin.autoImport._
import sbtdocker.DockerKeys._
import sbtdocker.Instructions._
import sbtdocker.immutable.Dockerfile
import sbtdocker.staging.CopyFile
import sbtdocker.{DockerPlugin, ImageName, Instruction}
import sbtrelease.ReleasePlugin.autoImport._
import sbtrelease.ReleaseStateTransformations._
import sbtrelease._
import scala.io.Source
object DbevolvPlugin extends AutoPlugin {
private val config = ConfigFactory.parseFile(new File("db.conf"))
private val schemaName = config.getString("schema_name")
private val dockerNamespace = if (config.hasPath("docker_namespace")) Some(config.getString("docker_namespace")) else None
private val dbevolvVersion = Source.fromInputStream(getClass.getResourceAsStream("/version.txt")).getLines().mkString
private val dbDependencies = Map(
"cassandra" -> Seq("com.datastax.cassandra" % "cassandra-driver-core" % "3.0.0"),
"elasticsearch" -> Seq("com.mnubo" %% "dbevolv-elasticsearch" % dbevolvVersion),
"elasticsearch2" -> Seq("com.mnubo" %% "dbevolv-elasticsearch2" % dbevolvVersion),
"mysql" -> Seq("mysql" % "mysql-connector-java" % "5.1.35")
)
override def requires = DockerPlugin && AssemblyPlugin && ReleasePlugin
object autoImport {
val buildTestContainer = taskKey[Unit]("Build test database container, and test migrations along the way.")
val buildAndPushTestContainer = taskKey[Unit]("Build test database container, and then push it.")
}
import autoImport._
override lazy val projectSettings: Seq[Setting[_]] = Seq(
    // Don't ask the user to name the SBT project: use the schema name defined in the config.
name := schemaName,
// Specify what is the main class to run in the fat jar
mainClass in assembly := Some("com.mnubo.dbevolv.Dbevolv"),
// We just need the dbevolv library to build a schema. We automatically infer the version to use.
libraryDependencies ++= Seq(
"com.mnubo" %% "dbevolv" % dbevolvVersion excludeAll (
ExclusionRule("org.joda", "joda-convert"),
ExclusionRule("org.slf4j", "slf4j-log4j12"),
ExclusionRule("com.sun.jmx", "jmxri"),
ExclusionRule("com.sun.jdmk", "jmxtools")
)
) ++ dbDependencies(config.getString("database_kind")),
assemblyMergeStrategy in assembly := {
case "org/joda/time/base/BaseDateTime.class" => MergeStrategy.first
case "META-INF/io.netty.versions.properties" => MergeStrategy.first
case x => (assemblyMergeStrategy in assembly).value(x)
},
// Give the fat jar a simple name
assemblyJarName := s"$schemaName-schema-manager.jar",
buildTestContainer <<= buildTestContainerTask(doPush = false),
buildAndPushTestContainer <<= buildTestContainerTask(doPush = true),
dockerBuildAndPush <<= (dockerBuildAndPush dependsOn buildAndPushTestContainer),
dockerfile in docker := {
val artifact = (assembly in assembly).value
val artifactTargetPath = s"/app/${artifact.name}"
val base = Seq[Instruction](
From("mnubo/jre8:u91"),
Add(CopyFile(artifact), artifactTargetPath),
Add(CopyFile(new File("db.conf")), "/app/db.conf"),
Add(CopyFile(new File("migrations")), "/app/migrations/"),
WorkDir("/app"),
EntryPoint.exec(Seq("java", "-jar", artifactTargetPath))
)
Dockerfile(
if (new File("src").exists())
base :+ Add(CopyFile(new File("src")), "/app/src/")
else
base
)
},
imageNames in docker := Seq(
ImageName(
namespace = dockerNamespace,
repository = name.value + "-mgr",
tag = Some(version.value)
),
ImageName(
namespace = dockerNamespace,
repository = name.value + "-mgr",
tag = Some("latest")
)
),
// Auto increment the version every time we run the build in Jenkins by using the sbt-release plugin.
releasePublishArtifactsAction := {
dockerBuildAndPush.value
// Clean ourselves
streams.value.log.info(s"Cleaning images...")
(imageNames in docker).value.foreach(img => s"docker rmi -f $img".!)
streams.value.log.info(s"Images cleaned.")
},
    releaseVersion := identity, // The current version is already the right one
releaseNextVersion := { (ver: String) => sbtrelease.Version(ver).map(_.bumpBugfix.string).getOrElse(versionFormatError) }, // Don't 'snapshot' the version
    // No need to commit the release version, since it is already correct.
releaseProcess := Seq[ReleaseStep](
setupRemoteTracking,
inquireVersions,
runTest,
setReleaseVersion,
dockerOnlyPublishArtifacts,
tagRelease,
setNextVersion,
commitNextVersion,
pushChanges
)
)
import Utilities._
private lazy val dockerOnlyPublishArtifacts = ReleaseStep(
action = { st: State =>
val extracted = st.extract
val ref = extracted.get(thisProjectRef)
extracted.runAggregated(releasePublishArtifactsAction in Global in ref, st)
},
enableCrossBuild = true
)
private lazy val setupRemoteTracking: ReleaseStep = ReleaseStep(
action = identity,
check = { st: State =>
val cmd = "git checkout -t -B master origin/master"
st.log.info(cmd)
cmd.!
st
},
enableCrossBuild = true
)
private def buildTestContainerTask(doPush: Boolean) = Def.task[Unit] {
streams.value.log.info(s"Building a test container. dbevolv version: $dbevolvVersion")
val cp = (fullClasspath in Compile).value
val args =
if (doPush)
Seq("push")
else
Seq()
val scalaRun = (runner in run).value
sbt.Defaults.toError(scalaRun.run(
"com.mnubo.dbevolv.TestDatabaseBuilder",
data(cp),
args,
streams.value.log
))
}
}
| mnubo/dbevolv | plugin/src/main/scala/com/mnubo/dbevolv/plugin/DbevolvPlugin.scala | Scala | apache-2.0 | 6,233 |
/*
* Copyright 2014 Ralf Steuerwald
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ifmaps
import org.scalatest.FunSuite
import ifmaps._
import org.w3c.dom.Document
import scala.collection.JavaConversions._
import ifmaps.transformation.Extractor.ResultItemExtractor
import ifmaps.transformation.Converter._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ResultItemExtractorTest extends FunSuite {
class ResultItemMock extends JResultItem {
override def getIdentifier1(): JIdentifier = null
override def getIdentifier2(): JIdentifier = null
override def setIdentifier1(id: JIdentifier) = ()
override def setIdentifier2(id: JIdentifier) = ()
override def getIdentifier() = Array[JIdentifier](null, null)
override def holdsLink() = false
override def addMetadata(md: Document) = ()
override def getMetadata() = List.empty[Document]
}
test("ifmapj result item with both identifier set to null") {
val jResultItem = new ResultItemMock
val resultItem = jResultItem match {
case ResultItemExtractor(result) => Some(result)
case _ => None
}
assert(resultItem === None)
}
test("ifmapj result item with first identifier set to null") {
val jResultItem = new ResultItemMock {
val d = Device("42").toIfmapj
override def getIdentifier2() = d
override def getIdentifier() = Array(null, d)
}
val resultItem = jResultItem match {
case ResultItemExtractor(result) => Some(result)
case _ => None
}
assert(resultItem.isDefined)
assert(resultItem.get.id1.isInstanceOf[Device])
}
test("ifmapj result item with second identifier set to null") {
val jResultItem = new ResultItemMock {
val d = Device("42").toIfmapj
override def getIdentifier1() = d
override def getIdentifier() = Array(d, null)
}
val resultItem = jResultItem match {
case ResultItemExtractor(result) => Some(result)
case _ => None
}
assert(resultItem.isDefined)
assert(resultItem.get.id1.isInstanceOf[Device])
}
test("ifmapj result with non null identifier") {
val jResultItem = new ResultItemMock {
val d = Device("42").toIfmapj
val i = IpAddress("10.1.1.1").toIfmapj
override def getIdentifier1() = d
override def getIdentifier2() = i
override def getIdentifier() = Array(d, i)
}
val resultItem = jResultItem match {
case ResultItemExtractor(result) => Some(result)
case _ => None
}
assert(resultItem.isDefined)
assert(resultItem.get.id1.isInstanceOf[Device])
assert(resultItem.get.id2.isDefined)
assert(resultItem.get.id2.get.isInstanceOf[IpAddress])
}
} | rsteuerw/ifmaps | src/test/scala/ifmaps/ResultItemExtractorTest.scala | Scala | apache-2.0 | 3,276 |
package com.twitter.finagle.service
import com.twitter.conversions.time._
import com.twitter.finagle.{ChannelClosedException, Failure, TimeoutException, WriteException}
import com.twitter.util.{
TimeoutException => UtilTimeoutException, Duration, JavaSingleton, Return, Throw, Try}
import java.util.{concurrent => juc}
import java.{util => ju}
import scala.collection.JavaConverters._
/**
* A function defining retry behavior for a given value type `A`.
*
* The [[Function1]] returns [[None]] if no more retries should be made
* and [[Some]] if another retry should happen. The returned `Some` has
* a [[Duration]] field for how long to wait for the next retry as well
* as the next `RetryPolicy` to use.
*
* @see [[SimpleRetryPolicy]] for a Java friendly API.
*/
abstract class RetryPolicy[-A] extends (A => Option[(Duration, RetryPolicy[A])]) {
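  // Illustrative usage sketch of the function shape described above: return None to stop
  // retrying, or Some of a delay paired with the policy to use next. The duration is an
  // arbitrary value chosen for the example.
  //
  //   val oneQuickRetry: RetryPolicy[Try[Nothing]] =
  //     RetryPolicy { _: Try[Nothing] =>
  //       Some((10.milliseconds, RetryPolicy { _: Try[Nothing] => None }))
  //     }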
/**
* Creates a new `RetryPolicy` based on the current `RetryPolicy` in which values of `A`
* are first checked against a predicate function, and only if the predicate returns true
* will the value be passed on to the current `RetryPolicy`.
*
* The predicate function need not be a pure function, but can change its behavior over
* time. For example, the predicate function's decision can be based upon backpressure
* signals supplied by things like failure rates or latency, which allows `RetryPolicy`s
* to dynamically reduce the number of retries in response to backpressure.
*
* The predicate function is only called on the first failure in a chain. Any additional
* chained RetryPolicies returned by the current policy will then see additional failures
* unfiltered. Contrast this will `filterEach`, which applies the filter to each `RetryPolicy`
* in the chain.
*/
def filter[B <: A](pred: B => Boolean): RetryPolicy[B] =
RetryPolicy { e =>
if (!pred(e)) None else this(e)
}
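  // Illustrative usage sketch for `filter`: gate an existing policy so that only retryable
  // write failures are considered. The retry count is an arbitrary choice for the example.
  //
  //   val writesOnly: RetryPolicy[Try[Nothing]] =
  //     RetryPolicy.tries(3).filter {
  //       case Throw(RetryPolicy.RetryableWriteException(_)) => true
  //       case _                                             => false
  //     }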
/**
* Similar to `filter`, but the predicate is applied to each `RetryPolicy` in the chain
* returned by the current RetryPolicy. For example, if the current `RetryPolicy` returns
* `Some((D, P'))` for value `E` (of type `A`), and the given predicate returns true for `E`,
* then the value returned from the filtering `RetryPolicy` will be `Some((D, P''))` where
* `P''` is equal to `P'.filterEach(pred)`.
*
* One example where this is useful is to dynamically and fractionally allow retries based
* upon backpressure signals. If, for example, the predicate function returned true or false
* based upon a probability distribution computed from a backpressure signal, it could return
* true 50% of the time, giving you a 50% chance of performing a single retry, a 25% chance of
* performing 2 retries, 12.5% chance of performing 3 retries, etc. This might be more
* desirable than just using `filter` where you end up with a 50% chance of no retries and
* 50% chance of the full number of retries.
*/
def filterEach[B <: A](pred: B => Boolean): RetryPolicy[B] =
RetryPolicy { e =>
if (!pred(e))
None
else {
this(e).map {
case (backoff, p2) => (backoff, p2.filterEach(pred))
}
}
}
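  // Illustrative usage sketch for `filterEach`: fractionally allow retries as described
  // above, each policy in the chain passing independently with 50% probability. Using
  // scala.util.Random here is an assumption made for the example.
  //
  //   val rng = new scala.util.Random
  //   val fractionalRetries: RetryPolicy[Try[Nothing]] =
  //     RetryPolicy.tries(4).filterEach(_ => rng.nextDouble() < 0.5)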
/**
* Applies a dynamically chosen retry limit to an existing `RetryPolicy` that may allow for
* more retries. When the returned `RetryPolicy` is first invoked, it will call the `maxRetries`
* by-name parameter to get the current maximum retries allowed. Regardless of the number
* of retries that the underlying policy would allow, it is capped to be no greater than the
* number returned by `maxRetries` on the first failure in the chain.
*
* Using a dynamically chosen retry limit allows for the retry count to be tuned at runtime
* based upon backpressure signals such as failure rate or request latency.
*/
def limit(maxRetries: => Int): RetryPolicy[A] =
RetryPolicy[A] { e =>
val triesRemaining = maxRetries
if (triesRemaining <= 0)
None
else {
this(e).map {
case (backoff, p2) => (backoff, p2.limit(triesRemaining - 1))
}
}
}
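  // Illustrative usage sketch for `limit`: cap a policy with a dynamically evaluated limit.
  // `currentRetryBudget` is a hypothetical stand-in for a backpressure-driven signal; it is
  // not part of this file.
  //
  //   def currentRetryBudget(): Int = 2
  //   val capped: RetryPolicy[Try[Nothing]] =
  //     RetryPolicy.tries(5).limit(currentRetryBudget())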
}
/**
* A retry policy abstract class. This is convenient to use for Java programmers. Simply implement
* the two abstract methods `shouldRetry` and `backoffAt` and you're good to go!
*/
abstract class SimpleRetryPolicy[A](i: Int) extends RetryPolicy[A]
with (A => Option[(Duration, RetryPolicy[A])])
{
def this() = this(0)
final def apply(e: A) = {
if (shouldRetry(e)) {
backoffAt(i) match {
case Duration.Top =>
None
case howlong =>
Some((howlong, new SimpleRetryPolicy[A](i + 1) {
def shouldRetry(a: A) = SimpleRetryPolicy.this.shouldRetry(a)
def backoffAt(retry: Int) = SimpleRetryPolicy.this.backoffAt(retry)
}))
}
} else {
None
}
}
override def andThen[B](that: Option[(Duration, RetryPolicy[A])] => B): A => B =
that.compose(this)
override def compose[B](that: B => A): B => Option[(Duration, RetryPolicy[A])] =
that.andThen(this)
/**
* Given a value, decide whether it is retryable. Typically the value is an exception.
*/
def shouldRetry(a: A): Boolean
/**
* Given a number of retries, return how long to wait till the next retry. Note that this is
* zero-indexed. To implement a finite number of retries, implement a method like:
* `if (i > 3) return never`
*/
def backoffAt(retry: Int): Duration
/**
* A convenience method to access Duration.Top from Java. This is a sentinel value that
* signals no-further-retries.
*/
final val never = Duration.Top
}
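// Illustrative sketch of a SimpleRetryPolicy subclass, along the lines suggested by the
// scaladoc above: retry any Throw up to three times with a fixed 100 ms pause. The constants
// are arbitrary choices for the example.
//
//   class ThreeTries extends SimpleRetryPolicy[Try[Nothing]] {
//     def shouldRetry(t: Try[Nothing]) = t match {
//       case Throw(_) => true
//       case _        => false
//     }
//     def backoffAt(retry: Int) = if (retry < 3) 100.milliseconds else never
//   }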
object RetryPolicy extends JavaSingleton {
object RetryableWriteException {
def unapply(thr: Throwable): Option[Throwable] = thr match {
// We don't retry interruptions by default since they
// indicate that the request was discarded.
case f: Failure if f.isFlagged(Failure.Interrupted) => None
case f: Failure if f.isFlagged(Failure.Restartable) => Some(f.show)
case WriteException(exc) => Some(exc)
case _ => None
}
}
/**
* Failures that are generally retryable because the request failed
* before it finished being written to the remote service.
* See [[com.twitter.finagle.WriteException]].
*/
val WriteExceptionsOnly: PartialFunction[Try[Nothing], Boolean] = {
case Throw(RetryableWriteException(_)) => true
}
val TimeoutAndWriteExceptionsOnly: PartialFunction[Try[Nothing], Boolean] =
WriteExceptionsOnly.orElse {
case Throw(Failure(Some(_: TimeoutException))) => true
case Throw(Failure(Some(_: UtilTimeoutException))) => true
case Throw(_: TimeoutException) => true
case Throw(_: UtilTimeoutException) => true
}
val ChannelClosedExceptionsOnly: PartialFunction[Try[Nothing], Boolean] = {
case Throw(_: ChannelClosedException) => true
}
val Never: RetryPolicy[Try[Nothing]] = new RetryPolicy[Try[Nothing]] {
def apply(t: Try[Nothing]): Option[(Duration, Nothing)] = None
}
/**
* Converts a `RetryPolicy[Try[Nothing]]` to a `RetryPolicy[(Req, Try[Rep])]`
* that acts only on exceptions.
*/
private[finagle] def convertExceptionPolicy[Req, Rep](
policy: RetryPolicy[Try[Nothing]]
): RetryPolicy[(Req, Try[Rep])] =
new RetryPolicy[(Req, Try[Rep])] {
def apply(input: (Req, Try[Rep])): Option[(Duration, RetryPolicy[(Req, Try[Rep])])] = input match {
case (_, t@Throw(_)) =>
policy(t.asInstanceOf[Throw[Nothing]]) match {
case Some((howlong, nextPolicy)) => Some((howlong, convertExceptionPolicy(nextPolicy)))
case None => None
}
case (_, Return(_)) => None
}
}
/**
* Lifts a function of type `A => Option[(Duration, RetryPolicy[A])]` in the `RetryPolicy` type.
*/
def apply[A](f: A => Option[(Duration, RetryPolicy[A])]): RetryPolicy[A] =
new RetryPolicy[A] {
def apply(e: A): Option[(Duration, RetryPolicy[A])] = f(e)
}
/**
* Try up to a specific number of times, based on the supplied `PartialFunction[A, Boolean]`.
* A value of type `A` is considered retryable if and only if the PartialFunction
* is defined at and returns true for that value.
*
* The returned policy has jittered backoffs between retries.
*
* @param numTries the maximum number of attempts (including retries) that can be made.
* A value of `1` means one attempt and no retries on failure.
* A value of `2` means one attempt and then a single retry if the failure meets the
* criteria of `shouldRetry`.
* @param shouldRetry which `A`-typed values are considered retryable.
*/
def tries[A](
numTries: Int,
shouldRetry: PartialFunction[A, Boolean]
): RetryPolicy[A] = {
val backoffs = Backoff.decorrelatedJittered(5.millis, 200.millis)
backoff[A](backoffs.take(numTries - 1))(shouldRetry)
}
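  // Illustrative usage sketch for `tries` with a custom classifier: also treat channel-closed
  // failures as retryable. The attempt count and exception choice are assumptions made for
  // the example.
  //
  //   val retryClosed: RetryPolicy[Try[Nothing]] =
  //     RetryPolicy.tries[Try[Nothing]](3, {
  //       case Throw(_: ChannelClosedException) => true
  //     })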
/**
* Try up to a specific number of times of times on failures that are
* [[com.twitter.finagle.service.RetryPolicy.WriteExceptionsOnly]].
*
* The returned policy has jittered backoffs between retries.
*
* @param numTries the maximum number of attempts (including retries) that can be made.
* A value of `1` means one attempt and no retries on failure.
* A value of `2` means one attempt and then a single retry if the failure meets the
* criteria of [[com.twitter.finagle.service.RetryPolicy.WriteExceptionsOnly]].
*/
def tries(numTries: Int): RetryPolicy[Try[Nothing]] = tries(numTries, WriteExceptionsOnly)
private[this] val AlwaysFalse = Function.const(false) _
/**
* Retry based on a series of backoffs defined by a `Stream[Duration]`. The
* stream is consulted to determine the duration after which a request is to
* be retried. A `PartialFunction` argument determines which request types
* are retryable.
*
* @see [[backoffJava]] for a Java friendly API.
*/
def backoff[A](
backoffs: Stream[Duration]
)(shouldRetry: PartialFunction[A, Boolean]): RetryPolicy[A] = {
RetryPolicy { e =>
if (shouldRetry.applyOrElse(e, AlwaysFalse)) {
backoffs match {
case howlong #:: rest =>
Some((howlong, backoff(rest)(shouldRetry)))
case _ =>
None
}
} else {
None
}
}
}
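  // Illustrative usage sketch for `backoff`: a fixed schedule built from a plain
  // Stream[Duration]. The durations and the classifier are arbitrary choices for the example.
  //
  //   val timeoutPolicy: RetryPolicy[Try[Nothing]] =
  //     RetryPolicy.backoff(Stream(1.second, 2.seconds, 4.seconds))(
  //       RetryPolicy.TimeoutAndWriteExceptionsOnly)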
/**
* A version of [[backoff]] usable from Java.
*
* @param backoffs can be created via [[Backoff.toJava]].
*/
def backoffJava[A](
backoffs: juc.Callable[ju.Iterator[Duration]],
shouldRetry: PartialFunction[A, Boolean]
): RetryPolicy[A] = {
backoff[A](backoffs.call().asScala.toStream)(shouldRetry)
}
/**
* Combines multiple `RetryPolicy`s into a single combined `RetryPolicy`, with interleaved
* backoffs. For a given value of `A`, each policy in `policies` is tried in order. If all
* policies return `None`, then the combined `RetryPolicy` returns `None`. If policy `P` returns
* `Some((D, P'))`, then the combined `RetryPolicy` returns `Some((D, P''))`, where `P''` is a
* new combined `RetryPolicy` with the same sub-policies, with the exception of `P` replaced by
* `P'`.
*
* The ordering of policies matters: earlier policies get a chance to handle the failure
* before later policies; a catch-all policy, if any, should be last.
*
* As an example, let's say you combine two `RetryPolicy`s, `R1` and `R2`, where `R1` handles
* only exception `E1` with a backoff of `(10.milliseconds, 20.milliseconds, 30.milliseconds)`,
* while `R2` handles only exception `E2` with a backoff of `(15.milliseconds, 25.milliseconds)`.
*
* If a sequence of exceptions, `(E2, E1, E1, E2)`, is fed in order to the combined retry policy,
* the backoffs seen will be `(15.milliseconds, 10.milliseconds, 20.milliseconds,
* 25.milliseconds)`.
*
* The maximum number of retries the combined policy could allow under the worst case scenario
* of exceptions is equal to the sum of the individual maximum retries of each subpolicy. To
* put a cap on the combined maximum number of retries, you can call `limit` on the combined
* policy with a smaller cap.
*/
def combine[A](policies: RetryPolicy[A]*): RetryPolicy[A] =
RetryPolicy[A] { e =>
// stores the first matched backoff
var backoffOpt: Option[Duration] = None
val policies2 =
policies.map { p =>
if (backoffOpt.nonEmpty)
p
else {
p(e) match {
case None => p
case Some((backoff, p2)) =>
backoffOpt = Some(backoff)
p2
}
}
}
backoffOpt match {
case None => None
case Some(backoff) => Some((backoff, combine(policies2: _*)))
}
}
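  // Illustrative usage sketch for `combine`, mirroring the scaladoc example above: one policy
  // for write failures and one for channel-closed failures, with interleaved backoffs.
  //
  //   val r1 = RetryPolicy.backoff(Stream(10.milliseconds, 20.milliseconds, 30.milliseconds))(
  //     RetryPolicy.WriteExceptionsOnly)
  //   val r2 = RetryPolicy.backoff(Stream(15.milliseconds, 25.milliseconds))(
  //     RetryPolicy.ChannelClosedExceptionsOnly)
  //   val combined: RetryPolicy[Try[Nothing]] = RetryPolicy.combine(r1, r2)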
}
| sveinnfannar/finagle | finagle-core/src/main/scala/com/twitter/finagle/service/RetryPolicy.scala | Scala | apache-2.0 | 12,850 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming.continuous
import java.util.concurrent.{ArrayBlockingQueue, BlockingQueue}
import org.mockito.Mockito._
import org.scalatest.mockito.MockitoSugar
import org.apache.spark.{SparkEnv, TaskContext}
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeProjection, UnsafeRow}
import org.apache.spark.sql.execution.streaming.continuous._
import org.apache.spark.sql.sources.v2.reader.streaming.{ContinuousPartitionReader, ContinuousStream, PartitionOffset}
import org.apache.spark.sql.sources.v2.writer.streaming.StreamingWrite
import org.apache.spark.sql.streaming.StreamTest
import org.apache.spark.sql.types.{DataType, IntegerType, StructType}
class ContinuousQueuedDataReaderSuite extends StreamTest with MockitoSugar {
case class LongPartitionOffset(offset: Long) extends PartitionOffset
val coordinatorId = s"${getClass.getSimpleName}-epochCoordinatorIdForUnitTest"
val startEpoch = 0
var epochEndpoint: RpcEndpointRef = _
override def beforeEach(): Unit = {
super.beforeEach()
epochEndpoint = EpochCoordinatorRef.create(
mock[StreamingWrite],
mock[ContinuousStream],
mock[ContinuousExecution],
coordinatorId,
startEpoch,
spark,
SparkEnv.get)
EpochTracker.initializeCurrentEpoch(0)
}
override def afterEach(): Unit = {
SparkEnv.get.rpcEnv.stop(epochEndpoint)
epochEndpoint = null
super.afterEach()
}
private val mockContext = mock[TaskContext]
when(mockContext.getLocalProperty(ContinuousExecution.START_EPOCH_KEY))
.thenReturn(startEpoch.toString)
when(mockContext.getLocalProperty(ContinuousExecution.EPOCH_COORDINATOR_ID_KEY))
.thenReturn(coordinatorId)
/**
* Set up a ContinuousQueuedDataReader for testing. The blocking queue can be used to send
* rows to the wrapped data reader.
*/
private def setup(): (BlockingQueue[UnsafeRow], ContinuousQueuedDataReader) = {
val queue = new ArrayBlockingQueue[UnsafeRow](1024)
val partitionReader = new ContinuousPartitionReader[InternalRow] {
var index = -1
var curr: UnsafeRow = _
override def next() = {
curr = queue.take()
index += 1
true
}
override def get = curr
override def getOffset = LongPartitionOffset(index)
override def close() = {}
}
val reader = new ContinuousQueuedDataReader(
0,
partitionReader,
new StructType().add("i", "int"),
mockContext,
dataQueueSize = sqlContext.conf.continuousStreamingExecutorQueueSize,
epochPollIntervalMs = sqlContext.conf.continuousStreamingExecutorPollIntervalMs)
(queue, reader)
}
private def unsafeRow(value: Int) = {
UnsafeProjection.create(Array(IntegerType : DataType))(
new GenericInternalRow(Array(value: Any)))
}
test("basic data read") {
val (input, reader) = setup()
input.add(unsafeRow(12345))
assert(reader.next().getInt(0) == 12345)
}
test("basic epoch marker") {
val (input, reader) = setup()
epochEndpoint.askSync[Long](IncrementAndGetEpoch)
assert(reader.next() == null)
}
test("new rows after markers") {
val (input, reader) = setup()
epochEndpoint.askSync[Long](IncrementAndGetEpoch)
epochEndpoint.askSync[Long](IncrementAndGetEpoch)
epochEndpoint.askSync[Long](IncrementAndGetEpoch)
assert(reader.next() == null)
assert(reader.next() == null)
assert(reader.next() == null)
input.add(unsafeRow(11111))
input.add(unsafeRow(22222))
assert(reader.next().getInt(0) == 11111)
assert(reader.next().getInt(0) == 22222)
}
test("new markers after rows") {
val (input, reader) = setup()
input.add(unsafeRow(11111))
input.add(unsafeRow(22222))
assert(reader.next().getInt(0) == 11111)
assert(reader.next().getInt(0) == 22222)
epochEndpoint.askSync[Long](IncrementAndGetEpoch)
epochEndpoint.askSync[Long](IncrementAndGetEpoch)
epochEndpoint.askSync[Long](IncrementAndGetEpoch)
assert(reader.next() == null)
assert(reader.next() == null)
assert(reader.next() == null)
}
test("alternating markers and rows") {
val (input, reader) = setup()
input.add(unsafeRow(11111))
assert(reader.next().getInt(0) == 11111)
input.add(unsafeRow(22222))
assert(reader.next().getInt(0) == 22222)
epochEndpoint.askSync[Long](IncrementAndGetEpoch)
assert(reader.next() == null)
input.add(unsafeRow(33333))
assert(reader.next().getInt(0) == 33333)
input.add(unsafeRow(44444))
assert(reader.next().getInt(0) == 44444)
epochEndpoint.askSync[Long](IncrementAndGetEpoch)
assert(reader.next() == null)
}
}
| pgandhi999/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/continuous/ContinuousQueuedDataReaderSuite.scala | Scala | apache-2.0 | 5,589 |
/*
*************************************************************************************
* Copyright 2014 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.web.components
import com.normation.rudder.domain.policies._
import com.normation.rudder.repository.FullActiveTechniqueCategory
import com.normation.inventory.domain.NodeId
import com.normation.rudder.domain.nodes.NodeInfo
import scala.xml._
import net.liftweb.http._
import net.liftweb.common._
import com.normation.rudder.domain.reports._
import net.liftweb.util.Helpers._
import net.liftweb.util.Helpers
import net.liftweb.http.js.JsCmds._
import net.liftweb.http.js.JE._
import net.liftweb.http.js.JsCmd
import bootstrap.liftweb.RudderConfig
import com.normation.rudder.web.model.WBTextField
import com.normation.rudder.web.model.WBTextAreaField
import com.normation.rudder.web.model.WBSelectField
import com.normation.rudder.web.services.ComplianceData
import com.normation.rudder.rule.category.RuleCategory
import org.joda.time.DateTime
import org.joda.time.format.PeriodFormatterBuilder
import org.joda.time.Interval
import com.normation.rudder.web.services.ReportLine
import com.normation.rudder.web.services.ChangeLine
import com.normation.rudder.services.reports.NodeChanges
import net.liftweb.http.js.JsExp
import net.liftweb.http.js.JsObj
import com.normation.rudder.web.ChooseTemplate
object RuleCompliance {
private def details = ChooseTemplate(
"templates-hidden" :: "components" :: "ComponentRuleEditForm" :: Nil
, "component-details"
)
}
/**
* This component display the compliance of a Rule by showing compliance of every Directive
* It generates all Data and put them in a DataTable
*/
class RuleCompliance (
rule : Rule
, rootRuleCategory: RuleCategory
) extends Loggable {
private[this] val reportingService = RudderConfig.reportingService
private[this] val recentChangesService = RudderConfig.recentChangesService
private[this] val categoryService = RudderConfig.ruleCategoryService
private[this] val configService = RudderConfig.configService
  // used to fetch fresh values on each refresh
private[this] val roRuleRepository = RudderConfig.roRuleRepository
private[this] val getFullDirectiveLib = RudderConfig.roDirectiveRepository.getFullDirectiveLibrary _
private[this] val getAllNodeInfos = RudderConfig.nodeInfoService.getAll _
import RuleCompliance._
def tagsEditForm = new TagsEditForm(rule.tags)
def display : NodeSeq = {
(
"#ruleName" #> rule.name &
"#ruleCategory" #> categoryService.shortFqdn(rootRuleCategory, rule.categoryId) &
"#tagField *" #> tagsEditForm.viewTags("viewRuleTags", "ruleViewTagsApp", true) &
"#rudderID" #> rule.id.value &
"#ruleShortDescription" #> rule.shortDescription &
"#ruleLongDescription" #> rule.longDescription &
"#compliancedetails" #> showCompliance
)(details)
}
  /*
   * For each table, the sub-table is contained in a "details" td:
   * when "+" is clicked, the content of that td is read and then rendered
   * as a datatable.
   *
   * The tables are empty when the page is first displayed; a JS call
   * to refresh() then fills them.
   */
def showCompliance : NodeSeq = {
<div id="directiveComplianceSection" class="unfoldedSection" onclick="$('#directiveCompliance').toggle(400); $('#directiveComplianceSection').toggleClass('foldedSection');$('#directiveComplianceSection').toggleClass('unfoldedSection');">
<div class="section-title">Compliance by Directive</div>
</div>
<div id="directiveCompliance">
<table id="reportsGrid" cellspacing="0"> </table>
</div>
<div id="nodeComplianceSection" class="unfoldedSection" onclick="$('#nodeCompliance').toggle(400); $('#nodeComplianceSection').toggleClass('foldedSection');$('#nodeComplianceSection').toggleClass('unfoldedSection');">
<div class="section-title">Compliance by Node</div>
</div>
<div id="nodeCompliance">
<table id="nodeReportsGrid" cellspacing="0"> </table>
</div>
<div id="recentChangesSection" class="unfoldedSection" onclick="$('#recentChanges').toggle(400); $('#recentChangesSection').toggleClass('foldedSection');$('#recentChangesSection').toggleClass('unfoldedSection');">
<div class="section-title">Recent changes</div>
</div>
<div id="recentChanges" class="tw-bs">
<div class="alert alert-info " style="font-size: 14px">
<div style="width:90%;display:inline-block;">
<span class="glyphicon glyphicon-info-sign"></span>
Details of changes for each period are displayed below the graph. Click to change the selected period.
</div>
<div class="recentChange_refresh">
{SHtml.ajaxButton(<img src='/images/icRefresh.png'/>, () => refresh() , ("class","recentChangeGraph refreshButton btn btn-default") , ("title","Refresh"))}
</div>
</div>
<div id="changesChart"> </div>
</div>
<hr class="spacer" />
<span >Changes during period <b id="selectedPeriod"> --- </b> (selected in graph above)</span>
<table id="changesGrid" cellspacing="0"> </table> ++
Script(After(0,JsRaw(s"""
function refresh() {${refresh().toJsCmd}};
createDirectiveTable(true, false, "${S.contextPath}")("reportsGrid",[],refresh);
createNodeComplianceTable("nodeReportsGrid",[],"${S.contextPath}", refresh);
createChangesTable("changesGrid",[],"${S.contextPath}", refresh);
refresh();
""")))
}
def refresh() = {
//we want to be able to see at least one if the other fails
SHtml.ajaxInvoke(() => refreshCompliance()) &
SHtml.ajaxInvoke(() => refreshGraphChanges()) &
SHtml.ajaxInvoke(() => refreshTableChanges(None))
}
def refreshGraphChanges() : JsCmd = {
try {
( for {
changesOnRule <- recentChangesService.countChangesByRuleByInterval().map( _.getOrElse(rule.id, Map()))
} yield {
JsRaw(s"""
var recentChanges = ${NodeChanges.json(changesOnRule, recentChangesService.getCurrentValidIntervals(None)).toJsCmd};
var data = recentChanges.y
data.splice(0,0,'Recent changes')
var x = recentChanges.x
x.splice(0,0,'x')
var selectedIndex = x.length-2;
            //recentChart variable has to be global because we need it to refresh the graph when clicking on the compliance tab.
recentChart = c3.generate({
data: {
x: 'x'
, columns: [ x , data ]
, type: 'bar'
, onclick: function (d, element) {
selectedIndex = d.index;
${SHtml.ajaxCall(JsRaw("recentChanges.t[selectedIndex]"), s => refreshTableChanges(Some(s.toLong)))}
selectInterval(x[selectedIndex+1],element);
}
, onmouseover : function (element) {
changeCursor(element.value);
}
, onmouseout : function (element) {
changeCursor(element.value);
}
}
, legend : {
show : false
}
, bindto : '#changesChart'
, bar: {
width: {
              ratio: 1 // this makes the bar width span the full length between ticks
}
}
, axis: {
x: {
type: 'categories'
}
}
, grid: {
x: { show: true }
, y: { show: true }
}
, onrendered: function () {
var element = document.getElementsByClassName('c3-bar-'+(selectedIndex).toString())[0];
selectInterval(x[selectedIndex+1],element);
}
} );
createTooltip();
""")
}) match {
case Full(cmd) => cmd
case eb:EmptyBox =>
val fail = eb ?~! "Could not refresh recent changes"
logger.error(fail.messageChain)
Noop
}
} catch {
case oom: OutOfMemoryError =>
        val msg = "NodeChanges cannot be retrieved due to an OutOfMemory error. That means that either your installation is missing " +
          "RAM (see: http://www.rudder-project.org/doc-3.2/_performance_tuning.html#_java_out_of_memory_error) or that the number of recent changes is " +
          "overwhelming, and you hit: http://www.rudder-project.org/redmine/issues/7735. Look there for a workaround"
logger.error(msg)
Noop
}
}
/*
* Refresh the tables with details on events.
* The argument is the starting timestamp of the interval
* to check. If None is provided, the last current interval
* is used.
   * We set a hard limit of 10,000 events per 6-hour interval.
*/
def refreshTableChanges(intervalStartTimestamp: Option[Long]) : JsCmd = {
val intervals = recentChangesService.getCurrentValidIntervals(None).sortBy(_.getStartMillis)
    val failure = Failure("No interval defined. It's likely a bug, please report it to rudder-project.org/redmine")
val int = intervalStartTimestamp.orElse(intervals.lastOption.map(_.getStartMillis)) match {
case Some(t) => intervals.find { i => t == i.getStartMillis } match {
case Some(i) => Full(i)
case None => failure
}
case None => failure
}
try {
( for {
currentInterval <- int
changesOnRule <- recentChangesService.getChangesForInterval(rule.id, currentInterval, Some(10000))
directiveLib <- getFullDirectiveLib()
allNodeInfos <- getAllNodeInfos()
} yield {
val changesLine = ChangeLine.jsonByInterval(Map((currentInterval, changesOnRule)), Some(rule.name), directiveLib, allNodeInfos)
val changesArray = changesLine.in.toList.flatMap{case a:JsArray => a.in.toList; case _ => Nil}
JsRaw(s"""
refreshTable("changesGrid", ${JsArray(changesArray).toJsCmd});
""")
}) match {
case Full(cmd) => cmd
case eb:EmptyBox =>
val fail = eb ?~! "Could not refresh recent changes"
logger.error(fail.messageChain)
Noop
}
} catch {
case oom: OutOfMemoryError =>
        val msg = "NodeChanges cannot be retrieved due to an OutOfMemory error. That means that either your installation is missing " +
          "RAM (see: http://www.rudder-project.org/doc-3.2/_performance_tuning.html#_java_out_of_memory_error) or that the number of recent changes is " +
          "overwhelming, and you hit: http://www.rudder-project.org/redmine/issues/7735. Look there for a workaround"
logger.error(msg)
Noop
}
}
def refreshCompliance() : JsCmd = {
( for {
reports <- reportingService.findDirectiveRuleStatusReportsByRule(rule.id)
updatedRule <- roRuleRepository.get(rule.id)
directiveLib <- getFullDirectiveLib()
allNodeInfos <- getAllNodeInfos()
globalMode <- configService.rudder_global_policy_mode()
} yield {
val directiveData = ComplianceData.getRuleByDirectivesComplianceDetails(reports, updatedRule, allNodeInfos, directiveLib, globalMode).json.toJsCmd
val nodeData = ComplianceData.getRuleByNodeComplianceDetails(directiveLib, reports, allNodeInfos, globalMode).json.toJsCmd
JsRaw(s"""
refreshTable("reportsGrid", ${directiveData});
refreshTable("nodeReportsGrid", ${nodeData});
createTooltip();
""")
}
) match {
case Full(cmd) => cmd
case eb : EmptyBox =>
val fail = eb ?~! s"Error while computing Rule ${rule.name} (${rule.id.value})"
logger.error(fail.messageChain)
Noop
}
}
}
| armeniaca/rudder | rudder-web/src/main/scala/com/normation/rudder/web/components/RuleCompliance.scala | Scala | gpl-3.0 | 13,202 |
package org.elasticmq.persistence
import org.joda.time.DateTime
import org.scalatest.OptionValues
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class CreateQueueMetadataTest extends AnyFunSuite with Matchers with OptionValues {
test("CreateQueueMetadata and CreateQueue structures should be exchangeable") {
val createQueue = CreateQueueMetadata(
name = "xyz",
defaultVisibilityTimeoutSeconds = Some(5),
delaySeconds = Some(10),
receiveMessageWaitSeconds = Some(15),
created = 10,
lastModified = 15,
deadLettersQueue = Some(DeadLettersQueue("dlq", 3)),
isFifo = true,
hasContentBasedDeduplication = true,
copyMessagesTo = Some("xyz_copy"),
moveMessagesTo = Some("xyz_move"),
tags = Map("abc" -> "123")
)
CreateQueueMetadata.from(createQueue.toQueueData) shouldBe createQueue
}
}
| adamw/elasticmq | persistence/persistence-core/src/test/scala/org/elasticmq/persistence/CreateQueueMetadataTest.scala | Scala | apache-2.0 | 912 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.mqtt.source
import java.util
import java.util.Collections
import com.datamountaineer.streamreactor.connect.mqtt.config.{MqttSourceConfig, MqttSourceSettings}
import com.datamountaineer.streamreactor.connect.utils.JarManifest
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.kafka.common.config.ConfigDef
import org.apache.kafka.connect.connector.Task
import org.apache.kafka.connect.source.SourceConnector
import scala.collection.JavaConverters._
class MqttSourceConnector extends SourceConnector with StrictLogging {
private val configDef = MqttSourceConfig.config
private var configProps: util.Map[String, String] = _
private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
  /**
    * States which SourceTask class to use
    **/
override def taskClass(): Class[_ <: Task] = classOf[MqttSourceTask]
  /**
    * Set the configuration for each worker and determine the split
    *
    * @param maxTasks The max number of task workers we can spawn
    * @return a List of configuration properties per worker
    **/
override def taskConfigs(maxTasks: Int): util.List[util.Map[String, String]] = {
val settings = MqttSourceSettings(MqttSourceConfig(configProps))
val kcql = settings.kcql
if (maxTasks == 1 || kcql.length == 1) {
Collections.singletonList(configProps)
} else {
val groups = kcql.length / maxTasks + kcql.length % maxTasks
settings.kcql.grouped(groups)
.zipWithIndex
.map { case (p, index) =>
val map = settings.copy(kcql = p, clientId = settings.clientId + "-" + index).asMap()
import scala.collection.JavaConversions._
configProps
.filterNot { case (k, _) => map.containsKey(k) }
.foreach { case (k, v) => map.put(k, v) }
map
}
.toList.asJava
}
}
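  // Illustrative usage sketch of the split above (values are assumptions for the example):
  // with four KCQL statements in the properties and maxTasks = 2, each returned config holds
  // a subset of the statements and a distinct client id suffix.
  //
  //   connector.start(props)                  // `connector` and `props` are hypothetical
  //   val configs = connector.taskConfigs(2)  // configs.size() <= 2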
  /**
    * Start the source connector and set its configuration
    *
    * @param props A map of properties for the connector and worker
    **/
override def start(props: util.Map[String, String]): Unit = {
configProps = props
}
override def stop(): Unit = {}
override def config(): ConfigDef = configDef
override def version(): String = manifest.version()
}
| CodeSmell/stream-reactor | kafka-connect-mqtt/src/main/scala/com/datamountaineer/streamreactor/connect/mqtt/source/MqttSourceConnector.scala | Scala | apache-2.0 | 2,883 |
package sims.test.gui
import processing.core.PApplet
import processing.core.PConstants._
import sims.dynamics._
class RichShape(shape: Shape) {
private implicit def double2Float(x: Double): Float = x.toFloat
def toGraphical(implicit parent: PApplet) = new GraphicalShape(shape) {
val top = parent
val render = shape match {
case c: Circle => () => {
top.pushMatrix()
top.stroke(0, 0, 0)
top.fill(0, 0, 255, 200)
top.translate(c.position.x, c.position.y)
top.rotate(-c.rotation)
top.ellipseMode(CENTER)
top.ellipse(0, 0, c.radius * 2, c.radius * 2)
top.line(0,0, c.radius, 0)
top.popMatrix()
}
case r: Rectangle => () => {
top.pushMatrix()
top.translate(r.position.x, r.position.y)
top.rotate(-r.rotation)
top.fill(255, 0, 0, 200)
top.rectMode(CENTER)
top.rect(0, 0, r.halfWidth * 2, r.halfHeight * 2)
top.popMatrix()
}
case _ => throw new IllegalArgumentException("Cannot create graphical shape: unknown shape.")
}
}
}
object RichShape {
implicit def shapeToRichShape(s: Shape) = new RichShape(s)
} | jodersky/sims2 | src/test/scala/sims/test/gui/RichShape.scala | Scala | bsd-3-clause | 1,118 |
package org.jetbrains.plugins.scala.lang.psi.api.base
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScAnnotationsHolder
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.light.ScPrimaryConstructorWrapper
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator._
import org.jetbrains.plugins.scala.lang.psi.types.api.{TypeParameter, TypeParameterType}
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.{ScMethodType, ScTypePolymorphicType}
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, CachedInsidePsiElement, ModCount}
import scala.collection.mutable.ArrayBuffer
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
*/
trait ScPrimaryConstructor extends ScMember with ScMethodLike with ScAnnotationsHolder {
def hasMalformedSignature: Boolean = parameterList.clauses.exists {
_.parameters.dropRight(1).exists(_.isRepeatedParameter)
}
/**
   * @return whether this primary constructor has an access modifier
*/
def hasModifier: Boolean
def getClassNameText: String
def parameterList: ScParameters
def parameters : Seq[ScClassParameter] = parameterList.clauses.flatMap(_.unsafeClassParameters)
/**
   * Returns only those parameters which are also members, i.e. declared as `val` or `var`.
*/
def valueParameters: Seq[ScClassParameter] = parameters.filter((p: ScClassParameter) => p.isVal || p.isVar)
/**
   * All classes must have one non-implicit parameter list. If this is not declared in the code,
* it is assumed by the compiler.
*
* In addition, view and context bounds generate an additional implicit parameter section.
*/
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def effectiveParameterClauses: Seq[ScParameterClause] = {
def emptyParameterList: ScParameterClause =
ScalaPsiElementFactory.createEmptyClassParamClauseWithContext(parameterList)
val clausesWithInitialEmpty = parameterList.clauses match {
case Seq() => Seq(emptyParameterList)
case Seq(clause) if clause.isImplicit => Seq(emptyParameterList, clause)
case clauses => clauses
}
clausesWithInitialEmpty ++ syntheticParamClause
}
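  // Illustrative note: for a class declared as `class C[T : Ordering]`, with no explicit
  // parameter lists, the effective clauses are the assumed empty clause plus the implicit
  // clause generated by the context bound, roughly as if it were written
  // `class C[T]()(implicit ev: Ordering[T])`. The name `ev` is illustrative.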
def effectiveFirstParameterSection: Seq[ScClassParameter] = effectiveParameterClauses.head.unsafeClassParameters
private def syntheticParamClause: Option[ScParameterClause] = {
val hasImplicit = parameterList.clauses.exists(_.isImplicit)
ScalaPsiUtil.syntheticParamClause(containingClass, parameterList, classParam = true, hasImplicit)
}
def methodType(result: Option[ScType]): ScType = {
val parameters: ScParameters = parameterList
val clauses = parameters.clauses
val returnType: ScType = result.getOrElse({
val clazz = getParent.asInstanceOf[ScTypeDefinition]
val typeParameters = clazz.typeParameters
val parentClazz = ScalaPsiUtil.getPlaceTd(clazz)
val designatorType: ScType =
if (parentClazz != null)
ScProjectionType(ScThisType(parentClazz), clazz, superReference = false)
else ScDesignatorType(clazz)
if (typeParameters.isEmpty) designatorType
else {
ScParameterizedType(designatorType, typeParameters.map(TypeParameterType(_)))
}
})
if (clauses.isEmpty) return new ScMethodType(returnType, Seq.empty, false)
val res = clauses.foldRight[ScType](returnType){(clause: ScParameterClause, tp: ScType) =>
new ScMethodType(tp, clause.getSmartParameters, clause.isImplicit)
}
res.asInstanceOf[ScMethodType]
}
def polymorphicType: ScType = {
val typeParameters = getParent.asInstanceOf[ScTypeDefinition].typeParameters
if (typeParameters.isEmpty) methodType
else ScTypePolymorphicType(methodType, typeParameters.map(TypeParameter(_)))
}
def getParamByName(name: String, clausePosition: Int = -1): Option[ScParameter] = {
clausePosition match {
case -1 =>
for (param <- parameters if ScalaNamesUtil.equivalent(param.name, name)) return Some(param)
None
case i if i < 0 => None
case i if i >= effectiveParameterClauses.length => None
case i =>
val clause: ScParameterClause = effectiveParameterClauses.apply(i)
for (param <- clause.parameters if ScalaNamesUtil.equivalent(param.name, name)) return Some(param)
None
}
}
@Cached(ModCount.getBlockModificationCount, this)
def getFunctionWrappers: Seq[ScPrimaryConstructorWrapper] = {
val buffer = new ArrayBuffer[ScPrimaryConstructorWrapper]()
buffer += new ScPrimaryConstructorWrapper(this)
for {
first <- parameterList.clauses.headOption
if first.hasRepeatedParam
if hasAnnotation("scala.annotation.varargs")
} {
buffer += new ScPrimaryConstructorWrapper(this, isJavaVarargs = true)
}
val params = parameters
for (i <- params.indices if params(i).baseDefaultParam) {
buffer += new ScPrimaryConstructorWrapper(this, forDefault = Some(i + 1))
}
buffer
}
}
object ScPrimaryConstructor {
object ofClass {
def unapply(pc: ScPrimaryConstructor): Option[ScClass] = {
pc.containingClass match {
case c: ScClass => Some(c)
case _ => None
}
}
}
} | gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/api/base/ScPrimaryConstructor.scala | Scala | apache-2.0 | 5,518 |
package com.twitter.jvm
import org.junit.runner.RunWith
import org.scalatest.FunSpec
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class NumProcsTest extends FunSpec {
describe("numProcs") {
it("should be the number of available processors according to the runtime by default") {
assert(System.getProperty("com.twitter.jvm.numProcs") === null)
assert(numProcs() === Runtime.getRuntime().availableProcessors())
}
it("should be settable as a flag") {
val old = numProcs()
numProcs.parse("10.0")
assert(numProcs() === 10.0)
numProcs.parse()
assert(numProcs() === old)
}
}
}
| mosesn/util | util-jvm/src/test/scala/com/twitter/jvm/NumProcsTest.scala | Scala | apache-2.0 | 663 |
package user
case class UserIds(id: String, username: String, externalId: String)
| Leonti/receipts-rest-service | app/src/main/scala/user/UserIds.scala | Scala | mit | 83 |
package scalpel.port
object InterProc {
val jsonMarker = "//ZxJ/"
}
trait InterleavedReader {
lazy val marker = InterProc.jsonMarker
def log(s:String):Unit
def handleJson(s:String):Unit
def readLine(s:String) = {
if(s.startsWith(marker)) handleJson(s.substring(marker.length))
else log(s)
}
}
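// Illustrative usage sketch: a concrete reader that routes marker-prefixed lines to
// handleJson and everything else to log. The println bodies are assumptions for the example.
//
//   object ConsoleReader extends InterleavedReader {
//     def log(s: String): Unit = println(s"log: $s")
//     def handleJson(s: String): Unit = println(s"json: $s")
//   }
//   ConsoleReader.readLine(InterProc.jsonMarker + """{"ok":true}""") // routed to handleJson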
| lossyrob/scalpel | src/main/scala/scalpel/port/InterleavedReader.scala | Scala | bsd-3-clause | 326 |
package xitrum.handler.inbound
import io.netty.channel.Channel
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.ChannelHandlerContext
import io.netty.channel.SimpleChannelInboundHandler
import io.netty.handler.codec.haproxy.HAProxyMessage
import io.netty.handler.codec.http.HttpRequest
import io.netty.util.AttributeKey
object ProxyProtocolHandler {
private val HAPROXY_PROTOCOL_SOURCE_IP: AttributeKey[String] =
AttributeKey.valueOf("HAProxyMessageSourceIp").asInstanceOf[AttributeKey[String]]
def setRemoteIp(channel: Channel, sourceIp: String): Unit = {
channel.attr(HAPROXY_PROTOCOL_SOURCE_IP).set(sourceIp)
}
def setRemoteIp(channel: Channel, request: HttpRequest): Unit = {
channel.attr(HAPROXY_PROTOCOL_SOURCE_IP).get() match {
case sourceIp: String =>
val headers = request.headers
val xForwardedFor = headers.get("X-Forwarded-For")
if (xForwardedFor != null) {
headers.set("X-Forwarded-For", xForwardedFor.concat(s", $sourceIp"))
} else {
headers.add("X-Forwarded-For", sourceIp)
}
case _ =>
}
}
}
@Sharable
class ProxyProtocolHandler extends SimpleChannelInboundHandler[HAProxyMessage] {
override def channelRead0(ctx: ChannelHandlerContext, msg: HAProxyMessage): Unit = {
ProxyProtocolHandler.setRemoteIp(ctx.channel, msg.sourceAddress)
ctx.channel.pipeline.remove(this)
}
}
| xitrum-framework/xitrum | src/main/scala/xitrum/handler/inbound/ProxyProtocolHandler.scala | Scala | mit | 1,429 |
package util.tasks
import org.scalatest._
/**
* TimedTaskExecutor test
* Created by CAB on 13.10.2014.
*/
class TimedTaskExecutorTest extends WordSpecLike with Matchers {
//TimedTask executor
val executor = new TimedTaskExecutor{}
//Test
"should start" in {
executor.start()}
"should execute task" in {
var fl = false
val t1 = new TimedTask(System.currentTimeMillis(), 1){def execute() = {fl = true}}
executor.addTask(t1)
Thread.sleep(100)
assert(fl == true)}
"should execute task with time out" in {
var fl = false
val t1 = new TimedTask(System.currentTimeMillis() + 2000, 1){def execute() = {fl = true}}
executor.addTask(t1)
Thread.sleep(1000)
assert(fl == false)
Thread.sleep(1100)
assert(fl == true)}
"should execute task by priorityColumn" in {
var fl = false
val t = System.currentTimeMillis() + 100
val t1 = new TimedTask(t, 1){def execute() = {fl = true}}
val t2 = new TimedTask(t, 2){def execute() = {Thread.sleep(1000)}}
executor.addTask(t1)
executor.addTask(t2)
Thread.sleep(500)
assert(fl == false)
Thread.sleep(1100)
    assert(fl == true)}
  "should pause and run again after unpause" in {
var fl = false
val t = System.currentTimeMillis()
val t1 = new TimedTask(t + 100, 2){def execute() = {Thread.sleep(500)}}
val t2 = new TimedTask(t + 200, 1){def execute() = {fl = true}}
executor.addTask(t1)
executor.addTask(t2)
Thread.sleep(400)
executor.setPaused(true)
Thread.sleep(400)
assert(fl == false)
executor.setPaused(false)
Thread.sleep(10)
assert(fl == true)}
// "should not add task if already exist" in {
// var i = 0
// class ExTask(val ct:Long, p:Int, s:String) extends TimedTask(ct, 1, s){
// def execute() = {i += 1}}
// val ct = System.currentTimeMillis() + 100
// executor.addTask(new ExTask(ct, 1, "qq"))
// executor.addTask(new ExTask(ct, 2, "qq"))
// Thread.sleep(400)
  //    assert(i == 1)}
  "should return the number of tasks like a given task" in {
val ex = new TimedTaskExecutor{}
class TT1(t:Int) extends TimedTask(t, 1){def execute() = {}}
class TT2(t:Int) extends TimedTask(t, 1){def execute() = {}}
//
ex.addTask(new TT1(0))
ex.addTask(new TT2(0))
ex.addTask(new TT1(0))
ex.addTask(new TT2(0))
ex.addTask(new TT1(0))
//
assert(ex.getNumTaskLike(new TT1(1)) == 3)
assert(ex.getNumTaskLike(new TT2(2)) == 2)}
"should stop only after end current task" in {
var fl = false
val ct = System.currentTimeMillis()
val t1 = new TimedTask(ct, 1){def execute() = {
Thread.sleep(1000)
fl = true}}
executor.addTask(t1)
Thread.sleep(100)
executor.stop()
assert(fl == true)
assert(System.currentTimeMillis() > ct + 900)}}
| AlexCAB/FreelanceAnalytics | test/util/tasks/TimedTaskExecutorTest.scala | Scala | mit | 2,840 |
package com.example.http4s
package war
import cats.effect.IO
import fs2.Scheduler
import javax.servlet.{ServletContextEvent, ServletContextListener}
import javax.servlet.annotation.WebListener
import org.http4s.servlet.syntax._
@WebListener
class Bootstrap extends ServletContextListener {
lazy val (scheduler, shutdownSched) = Scheduler.allocate[IO](corePoolSize = 2).unsafeRunSync()
override def contextInitialized(sce: ServletContextEvent): Unit = {
implicit val scheduler: Scheduler = this.scheduler
val ctx = sce.getServletContext
ctx.mountService("example", new ExampleService[IO].service)
()
}
override def contextDestroyed(sce: ServletContextEvent): Unit =
shutdownSched.unsafeRunSync()
}
| reactormonk/http4s | examples/war/src/main/scala/com/example/http4s/war/Bootstrap.scala | Scala | apache-2.0 | 730 |
package com.twitter.finagle.zipkin.core
import java.net.InetSocketAddress
import org.scalatest.funsuite.AnyFunSuite
class EndpointTest extends AnyFunSuite {
private[this] val unresolved = InetSocketAddress.createUnresolved("nope", 44)
test("toIpv4 with null") {
assert(0 == Endpoint.toIpv4(unresolved.getAddress))
}
test("fromSocketAddress with unresolved InetSocketAddress") {
val endpoint = Endpoint.fromSocketAddress(unresolved)
assert(0 == endpoint.ipv4)
assert(unresolved.getPort == endpoint.port)
}
}
| twitter/finagle | finagle-zipkin-core/src/test/scala/com/twitter/finagle/zipkin/core/EndpointTest.scala | Scala | apache-2.0 | 539 |
import scala.language.postfixOps
object Test extends dotty.runtime.LegacyApp {
// lists
println(List(1, 2, 3, 4).slice(1, 2))
println(List(1, 2, 3, 4).slice(2, 1))
println(List(1, 2, 3, 4).slice(-1, 1))
println(List(1, 2, 3, 4).slice(1, -1))
println(List(1, 2, 3, 4).slice(-2, 2))
println
println(List(1, 2, 3, 4) take 3)
println(List(1, 2, 3) take 3)
println(List(1, 2) take 3)
println((List(): List[Int]) take 3)
println(List[Nothing]() take 3)
println
println(List(1, 2, 3, 4) drop 3)
println(List(1, 2, 3) drop 3)
println(List(1, 2) drop 3)
println((List(): List[Int]) drop 3)
println(List[Nothing]() drop 3)
println
// arrays
println(Array(1, 2, 3, 4).slice(1, 2).deep)
println(Array(1, 2, 3, 4).slice(2, 1).deep)
println(Array(1, 2, 3, 4).slice(-1, 1).deep)
println(Array(1, 2, 3, 4).slice(1, -1).deep)
println(Array(1, 2, 3, 4).slice(-2, 2).deep)
println
println(Array(1, 2, 3, 4) take 3 deep)
println(Array(1, 2, 3) take 3 deep)
println(Array(1, 2) take 3 deep)
println((Array(): Array[Int]) take 3 deep)
// println(Array[Nothing]() take 3) // contrib #757
println
println(Array(1, 2, 3, 4) drop 3 deep)
println(Array(1, 2, 3) drop 3 deep)
println(Array(1, 2) drop 3 deep)
println((Array(): Array[Int]) drop 3 deep)
// println(Array[Nothing]() drop 3)
println
}
| yusuke2255/dotty | tests/run/slices.scala | Scala | bsd-3-clause | 1,353 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn.ops
import com.intel.analytics.bigdl.nn.CAddTable
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.T
import org.scalatest.{FlatSpec, Matchers}
class AddSpec extends FlatSpec with Matchers {
"Add operation" should "works correctly" in {
import com.intel.analytics.bigdl.numeric.NumericFloat
val input =
T(
Tensor(T(1f, 2f, 3f)),
Tensor(T(2f, 2f, 4f)),
Tensor(T(7f, 3f, 1f))
)
val expectOutput = Tensor(T(10f, 7f, 8f))
val output = CAddTable().forward(input)
output should be(expectOutput)
}
}
| wzhongyuan/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/ops/AddSpec.scala | Scala | apache-2.0 | 1,232 |
package io.github.edadma.numbers
case class QuaternionInt(a: Int, b: Int, c: Int, d: Int) extends AbstractQuaternionRational[Int, QuaternionInt] {
protected def fractional(a: Int): Double = a.toDouble
protected def quaternion(a: Int, b: Int, c: Int, d: Int): QuaternionInt = QuaternionInt(a, b, c, d)
protected def promote: QuaternionDouble = QuaternionDouble(a, b, c, d)
protected def divide(a: Int, b: Int): Int = unsup
def zero: QuaternionInt = QuaternionInt.zero
def one: QuaternionInt = QuaternionInt.one
def i: QuaternionInt = QuaternionInt.i
def doubleValue: Double = abs
def floatValue: Float = abs.toFloat
def intValue: Int = abs.toInt
def longValue: Long = abs.toLong
}
object QuaternionInt {
val i: QuaternionInt = QuaternionInt(0, 1, 0, 0)
val j: QuaternionInt = QuaternionInt(0, 0, 1, 0)
val k: QuaternionInt = QuaternionInt(0, 0, 0, 1)
val zero: QuaternionInt = QuaternionInt(0, 0, 0, 0)
val one: QuaternionInt = QuaternionInt(1, 0, 0, 0)
def apply(a: Int) = new QuaternionInt(a, 0, 0, 0)
implicit def int2quaternion(a: Int): QuaternionInt = QuaternionInt(a)
}
| edadma/numbers | shared/src/main/scala/io/github/edadma/numbers/QuaternionInt.scala | Scala | mit | 1,133 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.mvc
import akka.stream._
import akka.stream.scaladsl._
import akka.actor.ActorSystem
import akka.util.ByteString
import org.specs2.mutable.Specification
import play.core.test.FakeHeaders
import play.core.test.FakeRequest
import scala.concurrent.Await
import scala.concurrent.duration.Duration
class MultipartBodyParserSpec extends Specification {
"Multipart body parser" should {
implicit val system = ActorSystem()
implicit val materializer = Materializer.matFromSystem
implicit val executionContext = system.dispatcher
val playBodyParsers = PlayBodyParsers(tfc = new InMemoryTemporaryFileCreator(10))
"return an error if temporary file creation fails" in {
val fileSize = 100
val boundary = "-----------------------------14568445977970839651285587160"
val header =
s"--$boundary\\r\\n" +
"Content-Disposition: form-data; name=\\"uploadedfile\\"; filename=\\"uploadedfile.txt\\"\\r\\n" +
"Content-Type: application/octet-stream\\r\\n" +
"\\r\\n"
val content = Array.ofDim[Byte](fileSize)
val footer =
"\\r\\n" +
"\\r\\n" +
s"--$boundary--\\r\\n"
val body = Source(
ByteString(header) ::
ByteString(content) ::
ByteString(footer) ::
Nil
)
val bodySize = header.length + fileSize + footer.length
val request = FakeRequest(
method = "POST",
uri = "/x",
headers = FakeHeaders(
Seq("Content-Type" -> s"multipart/form-data; boundary=$boundary", "Content-Length" -> bodySize.toString)
),
body = body
)
val response = playBodyParsers.multipartFormData.apply(request).run(body)
Await.result(response, Duration.Inf) must throwA[IOOperationIncompleteException]
}
}
}
| benmccann/playframework | core/play/src/test/scala/play/api/mvc/MultipartBodyParserSpec.scala | Scala | apache-2.0 | 1,901 |
package im.actor.server.group
import java.time.{ LocalDateTime, ZoneOffset }
import akka.actor.Status
import akka.pattern.pipe
import com.google.protobuf.ByteString
import im.actor.api.rpc.Update
import im.actor.api.rpc.groups._
import im.actor.api.rpc.messaging.ServiceMessage
import im.actor.api.rpc.misc.Extension
import im.actor.api.rpc.users.Sex
import im.actor.server.ApiConversions._
import im.actor.server.acl.ACLUtils
import im.actor.server.history.HistoryUtils
import im.actor.server.{ persist ⇒ p, models }
import im.actor.server.event.TSEvent
import im.actor.server.file.{ ImageUtils, Avatar }
import im.actor.server.group.GroupErrors._
import im.actor.server.office.PushTexts
import im.actor.server.dialog.group.GroupDialogOperations
import im.actor.server.sequence.SeqUpdatesManager._
import im.actor.server.sequence.{ SeqState, SeqStateDate }
import im.actor.server.user.UserOffice
import ACLUtils._
import im.actor.server.util.IdUtils._
import ImageUtils._
import org.joda.time.DateTime
import slick.driver.PostgresDriver.api._
import scala.concurrent.Future
import scala.concurrent.forkjoin.ThreadLocalRandom
private[group] trait GroupCommandHandlers extends GroupsImplicits with GroupCommandHelpers {
this: GroupProcessor ⇒
import GroupCommands._
import GroupEvents._
protected def createInternal(typ: GroupType, creatorUserId: Int, title: String, userIds: Seq[Int], isHidden: Option[Boolean], isHistoryShared: Option[Boolean], extensions: Seq[Extension] = Seq.empty): Unit = {
val accessHash = genAccessHash()
val date = now()
val created = GroupEvents.Created(groupId, Some(typ), creatorUserId, accessHash, title, (userIds.toSet + creatorUserId).toSeq, isHidden, isHistoryShared, extensions)
val state = initState(date, created)
persist(TSEvent(date, created)) { _ ⇒
context become working(state)
val rng = ThreadLocalRandom.current()
// FIXME: invite other members
val update = UpdateGroupInvite(groupId, creatorUserId, date.getMillis, rng.nextLong())
db.run(for {
_ ← createInDb(state, rng.nextLong())
_ ← p.GroupUser.create(groupId, creatorUserId, creatorUserId, date, None, isAdmin = true)
_ ← DBIO.from(UserOffice.broadcastUserUpdate(creatorUserId, update, pushText = None, isFat = true, deliveryId = Some(s"creategroup_${groupId}_${update.randomId}")))
} yield CreateInternalAck(accessHash)) pipeTo sender() onFailure {
case e ⇒
log.error(e, "Failed to create group internally")
}
}
}
protected def create(groupId: Int, typ: GroupType, creatorUserId: Int, creatorAuthId: Long, title: String, randomId: Long, userIds: Set[Int]): Unit = {
val accessHash = genAccessHash()
val rng = ThreadLocalRandom.current()
userIds.filterNot(_ == creatorUserId) foreach { userId ⇒
val randomId = rng.nextLong()
context.parent ! Invite(groupId, userId, creatorUserId, creatorAuthId, randomId)
}
val date = now()
val created = GroupEvents.Created(groupId, Some(typ), creatorUserId, accessHash, title, Seq(creatorUserId), isHidden = Some(false), isHistoryShared = Some(false))
val state = initState(date, created)
persist(TSEvent(date, created)) { _ ⇒
context become working(state)
val serviceMessage = GroupServiceMessages.groupCreated
val update = UpdateGroupInvite(groupId = groupId, inviteUserId = creatorUserId, date = date.getMillis, randomId = randomId)
db.run(
for {
_ ← p.Group.create(
models.Group(
id = groupId,
creatorUserId = state.creatorUserId,
accessHash = state.accessHash,
title = state.title,
isPublic = (state.typ == GroupType.Public),
createdAt = state.createdAt,
about = None,
topic = None
),
randomId
)
_ ← p.GroupUser.create(groupId, creatorUserId, creatorUserId, date, None, isAdmin = true)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(creatorUserId),
models.Peer.group(state.id),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
seqstate ← if (isBot(state, creatorUserId)) DBIO.successful(SeqState(0, ByteString.EMPTY))
else DBIO.from(UserOffice.broadcastClientUpdate(creatorUserId, creatorAuthId, update, pushText = None, isFat = true, deliveryId = Some(s"creategroup_${groupId}_${randomId}")))
} yield CreateAck(state.accessHash, seqstate, date.getMillis)
) pipeTo sender() onFailure {
case e ⇒
log.error(e, "Failed to create a group")
}
}
val botUserId = nextIntId(rng)
val botToken = accessToken(rng)
val botAdded = GroupEvents.BotAdded(botUserId, botToken)
persist(TSEvent(now(), botAdded)) { tsEvt ⇒
context become working(updatedState(tsEvt, state))
(for {
_ ← UserOffice.create(botUserId, nextAccessSalt(ThreadLocalRandom.current()), "Bot", "US", Sex.Unknown, isBot = true)
_ ← db.run(p.GroupBot.create(groupId, botUserId, botToken))
_ ← integrationTokensKv.upsert(botToken, groupId)
} yield ()) onFailure {
case e ⇒
log.error(e, "Failed to create group bot")
}
}
}
protected def invite(group: Group, userId: Int, inviterUserId: Int, inviterAuthId: Long, randomId: Long, date: DateTime): Future[SeqStateDate] = {
val dateMillis = date.getMillis
val memberIds = group.members.keySet
val inviteeUpdate = UpdateGroupInvite(groupId = groupId, randomId = randomId, inviteUserId = inviterUserId, date = dateMillis)
val userAddedUpdate = UpdateGroupUserInvited(groupId = groupId, userId = userId, inviterUserId = inviterUserId, date = dateMillis, randomId = randomId)
val serviceMessage = GroupServiceMessages.userInvited(userId)
for {
_ ← db.run(p.GroupUser.create(groupId, userId, inviterUserId, date, None, isAdmin = false))
_ ← UserOffice.broadcastUserUpdate(userId, inviteeUpdate, pushText = Some(PushTexts.Invited), isFat = true, deliveryId = Some(s"invite_${groupId}_${randomId}"))
      // TODO: #perf the following broadcasts serialize the update separately for each user
_ ← Future.sequence(memberIds.toSeq.filterNot(_ == inviterUserId).map(UserOffice.broadcastUserUpdate(_, userAddedUpdate, Some(PushTexts.Added), isFat = true, deliveryId = Some(s"useradded_${groupId}_${randomId}")))) // use broadcastUsersUpdate maybe?
seqstate ← UserOffice.broadcastClientUpdate(inviterUserId, inviterAuthId, userAddedUpdate, pushText = None, isFat = true, deliveryId = Some(s"useradded_${groupId}_${randomId}"))
// TODO: Move to a History Writing subsystem
_ ← db.run(HistoryUtils.writeHistoryMessage(
models.Peer.privat(inviterUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
))
} yield {
SeqStateDate(seqstate.seq, seqstate.state, dateMillis)
}
}
protected def setJoined(group: Group, joiningUserId: Int, joiningUserAuthId: Long, invitingUserId: Int): Unit = {
if (!hasMember(group, joiningUserId) || isInvited(group, joiningUserId)) {
val replyTo = sender()
persist(TSEvent(now(), GroupEvents.UserJoined(joiningUserId, invitingUserId))) { evt ⇒
val newState = workWith(evt, group)
val memberIds = group.members.keySet
val action: DBIO[(SeqStateDate, Vector[Int], Long)] = {
for {
updates ← {
val date = new DateTime
val randomId = ThreadLocalRandom.current().nextLong()
for {
exists ← p.GroupUser.exists(groupId, joiningUserId)
_ ← if (exists) DBIO.successful(()) else p.GroupUser.create(groupId, joiningUserId, invitingUserId, date, Some(LocalDateTime.now(ZoneOffset.UTC)), isAdmin = false)
seqstatedate ← DBIO.from(GroupDialogOperations.sendMessage(groupId, joiningUserId, joiningUserAuthId, randomId, GroupServiceMessages.userJoined, isFat = true))
} yield (seqstatedate, memberIds.toVector :+ invitingUserId, randomId)
}
} yield updates
}
db.run(action) pipeTo replyTo onFailure {
case e ⇒
replyTo ! Status.Failure(e)
}
}
} else {
sender() ! Status.Failure(GroupErrors.UserAlreadyInvited)
}
}
protected def kick(group: Group, kickedUserId: Int, kickerUserId: Int, kickerAuthId: Long, randomId: Long): Unit = {
val replyTo = sender()
val date = new DateTime
persist(TSEvent(now(), GroupEvents.UserKicked(kickedUserId, kickerUserId, date.getMillis))) { evt ⇒
workWith(evt, group)
val update = UpdateGroupUserKick(groupId, kickedUserId, kickerUserId, date.getMillis, randomId)
val serviceMessage = GroupServiceMessages.userKicked(kickedUserId)
db.run(removeUser(kickedUserId, group.members.keySet, kickerAuthId, serviceMessage, update, date, randomId)) pipeTo replyTo onFailure {
case e ⇒ replyTo ! Status.Failure(e)
}
}
}
protected def leave(group: Group, userId: Int, authId: Long, randomId: Long): Unit = {
val replyTo = sender()
val date = new DateTime
persist(TSEvent(now(), GroupEvents.UserLeft(userId, date.getMillis))) { evt ⇒
workWith(evt, group)
val update = UpdateGroupUserLeave(groupId, userId, date.getMillis, randomId)
val serviceMessage = GroupServiceMessages.userLeft(userId)
db.run(removeUser(userId, group.members.keySet, authId, serviceMessage, update, date, randomId)) pipeTo replyTo onFailure {
case e ⇒ replyTo ! Status.Failure(e)
}
}
}
protected def updateAvatar(group: Group, clientUserId: Int, clientAuthId: Long, avatarOpt: Option[Avatar], randomId: Long): Unit = {
persistStashingReply(TSEvent(now(), AvatarUpdated(avatarOpt)), group) { evt ⇒
val date = new DateTime
val avatarData = avatarOpt map (getAvatarData(models.AvatarData.OfGroup, groupId, _)) getOrElse models.AvatarData.empty(models.AvatarData.OfGroup, groupId.toLong)
val update = UpdateGroupAvatarChanged(groupId, clientUserId, avatarOpt, date.getMillis, randomId)
val serviceMessage = GroupServiceMessages.changedAvatar(avatarOpt)
val memberIds = group.members.keySet
db.run(for {
_ ← p.AvatarData.createOrUpdate(avatarData)
(seqstate, _) ← broadcastClientAndUsersUpdate(clientUserId, clientAuthId, memberIds, update, None, isFat = false)
} yield {
db.run(HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
))
UpdateAvatarAck(avatarOpt, SeqStateDate(seqstate.seq, seqstate.state, date.getMillis))
})
}
}
protected def makePublic(group: Group, description: String): Unit = {
persistStashingReply(Vector(TSEvent(now(), BecamePublic()), TSEvent(now(), AboutUpdated(Some(description)))), group) { _ ⇒
db.run(DBIO.sequence(Seq(
p.Group.makePublic(groupId),
p.Group.updateAbout(groupId, Some(description))
))) map (_ ⇒ MakePublicAck())
}
}
protected def updateTitle(group: Group, clientUserId: Int, clientAuthId: Long, title: String, randomId: Long): Unit = {
val memberIds = group.members.keySet
persistStashingReply(TSEvent(now(), TitleUpdated(title)), group) { _ ⇒
val date = new DateTime
val update = UpdateGroupTitleChanged(groupId = groupId, userId = clientUserId, title = title, date = date.getMillis, randomId = randomId)
val serviceMessage = GroupServiceMessages.changedTitle(title)
db.run(for {
_ ← p.Group.updateTitle(groupId, title, clientUserId, randomId, date)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
(seqstate, _) ← broadcastClientAndUsersUpdate(clientUserId, clientAuthId, memberIds, update, Some(PushTexts.TitleChanged), isFat = false)
} yield SeqStateDate(seqstate.seq, seqstate.state, date.getMillis))
}
}
protected def updateTopic(group: Group, clientUserId: Int, clientAuthId: Long, topic: Option[String], randomId: Long): Unit = {
withGroupMember(group, clientUserId) { member ⇒
val trimmed = topic.map(_.trim)
      if (trimmed.map(s ⇒ s.nonEmpty && s.length < 255).getOrElse(true)) {
persistStashingReply(TSEvent(now(), TopicUpdated(trimmed)), group) { _ ⇒
val date = new DateTime
val dateMillis = date.getMillis
val serviceMessage = GroupServiceMessages.changedTopic(trimmed)
val update = UpdateGroupTopicChanged(groupId = groupId, randomId = randomId, userId = clientUserId, topic = trimmed, date = dateMillis)
db.run(for {
_ ← p.Group.updateTopic(groupId, trimmed)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
(SeqState(seq, state), _) ← broadcastClientAndUsersUpdate(
clientUserId = clientUserId,
clientAuthId = clientAuthId,
userIds = group.members.keySet - clientUserId,
update = update,
pushText = Some(PushTexts.TopicChanged),
isFat = false
)
} yield SeqStateDate(seq, state, dateMillis))
}
} else {
sender() ! Status.Failure(TopicTooLong)
}
}
}
protected def updateAbout(group: Group, clientUserId: Int, clientAuthId: Long, about: Option[String], randomId: Long): Unit = {
withGroupAdmin(group, clientUserId) {
val trimmed = about.map(_.trim)
      if (trimmed.map(s ⇒ s.nonEmpty && s.length < 255).getOrElse(true)) {
persistStashingReply(TSEvent(now(), AboutUpdated(trimmed)), group) { _ ⇒
val date = new DateTime
val dateMillis = date.getMillis
val update = UpdateGroupAboutChanged(groupId, trimmed)
val serviceMessage = GroupServiceMessages.changedAbout(trimmed)
db.run(for {
_ ← p.Group.updateAbout(groupId, trimmed)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(clientUserId),
models.Peer.group(groupId),
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
(SeqState(seq, state), _) ← broadcastClientAndUsersUpdate(
clientUserId = clientUserId,
clientAuthId = clientAuthId,
userIds = group.members.keySet - clientUserId,
update = update,
pushText = Some(PushTexts.AboutChanged),
isFat = false
)
} yield SeqStateDate(seq, state, dateMillis))
}
} else {
sender() ! Status.Failure(AboutTooLong)
}
}
}
protected def makeUserAdmin(group: Group, clientUserId: Int, clientAuthId: Long, candidateId: Int): Unit = {
withGroupAdmin(group, clientUserId) {
withGroupMember(group, candidateId) { member ⇒
persistStashingReply(TSEvent(now(), UserBecameAdmin(candidateId, clientUserId)), group) { e ⇒
val date = e.ts
if (!member.isAdmin) {
            // the current state is not yet updated by the UserBecameAdmin event, so we update it manually here
val updated = group.members.updated(candidateId, group.members(candidateId).copy(isAdmin = true))
val members = updated.values.map(_.asStruct).toVector
db.run(for {
_ ← p.GroupUser.makeAdmin(groupId, candidateId)
(seqState, _) ← broadcastClientAndUsersUpdate(
clientUserId = clientUserId,
clientAuthId = clientAuthId,
userIds = group.members.keySet - clientUserId,
update = UpdateGroupMembersUpdate(groupId, members),
pushText = None,
isFat = false
)
} yield (members, seqState))
} else {
Future.failed(UserAlreadyAdmin)
}
}
}
}
}
protected def revokeIntegrationToken(group: Group, userId: Int): Unit = {
withGroupAdmin(group, userId) {
val oldToken = group.bot.map(_.token)
val newToken = accessToken(ThreadLocalRandom.current())
persistStashingReply(TSEvent(now(), IntegrationTokenRevoked(newToken)), group) { _ ⇒
for {
_ ← db.run(p.GroupBot.updateToken(groupId, newToken))
_ ← integrationTokensKv.delete(oldToken.getOrElse(""))
_ ← integrationTokensKv.upsert(newToken, groupId)
} yield RevokeIntegrationTokenAck(newToken)
}
}
}
private def removeUser(userId: Int, memberIds: Set[Int], clientAuthId: Long, serviceMessage: ServiceMessage, update: Update, date: DateTime, randomId: Long): DBIO[SeqStateDate] = {
val groupPeer = models.Peer.group(groupId)
for {
_ ← p.GroupUser.delete(groupId, userId)
_ ← p.GroupInviteToken.revoke(groupId, userId)
(SeqState(seq, state), _) ← broadcastClientAndUsersUpdate(userId, clientAuthId, memberIds - userId, update, Some(PushTexts.Left), isFat = false)
// TODO: Move to a History Writing subsystem
_ ← p.Dialog.updateLastReadAt(userId, groupPeer, date)
_ ← p.Dialog.updateOwnerLastReadAt(userId, groupPeer, date)
_ ← HistoryUtils.writeHistoryMessage(
models.Peer.privat(userId),
groupPeer,
date,
randomId,
serviceMessage.header,
serviceMessage.toByteArray
)
} yield SeqStateDate(seq, state, date.getMillis)
}
private def genAccessHash(): Long =
ThreadLocalRandom.current().nextLong()
private def createInDb(state: Group, randomId: Long) =
p.Group.create(
models.Group(
id = groupId,
creatorUserId = state.creatorUserId,
accessHash = state.accessHash,
title = state.title,
isPublic = (state.typ == GroupType.Public),
createdAt = state.createdAt,
about = None,
topic = None
),
randomId
)
}
| dut3062796s/actor-platform | actor-server/actor-core/src/main/scala/im/actor/server/group/GroupCommandHandlers.scala | Scala | mit | 18,805 |
object Test extends App {
{
import LibA._
assert(optTwo == Sm(2))
assert(smTwo == Sm(2))
assert(none == Nn)
}
{
import LibB._
assert(optTwo == Sm(2))
assert(smTwo == Sm(2))
assert(none == Nn)
}
{
import LibC._
import Opt._
assert(optTwo == Sm(2))
assert(smTwo == Sm(2))
assert(none == Nn)
}
}
| dotty-staging/dotty | tests/run-macros/quoted-ToExpr-derivation-macro/Test_3.scala | Scala | apache-2.0 | 360 |
package breeze.signal
import breeze.generic.UFunc
import breeze.numerics.sqrt
import breeze.linalg._
/** Root mean square of a vector.
*
* @author ktakagaki
* @author dlwh
* @date 2/17/14.
*/
object rootMeanSquare extends UFunc {
implicit def rms1D[Vec](
implicit normImpl: norm.Impl2[Vec, Int, Double],
dimImpl: dim.Impl[Vec, Int]): rootMeanSquare.Impl[Vec, Double] = {
new rootMeanSquare.Impl[Vec, Double] {
def apply(v: Vec): Double = {
val n: Double = norm(v, 2)
n / sqrt(dim(v).toDouble)
}
}
}
}
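// Hedged usage sketch (added for illustration; not part of the original file): applies the
// UFunc defined above to a small DenseVector. The object name and sample values are made up,
// and the result assumes the standard DenseVector norm/dim implicits resolve as in the library's tests.
object rootMeanSquareExample extends App {
  val v = DenseVector(1.0, 2.0, 3.0, 4.0)
  // norm(v, 2) = sqrt(1 + 4 + 9 + 16) = sqrt(30) and dim(v) = 4,
  // so rootMeanSquare(v) = sqrt(30) / sqrt(4) ≈ 2.739
  println(rootMeanSquare(v))
}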
| scalanlp/breeze | math/src/main/scala/breeze/signal/rootMeanSquare.scala | Scala | apache-2.0 | 562 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.util.Optional
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, Map => MutableMap}
import org.apache.spark.sql.{Dataset, SparkSession}
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeMap, CurrentBatchTimestamp, CurrentDate, CurrentTimestamp}
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan, Project}
import org.apache.spark.sql.execution.SQLExecution
import org.apache.spark.sql.execution.datasources.v2.{StreamingDataSourceV2Relation, WriteToDataSourceV2}
import org.apache.spark.sql.execution.streaming.sources.{InternalRowMicroBatchWriter, MicroBatchWriter}
import org.apache.spark.sql.sources.v2.{DataSourceOptions, MicroBatchReadSupport, StreamWriteSupport}
import org.apache.spark.sql.sources.v2.reader.streaming.{MicroBatchReader, Offset => OffsetV2}
import org.apache.spark.sql.sources.v2.writer.SupportsWriteInternalRow
import org.apache.spark.sql.streaming.{OutputMode, ProcessingTime, Trigger}
import org.apache.spark.util.{Clock, Utils}
class MicroBatchExecution(
sparkSession: SparkSession,
name: String,
checkpointRoot: String,
analyzedPlan: LogicalPlan,
sink: BaseStreamingSink,
trigger: Trigger,
triggerClock: Clock,
outputMode: OutputMode,
extraOptions: Map[String, String],
deleteCheckpointOnStop: Boolean)
extends StreamExecution(
sparkSession, name, checkpointRoot, analyzedPlan, sink,
trigger, triggerClock, outputMode, deleteCheckpointOnStop) {
@volatile protected var sources: Seq[BaseStreamingSource] = Seq.empty
private val triggerExecutor = trigger match {
case t: ProcessingTime => ProcessingTimeExecutor(t, triggerClock)
case OneTimeTrigger => OneTimeExecutor()
case _ => throw new IllegalStateException(s"Unknown type of trigger: $trigger")
}
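  // Illustrative note (added for clarity; not part of the original class): the triggers matched
  // above originate from the public streaming API. For example, a hypothetical query started with
  //   df.writeStream.trigger(Trigger.ProcessingTime("10 seconds")).start()
  // is driven by ProcessingTimeExecutor, while Trigger.Once() (OneTimeTrigger) maps to OneTimeExecutor.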
override lazy val logicalPlan: LogicalPlan = {
assert(queryExecutionThread eq Thread.currentThread,
"logicalPlan must be initialized in QueryExecutionThread " +
s"but the current thread was ${Thread.currentThread}")
var nextSourceId = 0L
val toExecutionRelationMap = MutableMap[StreamingRelation, StreamingExecutionRelation]()
val v2ToExecutionRelationMap = MutableMap[StreamingRelationV2, StreamingExecutionRelation]()
// We transform each distinct streaming relation into a StreamingExecutionRelation, keeping a
// map as we go to ensure each identical relation gets the same StreamingExecutionRelation
// object. For each microbatch, the StreamingExecutionRelation will be replaced with a logical
// plan for the data within that batch.
// Note that we have to use the previous `output` as attributes in StreamingExecutionRelation,
// since the existing logical plan has already used those attributes. The per-microbatch
// transformation is responsible for replacing attributes with their final values.
val disabledSources =
sparkSession.sqlContext.conf.disabledV2StreamingMicroBatchReaders.split(",")
val _logicalPlan = analyzedPlan.transform {
case streamingRelation@StreamingRelation(dataSourceV1, sourceName, output) =>
toExecutionRelationMap.getOrElseUpdate(streamingRelation, {
// Materialize source to avoid creating it in every batch
val metadataPath = s"$resolvedCheckpointRoot/sources/$nextSourceId"
val source = dataSourceV1.createSource(metadataPath)
nextSourceId += 1
logInfo(s"Using Source [$source] from DataSourceV1 named '$sourceName' [$dataSourceV1]")
StreamingExecutionRelation(source, output)(sparkSession)
})
case s @ StreamingRelationV2(
dataSourceV2: MicroBatchReadSupport, sourceName, options, output, _) if
!disabledSources.contains(dataSourceV2.getClass.getCanonicalName) =>
v2ToExecutionRelationMap.getOrElseUpdate(s, {
// Materialize source to avoid creating it in every batch
val metadataPath = s"$resolvedCheckpointRoot/sources/$nextSourceId"
val reader = dataSourceV2.createMicroBatchReader(
Optional.empty(), // user specified schema
metadataPath,
new DataSourceOptions(options.asJava))
nextSourceId += 1
logInfo(s"Using MicroBatchReader [$reader] from " +
s"DataSourceV2 named '$sourceName' [$dataSourceV2]")
StreamingExecutionRelation(reader, output)(sparkSession)
})
case s @ StreamingRelationV2(dataSourceV2, sourceName, _, output, v1Relation) =>
v2ToExecutionRelationMap.getOrElseUpdate(s, {
// Materialize source to avoid creating it in every batch
val metadataPath = s"$resolvedCheckpointRoot/sources/$nextSourceId"
if (v1Relation.isEmpty) {
throw new UnsupportedOperationException(
s"Data source $sourceName does not support microbatch processing.")
}
val source = v1Relation.get.dataSource.createSource(metadataPath)
nextSourceId += 1
logInfo(s"Using Source [$source] from DataSourceV2 named '$sourceName' [$dataSourceV2]")
StreamingExecutionRelation(source, output)(sparkSession)
})
}
sources = _logicalPlan.collect { case s: StreamingExecutionRelation => s.source }
uniqueSources = sources.distinct
_logicalPlan
}
/**
* Repeatedly attempts to run batches as data arrives.
*/
protected def runActivatedStream(sparkSessionForStream: SparkSession): Unit = {
triggerExecutor.execute(() => {
startTrigger()
if (isActive) {
reportTimeTaken("triggerExecution") {
if (currentBatchId < 0) {
// We'll do this initialization only once
populateStartOffsets(sparkSessionForStream)
sparkSession.sparkContext.setJobDescription(getBatchDescriptionString)
logDebug(s"Stream running from $committedOffsets to $availableOffsets")
} else {
constructNextBatch()
}
if (dataAvailable) {
currentStatus = currentStatus.copy(isDataAvailable = true)
updateStatusMessage("Processing new data")
runBatch(sparkSessionForStream)
}
}
// Report trigger as finished and construct progress object.
finishTrigger(dataAvailable)
if (dataAvailable) {
// Update committed offsets.
commitLog.add(currentBatchId)
committedOffsets ++= availableOffsets
logDebug(s"batch ${currentBatchId} committed")
// We'll increase currentBatchId after we complete processing current batch's data
currentBatchId += 1
sparkSession.sparkContext.setJobDescription(getBatchDescriptionString)
} else {
currentStatus = currentStatus.copy(isDataAvailable = false)
updateStatusMessage("Waiting for data to arrive")
Thread.sleep(pollingDelayMs)
}
}
updateStatusMessage("Waiting for next trigger")
isActive
})
}
/**
* Populate the start offsets to start the execution at the current offsets stored in the sink
* (i.e. avoid reprocessing data that we have already processed). This function must be called
* before any processing occurs and will populate the following fields:
* - currentBatchId
* - committedOffsets
* - availableOffsets
* The basic structure of this method is as follows:
*
   *  Identify (from the offset log) the offsets used to run the last batch
   *  IF last batch exists THEN
   *    Set the next batch to be executed as the last recovered batch
   *    Check the commit log to see which batch was committed last
   *    IF the last batch was committed THEN
   *      Call getBatch using the last batch start and end offsets
   *      // ^^^^ above line is needed since some sources assume last batch always re-executes
   *      Setup for a new batch i.e., start = last batch end, and identify new end
   *    DONE
   *  ELSE
   *    Identify a brand new batch
   *  DONE
*/
private def populateStartOffsets(sparkSessionToRunBatches: SparkSession): Unit = {
offsetLog.getLatest() match {
case Some((latestBatchId, nextOffsets)) =>
/* First assume that we are re-executing the latest known batch
* in the offset log */
currentBatchId = latestBatchId
availableOffsets = nextOffsets.toStreamProgress(sources)
        /* Initialize committed offsets to a committed batch, which at this
         * point is the second latest batch id in the offset log. */
if (latestBatchId != 0) {
val secondLatestBatchId = offsetLog.get(latestBatchId - 1).getOrElse {
throw new IllegalStateException(s"batch ${latestBatchId - 1} doesn't exist")
}
committedOffsets = secondLatestBatchId.toStreamProgress(sources)
}
// update offset metadata
nextOffsets.metadata.foreach { metadata =>
OffsetSeqMetadata.setSessionConf(metadata, sparkSessionToRunBatches.conf)
offsetSeqMetadata = OffsetSeqMetadata(
metadata.batchWatermarkMs, metadata.batchTimestampMs, sparkSessionToRunBatches.conf)
}
/* identify the current batch id: if commit log indicates we successfully processed the
* latest batch id in the offset log, then we can safely move to the next batch
* i.e., committedBatchId + 1 */
commitLog.getLatest() match {
case Some((latestCommittedBatchId, _)) =>
if (latestBatchId == latestCommittedBatchId) {
/* The last batch was successfully committed, so we can safely process a
* new next batch but first:
* Make a call to getBatch using the offsets from previous batch.
* because certain sources (e.g., KafkaSource) assume on restart the last
* batch will be executed before getOffset is called again. */
availableOffsets.foreach {
case (source: Source, end: Offset) =>
val start = committedOffsets.get(source)
source.getBatch(start, end)
case nonV1Tuple =>
// The V2 API does not have the same edge case requiring getBatch to be called
// here, so we do nothing here.
}
currentBatchId = latestCommittedBatchId + 1
committedOffsets ++= availableOffsets
            // Construct a new batch by recomputing availableOffsets
constructNextBatch()
} else if (latestCommittedBatchId < latestBatchId - 1) {
logWarning(s"Batch completion log latest batch id is " +
s"${latestCommittedBatchId}, which is not trailing " +
s"batchid $latestBatchId by one")
}
case None => logInfo("no commit log present")
}
logDebug(s"Resuming at batch $currentBatchId with committed offsets " +
s"$committedOffsets and available offsets $availableOffsets")
case None => // We are starting this stream for the first time.
logInfo(s"Starting new streaming query.")
currentBatchId = 0
constructNextBatch()
}
}
/**
* Returns true if there is any new data available to be processed.
*/
private def dataAvailable: Boolean = {
availableOffsets.exists {
case (source, available) =>
committedOffsets
.get(source)
.map(committed => committed != available)
.getOrElse(true)
}
}
/**
* Queries all of the sources to see if any new data is available. When there is new data the
* batchId counter is incremented and a new log entry is written with the newest offsets.
*/
private def constructNextBatch(): Unit = {
// Check to see what new data is available.
val hasNewData = {
awaitProgressLock.lock()
try {
// Generate a map from each unique source to the next available offset.
val latestOffsets: Map[BaseStreamingSource, Option[Offset]] = uniqueSources.map {
case s: Source =>
updateStatusMessage(s"Getting offsets from $s")
reportTimeTaken("getOffset") {
(s, s.getOffset)
}
case s: MicroBatchReader =>
updateStatusMessage(s"Getting offsets from $s")
reportTimeTaken("setOffsetRange") {
// Once v1 streaming source execution is gone, we can refactor this away.
// For now, we set the range here to get the source to infer the available end offset,
// get that offset, and then set the range again when we later execute.
s.setOffsetRange(
toJava(availableOffsets.get(s).map(off => s.deserializeOffset(off.json))),
Optional.empty())
}
val currentOffset = reportTimeTaken("getEndOffset") { s.getEndOffset() }
(s, Option(currentOffset))
}.toMap
availableOffsets ++= latestOffsets.filter { case (_, o) => o.nonEmpty }.mapValues(_.get)
if (dataAvailable) {
true
} else {
noNewData = true
false
}
} finally {
awaitProgressLock.unlock()
}
}
if (hasNewData) {
var batchWatermarkMs = offsetSeqMetadata.batchWatermarkMs
// Update the eventTime watermarks if we find any in the plan.
if (lastExecution != null) {
lastExecution.executedPlan.collect {
case e: EventTimeWatermarkExec => e
}.zipWithIndex.foreach {
case (e, index) if e.eventTimeStats.value.count > 0 =>
logDebug(s"Observed event time stats $index: ${e.eventTimeStats.value}")
val newWatermarkMs = e.eventTimeStats.value.max - e.delayMs
val prevWatermarkMs = watermarkMsMap.get(index)
if (prevWatermarkMs.isEmpty || newWatermarkMs > prevWatermarkMs.get) {
watermarkMsMap.put(index, newWatermarkMs)
}
// Populate 0 if we haven't seen any data yet for this watermark node.
case (_, index) =>
if (!watermarkMsMap.isDefinedAt(index)) {
watermarkMsMap.put(index, 0)
}
}
// Update the global watermark to the minimum of all watermark nodes.
// This is the safest option, because only the global watermark is fault-tolerant. Making
// it the minimum of all individual watermarks guarantees it will never advance past where
// any individual watermark operator would be if it were in a plan by itself.
        if (watermarkMsMap.nonEmpty) {
val newWatermarkMs = watermarkMsMap.minBy(_._2)._2
if (newWatermarkMs > batchWatermarkMs) {
logInfo(s"Updating eventTime watermark to: $newWatermarkMs ms")
batchWatermarkMs = newWatermarkMs
} else {
logDebug(
s"Event time didn't move: $newWatermarkMs < " +
s"$batchWatermarkMs")
}
}
}
offsetSeqMetadata = offsetSeqMetadata.copy(
batchWatermarkMs = batchWatermarkMs,
batchTimestampMs = triggerClock.getTimeMillis()) // Current batch timestamp in milliseconds
updateStatusMessage("Writing offsets to log")
reportTimeTaken("walCommit") {
assert(offsetLog.add(
currentBatchId,
availableOffsets.toOffsetSeq(sources, offsetSeqMetadata)),
s"Concurrent update to the log. Multiple streaming jobs detected for $currentBatchId")
logInfo(s"Committed offsets for batch $currentBatchId. " +
s"Metadata ${offsetSeqMetadata.toString}")
// NOTE: The following code is correct because runStream() processes exactly one
// batch at a time. If we add pipeline parallelism (multiple batches in flight at
// the same time), this cleanup logic will need to change.
// Now that we've updated the scheduler's persistent checkpoint, it is safe for the
// sources to discard data from the previous batch.
if (currentBatchId != 0) {
val prevBatchOff = offsetLog.get(currentBatchId - 1)
if (prevBatchOff.isDefined) {
prevBatchOff.get.toStreamProgress(sources).foreach {
case (src: Source, off) => src.commit(off)
case (reader: MicroBatchReader, off) =>
reader.commit(reader.deserializeOffset(off.json))
}
} else {
throw new IllegalStateException(s"batch $currentBatchId doesn't exist")
}
}
// It is now safe to discard the metadata beyond the minimum number to retain.
// Note that purge is exclusive, i.e. it purges everything before the target ID.
if (minLogEntriesToMaintain < currentBatchId) {
offsetLog.purge(currentBatchId - minLogEntriesToMaintain)
commitLog.purge(currentBatchId - minLogEntriesToMaintain)
}
}
} else {
awaitProgressLock.lock()
try {
// Wake up any threads that are waiting for the stream to progress.
awaitProgressLockCondition.signalAll()
} finally {
awaitProgressLock.unlock()
}
}
}
/**
* Processes any data available between `availableOffsets` and `committedOffsets`.
* @param sparkSessionToRunBatch Isolated [[SparkSession]] to run this batch with.
*/
private def runBatch(sparkSessionToRunBatch: SparkSession): Unit = {
// Request unprocessed data from all sources.
newData = reportTimeTaken("getBatch") {
availableOffsets.flatMap {
case (source: Source, available)
if committedOffsets.get(source).map(_ != available).getOrElse(true) =>
val current = committedOffsets.get(source)
val batch = source.getBatch(current, available)
assert(batch.isStreaming,
s"DataFrame returned by getBatch from $source did not have isStreaming=true\\n" +
s"${batch.queryExecution.logical}")
logDebug(s"Retrieving data from $source: $current -> $available")
Some(source -> batch.logicalPlan)
case (reader: MicroBatchReader, available)
if committedOffsets.get(reader).map(_ != available).getOrElse(true) =>
val current = committedOffsets.get(reader).map(off => reader.deserializeOffset(off.json))
val availableV2: OffsetV2 = available match {
case v1: SerializedOffset => reader.deserializeOffset(v1.json)
case v2: OffsetV2 => v2
}
reader.setOffsetRange(
toJava(current),
Optional.of(availableV2))
logDebug(s"Retrieving data from $reader: $current -> $availableV2")
Some(reader ->
new StreamingDataSourceV2Relation(reader.readSchema().toAttributes, reader))
case _ => None
}
}
// Replace sources in the logical plan with data that has arrived since the last batch.
val newBatchesPlan = logicalPlan transform {
case StreamingExecutionRelation(source, output) =>
newData.get(source).map { dataPlan =>
assert(output.size == dataPlan.output.size,
s"Invalid batch: ${Utils.truncatedString(output, ",")} != " +
s"${Utils.truncatedString(dataPlan.output, ",")}")
val aliases = output.zip(dataPlan.output).map { case (to, from) =>
Alias(from, to.name)(exprId = to.exprId, explicitMetadata = Some(from.metadata))
}
Project(aliases, dataPlan)
}.getOrElse {
LocalRelation(output, isStreaming = true)
}
}
// Rewire the plan to use the new attributes that were returned by the source.
val newAttributePlan = newBatchesPlan transformAllExpressions {
case ct: CurrentTimestamp =>
CurrentBatchTimestamp(offsetSeqMetadata.batchTimestampMs,
ct.dataType)
case cd: CurrentDate =>
CurrentBatchTimestamp(offsetSeqMetadata.batchTimestampMs,
cd.dataType, cd.timeZoneId)
}
val triggerLogicalPlan = sink match {
case _: Sink => newAttributePlan
case s: StreamWriteSupport =>
val writer = s.createStreamWriter(
s"$runId",
newAttributePlan.schema,
outputMode,
new DataSourceOptions(extraOptions.asJava))
if (writer.isInstanceOf[SupportsWriteInternalRow]) {
WriteToDataSourceV2(
new InternalRowMicroBatchWriter(currentBatchId, writer), newAttributePlan)
} else {
WriteToDataSourceV2(new MicroBatchWriter(currentBatchId, writer), newAttributePlan)
}
case _ => throw new IllegalArgumentException(s"unknown sink type for $sink")
}
reportTimeTaken("queryPlanning") {
lastExecution = new IncrementalExecution(
sparkSessionToRunBatch,
triggerLogicalPlan,
outputMode,
checkpointFile("state"),
runId,
currentBatchId,
offsetSeqMetadata)
lastExecution.executedPlan // Force the lazy generation of execution plan
}
val nextBatch =
new Dataset(sparkSessionToRunBatch, lastExecution, RowEncoder(lastExecution.analyzed.schema))
reportTimeTaken("addBatch") {
SQLExecution.withNewExecutionId(sparkSessionToRunBatch, lastExecution) {
sink match {
case s: Sink => s.addBatch(currentBatchId, nextBatch)
case _: StreamWriteSupport =>
// This doesn't accumulate any data - it just forces execution of the microbatch writer.
nextBatch.collect()
}
}
}
awaitProgressLock.lock()
try {
// Wake up any threads that are waiting for the stream to progress.
awaitProgressLockCondition.signalAll()
} finally {
awaitProgressLock.unlock()
}
}
  /** Execute a function while locking the stream from making any progress */
private[sql] def withProgressLocked(f: => Unit): Unit = {
awaitProgressLock.lock()
try {
f
} finally {
awaitProgressLock.unlock()
}
}
private def toJava(scalaOption: Option[OffsetV2]): Optional[OffsetV2] = {
Optional.ofNullable(scalaOption.orNull)
}
}
| esi-mineset/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/MicroBatchExecution.scala | Scala | apache-2.0 | 23,137 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.cluster
import java.util.concurrent.locks.ReentrantReadWriteLock
import java.util.Optional
import java.util.concurrent.CompletableFuture
import kafka.api.{ApiVersion, LeaderAndIsr}
import kafka.common.UnexpectedAppendOffsetException
import kafka.controller.{KafkaController, StateChangeLogger}
import kafka.log._
import kafka.metrics.KafkaMetricsGroup
import kafka.server._
import kafka.server.checkpoints.OffsetCheckpoints
import kafka.utils.CoreUtils.{inReadLock, inWriteLock}
import kafka.utils._
import kafka.zookeeper.ZooKeeperClientException
import org.apache.kafka.common.errors._
import org.apache.kafka.common.message.{DescribeProducersResponseData, FetchResponseData}
import org.apache.kafka.common.message.LeaderAndIsrRequestData.LeaderAndIsrPartitionState
import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.FileRecords.TimestampAndOffset
import org.apache.kafka.common.record.{MemoryRecords, RecordBatch}
import org.apache.kafka.common.requests._
import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET}
import org.apache.kafka.common.utils.Time
import org.apache.kafka.common.{IsolationLevel, TopicPartition, Uuid}
import scala.collection.{Map, Seq}
import scala.jdk.CollectionConverters._
trait IsrChangeListener {
def markExpand(): Unit
def markShrink(): Unit
def markFailed(): Unit
}
class DelayedOperations(topicPartition: TopicPartition,
produce: DelayedOperationPurgatory[DelayedProduce],
fetch: DelayedOperationPurgatory[DelayedFetch],
deleteRecords: DelayedOperationPurgatory[DelayedDeleteRecords]) {
def checkAndCompleteAll(): Unit = {
val requestKey = TopicPartitionOperationKey(topicPartition)
fetch.checkAndComplete(requestKey)
produce.checkAndComplete(requestKey)
deleteRecords.checkAndComplete(requestKey)
}
def numDelayedDelete: Int = deleteRecords.numDelayed
}
object Partition extends KafkaMetricsGroup {
def apply(topicPartition: TopicPartition,
time: Time,
replicaManager: ReplicaManager): Partition = {
val isrChangeListener = new IsrChangeListener {
override def markExpand(): Unit = {
replicaManager.isrExpandRate.mark()
}
override def markShrink(): Unit = {
replicaManager.isrShrinkRate.mark()
}
override def markFailed(): Unit = replicaManager.failedIsrUpdatesRate.mark()
}
val delayedOperations = new DelayedOperations(
topicPartition,
replicaManager.delayedProducePurgatory,
replicaManager.delayedFetchPurgatory,
replicaManager.delayedDeleteRecordsPurgatory)
new Partition(topicPartition,
replicaLagTimeMaxMs = replicaManager.config.replicaLagTimeMaxMs,
interBrokerProtocolVersion = replicaManager.config.interBrokerProtocolVersion,
localBrokerId = replicaManager.config.brokerId,
time = time,
isrChangeListener = isrChangeListener,
delayedOperations = delayedOperations,
metadataCache = replicaManager.metadataCache,
logManager = replicaManager.logManager,
alterIsrManager = replicaManager.alterIsrManager)
}
def removeMetrics(topicPartition: TopicPartition): Unit = {
val tags = Map("topic" -> topicPartition.topic, "partition" -> topicPartition.partition.toString)
removeMetric("UnderReplicated", tags)
removeMetric("UnderMinIsr", tags)
removeMetric("InSyncReplicasCount", tags)
removeMetric("ReplicasCount", tags)
removeMetric("LastStableOffsetLag", tags)
removeMetric("AtMinIsr", tags)
}
}
sealed trait AssignmentState {
def replicas: Seq[Int]
def replicationFactor: Int = replicas.size
def isAddingReplica(brokerId: Int): Boolean = false
}
case class OngoingReassignmentState(addingReplicas: Seq[Int],
removingReplicas: Seq[Int],
replicas: Seq[Int]) extends AssignmentState {
override def replicationFactor: Int = replicas.diff(addingReplicas).size // keep the size of the original replicas
override def isAddingReplica(replicaId: Int): Boolean = addingReplicas.contains(replicaId)
}
case class SimpleAssignmentState(replicas: Seq[Int]) extends AssignmentState
sealed trait IsrState {
/**
* Includes only the in-sync replicas which have been committed to ZK.
*/
def isr: Set[Int]
/**
* This set may include un-committed ISR members following an expansion. This "effective" ISR is used for advancing
* the high watermark as well as determining which replicas are required for acks=all produce requests.
*
   * Only applicable as of IBP 2.7-IV2; for older versions this will return the committed ISR
*
*/
def maximalIsr: Set[Int]
/**
* Indicates if we have an AlterIsr request inflight.
*/
def isInflight: Boolean
}
sealed trait PendingIsrChange extends IsrState {
def sentLeaderAndIsr: LeaderAndIsr
}
case class PendingExpandIsr(
isr: Set[Int],
newInSyncReplicaId: Int,
sentLeaderAndIsr: LeaderAndIsr
) extends PendingIsrChange {
val maximalIsr = isr + newInSyncReplicaId
val isInflight = true
override def toString: String = {
s"PendingExpandIsr(isr=$isr" +
s", newInSyncReplicaId=$newInSyncReplicaId" +
s", sentLeaderAndIsr=$sentLeaderAndIsr" +
")"
}
}
case class PendingShrinkIsr(
isr: Set[Int],
outOfSyncReplicaIds: Set[Int],
sentLeaderAndIsr: LeaderAndIsr
) extends PendingIsrChange {
val maximalIsr = isr
val isInflight = true
override def toString: String = {
s"PendingShrinkIsr(isr=$isr" +
s", outOfSyncReplicaIds=$outOfSyncReplicaIds" +
s", sentLeaderAndIsr=$sentLeaderAndIsr" +
")"
}
}
case class CommittedIsr(
isr: Set[Int]
) extends IsrState {
val maximalIsr = isr
val isInflight = false
override def toString: String = {
s"CommittedIsr(isr=$isr" +
")"
}
}
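/**
 * Hedged illustration (added for clarity; not part of the original source): shows how the
 * effective ("maximal") ISR differs from the committed ISR while an AlterIsr expansion is
 * still in flight. The helper object, broker ids and the caller-supplied LeaderAndIsr are
 * hypothetical.
 */
private[cluster] object IsrStateExample {
  def describe(sentLeaderAndIsr: LeaderAndIsr): Seq[String] = {
    val committed = CommittedIsr(Set(1, 2))
    val expanding = PendingExpandIsr(isr = Set(1, 2), newInSyncReplicaId = 3, sentLeaderAndIsr = sentLeaderAndIsr)
    Seq(
      s"committed.maximalIsr=${committed.maximalIsr}", // Set(1, 2): nothing is in flight
      s"expanding.maximalIsr=${expanding.maximalIsr}", // Set(1, 2, 3): includes the uncommitted member
      s"expanding.isInflight=${expanding.isInflight}"  // true: an AlterIsr request has been sent
    )
  }
}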
/**
* Data structure that represents a topic partition. The leader maintains the AR, ISR, CUR, RAR
*
* Concurrency notes:
* 1) Partition is thread-safe. Operations on partitions may be invoked concurrently from different
* request handler threads
* 2) ISR updates are synchronized using a read-write lock. Read lock is used to check if an update
* is required to avoid acquiring write lock in the common case of replica fetch when no update
* is performed. ISR update condition is checked a second time under write lock before performing
* the update
* 3) Various other operations like leader changes are processed while holding the ISR write lock.
* This can introduce delays in produce and replica fetch requests, but these operations are typically
* infrequent.
* 4) HW updates are synchronized using ISR read lock. @Log lock is acquired during the update with
* locking order Partition lock -> Log lock.
* 5) lock is used to prevent the follower replica from being updated while ReplicaAlterDirThread is
* executing maybeReplaceCurrentWithFutureReplica() to replace follower replica with the future replica.
*/
class Partition(val topicPartition: TopicPartition,
val replicaLagTimeMaxMs: Long,
interBrokerProtocolVersion: ApiVersion,
localBrokerId: Int,
time: Time,
isrChangeListener: IsrChangeListener,
delayedOperations: DelayedOperations,
metadataCache: MetadataCache,
logManager: LogManager,
alterIsrManager: AlterIsrManager) extends Logging with KafkaMetricsGroup {
def topic: String = topicPartition.topic
def partitionId: Int = topicPartition.partition
private val stateChangeLogger = new StateChangeLogger(localBrokerId, inControllerContext = false, None)
private val remoteReplicasMap = new Pool[Int, Replica]
  // The read lock is only required when multiple reads are executed and need to be done in a consistent manner
private val leaderIsrUpdateLock = new ReentrantReadWriteLock
// lock to prevent the follower replica log update while checking if the log dir could be replaced with future log.
private val futureLogLock = new Object()
private var zkVersion: Int = LeaderAndIsr.initialZKVersion
@volatile private var leaderEpoch: Int = LeaderAndIsr.initialLeaderEpoch - 1
// start offset for 'leaderEpoch' above (leader epoch of the current leader for this partition),
// defined when this broker is leader for partition
@volatile private var leaderEpochStartOffsetOpt: Option[Long] = None
@volatile var leaderReplicaIdOpt: Option[Int] = None
@volatile private[cluster] var isrState: IsrState = CommittedIsr(Set.empty)
@volatile var assignmentState: AssignmentState = SimpleAssignmentState(Seq.empty)
// Logs belonging to this partition. Majority of time it will be only one log, but if log directory
// is getting changed (as a result of ReplicaAlterLogDirs command), we may have two logs until copy
// completes and a switch to new location is performed.
// log and futureLog variables defined below are used to capture this
@volatile var log: Option[UnifiedLog] = None
// If ReplicaAlterLogDir command is in progress, this is future location of the log
@volatile var futureLog: Option[UnifiedLog] = None
/* Epoch of the controller that last changed the leader. This needs to be initialized correctly upon broker startup.
* One way of doing that is through the controller's start replica state change command. When a new broker starts up
* the controller sends it a start replica command containing the leader for each partition that the broker hosts.
* In addition to the leader, the controller can also send the epoch of the controller that elected the leader for
* each partition. */
private var controllerEpoch: Int = KafkaController.InitialControllerEpoch
this.logIdent = s"[Partition $topicPartition broker=$localBrokerId] "
private val tags = Map("topic" -> topic, "partition" -> partitionId.toString)
newGauge("UnderReplicated", () => if (isUnderReplicated) 1 else 0, tags)
newGauge("InSyncReplicasCount", () => if (isLeader) isrState.isr.size else 0, tags)
newGauge("UnderMinIsr", () => if (isUnderMinIsr) 1 else 0, tags)
newGauge("AtMinIsr", () => if (isAtMinIsr) 1 else 0, tags)
newGauge("ReplicasCount", () => if (isLeader) assignmentState.replicationFactor else 0, tags)
newGauge("LastStableOffsetLag", () => log.map(_.lastStableOffsetLag).getOrElse(0), tags)
def hasLateTransaction(currentTimeMs: Long): Boolean = leaderLogIfLocal.exists(_.hasLateTransaction(currentTimeMs))
def isUnderReplicated: Boolean = isLeader && (assignmentState.replicationFactor - isrState.isr.size) > 0
def isUnderMinIsr: Boolean = leaderLogIfLocal.exists { isrState.isr.size < _.config.minInSyncReplicas }
def isAtMinIsr: Boolean = leaderLogIfLocal.exists { isrState.isr.size == _.config.minInSyncReplicas }
def isReassigning: Boolean = assignmentState.isInstanceOf[OngoingReassignmentState]
def isAddingLocalReplica: Boolean = assignmentState.isAddingReplica(localBrokerId)
def isAddingReplica(replicaId: Int): Boolean = assignmentState.isAddingReplica(replicaId)
def inSyncReplicaIds: Set[Int] = isrState.isr
/**
* Create the future replica if 1) the current replica is not in the given log directory and 2) the future replica
* does not exist. This method assumes that the current replica has already been created.
*
* @param logDir log directory
* @param highWatermarkCheckpoints Checkpoint to load initial high watermark from
* @return true iff the future replica is created
*/
def maybeCreateFutureReplica(logDir: String, highWatermarkCheckpoints: OffsetCheckpoints): Boolean = {
// The writeLock is needed to make sure that while the caller checks the log directory of the
// current replica and the existence of the future replica, no other thread can update the log directory of the
// current replica or remove the future replica.
inWriteLock(leaderIsrUpdateLock) {
val currentLogDir = localLogOrException.parentDir
if (currentLogDir == logDir) {
info(s"Current log directory $currentLogDir is same as requested log dir $logDir. " +
s"Skipping future replica creation.")
false
} else {
futureLog match {
case Some(partitionFutureLog) =>
val futureLogDir = partitionFutureLog.parentDir
if (futureLogDir != logDir)
throw new IllegalStateException(s"The future log dir $futureLogDir of $topicPartition is " +
s"different from the requested log dir $logDir")
false
case None =>
createLogIfNotExists(isNew = false, isFutureReplica = true, highWatermarkCheckpoints, topicId)
true
}
}
}
}
def createLogIfNotExists(isNew: Boolean, isFutureReplica: Boolean, offsetCheckpoints: OffsetCheckpoints, topicId: Option[Uuid]): Unit = {
def maybeCreate(logOpt: Option[UnifiedLog]): UnifiedLog = {
logOpt match {
case Some(log) =>
trace(s"${if (isFutureReplica) "Future UnifiedLog" else "UnifiedLog"} already exists.")
if (log.topicId.isEmpty)
topicId.foreach(log.assignTopicId)
log
case None =>
createLog(isNew, isFutureReplica, offsetCheckpoints, topicId)
}
}
if (isFutureReplica) {
this.futureLog = Some(maybeCreate(this.futureLog))
} else {
this.log = Some(maybeCreate(this.log))
}
}
// Visible for testing
private[cluster] def createLog(isNew: Boolean, isFutureReplica: Boolean, offsetCheckpoints: OffsetCheckpoints, topicId: Option[Uuid]): UnifiedLog = {
def updateHighWatermark(log: UnifiedLog) = {
val checkpointHighWatermark = offsetCheckpoints.fetch(log.parentDir, topicPartition).getOrElse {
info(s"No checkpointed highwatermark is found for partition $topicPartition")
0L
}
val initialHighWatermark = log.updateHighWatermark(checkpointHighWatermark)
info(s"Log loaded for partition $topicPartition with initial high watermark $initialHighWatermark")
}
logManager.initializingLog(topicPartition)
var maybeLog: Option[UnifiedLog] = None
try {
val log = logManager.getOrCreateLog(topicPartition, isNew, isFutureReplica, topicId)
maybeLog = Some(log)
updateHighWatermark(log)
log
} finally {
logManager.finishedInitializingLog(topicPartition, maybeLog)
}
}
def getReplica(replicaId: Int): Option[Replica] = Option(remoteReplicasMap.get(replicaId))
private def checkCurrentLeaderEpoch(remoteLeaderEpochOpt: Optional[Integer]): Errors = {
if (!remoteLeaderEpochOpt.isPresent) {
Errors.NONE
} else {
val remoteLeaderEpoch = remoteLeaderEpochOpt.get
val localLeaderEpoch = leaderEpoch
if (localLeaderEpoch > remoteLeaderEpoch)
Errors.FENCED_LEADER_EPOCH
else if (localLeaderEpoch < remoteLeaderEpoch)
Errors.UNKNOWN_LEADER_EPOCH
else
Errors.NONE
}
}
private def getLocalLog(currentLeaderEpoch: Optional[Integer],
requireLeader: Boolean): Either[UnifiedLog, Errors] = {
checkCurrentLeaderEpoch(currentLeaderEpoch) match {
case Errors.NONE =>
if (requireLeader && !isLeader) {
Right(Errors.NOT_LEADER_OR_FOLLOWER)
} else {
log match {
case Some(partitionLog) =>
Left(partitionLog)
case _ =>
Right(Errors.NOT_LEADER_OR_FOLLOWER)
}
}
case error =>
Right(error)
}
}
def localLogOrException: UnifiedLog = log.getOrElse {
throw new NotLeaderOrFollowerException(s"Log for partition $topicPartition is not available " +
s"on broker $localBrokerId")
}
def futureLocalLogOrException: UnifiedLog = futureLog.getOrElse {
throw new NotLeaderOrFollowerException(s"Future log for partition $topicPartition is not available " +
s"on broker $localBrokerId")
}
def leaderLogIfLocal: Option[UnifiedLog] = {
log.filter(_ => isLeader)
}
/**
* Returns true if this node is currently leader for the Partition.
*/
def isLeader: Boolean = leaderReplicaIdOpt.contains(localBrokerId)
private def localLogWithEpochOrException(currentLeaderEpoch: Optional[Integer],
requireLeader: Boolean): UnifiedLog = {
getLocalLog(currentLeaderEpoch, requireLeader) match {
case Left(localLog) => localLog
case Right(error) =>
throw error.exception(s"Failed to find ${if (requireLeader) "leader" else ""} log for " +
s"partition $topicPartition with leader epoch $currentLeaderEpoch. The current leader " +
s"is $leaderReplicaIdOpt and the current epoch $leaderEpoch")
}
}
// Visible for testing -- Used by unit tests to set log for this partition
def setLog(log: UnifiedLog, isFutureLog: Boolean): Unit = {
if (isFutureLog)
futureLog = Some(log)
else
this.log = Some(log)
}
/**
* @return the topic ID for the log or None if the log or the topic ID does not exist.
*/
def topicId: Option[Uuid] = {
val log = this.log.orElse(logManager.getLog(topicPartition))
log.flatMap(_.topicId)
}
// remoteReplicas will be called in the hot path, and must be inexpensive
def remoteReplicas: Iterable[Replica] =
remoteReplicasMap.values
def futureReplicaDirChanged(newDestinationDir: String): Boolean = {
inReadLock(leaderIsrUpdateLock) {
futureLog.exists(_.parentDir != newDestinationDir)
}
}
def removeFutureLocalReplica(deleteFromLogDir: Boolean = true): Unit = {
inWriteLock(leaderIsrUpdateLock) {
futureLog = None
if (deleteFromLogDir)
logManager.asyncDelete(topicPartition, isFuture = true)
}
}
// Return true if the future replica exists and it has caught up with the current replica for this partition
// Only ReplicaAlterDirThread will call this method and ReplicaAlterDirThread should remove the partition
// from its partitionStates if this method returns true
def maybeReplaceCurrentWithFutureReplica(): Boolean = {
// lock to prevent the log append by followers while checking if the log dir could be replaced with future log.
futureLogLock.synchronized {
val localReplicaLEO = localLogOrException.logEndOffset
val futureReplicaLEO = futureLog.map(_.logEndOffset)
if (futureReplicaLEO.contains(localReplicaLEO)) {
// The write lock is needed to make sure that while ReplicaAlterDirThread checks the LEO of the
// current replica, no other thread can update LEO of the current replica via log truncation or log append operation.
inWriteLock(leaderIsrUpdateLock) {
futureLog match {
case Some(futurePartitionLog) =>
if (log.exists(_.logEndOffset == futurePartitionLog.logEndOffset)) {
logManager.replaceCurrentWithFutureLog(topicPartition)
log = futureLog
removeFutureLocalReplica(false)
true
} else false
case None =>
// Future replica is removed by a non-ReplicaAlterLogDirsThread before this method is called
// In this case the partition should have been removed from state of the ReplicaAlterLogDirsThread
// Return false so that ReplicaAlterLogDirsThread does not have to remove this partition from the
// state again to avoid race condition
false
}
}
} else false
}
}
/**
* Delete the partition. Note that deleting the partition does not delete the underlying logs.
* The logs are deleted by the ReplicaManager after having deleted the partition.
*/
def delete(): Unit = {
// need to hold the lock to prevent appendMessagesToLeader() from hitting I/O exceptions due to log being deleted
inWriteLock(leaderIsrUpdateLock) {
remoteReplicasMap.clear()
assignmentState = SimpleAssignmentState(Seq.empty)
log = None
futureLog = None
isrState = CommittedIsr(Set.empty)
leaderReplicaIdOpt = None
leaderEpochStartOffsetOpt = None
Partition.removeMetrics(topicPartition)
}
}
def getLeaderEpoch: Int = this.leaderEpoch
def getZkVersion: Int = this.zkVersion
/**
* Make the local replica the leader by resetting LogEndOffset for remote replicas (there could be old LogEndOffset
* from the time when this broker was the leader last time) and setting the new leader and ISR.
   * If the leader replica id does not change, return false to indicate to the replica manager that the become-leader steps can be skipped.
*/
def makeLeader(partitionState: LeaderAndIsrPartitionState,
highWatermarkCheckpoints: OffsetCheckpoints,
topicId: Option[Uuid]): Boolean = {
val (leaderHWIncremented, isNewLeader) = inWriteLock(leaderIsrUpdateLock) {
// record the epoch of the controller that made the leadership decision. This is useful while updating the isr
// to maintain the decision maker controller's epoch in the zookeeper path
controllerEpoch = partitionState.controllerEpoch
val isr = partitionState.isr.asScala.map(_.toInt).toSet
val addingReplicas = partitionState.addingReplicas.asScala.map(_.toInt)
val removingReplicas = partitionState.removingReplicas.asScala.map(_.toInt)
updateAssignmentAndIsr(
assignment = partitionState.replicas.asScala.map(_.toInt),
isr = isr,
addingReplicas = addingReplicas,
removingReplicas = removingReplicas
)
try {
createLogIfNotExists(partitionState.isNew, isFutureReplica = false, highWatermarkCheckpoints, topicId)
} catch {
case e: ZooKeeperClientException =>
stateChangeLogger.error(s"A ZooKeeper client exception has occurred and makeLeader will be skipping the " +
s"state change for the partition $topicPartition with leader epoch: $leaderEpoch ", e)
return false
}
val leaderLog = localLogOrException
val leaderEpochStartOffset = leaderLog.logEndOffset
stateChangeLogger.info(s"Leader $topicPartition starts at leader epoch ${partitionState.leaderEpoch} from " +
s"offset $leaderEpochStartOffset with high watermark ${leaderLog.highWatermark} " +
s"ISR ${isr.mkString("[", ",", "]")} addingReplicas ${addingReplicas.mkString("[", ",", "]")} " +
s"removingReplicas ${removingReplicas.mkString("[", ",", "]")}. Previous leader epoch was $leaderEpoch.")
      // We cache the leader epoch here, persisting it only if it's local (hence having a log dir)
leaderEpoch = partitionState.leaderEpoch
leaderEpochStartOffsetOpt = Some(leaderEpochStartOffset)
zkVersion = partitionState.zkVersion
// In the case of successive leader elections in a short time period, a follower may have
// entries in its log from a later epoch than any entry in the new leader's log. In order
// to ensure that these followers can truncate to the right offset, we must cache the new
// leader epoch and the start offset since it should be larger than any epoch that a follower
// would try to query.
leaderLog.maybeAssignEpochStartOffset(leaderEpoch, leaderEpochStartOffset)
val isNewLeader = !isLeader
val curTimeMs = time.milliseconds
// initialize lastCaughtUpTime of replicas as well as their lastFetchTimeMs and lastFetchLeaderLogEndOffset.
remoteReplicas.foreach { replica =>
val lastCaughtUpTimeMs = if (isrState.isr.contains(replica.brokerId)) curTimeMs else 0L
replica.resetLastCaughtUpTime(leaderEpochStartOffset, curTimeMs, lastCaughtUpTimeMs)
}
if (isNewLeader) {
// mark local replica as the leader after converting hw
leaderReplicaIdOpt = Some(localBrokerId)
// reset log end offset for remote replicas
remoteReplicas.foreach { replica =>
replica.updateFetchState(
followerFetchOffsetMetadata = LogOffsetMetadata.UnknownOffsetMetadata,
followerStartOffset = UnifiedLog.UnknownOffset,
followerFetchTimeMs = 0L,
leaderEndOffset = UnifiedLog.UnknownOffset)
}
}
// we may need to increment high watermark since ISR could be down to 1
(maybeIncrementLeaderHW(leaderLog), isNewLeader)
}
// some delayed operations may be unblocked after HW changed
if (leaderHWIncremented)
tryCompleteDelayedRequests()
isNewLeader
}
/**
* Make the local replica the follower by setting the new leader and ISR to empty
* If the leader replica id does not change and the new epoch is equal or one
* greater (that is, no updates have been missed), return false to indicate to the
* replica manager that state is already correct and the become-follower steps can be skipped
*/
def makeFollower(partitionState: LeaderAndIsrPartitionState,
highWatermarkCheckpoints: OffsetCheckpoints,
topicId: Option[Uuid]): Boolean = {
inWriteLock(leaderIsrUpdateLock) {
val newLeaderBrokerId = partitionState.leader
val oldLeaderEpoch = leaderEpoch
// record the epoch of the controller that made the leadership decision. This is useful while updating the isr
// to maintain the decision maker controller's epoch in the zookeeper path
controllerEpoch = partitionState.controllerEpoch
updateAssignmentAndIsr(
assignment = partitionState.replicas.asScala.iterator.map(_.toInt).toSeq,
isr = Set.empty[Int],
addingReplicas = partitionState.addingReplicas.asScala.map(_.toInt),
removingReplicas = partitionState.removingReplicas.asScala.map(_.toInt)
)
try {
createLogIfNotExists(partitionState.isNew, isFutureReplica = false, highWatermarkCheckpoints, topicId)
} catch {
case e: ZooKeeperClientException =>
stateChangeLogger.error(s"A ZooKeeper client exception has occurred. makeFollower will be skipping the " +
s"state change for the partition $topicPartition with leader epoch: $leaderEpoch.", e)
return false
}
val followerLog = localLogOrException
val leaderEpochEndOffset = followerLog.logEndOffset
stateChangeLogger.info(s"Follower $topicPartition starts at leader epoch ${partitionState.leaderEpoch} from " +
s"offset $leaderEpochEndOffset with high watermark ${followerLog.highWatermark}. " +
s"Previous leader epoch was $leaderEpoch.")
leaderEpoch = partitionState.leaderEpoch
leaderEpochStartOffsetOpt = None
zkVersion = partitionState.zkVersion
if (leaderReplicaIdOpt.contains(newLeaderBrokerId) && leaderEpoch == oldLeaderEpoch) {
false
} else {
leaderReplicaIdOpt = Some(newLeaderBrokerId)
true
}
}
}
/**
* Update the follower's state in the leader based on the last fetch request. See
* [[Replica.updateFetchState()]] for details.
*
* @return true if the follower's fetch state was updated, false if the followerId is not recognized
*/
def updateFollowerFetchState(followerId: Int,
followerFetchOffsetMetadata: LogOffsetMetadata,
followerStartOffset: Long,
followerFetchTimeMs: Long,
leaderEndOffset: Long): Boolean = {
getReplica(followerId) match {
case Some(followerReplica) =>
// No need to calculate low watermark if there is no delayed DeleteRecordsRequest
val oldLeaderLW = if (delayedOperations.numDelayedDelete > 0) lowWatermarkIfLeader else -1L
val prevFollowerEndOffset = followerReplica.logEndOffset
followerReplica.updateFetchState(
followerFetchOffsetMetadata,
followerStartOffset,
followerFetchTimeMs,
leaderEndOffset)
val newLeaderLW = if (delayedOperations.numDelayedDelete > 0) lowWatermarkIfLeader else -1L
// check if the LW of the partition has incremented
// since the replica's logStartOffset may have incremented
val leaderLWIncremented = newLeaderLW > oldLeaderLW
// Check if this in-sync replica needs to be added to the ISR.
maybeExpandIsr(followerReplica)
// check if the HW of the partition can now be incremented
// since the replica may already be in the ISR and its LEO has just incremented
val leaderHWIncremented = if (prevFollowerEndOffset != followerReplica.logEndOffset) {
// the leader log may be updated by ReplicaAlterLogDirsThread so the following method must be in lock of
// leaderIsrUpdateLock to prevent adding new hw to invalid log.
inReadLock(leaderIsrUpdateLock) {
leaderLogIfLocal.exists(leaderLog => maybeIncrementLeaderHW(leaderLog, followerFetchTimeMs))
}
} else {
false
}
// some delayed operations may be unblocked after HW or LW changed
if (leaderLWIncremented || leaderHWIncremented)
tryCompleteDelayedRequests()
debug(s"Recorded replica $followerId log end offset (LEO) position " +
s"${followerFetchOffsetMetadata.messageOffset} and log start offset $followerStartOffset.")
true
case None =>
false
}
}
/**
* Stores the topic partition assignment and ISR.
* It creates a new Replica object for any new remote broker. The isr parameter is
* expected to be a subset of the assignment parameter.
*
* Note: public visibility for tests.
*
* @param assignment An ordered sequence of all the broker ids that were assigned to this
* topic partition
   * @param isr              The set of broker ids that are known to be in sync with the leader
* @param addingReplicas An ordered sequence of all broker ids that will be added to the
* assignment
* @param removingReplicas An ordered sequence of all broker ids that will be removed from
* the assignment
*/
def updateAssignmentAndIsr(assignment: Seq[Int],
isr: Set[Int],
addingReplicas: Seq[Int],
removingReplicas: Seq[Int]): Unit = {
val newRemoteReplicas = assignment.filter(_ != localBrokerId)
val removedReplicas = remoteReplicasMap.keys.filter(!newRemoteReplicas.contains(_))
// due to code paths accessing remoteReplicasMap without a lock,
// first add the new replicas and then remove the old ones
newRemoteReplicas.foreach(id => remoteReplicasMap.getAndMaybePut(id, new Replica(id, topicPartition)))
remoteReplicasMap.removeAll(removedReplicas)
if (addingReplicas.nonEmpty || removingReplicas.nonEmpty)
assignmentState = OngoingReassignmentState(addingReplicas, removingReplicas, assignment)
else
assignmentState = SimpleAssignmentState(assignment)
isrState = CommittedIsr(isr)
}
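  // Illustrative call (hypothetical broker ids): a reassignment that adds broker 4 and removes broker 1
  // might be recorded as
  //   updateAssignmentAndIsr(assignment = Seq(1, 2, 3, 4), isr = Set(1, 2, 3),
  //                          addingReplicas = Seq(4), removingReplicas = Seq(1))
  // leaving assignmentState = OngoingReassignmentState(Seq(4), Seq(1), Seq(1, 2, 3, 4)) and
  // isrState = CommittedIsr(Set(1, 2, 3)).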
/**
* Check and maybe expand the ISR of the partition.
* A replica will be added to ISR if its LEO >= current hw of the partition and it is caught up to
* an offset within the current leader epoch. A replica must be caught up to the current leader
* epoch before it can join ISR, because otherwise, if there is committed data between current
* leader's HW and LEO, the replica may become the leader before it fetches the committed data
* and the data will be lost.
*
* Technically, a replica shouldn't be in ISR if it hasn't caught up for longer than replicaLagTimeMaxMs,
* even if its log end offset is >= HW. However, to be consistent with how the follower determines
* whether a replica is in-sync, we only check HW.
*
* This function can be triggered when a replica's LEO has incremented.
*/
private def maybeExpandIsr(followerReplica: Replica): Unit = {
val needsIsrUpdate = !isrState.isInflight && canAddReplicaToIsr(followerReplica.brokerId) && inReadLock(leaderIsrUpdateLock) {
needsExpandIsr(followerReplica)
}
if (needsIsrUpdate) {
val alterIsrUpdateOpt = inWriteLock(leaderIsrUpdateLock) {
// check if this replica needs to be added to the ISR
if (!isrState.isInflight && needsExpandIsr(followerReplica)) {
Some(prepareIsrExpand(followerReplica.brokerId))
} else {
None
}
}
// Send the AlterIsr request outside of the LeaderAndIsr lock since the completion logic
// may increment the high watermark (and consequently complete delayed operations).
alterIsrUpdateOpt.foreach(submitAlterIsr)
}
}
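  // Note: maybeExpandIsr deliberately does a cheap check under the read lock and re-checks under the
  // write lock (double-checked locking), so the common case of "no ISR change needed" does not
  // contend on leaderIsrUpdateLock.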
private def needsExpandIsr(followerReplica: Replica): Boolean = {
canAddReplicaToIsr(followerReplica.brokerId) && isFollowerAtHighwatermark(followerReplica)
}
private def canAddReplicaToIsr(followerReplicaId: Int): Boolean = {
val current = isrState
!current.isInflight && !current.isr.contains(followerReplicaId)
}
private def isFollowerAtHighwatermark(followerReplica: Replica): Boolean = {
leaderLogIfLocal.exists { leaderLog =>
val followerEndOffset = followerReplica.logEndOffset
followerEndOffset >= leaderLog.highWatermark && leaderEpochStartOffsetOpt.exists(followerEndOffset >= _)
}
}
/*
* Returns a tuple where the first element is a boolean indicating whether enough replicas reached `requiredOffset`
* and the second element is an error (which would be `Errors.NONE` for no error).
*
* Note that this method will only be called if requiredAcks = -1 and we are waiting for all replicas in ISR to be
* fully caught up to the (local) leader's offset corresponding to this produce request before we acknowledge the
* produce request.
*/
def checkEnoughReplicasReachOffset(requiredOffset: Long): (Boolean, Errors) = {
leaderLogIfLocal match {
case Some(leaderLog) =>
// keep the current immutable replica list reference
val curMaximalIsr = isrState.maximalIsr
if (isTraceEnabled) {
def logEndOffsetString: ((Int, Long)) => String = {
case (brokerId, logEndOffset) => s"broker $brokerId: $logEndOffset"
}
val curInSyncReplicaObjects = (curMaximalIsr - localBrokerId).flatMap(getReplica)
val replicaInfo = curInSyncReplicaObjects.map(replica => (replica.brokerId, replica.logEndOffset))
val localLogInfo = (localBrokerId, localLogOrException.logEndOffset)
val (ackedReplicas, awaitingReplicas) = (replicaInfo + localLogInfo).partition { _._2 >= requiredOffset}
trace(s"Progress awaiting ISR acks for offset $requiredOffset: " +
s"acked: ${ackedReplicas.map(logEndOffsetString)}, " +
s"awaiting ${awaitingReplicas.map(logEndOffsetString)}")
}
val minIsr = leaderLog.config.minInSyncReplicas
if (leaderLog.highWatermark >= requiredOffset) {
/*
           * The topic may be configured not to accept messages if there are not enough replicas in ISR;
           * in this scenario the request was already appended locally and then added to the purgatory before the ISR was shrunk
*/
if (minIsr <= curMaximalIsr.size)
(true, Errors.NONE)
else
(true, Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND)
} else
(false, Errors.NONE)
case None =>
(false, Errors.NOT_LEADER_OR_FOLLOWER)
}
}
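  // Worked example (illustrative values): with min.insync.replicas = 2 and acks = -1, if the HW has
  // already reached requiredOffset but the maximal ISR has shrunk to just the leader, the result is
  // (true, NOT_ENOUGH_REPLICAS_AFTER_APPEND): the wait is over but the produce is failed. If the HW
  // has not yet reached requiredOffset, the result is (false, NONE) and the delayed produce keeps waiting.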
/**
* Check and maybe increment the high watermark of the partition;
* this function can be triggered when
*
* 1. Partition ISR changed
* 2. Any replica's LEO changed
*
* The HW is determined by the smallest log end offset among all replicas that are in sync or are considered caught-up.
* This way, if a replica is considered caught-up, but its log end offset is smaller than HW, we will wait for this
* replica to catch up to the HW before advancing the HW. This helps the situation when the ISR only includes the
* leader replica and a follower tries to catch up. If we don't wait for the follower when advancing the HW, the
* follower's log end offset may keep falling behind the HW (determined by the leader's log end offset) and therefore
* will never be added to ISR.
*
   * With the addition of AlterIsr, we also consider newly added replicas as part of the ISR when advancing
   * the HW. These replicas have not yet been committed to the ISR by the controller, so we could still revert
   * to the previously committed ISR. However, adding replicas to the ISR only makes the HW requirement more
   * restrictive and is therefore safe. We call this set the "maximal" ISR. See KIP-497 for more details.
   *
   * Note: there is no need to acquire the leaderIsrUpdateLock here since all callers of this private API acquire that lock.
*
* @return true if the HW was incremented, and false otherwise.
*/
private def maybeIncrementLeaderHW(leaderLog: UnifiedLog, curTime: Long = time.milliseconds): Boolean = {
// maybeIncrementLeaderHW is in the hot path, the following code is written to
// avoid unnecessary collection generation
var newHighWatermark = leaderLog.logEndOffsetMetadata
remoteReplicasMap.values.foreach { replica =>
// Note here we are using the "maximal", see explanation above
if (replica.logEndOffsetMetadata.messageOffset < newHighWatermark.messageOffset &&
(curTime - replica.lastCaughtUpTimeMs <= replicaLagTimeMaxMs || isrState.maximalIsr.contains(replica.brokerId))) {
newHighWatermark = replica.logEndOffsetMetadata
}
}
leaderLog.maybeIncrementHighWatermark(newHighWatermark) match {
case Some(oldHighWatermark) =>
debug(s"High watermark updated from $oldHighWatermark to $newHighWatermark")
true
case None =>
def logEndOffsetString: ((Int, LogOffsetMetadata)) => String = {
case (brokerId, logEndOffsetMetadata) => s"replica $brokerId: $logEndOffsetMetadata"
}
if (isTraceEnabled) {
val replicaInfo = remoteReplicas.map(replica => (replica.brokerId, replica.logEndOffsetMetadata)).toSet
val localLogInfo = (localBrokerId, localLogOrException.logEndOffsetMetadata)
trace(s"Skipping update high watermark since new hw $newHighWatermark is not larger than old value. " +
s"All current LEOs are ${(replicaInfo + localLogInfo).map(logEndOffsetString)}")
}
false
}
}
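  // Worked example (illustrative values): leader LEO = 10, maximal ISR followers at LEOs 8 and 10,
  // plus a caught-up non-ISR replica at LEO 9. The candidate HW is min(10, 8, 10, 9) = 8, so the HW
  // cannot advance past 8 until the follower at offset 8 fetches further.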
/**
   * The low watermark offset value, calculated only if the local replica is the partition leader.
   * It is only used by the leader broker to decide when a DeleteRecordsRequest is satisfied. Its value is
   * the minimum logStartOffset across all live replicas. The low watermark increases when the leader broker
   * receives either a FetchRequest or a DeleteRecordsRequest.
*/
def lowWatermarkIfLeader: Long = {
if (!isLeader)
throw new NotLeaderOrFollowerException(s"Leader not local for partition $topicPartition on broker $localBrokerId")
// lowWatermarkIfLeader may be called many times when a DeleteRecordsRequest is outstanding,
// care has been taken to avoid generating unnecessary collections in this code
var lowWaterMark = localLogOrException.logStartOffset
remoteReplicas.foreach { replica =>
if (metadataCache.hasAliveBroker(replica.brokerId) && replica.logStartOffset < lowWaterMark) {
lowWaterMark = replica.logStartOffset
}
}
futureLog match {
case Some(partitionFutureLog) =>
Math.min(lowWaterMark, partitionFutureLog.logStartOffset)
case None =>
lowWaterMark
}
}
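  // Illustrative values: if the leader's logStartOffset is 40 and two live followers report 35 and 50,
  // lowWatermarkIfLeader returns 35; a future log, when present, can only lower the result further.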
/**
* Try to complete any pending requests. This should be called without holding the leaderIsrUpdateLock.
*/
private def tryCompleteDelayedRequests(): Unit = delayedOperations.checkAndCompleteAll()
def maybeShrinkIsr(): Unit = {
def needsIsrUpdate: Boolean = {
!isrState.isInflight && inReadLock(leaderIsrUpdateLock) {
needsShrinkIsr()
}
}
if (needsIsrUpdate) {
val alterIsrUpdateOpt = inWriteLock(leaderIsrUpdateLock) {
leaderLogIfLocal.flatMap { leaderLog =>
val outOfSyncReplicaIds = getOutOfSyncReplicas(replicaLagTimeMaxMs)
if (!isrState.isInflight && outOfSyncReplicaIds.nonEmpty) {
val outOfSyncReplicaLog = outOfSyncReplicaIds.map { replicaId =>
val logEndOffsetMessage = getReplica(replicaId)
.map(_.logEndOffset.toString)
.getOrElse("unknown")
s"(brokerId: $replicaId, endOffset: $logEndOffsetMessage)"
}.mkString(" ")
val newIsrLog = (isrState.isr -- outOfSyncReplicaIds).mkString(",")
info(s"Shrinking ISR from ${isrState.isr.mkString(",")} to $newIsrLog. " +
s"Leader: (highWatermark: ${leaderLog.highWatermark}, " +
s"endOffset: ${leaderLog.logEndOffset}). " +
s"Out of sync replicas: $outOfSyncReplicaLog.")
Some(prepareIsrShrink(outOfSyncReplicaIds))
} else {
None
}
}
}
// Send the AlterIsr request outside of the LeaderAndIsr lock since the completion logic
// may increment the high watermark (and consequently complete delayed operations).
alterIsrUpdateOpt.foreach(submitAlterIsr)
}
}
private def needsShrinkIsr(): Boolean = {
leaderLogIfLocal.exists { _ => getOutOfSyncReplicas(replicaLagTimeMaxMs).nonEmpty }
}
private def isFollowerOutOfSync(replicaId: Int,
leaderEndOffset: Long,
currentTimeMs: Long,
maxLagMs: Long): Boolean = {
getReplica(replicaId).fold(true) { followerReplica =>
followerReplica.logEndOffset != leaderEndOffset &&
(currentTimeMs - followerReplica.lastCaughtUpTimeMs) > maxLagMs
}
}
/**
   * If the follower already has the same LEO as the leader, it will not be considered out of sync;
   * otherwise there are two cases that are handled here:
   *  1. Stuck followers: if the LEO of the replica hasn't been updated for maxLagMs ms,
   *                      the follower is stuck and should be removed from the ISR.
   *  2. Slow followers: if the replica has not read up to the LEO within the last maxLagMs ms,
   *                     then the follower is lagging and should be removed from the ISR.
   * Both cases are handled by checking lastCaughtUpTimeMs, which represents
   * the last time the replica was fully caught up. If either of the above conditions
   * is violated, that replica is considered to be out of sync.
   *
   * If an ISR update is in-flight, we will return an empty set here.
   */
def getOutOfSyncReplicas(maxLagMs: Long): Set[Int] = {
val current = isrState
if (!current.isInflight) {
val candidateReplicaIds = current.isr - localBrokerId
val currentTimeMs = time.milliseconds()
val leaderEndOffset = localLogOrException.logEndOffset
candidateReplicaIds.filter(replicaId => isFollowerOutOfSync(replicaId, leaderEndOffset, currentTimeMs, maxLagMs))
} else {
Set.empty
}
}
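  // Illustrative values: with maxLagMs = 30000, a follower whose LEO trails the leader and whose
  // lastCaughtUpTimeMs is 45 seconds old is out of sync, whether it is "stuck" (LEO frozen) or "slow"
  // (fetching but never reaching the leader's LEO). A follower whose LEO equals the leader's end offset
  // is never counted as out of sync, regardless of lastCaughtUpTimeMs.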
private def doAppendRecordsToFollowerOrFutureReplica(records: MemoryRecords, isFuture: Boolean): Option[LogAppendInfo] = {
if (isFuture) {
// The read lock is needed to handle race condition if request handler thread tries to
// remove future replica after receiving AlterReplicaLogDirsRequest.
inReadLock(leaderIsrUpdateLock) {
// Note the replica may be undefined if it is removed by a non-ReplicaAlterLogDirsThread before
// this method is called
futureLog.map { _.appendAsFollower(records) }
}
} else {
// The lock is needed to prevent the follower replica from being updated while ReplicaAlterDirThread
// is executing maybeReplaceCurrentWithFutureReplica() to replace follower replica with the future replica.
futureLogLock.synchronized {
Some(localLogOrException.appendAsFollower(records))
}
}
}
def appendRecordsToFollowerOrFutureReplica(records: MemoryRecords, isFuture: Boolean): Option[LogAppendInfo] = {
try {
doAppendRecordsToFollowerOrFutureReplica(records, isFuture)
} catch {
case e: UnexpectedAppendOffsetException =>
val log = if (isFuture) futureLocalLogOrException else localLogOrException
val logEndOffset = log.logEndOffset
if (logEndOffset == log.logStartOffset &&
e.firstOffset < logEndOffset && e.lastOffset >= logEndOffset) {
// This may happen if the log start offset on the leader (or current replica) falls in
// the middle of the batch due to delete records request and the follower tries to
// fetch its first offset from the leader.
// We handle this case here instead of Log#append() because we will need to remove the
          // segment that starts with the log start offset and create a new one with an earlier offset
// (base offset of the batch), which will move recoveryPoint backwards, so we will need
// to checkpoint the new recovery point before we append
val replicaName = if (isFuture) "future replica" else "follower"
info(s"Unexpected offset in append to $topicPartition. First offset ${e.firstOffset} is less than log start offset ${log.logStartOffset}." +
s" Since this is the first record to be appended to the $replicaName's log, will start the log from offset ${e.firstOffset}.")
truncateFullyAndStartAt(e.firstOffset, isFuture)
doAppendRecordsToFollowerOrFutureReplica(records, isFuture)
} else
throw e
}
}
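  // Illustrative scenario (hypothetical offsets): an empty follower log sits at logStartOffset =
  // logEndOffset = 100 while the leader's retained batch spans offsets 95-105 because a DeleteRecords
  // request moved the leader's start offset into the middle of that batch. The append throws
  // UnexpectedAppendOffsetException with firstOffset = 95 and lastOffset = 105, and the handler above
  // truncates fully, restarts the log at offset 95 and re-appends the batch.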
def appendRecordsToLeader(records: MemoryRecords, origin: AppendOrigin, requiredAcks: Int,
requestLocal: RequestLocal): LogAppendInfo = {
val (info, leaderHWIncremented) = inReadLock(leaderIsrUpdateLock) {
leaderLogIfLocal match {
case Some(leaderLog) =>
val minIsr = leaderLog.config.minInSyncReplicas
val inSyncSize = isrState.isr.size
// Avoid writing to leader if there are not enough insync replicas to make it safe
if (inSyncSize < minIsr && requiredAcks == -1) {
throw new NotEnoughReplicasException(s"The size of the current ISR ${isrState.isr} " +
s"is insufficient to satisfy the min.isr requirement of $minIsr for partition $topicPartition")
}
val info = leaderLog.appendAsLeader(records, leaderEpoch = this.leaderEpoch, origin,
interBrokerProtocolVersion, requestLocal)
// we may need to increment high watermark since ISR could be down to 1
(info, maybeIncrementLeaderHW(leaderLog))
case None =>
throw new NotLeaderOrFollowerException("Leader not local for partition %s on broker %d"
.format(topicPartition, localBrokerId))
}
}
info.copy(leaderHwChange = if (leaderHWIncremented) LeaderHwChange.Increased else LeaderHwChange.Same)
}
def readRecords(lastFetchedEpoch: Optional[Integer],
fetchOffset: Long,
currentLeaderEpoch: Optional[Integer],
maxBytes: Int,
fetchIsolation: FetchIsolation,
fetchOnlyFromLeader: Boolean,
minOneMessage: Boolean): LogReadInfo = inReadLock(leaderIsrUpdateLock) {
// decide whether to only fetch from leader
val localLog = localLogWithEpochOrException(currentLeaderEpoch, fetchOnlyFromLeader)
// Note we use the log end offset prior to the read. This ensures that any appends following
// the fetch do not prevent a follower from coming into sync.
val initialHighWatermark = localLog.highWatermark
val initialLogStartOffset = localLog.logStartOffset
val initialLogEndOffset = localLog.logEndOffset
val initialLastStableOffset = localLog.lastStableOffset
lastFetchedEpoch.ifPresent { fetchEpoch =>
val epochEndOffset = lastOffsetForLeaderEpoch(currentLeaderEpoch, fetchEpoch, fetchOnlyFromLeader = false)
val error = Errors.forCode(epochEndOffset.errorCode)
if (error != Errors.NONE) {
throw error.exception()
}
if (epochEndOffset.endOffset == UNDEFINED_EPOCH_OFFSET || epochEndOffset.leaderEpoch == UNDEFINED_EPOCH) {
throw new OffsetOutOfRangeException("Could not determine the end offset of the last fetched epoch " +
s"$lastFetchedEpoch from the request")
}
// If fetch offset is less than log start, fail with OffsetOutOfRangeException, regardless of whether epochs are diverging
if (fetchOffset < initialLogStartOffset) {
throw new OffsetOutOfRangeException(s"Received request for offset $fetchOffset for partition $topicPartition, " +
s"but we only have log segments in the range $initialLogStartOffset to $initialLogEndOffset.")
}
if (epochEndOffset.leaderEpoch < fetchEpoch || epochEndOffset.endOffset < fetchOffset) {
val emptyFetchData = FetchDataInfo(
fetchOffsetMetadata = LogOffsetMetadata(fetchOffset),
records = MemoryRecords.EMPTY,
abortedTransactions = None
)
val divergingEpoch = new FetchResponseData.EpochEndOffset()
.setEpoch(epochEndOffset.leaderEpoch)
.setEndOffset(epochEndOffset.endOffset)
return LogReadInfo(
fetchedData = emptyFetchData,
divergingEpoch = Some(divergingEpoch),
highWatermark = initialHighWatermark,
logStartOffset = initialLogStartOffset,
logEndOffset = initialLogEndOffset,
lastStableOffset = initialLastStableOffset)
}
}
val fetchedData = localLog.read(fetchOffset, maxBytes, fetchIsolation, minOneMessage)
LogReadInfo(
fetchedData = fetchedData,
divergingEpoch = None,
highWatermark = initialHighWatermark,
logStartOffset = initialLogStartOffset,
logEndOffset = initialLogEndOffset,
lastStableOffset = initialLastStableOffset)
}
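  // Illustrative scenario (hypothetical offsets): a follower that was briefly leader in epoch 5 may
  // fetch offset 120 with lastFetchedEpoch = 5 while, on the current leader, epoch 5 actually ended at
  // offset 110. The check above then returns empty records with divergingEpoch = (epoch 5, endOffset 110),
  // letting the caller truncate to 110 before fetching again.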
def fetchOffsetForTimestamp(timestamp: Long,
isolationLevel: Option[IsolationLevel],
currentLeaderEpoch: Optional[Integer],
fetchOnlyFromLeader: Boolean): Option[TimestampAndOffset] = inReadLock(leaderIsrUpdateLock) {
// decide whether to only fetch from leader
val localLog = localLogWithEpochOrException(currentLeaderEpoch, fetchOnlyFromLeader)
val lastFetchableOffset = isolationLevel match {
case Some(IsolationLevel.READ_COMMITTED) => localLog.lastStableOffset
case Some(IsolationLevel.READ_UNCOMMITTED) => localLog.highWatermark
case None => localLog.logEndOffset
}
val epochLogString = if (currentLeaderEpoch.isPresent) {
s"epoch ${currentLeaderEpoch.get}"
} else {
"unknown epoch"
}
// Only consider throwing an error if we get a client request (isolationLevel is defined) and the start offset
// is lagging behind the high watermark
val maybeOffsetsError: Option[ApiException] = leaderEpochStartOffsetOpt
.filter(epochStart => isolationLevel.isDefined && epochStart > localLog.highWatermark)
.map(epochStart => Errors.OFFSET_NOT_AVAILABLE.exception(s"Failed to fetch offsets for " +
s"partition $topicPartition with leader $epochLogString as this partition's " +
s"high watermark (${localLog.highWatermark}) is lagging behind the " +
s"start offset from the beginning of this epoch ($epochStart)."))
def getOffsetByTimestamp: Option[TimestampAndOffset] = {
logManager.getLog(topicPartition).flatMap(log => log.fetchOffsetByTimestamp(timestamp))
}
// If we're in the lagging HW state after a leader election, throw OffsetNotAvailable for "latest" offset
// or for a timestamp lookup that is beyond the last fetchable offset.
timestamp match {
case ListOffsetsRequest.LATEST_TIMESTAMP =>
maybeOffsetsError.map(e => throw e)
.orElse(Some(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, lastFetchableOffset, Optional.of(leaderEpoch))))
case ListOffsetsRequest.EARLIEST_TIMESTAMP =>
getOffsetByTimestamp
case _ =>
getOffsetByTimestamp.filter(timestampAndOffset => timestampAndOffset.offset < lastFetchableOffset)
.orElse(maybeOffsetsError.map(e => throw e))
}
}
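  // Note: right after a leader election the new leader's high watermark may still trail its epoch start
  // offset; a client ListOffsets request for the "latest" offset then fails with OFFSET_NOT_AVAILABLE
  // (via maybeOffsetsError above) rather than returning an offset that is not yet fetchable.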
def activeProducerState: DescribeProducersResponseData.PartitionResponse = {
val producerState = new DescribeProducersResponseData.PartitionResponse()
.setPartitionIndex(topicPartition.partition())
log.map(_.activeProducers) match {
case Some(producers) =>
producerState
.setErrorCode(Errors.NONE.code)
.setActiveProducers(producers.asJava)
case None =>
producerState
.setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code)
}
producerState
}
def fetchOffsetSnapshot(currentLeaderEpoch: Optional[Integer],
fetchOnlyFromLeader: Boolean): LogOffsetSnapshot = inReadLock(leaderIsrUpdateLock) {
// decide whether to only fetch from leader
val localLog = localLogWithEpochOrException(currentLeaderEpoch, fetchOnlyFromLeader)
localLog.fetchOffsetSnapshot
}
def legacyFetchOffsetsForTimestamp(timestamp: Long,
maxNumOffsets: Int,
isFromConsumer: Boolean,
fetchOnlyFromLeader: Boolean): Seq[Long] = inReadLock(leaderIsrUpdateLock) {
val localLog = localLogWithEpochOrException(Optional.empty(), fetchOnlyFromLeader)
val allOffsets = localLog.legacyFetchOffsetsBefore(timestamp, maxNumOffsets)
if (!isFromConsumer) {
allOffsets
} else {
val hw = localLog.highWatermark
if (allOffsets.exists(_ > hw))
hw +: allOffsets.dropWhile(_ > hw)
else
allOffsets
}
}
def logStartOffset: Long = {
inReadLock(leaderIsrUpdateLock) {
leaderLogIfLocal.map(_.logStartOffset).getOrElse(-1)
}
}
/**
* Update logStartOffset and low watermark if 1) offset <= highWatermark and 2) it is the leader replica.
* This function can trigger log segment deletion and log rolling.
*
   * Returns the low watermark of the partition.
*/
def deleteRecordsOnLeader(offset: Long): LogDeleteRecordsResult = inReadLock(leaderIsrUpdateLock) {
leaderLogIfLocal match {
case Some(leaderLog) =>
if (!leaderLog.config.delete)
throw new PolicyViolationException(s"Records of partition $topicPartition can not be deleted due to the configured policy")
val convertedOffset = if (offset == DeleteRecordsRequest.HIGH_WATERMARK)
leaderLog.highWatermark
else
offset
if (convertedOffset < 0)
throw new OffsetOutOfRangeException(s"The offset $convertedOffset for partition $topicPartition is not valid")
leaderLog.maybeIncrementLogStartOffset(convertedOffset, ClientRecordDeletion)
LogDeleteRecordsResult(
requestedOffset = convertedOffset,
lowWatermark = lowWatermarkIfLeader)
case None =>
throw new NotLeaderOrFollowerException(s"Leader not local for partition $topicPartition on broker $localBrokerId")
}
}
/**
* Truncate the local log of this partition to the specified offset and checkpoint the recovery point to this offset
*
* @param offset offset to be used for truncation
* @param isFuture True iff the truncation should be performed on the future log of this partition
*/
def truncateTo(offset: Long, isFuture: Boolean): Unit = {
// The read lock is needed to prevent the follower replica from being truncated while ReplicaAlterDirThread
// is executing maybeReplaceCurrentWithFutureReplica() to replace follower replica with the future replica.
inReadLock(leaderIsrUpdateLock) {
logManager.truncateTo(Map(topicPartition -> offset), isFuture = isFuture)
}
}
/**
* Delete all data in the local log of this partition and start the log at the new offset
*
* @param newOffset The new offset to start the log with
* @param isFuture True iff the truncation should be performed on the future log of this partition
*/
def truncateFullyAndStartAt(newOffset: Long, isFuture: Boolean): Unit = {
// The read lock is needed to prevent the follower replica from being truncated while ReplicaAlterDirThread
// is executing maybeReplaceCurrentWithFutureReplica() to replace follower replica with the future replica.
inReadLock(leaderIsrUpdateLock) {
logManager.truncateFullyAndStartAt(topicPartition, newOffset, isFuture = isFuture)
}
}
/**
* Find the (exclusive) last offset of the largest epoch less than or equal to the requested epoch.
*
* @param currentLeaderEpoch The expected epoch of the current leader (if known)
* @param leaderEpoch Requested leader epoch
* @param fetchOnlyFromLeader Whether or not to require servicing only from the leader
*
* @return The requested leader epoch and the end offset of this leader epoch, or if the requested
* leader epoch is unknown, the leader epoch less than the requested leader epoch and the end offset
* of this leader epoch. The end offset of a leader epoch is defined as the start
* offset of the first leader epoch larger than the leader epoch, or else the log end
* offset if the leader epoch is the latest leader epoch.
*/
def lastOffsetForLeaderEpoch(currentLeaderEpoch: Optional[Integer],
leaderEpoch: Int,
fetchOnlyFromLeader: Boolean): EpochEndOffset = {
inReadLock(leaderIsrUpdateLock) {
val localLogOrError = getLocalLog(currentLeaderEpoch, fetchOnlyFromLeader)
localLogOrError match {
case Left(localLog) =>
localLog.endOffsetForEpoch(leaderEpoch) match {
case Some(epochAndOffset) => new EpochEndOffset()
.setPartition(partitionId)
.setErrorCode(Errors.NONE.code)
.setLeaderEpoch(epochAndOffset.leaderEpoch)
.setEndOffset(epochAndOffset.offset)
case None => new EpochEndOffset()
.setPartition(partitionId)
.setErrorCode(Errors.NONE.code)
}
case Right(error) => new EpochEndOffset()
.setPartition(partitionId)
.setErrorCode(error.code)
}
}
}
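  // Worked example (illustrative epoch cache): with entries [(epoch 0, startOffset 0), (epoch 3,
  // startOffset 40)] and a log end offset of 75, a request for epoch 0 answers (leaderEpoch = 0,
  // endOffset = 40), while a request for epoch 3 (the latest) answers (leaderEpoch = 3, endOffset = 75).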
private def prepareIsrExpand(newInSyncReplicaId: Int): PendingExpandIsr = {
// When expanding the ISR, we assume that the new replica will make it into the ISR
// before we receive confirmation that it has. This ensures that the HW will already
// reflect the updated ISR even if there is a delay before we receive the confirmation.
// Alternatively, if the update fails, no harm is done since the expanded ISR puts
// a stricter requirement for advancement of the HW.
val isrToSend = isrState.isr + newInSyncReplicaId
val newLeaderAndIsr = new LeaderAndIsr(localBrokerId, leaderEpoch, isrToSend.toList, zkVersion)
val updatedState = PendingExpandIsr(isrState.isr, newInSyncReplicaId, newLeaderAndIsr)
isrState = updatedState
updatedState
}
private[cluster] def prepareIsrShrink(outOfSyncReplicaIds: Set[Int]): PendingShrinkIsr = {
// When shrinking the ISR, we cannot assume that the update will succeed as this could
// erroneously advance the HW if the `AlterIsr` were to fail. Hence the "maximal ISR"
    // for `PendingShrinkIsr` is the current ISR.
val isrToSend = isrState.isr -- outOfSyncReplicaIds
val newLeaderAndIsr = new LeaderAndIsr(localBrokerId, leaderEpoch, isrToSend.toList, zkVersion)
val updatedState = PendingShrinkIsr(isrState.isr, outOfSyncReplicaIds, newLeaderAndIsr)
isrState = updatedState
updatedState
}
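  // Note the asymmetry between the two helpers above: an expansion is reflected in the "maximal" ISR
  // immediately (a larger ISR only makes HW advancement stricter), whereas a shrink keeps the old ISR
  // as the maximal set until the controller confirms it, so a failed AlterIsr can never have advanced
  // the HW further than the committed ISR would allow.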
private def submitAlterIsr(proposedIsrState: PendingIsrChange): CompletableFuture[LeaderAndIsr] = {
debug(s"Submitting ISR state change $proposedIsrState")
val future = alterIsrManager.submit(topicPartition, proposedIsrState.sentLeaderAndIsr, controllerEpoch)
future.whenComplete { (leaderAndIsr, e) =>
var hwIncremented = false
var shouldRetry = false
inWriteLock(leaderIsrUpdateLock) {
if (isrState != proposedIsrState) {
// This means isrState was updated through leader election or some other mechanism
// before we got the AlterIsr response. We don't know what happened on the controller
// exactly, but we do know this response is out of date so we ignore it.
debug(s"Ignoring failed ISR update to $proposedIsrState since we have already " +
s"updated state to $isrState")
} else if (leaderAndIsr != null) {
hwIncremented = handleAlterIsrUpdate(proposedIsrState, leaderAndIsr)
} else {
shouldRetry = handleAlterIsrError(proposedIsrState, Errors.forException(e))
}
}
if (hwIncremented) {
tryCompleteDelayedRequests()
}
// Send the AlterIsr request outside of the LeaderAndIsr lock since the completion logic
// may increment the high watermark (and consequently complete delayed operations).
if (shouldRetry) {
submitAlterIsr(proposedIsrState)
}
}
}
/**
* Handle a failed `AlterIsr` request. For errors which are non-retriable, we simply give up.
* This leaves [[Partition.isrState]] in a pending state. Since the error was non-retriable,
* we are okay staying in this state until we see new metadata from LeaderAndIsr (or an update
* to the KRaft metadata log).
*
* @param proposedIsrState The ISR state change that was requested
* @param error The error returned from [[AlterIsrManager]]
* @return true if the `AlterIsr` request should be retried, false otherwise
*/
private def handleAlterIsrError(
proposedIsrState: PendingIsrChange,
error: Errors
): Boolean = {
isrChangeListener.markFailed()
error match {
case Errors.OPERATION_NOT_ATTEMPTED =>
// Since the operation was not attempted, it is safe to reset back to the committed state.
isrState = CommittedIsr(proposedIsrState.isr)
debug(s"Failed to update ISR to $proposedIsrState since there is a pending ISR update still inflight. " +
s"ISR state has been reset to the latest committed state $isrState")
false
case Errors.UNKNOWN_TOPIC_OR_PARTITION =>
debug(s"Failed to update ISR to $proposedIsrState since the controller doesn't know about " +
"this topic or partition. Giving up.")
false
case Errors.FENCED_LEADER_EPOCH =>
debug(s"Failed to update ISR to $proposedIsrState since the leader epoch is old. Giving up.")
false
case Errors.INVALID_UPDATE_VERSION =>
debug(s"Failed to update ISR to $proposedIsrState because the version is invalid. Giving up.")
false
case _ =>
warn(s"Failed to update ISR to $proposedIsrState due to unexpected $error. Retrying.")
true
}
}
/**
* Handle a successful `AlterIsr` response.
*
* @param proposedIsrState The ISR state change that was requested
* @param leaderAndIsr The updated LeaderAndIsr state
   * @return true if the high watermark was successfully incremented following the update, false otherwise
*/
private def handleAlterIsrUpdate(
proposedIsrState: PendingIsrChange,
leaderAndIsr: LeaderAndIsr
): Boolean = {
// Success from controller, still need to check a few things
if (leaderAndIsr.leaderEpoch != leaderEpoch) {
debug(s"Ignoring new ISR $leaderAndIsr since we have a stale leader epoch $leaderEpoch.")
isrChangeListener.markFailed()
false
} else if (leaderAndIsr.zkVersion < zkVersion) {
debug(s"Ignoring new ISR $leaderAndIsr since we have a newer version $zkVersion.")
isrChangeListener.markFailed()
false
} else {
// This is one of two states:
// 1) leaderAndIsr.zkVersion > zkVersion: Controller updated to new version with proposedIsrState.
// 2) leaderAndIsr.zkVersion == zkVersion: No update was performed since proposed and actual state are the same.
// In both cases, we want to move from Pending to Committed state to ensure new updates are processed.
isrState = CommittedIsr(leaderAndIsr.isr.toSet)
zkVersion = leaderAndIsr.zkVersion
info(s"ISR updated to ${isrState.isr.mkString(",")} and version updated to $zkVersion")
proposedIsrState match {
case PendingExpandIsr(_, _, _) => isrChangeListener.markExpand()
case PendingShrinkIsr(_, _, _) => isrChangeListener.markShrink()
}
// we may need to increment high watermark since ISR could be down to 1
leaderLogIfLocal.exists(log => maybeIncrementLeaderHW(log))
}
}
override def equals(that: Any): Boolean = that match {
case other: Partition => partitionId == other.partitionId && topic == other.topic
case _ => false
}
override def hashCode: Int =
31 + topic.hashCode + 17 * partitionId
override def toString: String = {
val partitionString = new StringBuilder
partitionString.append("Topic: " + topic)
partitionString.append("; Partition: " + partitionId)
partitionString.append("; Leader: " + leaderReplicaIdOpt)
partitionString.append("; Replicas: " + assignmentState.replicas.mkString(","))
partitionString.append("; ISR: " + isrState.isr.mkString(","))
assignmentState match {
case OngoingReassignmentState(adding, removing, _) =>
partitionString.append("; AddingReplicas: " + adding.mkString(","))
partitionString.append("; RemovingReplicas: " + removing.mkString(","))
case _ =>
}
partitionString.toString
}
}
| TiVo/kafka | core/src/main/scala/kafka/cluster/Partition.scala | Scala | apache-2.0 | 67,163 |
package io.buoyant.router.h2
import com.twitter.conversions.time._
import com.twitter.finagle.buoyant.h2.service.{H2Classifier, H2ReqRep, H2ReqRepFrame}
import com.twitter.finagle.buoyant.h2._
import com.twitter.finagle.service.{ResponseClass, RetryBudget}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.{Service, SimpleFilter}
import com.twitter.util._
import scala.{Stream => SStream}
/**
* The ClassifiedRetryFilter uses BufferedStreams to implement retries. The request stream is
* buffered and a child stream is sent to the service. This is done so that if it becomes
* necessary to retry, a new child of the request stream can be created.
*
* The response stream is also buffered and held until either the response stream completes
* or the response buffer becomes full. If the response stream completes, we use the provided
* ResponseClassifier to determine if the request should be retried. If so, we discard the
* response stream, fork a new child of the request stream, and send the new request stream to
* the service. If not, we return the response stream to the caller.
*/
class ClassifiedRetryFilter(
stats: StatsReceiver,
classifier: H2Classifier,
backoffs: SStream[Duration],
budget: RetryBudget,
classificationTimeout: Duration = Duration.Top,
requestBufferSize: Long = ClassifiedRetryFilter.DefaultBufferSize,
responseBufferSize: Long = ClassifiedRetryFilter.DefaultBufferSize
)(implicit timer: Timer) extends SimpleFilter[Request, Response] {
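  // Illustrative construction (hypothetical values, not taken from this codebase's configuration):
  //   new ClassifiedRetryFilter(stats, classifier,
  //     backoffs = SStream.fill(3)(100.millis), budget = RetryBudget())
  // i.e. at most three retries spaced 100ms apart, drawing on a default Finagle RetryBudget.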
private[h2] val retriesStat = stats.scope("retries").stat("per_request")
private[h2] val totalRetries = stats.scope("retries").counter("total")
private[h2] val budgetExhausted =
stats.scope("retries").counter("budget_exhausted")
private[h2] val backoffsExhausted =
stats.scope("retries").counter("backoffs_exhausted")
private[h2] val requestStreamTooLong =
stats.scope("retries").counter("request_stream_too_long")
private[h2] val responseStreamTooLong =
stats.scope("retries").counter("response_stream_too_long")
private[h2] val classificationTimeoutCounter =
stats.scope("retries").counter("classification_timeout")
private[h2] val budgetGauge = stats.scope("retries").addGauge("budget") { budget.balance }
private[this] val responseClassifier = classifier.responseClassifier.lift
override def apply(
request: Request,
service: Service[Request, Response]
): Future[Response] = {
budget.deposit()
// Buffer the request stream so that we can fork another child stream if we need to retry.
val requestBuffer = new BufferedStream(request.stream, requestBufferSize)
val fork = Future.const(requestBuffer.fork())
def dispatch(reqStream: Stream, backoffs: SStream[Duration], count: Int): Future[Response] = {
val req = Request(request.headers.dup(), reqStream)
      // Attempt to retry. If a retry is not possible (because the request buffer has been
      // discarded, the backoffs are exhausted, or the retry budget is empty), fall back to
      // the `orElse` response.
@inline def retry(orElse: => Future[Response], onRetry: => Unit = ()): Future[Response] = {
requestBuffer.fork() match {
case Return(s) =>
backoffs match {
case pause #:: rest =>
if (budget.tryWithdraw()) {
// Retry!
onRetry
totalRetries.incr()
schedule(pause)(dispatch(s, rest, count + 1))
} else {
// Not enough retry budget to retry.
budgetExhausted.incr()
consumeAll(s)
orElse
}
case _ =>
// We ran out of retries.
backoffsExhausted.incr()
consumeAll(s)
orElse
}
case Throw(e) =>
// We could not create a new child request stream so just return the response stream.
requestStreamTooLong.incr()
orElse
}
}
service(req).flatMap { rsp =>
// Buffer the response stream so that we can attempt to classify it before returning
// or discarding it.
val responseBuffer = new BufferedStream(rsp.stream, responseBufferSize)
// We eagerly create a child response stream since we need something to return in case we
// don't want to (or can't) retry.
val responseStream = responseBuffer.fork()
// Discard the buffers and return the current response stream.
@inline def discardAndReturn(): Future[Response] = {
retriesStat.add(count)
requestBuffer.discardBuffer()
responseBuffer.discardBuffer()
Future.const(responseStream).map(Response(rsp.headers, _))
}
// We will retry so discard the current response stream and response stream buffer.
@inline def discardResponse(): Unit = {
responseBuffer.discardBuffer()
responseStream.foreach { rs => consumeAll(rs); () }
}
// Attempt early classification before the stream is complete.
responseClassifier(H2ReqRep(req, Return(rsp))) match {
case Some(ResponseClass.Successful(_) | ResponseClass.Failed(false)) =>
discardAndReturn()
case Some(ResponseClass.Failed(true)) =>
// Request is retryable, attempt to create a new child request stream.
retry(orElse = discardAndReturn(), onRetry = discardResponse())
case None =>
// Unable to classify at this time. Next try to classify based on the stream.
retryable(req, rsp, responseBuffer).flatMap { retryable =>
if (retryable) {
// Request is retryable, attempt to create a new child request stream.
retry(orElse = discardAndReturn(), onRetry = discardResponse())
} else {
// Request is not retryable so just return the response stream.
discardAndReturn()
}
}
}
}.rescue {
case e if responseClassifier(H2ReqRep(req, Throw(e))).contains(ResponseClass.RetryableFailure) =>
// Request is retryable, attempt to create a new child request stream.
retry(orElse = { retriesStat.add(count); Future.exception(e) })
}
}
fork.flatMap(dispatch(_, backoffs, 0))
}
@inline
private[this] def schedule(d: Duration)(f: => Future[Response]) = {
if (d > 0.seconds) {
val promise = new Promise[Response]
timer.schedule(Time.now + d) {
promise.become(f)
}
promise
} else f
}
private[this] def retryable(req: Request, rsp: Response, responseBuffer: BufferedStream): Future[Boolean] = {
// Create a child response stream for the sole purpose of getting the last frame for
// response classification and determine if the request is retryable.
responseBuffer.fork() match {
case Return(s) =>
// Attempt to determine retryability based on the final frame. Completes when the stream is
// fully buffered.
val fullyBuffered = retryable(req, rsp, s)
.raiseWithin(classificationTimeout)
.handle {
case _: TimeoutException =>
classificationTimeoutCounter.incr()
false
}
// If the buffer is discarded before reading the final frame, we cannot retry.
val bufferDiscarded = responseBuffer.onBufferDiscarded.map(_ => false)
// Wait until the response stream is fully buffered or the buffer becomes full.
Future.selectIndex(IndexedSeq(fullyBuffered, bufferDiscarded)).flatMap {
case 0 =>
fullyBuffered
case 1 =>
responseStreamTooLong.incr()
bufferDiscarded
}
case Throw(_) =>
responseStreamTooLong.incr()
Future.False
}
}
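  // Note: Future.selectIndex above races "classification finished" against "response buffer overflowed";
  // whichever completes first decides retryability, so an over-long response is handed back to the
  // caller unclassified instead of being held until the stream ends.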
/**
* Determine if the request is retryable. Will read and release the entire response stream
* as a side effect.
*/
private[this] def retryable(req: Request, rsp: Response, responseStream: Stream): Future[Boolean] = {
consumeAllButLast(responseStream).transform {
case Return(Some(f)) =>
val rc = classifier.streamClassifier(H2ReqRepFrame(req, Return(rsp, Some(Return(f)))))
val canRetry = rc == ResponseClass.RetryableFailure
f.release()
Future.value(canRetry)
case Return(None) =>
val rc = classifier.streamClassifier(H2ReqRepFrame(req, Return(rsp, None)))
val canRetry = rc == ResponseClass.RetryableFailure
Future.value(canRetry)
case Throw(e) =>
val rc = classifier.streamClassifier(H2ReqRepFrame(req, Return(rsp, Some(Throw(e)))))
val canRetry = rc == ResponseClass.RetryableFailure
Future.value(canRetry)
}
}
/** Read and release all frames from the Stream */
private[this] def consumeAll(stream: Stream): Future[Unit] = {
consumeAllButLast(stream).map {
case Some(frame) =>
frame.release(); ()
case None => ()
}
}
/** Read and release all but the last frame from a Stream. Then return the last frame */
private[this] def consumeAllButLast(stream: Stream): Future[Option[Frame]] =
if (stream.isEmpty) {
Future.None
} else {
stream.read().flatMap { frame =>
if (frame.isEnd) {
Future.value(Some(frame))
} else {
frame.release()
consumeAllButLast(stream)
}
}
}
}
object ClassifiedRetryFilter {
val DefaultBufferSize = 65535
}
| denverwilliams/linkerd | router/h2/src/main/scala/io/buoyant/router/h2/ClassifiedRetryFilter.scala | Scala | apache-2.0 | 9,585 |
package scuff
import org.junit._, Assert._
import scala.collection.immutable.HashMap
class TestMonoid {
@Test
def sum(): Unit = {
implicit val ms = Monoid.Sum[Int]
val list: List[Int] = 51 :: 17 :: 99 :: Nil
val sum = ms reduce list
assertEquals(51 + 17 + 99, sum)
}
@Test
def product(): Unit = {
val m = Monoid.Product[Int]
val list: List[Int] = 51 :: 17 :: 99 :: Nil
val prod = list.foldLeft(m.identity)(m.op)
assertEquals(51 * 17 * 99, prod)
}
@Test
def `Sum HashMap values`(): Unit = {
val hm1 = HashMap[String, Long]("a" -> 22, "b" -> 77, "c" -> 111)
val hm2 = HashMap[String, Long]("c" -> 9, "d" -> 5, "e" -> 0)
val expected = HashMap[String, Long]("a" -> 22, "b" -> 77, "c" -> (111 + 9), "d" -> 5, "e" -> 0)
val actual1 = Monoid.HashMap[String, Long](_ + _).op(hm1, hm2)
assertEquals(expected, actual1)
val actual2 = Monoid.HashMap[String, Long](Monoid.Sum[Long].op).op(hm1, hm2)
assertEquals(expected, actual2)
}
}
| nilskp/scuff | src/test/scala/scuff/TestMonoid.scala | Scala | mit | 1,003 |
package com.outr.arango.api
import com.outr.arango.api.model._
import io.youi.client.HttpClient
import io.youi.http.HttpMethod
import io.youi.net._
import io.circe.Json
import scala.concurrent.{ExecutionContext, Future}
object APISimpleWithin {
def put(client: HttpClient, body: PutAPISimpleWithin)(implicit ec: ExecutionContext): Future[Json] = client
.method(HttpMethod.Put)
.path(path"/_api/simple/within", append = true)
.restful[PutAPISimpleWithin, Json](body)
} | outr/arangodb-scala | api/src/main/scala/com/outr/arango/api/APISimpleWithin.scala | Scala | mit | 491 |
package com.kifi.slack.models
import java.util.UUID
import com.kifi.slack.models.SlackUserPresenceState.{ Away, Active }
import org.joda.time.DateTime
import play.api.libs.functional.syntax._
import play.api.libs.json._
case class SlackAuthScope(value: String)
object SlackAuthScope {
val Identify = SlackAuthScope("identify")
val Bot = SlackAuthScope("bot")
val Commands = SlackAuthScope("commands")
val ChannelsWrite = SlackAuthScope("channels:write")
val ChannelsHistory = SlackAuthScope("channels:history")
val ChannelsRead = SlackAuthScope("channels:read")
val ChatWrite = SlackAuthScope("chat:write")
val ChatWriteBot = SlackAuthScope("chat:write:bot")
val ChatWriteUser = SlackAuthScope("chat:write:user")
val EmojiRead = SlackAuthScope("emoji:read")
val FilesWriteUser = SlackAuthScope("files:write:user")
val FilesRead = SlackAuthScope("files:read")
val GroupsWrite = SlackAuthScope("groups:write")
val GroupsHistory = SlackAuthScope("groups:history")
val GroupsRead = SlackAuthScope("groups:read")
val IncomingWebhook = SlackAuthScope("incoming-webhook")
val ImWrite = SlackAuthScope("im:write")
val ImHistory = SlackAuthScope("im:history")
val ImRead = SlackAuthScope("im:read")
val MpimWrite = SlackAuthScope("mpim:write")
val MpimHistory = SlackAuthScope("mpim:history")
val MpimRead = SlackAuthScope("mpim:read")
val PinsWrite = SlackAuthScope("pins:write")
val PinsRead = SlackAuthScope("pins:read")
val ReactionsWrite = SlackAuthScope("reactions:write")
val ReactionsRead = SlackAuthScope("reactions:read")
val SearchRead = SlackAuthScope("search:read")
val StarsWrite = SlackAuthScope("stars:write")
val StarsRead = SlackAuthScope("stars:read")
val TeamRead = SlackAuthScope("team:read")
val UsersRead = SlackAuthScope("users:read")
val UsersWrite = SlackAuthScope("users:write")
// scopes covering APIs that can use a human-user token or bot-user token transparently
val inheritableBotScopes: Set[SlackAuthScope] = Set(Bot, UsersRead, TeamRead, ChannelsRead)
def setFromString(str: String): Set[SlackAuthScope] = str.split(',').filter(_.nonEmpty).map(SlackAuthScope(_)).toSet
def stringifySet(scopes: Set[SlackAuthScope]) = scopes.map(_.value).mkString(",")
val slackFormat: Format[Set[SlackAuthScope]] = Format(
Reads { j => j.validate[String].map(setFromString) },
Writes { scopes => JsString(stringifySet(scopes)) }
)
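  // Illustrative round trip: setFromString("channels:read,chat:write") yields
  // Set(SlackAuthScope("channels:read"), SlackAuthScope("chat:write")), and stringifySet is its inverse
  // up to ordering; slackFormat above simply lifts this pair into a play-json Format of the
  // comma-separated string form Slack uses.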
}
case class SlackAuthorizationCode(code: String)
case class SlackAuthState(state: String)
object SlackAuthState {
def apply(): SlackAuthState = SlackAuthState(UUID.randomUUID().toString)
}
case class SlackAuthorizationRequest(
url: String,
scopes: Set[SlackAuthScope],
uniqueToken: String,
redirectUri: Option[String])
case class SlackBotUserAuthorization(
userId: SlackUserId,
accessToken: SlackBotAccessToken)
object SlackBotUserAuthorization {
implicit val reads: Reads[SlackBotUserAuthorization] = (
(__ \\ 'bot_user_id).read[SlackUserId] and
(__ \\ 'bot_access_token).read[SlackBotAccessToken]
)(SlackBotUserAuthorization.apply _)
}
sealed trait SlackAuthorizationResponse {
def accessToken: SlackUserAccessToken
def scopes: Set[SlackAuthScope]
def teamId: SlackTeamId
}
object SlackAuthorizationResponse {
implicit val reads: Reads[SlackAuthorizationResponse] = {
SlackAppAuthorizationResponse.reads.map[SlackAuthorizationResponse](identity) orElse
SlackIdentityAuthorizationResponse.reads.map[SlackAuthorizationResponse](identity)
}
}
case class SlackIdentityAuthorizationResponse(
accessToken: SlackUserAccessToken,
scopes: Set[SlackAuthScope],
teamId: SlackTeamId,
userId: SlackUserId,
userFullName: String) extends SlackAuthorizationResponse
object SlackIdentityAuthorizationResponse {
val reads: Reads[SlackIdentityAuthorizationResponse] = (
(__ \\ 'access_token).read[SlackUserAccessToken] and
(__ \\ 'scope).read[Set[SlackAuthScope]](SlackAuthScope.slackFormat) and
(__ \\ 'team \\ 'id).read[SlackTeamId] and
(__ \\ 'user \\ 'id).read[SlackUserId] and
(__ \\ 'user \\ 'name).read[String]
)(SlackIdentityAuthorizationResponse.apply _)
}
case class SlackAppAuthorizationResponse(
accessToken: SlackUserAccessToken,
scopes: Set[SlackAuthScope],
teamId: SlackTeamId,
teamName: SlackTeamName,
incomingWebhook: Option[SlackIncomingWebhook],
botAuth: Option[SlackBotUserAuthorization]) extends SlackAuthorizationResponse
object SlackAppAuthorizationResponse {
val reads: Reads[SlackAppAuthorizationResponse] = (
(__ \\ 'access_token).read[SlackUserAccessToken] and
(__ \\ 'scope).read[Set[SlackAuthScope]](SlackAuthScope.slackFormat) and
(__ \\ 'team_id).read[SlackTeamId] and
(__ \\ 'team_name).read[SlackTeamName] and
(__ \\ 'incoming_webhook).readNullable[SlackIncomingWebhook] and
(__ \\ 'bot).readNullable[SlackBotUserAuthorization]
)(SlackAppAuthorizationResponse.apply _)
}
case class SlackIdentifyResponse(
url: String,
teamName: SlackTeamName,
userName: SlackUsername,
teamId: SlackTeamId,
userId: SlackUserId)
object SlackIdentifyResponse {
implicit val reads: Reads[SlackIdentifyResponse] = (
(__ \\ 'url).read[String] and
    (__ \\ 'team).read[SlackTeamName] and
(__ \\ 'user).read[SlackUsername] and
(__ \\ 'team_id).read[SlackTeamId] and
(__ \\ 'user_id).read[SlackUserId]
)(SlackIdentifyResponse.apply _)
}
case class SlackUserIdentityResponse(user: PartialSlackUserInfo, teamId: SlackTeamId, team: Option[PartialSlackTeamInfo]) {
def userId = user.id
}
object SlackUserIdentityResponse {
implicit val reads = (
(__ \\ 'user).read[PartialSlackUserInfo] and
(__ \\ 'team \\ 'id).read[SlackTeamId] and
(__ \\ 'team).readNullable[PartialSlackTeamInfo].orElse(Reads.pure(None))
)(SlackUserIdentityResponse.apply _)
}
sealed abstract class SlackUserPresenceState(val name: String)
object SlackUserPresenceState {
case object Active extends SlackUserPresenceState("active")
case object Away extends SlackUserPresenceState("away")
case object Unknown extends SlackUserPresenceState("unknown")
case object ERROR extends SlackUserPresenceState("error")
}
case class SlackUserPresence(
state: SlackUserPresenceState,
lastActivity: Option[DateTime],
originalJson: JsValue)
object SlackUserPresence {
val reads: Reads[SlackUserPresence] = new Reads[SlackUserPresence] {
def reads(jsVal: JsValue): JsResult[SlackUserPresence] = {
val json = jsVal.as[JsObject]
val state = (json \\ "presence").asOpt[String] map {
case Active.name => Active
case Away.name => Away
} getOrElse SlackUserPresenceState.Unknown
val lastActivity = (json \\ "last_activity").asOpt[DateTime]
JsSuccess(SlackUserPresence(state, lastActivity, json))
}
}
val UnknownPresence = SlackUserPresence(SlackUserPresenceState.Unknown, None, JsObject(Seq.empty))
private val writes: Writes[SlackUserPresence] = Writes(_.originalJson)
implicit val format: Format[SlackUserPresence] = Format(reads, writes)
}
| kifi/slack-client | src/main/scala/com/kifi/slack/models/SlackAuth.scala | Scala | mit | 7,042 |
package net.fehmicansaglam.bson
import akka.util.{ByteString, ByteStringBuilder}
import net.fehmicansaglam.bson.element.BinarySubtype
import net.fehmicansaglam.bson.util.{Codec, Converters}
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
object Implicits {
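  // Illustrative usage (assuming these implicits are in scope via `import Implicits._`):
  //   val n: BsonValue = 42 // lifted to BsonValueInteger(42)
  //   val s: BsonValue = "name" // lifted to BsonValueString("name")
  // Each wrapper also knows how to encode itself to the BSON wire format via `encode`.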
implicit class BsonValueDouble(value: Double) extends BsonValueNumber with Identifiable[Double] {
override def identifier: Double = value
override def encode: ByteString = new ByteStringBuilder().putDouble(value).result()
override def toString: String = s"$value"
override def toInt: Int = value.toInt
override def toDouble: Double = value
override def toLong: Long = value.toLong
}
object BsonValueDouble {
def unapply(value: BsonValueDouble): Option[Double] = Some(value.identifier)
}
implicit class BsonValueString(value: String) extends BsonValue with Identifiable[String] {
override def identifier: String = value
override def encode: ByteString = {
val builder = new ByteStringBuilder()
val bytes = value.getBytes("utf-8")
builder.putInt(bytes.length + 1)
builder.putBytes(bytes)
builder.putByte(0)
builder.result()
}
override def toString: String = s""" "$value" """.trim
}
object BsonValueString {
def unapply(value: BsonValueString): Option[String] = Some(value.identifier)
}
implicit class BsonValueObject(document: BsonDocument) extends BsonValue with Identifiable[BsonDocument] {
override def identifier: BsonDocument = document
override def encode: ByteString = document.encode
override def toString: String = document.toString
override def pretty(level: Int): String = document.pretty(level)
override def toJson(extended: Boolean): String = document.toJson(extended)
}
implicit class BsonValueArray(document: BsonDocument) extends BsonValue with Identifiable[BsonDocument] {
override def identifier: BsonDocument = document
override def encode: ByteString = document.encode
override def toString: String = s"[ ${document.elements.map(_.value).mkString(", ")} ]"
override def pretty(level: Int): String = {
val prefix = "\\t" * (level + 1)
val values = document.elements.map(_.value)
val init = if (values.isEmpty) "" else values.init.foldLeft("")(_ + prefix + _.pretty(level + 1) + ",\\n")
val last = if (values.isEmpty) "" else prefix + values.last.pretty(level + 1)
s"[\\n$init$last\\n${"\\t" * level}]"
}
override def toJson(extended: Boolean): String = {
s"[ ${document.elements.map(_.value.toJson(extended)).mkString(", ")} ]"
}
}
implicit class BsonValueBoolean(value: Boolean) extends BsonValue with Identifiable[Boolean] {
override def identifier: Boolean = value
override def encode: ByteString = {
val builder = new ByteStringBuilder()
if (value) builder.putByte(0x01) else builder.putByte(0x00)
builder.result()
}
override def toString: String = s"$value"
}
object BsonValueBoolean {
def unapply(value: BsonValueBoolean): Option[Boolean] = Some(value.identifier)
}
implicit class BsonValueInteger(value: Int) extends BsonValueNumber with Identifiable[Int] {
override def identifier: Int = value
override def encode: ByteString = new ByteStringBuilder().putInt(value).result()
override def toString: String = s"$value"
override def toInt: Int = value
override def toDouble: Double = value.toDouble
override def toLong: Long = value.toLong
}
object BsonValueInteger {
def unapply(value: BsonValueInteger): Option[Int] = Some(value.identifier)
}
implicit class BsonValueLong(value: Long) extends BsonValueNumber with Identifiable[Long] {
override def identifier: Long = value
override def encode: ByteString = new ByteStringBuilder().putLong(value).result()
override def toString: String = s"$value"
override def toInt: Int = value.toInt
override def toDouble: Double = value.toDouble
override def toLong: Long = value
override def toJson(extended: Boolean): String = if (extended) {
s"""{ "$$numberLong": "$value" }"""
} else {
value.toString
}
}
object BsonValueLong {
def unapply(value: BsonValueLong): Option[Long] = Some(value.identifier)
}
implicit class BsonValueObjectId(value: Array[Byte]) extends BsonValue with Identifiable[String] {
override val identifier: String = Converters.hex2Str(value)
override def encode: ByteString = ByteString.newBuilder.putBytes(value).result()
override def toString: String = s"""ObjectId("$identifier")"""
override def toJson(extended: Boolean): String = if (extended) {
s"""{ "$$oid": "$identifier" }"""
} else {
s""" "$identifier" """.trim
}
}
/**
* Type alias for BsonValueObjectId
*/
type ObjectId = BsonValueObjectId
implicit class BsonValueDateTime(value: DateTime) extends BsonValue with Identifiable[DateTime] {
override def identifier: DateTime = value
override def encode: ByteString = ByteString.newBuilder.putLong(value.getMillis).result()
override def toString: String = s"""ISODate("${ISODateTimeFormat.dateTime().print(value)}")"""
override def toJson(extended: Boolean): String = if (extended) {
s"""{ "$$date": { "$$numberLong": "${value.getMillis}" } }"""
} else {
value.getMillis.toString
}
}
object BsonValueDateTime {
def unapply(value: BsonValueDateTime): Option[DateTime] = Some(value.identifier)
}
case class BsonValueTimestamp(increment: Int, timestamp: Int) extends BsonValue with Identifiable[(Int, Int)] {
override def identifier: (Int, Int) = (increment, timestamp)
override def encode: ByteString = new ByteStringBuilder().putInt(increment).putInt(timestamp).result()
override def toString: String = s"""{ "$$timestamp": { "t": $timestamp, "i": $increment } }"""
}
case class BsonValueBinary(value: ByteString, subtype: BinarySubtype) extends BsonValue with Identifiable[ByteString] {
override def identifier: ByteString = value
override def encode: ByteString = {
ByteString.newBuilder
.putInt(value.length)
.putByte(subtype.code)
.append(value)
.result()
}
override def toJson(extended: Boolean): String = if (extended) {
s"""{ "$$binary": "${Codec.encodeBase64(value.toArray)}", "$$type": "${subtype.code.formatted("%02x")}" }"""
} else {
Codec.encodeBase64(value.toArray)
}
}
/**
   * Type and value aliases for BsonValueBinary
*/
val Binary = BsonValueBinary
type Binary = BsonValueBinary
case class BsonValueRegex(pattern: String, options: String) extends BsonValue with Identifiable[(String, String)] {
override def identifier: (String, String) = (pattern, options)
override def encode: ByteString = {
val builder = ByteString.newBuilder
putCString(builder, pattern)
putCString(builder, options)
builder.result()
}
override def toJson(extended: Boolean): String = s"""{ "$$regex": "$pattern", "$$options": "$options" }"""
}
}
| fehmicansaglam/tepkin | bson/src/main/scala/net/fehmicansaglam/bson/Implicits.scala | Scala | apache-2.0 | 7,150 |
package com.typesafe.slick.testkit.util
import java.util.logging.{Level, Logger}
import java.sql.{DriverManager, SQLException}
import slick.compiler.Phase
import slick.dbio._
import slick.memory.MemoryProfile
import slick.jdbc._
import slick.jdbc.GetResult._
import slick.jdbc.meta.MTable
import org.junit.Assert
import slick.util.ConfigExtensionMethods._
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext}
object StandardTestDBs {
lazy val H2Mem = new H2TestDB("h2mem", false) {
val url = "jdbc:h2:mem:test1"
override def isPersistent = false
}
/** A modified H2Mem that tests the `removeTakeDrop` phase (which is not used by any of the
* standard profiles). */
lazy val H2Rownum = new H2TestDB("h2rownum", false) {
val url = "jdbc:h2:mem:test_rownum"
override def isPersistent = false
override val profile = new H2Profile {
override protected def computeQueryCompiler = super.computeQueryCompiler.addAfter(Phase.removeTakeDrop, Phase.expandSums)
}
}
lazy val H2MemKeepAlive = new H2TestDB("h2mem", true) {
val url = "jdbc:h2:mem:test1"
}
lazy val H2Disk = new H2TestDB("h2disk", false) {
val dbName = "h2-"+confName
val url = "jdbc:h2:"+TestkitConfig.testDBPath+"/"+dbName
override def cleanUpBefore() = TestDB.deleteDBFiles(dbName)
// Recreating the DB is faster than dropping everything individually
override def dropUserArtifacts(implicit session: profile.Backend#Session) = {
session.close()
cleanUpBefore()
}
}
lazy val HsqldbMem = new HsqlDB("hsqldbmem") {
val dbName = "test1"
val url = "jdbc:hsqldb:mem:"+dbName+";user=SA;password=;shutdown=true"
override def isPersistent = false
}
lazy val HsqldbDisk = new HsqlDB("hsqldbdisk") {
val dbName = "hsqldb-"+confName
val url = "jdbc:hsqldb:file:"+TestkitConfig.testDBPath+"/"+dbName+";user=SA;password=;shutdown=true;hsqldb.applog=0"
override def cleanUpBefore() = TestDB.deleteDBFiles(dbName)
// Recreating the DB is faster than dropping everything individually
override def dropUserArtifacts(implicit session: profile.Backend#Session) = {
session.close()
cleanUpBefore()
}
}
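  // Added note: the `file:slick_test?mode=memory&cache=shared` URI below names an in-memory
  // SQLite database that is shared between connections of the same process and lives only as
  // long as at least one connection to it stays open.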
lazy val SQLiteMem = new SQLiteTestDB("jdbc:sqlite:file:slick_test?mode=memory&cache=shared", "sqlitemem") {
override def isPersistent = false
}
lazy val SQLiteDisk = {
val confName = "sqlitedisk"
val prefix = "sqlite-"+confName
new SQLiteTestDB("jdbc:sqlite:"+TestkitConfig.testDBPath+"/"+prefix+".db", confName) {
override def cleanUpBefore() = TestDB.deleteDBFiles(prefix)
}
}
lazy val DerbyMem = new DerbyDB("derbymem") {
val dbName = "test1"
val url = "jdbc:derby:memory:"+dbName+";create=true"
override def cleanUpBefore() = {
super.cleanUpBefore()
val dropUrl = "jdbc:derby:memory:"+dbName+";drop=true"
try { await(profile.backend.Database.forURL(dropUrl, driver = jdbcDriver).run(SimpleJdbcAction(_.connection))) }
catch { case e: SQLException => }
}
}
lazy val DerbyDisk = new DerbyDB("derbydisk") {
val dbName = "derby-"+confName
val url = "jdbc:derby:"+TestkitConfig.testDBPath+"/"+dbName+";create=true"
override def cleanUpBefore() = {
super.cleanUpBefore()
val dropUrl = "jdbc:derby:"+TestkitConfig.testDBPath+"/"+dbName+";shutdown=true"
try { await(profile.backend.Database.forURL(dropUrl, driver = jdbcDriver).run(SimpleJdbcAction(_.connection))) }
catch { case e: SQLException => }
TestDB.deleteDBFiles(dbName)
}
}
lazy val Postgres = new ExternalJdbcTestDB("postgres") {
val profile = PostgresProfile
override def localTables(implicit ec: ExecutionContext): DBIO[Vector[String]] =
ResultSetAction[(String,String,String, String)](_.conn.getMetaData().getTables("", "public", null, null)).map { ts =>
ts.filter(_._4.toUpperCase == "TABLE").map(_._3).sorted
}
override def localSequences(implicit ec: ExecutionContext): DBIO[Vector[String]] =
ResultSetAction[(String,String,String, String)](_.conn.getMetaData().getTables("", "public", null, null)).map { ts =>
ts.filter(_._4.toUpperCase == "SEQUENCE").map(_._3).sorted
}
override def capabilities = super.capabilities - TestDB.capabilities.jdbcMetaGetFunctions
}
lazy val MySQL = new ExternalJdbcTestDB("mysql") {
val profile = MySQLProfile
// Recreating the DB is faster than dropping everything individually
override def dropUserArtifacts(implicit session: profile.Backend#Session) = {
session.close()
cleanUpBefore()
}
}
lazy val Heap = new RelationalTestDB {
type Profile = MemoryProfile
val profile: Profile = MemoryProfile
val confName: String = "heap"
def createDB: profile.Backend#Database = profile.backend.Database(ExecutionContext.global)
def dropUserArtifacts(implicit session: profile.Backend#Session): Unit = {
val db = session.database
db.getTables.foreach(t => db.dropTable(t.name))
}
def assertTablesExist(tables: String*) = profile.api.SimpleDBIO { ctx =>
val all = ctx.session.database.getTables.map(_.name).toSet
for(t <- tables) {
if(!all.contains(t)) Assert.fail("Table "+t+" should exist")
}
}
def assertNotTablesExist(tables: String*) = profile.api.SimpleDBIO { ctx =>
val all = ctx.session.database.getTables.map(_.name).toSet
for(t <- tables) {
if(all.contains(t)) Assert.fail("Table "+t+" should not exist")
}
}
}
lazy val DB2 = new ExternalJdbcTestDB("db2") {
val profile = DB2Profile
import profile.api.actionBasedSQLInterpolation
override def canGetLocalTables = false
lazy val schema = config.getString("schema")
def dropSchema: DBIO[Unit] = {
import ExecutionContext.Implicits.global
for {
schema <- sql"select schemaname from syscat.schemata where schemaname = '#$schema'".as[String].headOption
_ <- if(schema.isDefined) {
println(s"[Dropping DB2 schema '$schema']")
sqlu"call sysproc.admin_drop_schema($schema, null, ${"ERRORSCHEMA"}, ${"ERRORTABLE"})"
} else DBIO.successful(())
} yield ()
}
override def cleanUpBefore(): Unit = {
import ExecutionContext.Implicits.global
await(databaseFor("testConn").run(for {
_ <- dropSchema
_ = println(s"[Creating DB2 schema '$schema']")
_ <- sqlu"create schema #$schema"
} yield ()))
}
override def dropUserArtifacts(implicit session: profile.Backend#Session) = {
session.close()
cleanUpBefore()
}
}
class SQLServerDB(confName: String) extends ExternalJdbcTestDB(confName) {
val profile = SQLServerProfile
import profile.api.actionBasedSQLInterpolation
// sqlserver has valid "select for update" syntax, but in testing on Appveyor, the test hangs due to lock escalation
// so exclude from explicit ForUpdate testing
override def capabilities = super.capabilities - TestDB.capabilities.selectForUpdateRowLocking
val defaultSchema = config.getStringOpt("defaultSchema")
override def localTables(implicit ec: ExecutionContext): DBIO[Vector[String]] = {
MTable.getTables(None, defaultSchema, None, Some(Seq("TABLE"))).map(_.map(_.name.name).sorted)
}
override def dropUserArtifacts(implicit session: profile.Backend#Session) = blockingRunOnSession { implicit ec =>
for {
tables <- localTables
_ <- DBIO.sequence(tables.map(t => sqlu"exec sp_MSdropconstraints #$t"))
tableStatements = tables.map(t => sqlu"drop table #${profile.quoteIdentifier(t)}")
_ <- DBIO.sequence(tableStatements)
} yield ()
}
}
lazy val SQLServerJTDS = new SQLServerDB("sqlserver-jtds") {
override def capabilities = super.capabilities - TestDB.capabilities.plainSql
}
lazy val SQLServer2012JTDS = new SQLServerDB("sqlserver2012-jtds") {
override def capabilities = super.capabilities - TestDB.capabilities.plainSql
}
lazy val SQLServer2014JTDS = new SQLServerDB("sqlserver2014-jtds") {
override def capabilities = super.capabilities - TestDB.capabilities.plainSql
}
lazy val SQLServer2017JTDS = new SQLServerDB("sqlserver2017-jtds") {
override def capabilities = super.capabilities - TestDB.capabilities.plainSql
}
lazy val SQLServerSQLJDBC = new SQLServerDB("sqlserver-sqljdbc") {
override def capabilities = profile.capabilities - JdbcCapabilities.createModel
}
lazy val SQLServer2012SQLJDBC = new SQLServerDB("sqlserver2012-sqljdbc") {
override def capabilities = profile.capabilities - JdbcCapabilities.createModel
}
lazy val SQLServer2014SQLJDBC = new SQLServerDB("sqlserver2014-sqljdbc") {
override def capabilities = profile.capabilities - JdbcCapabilities.createModel
}
lazy val SQLServer2017SQLJDBC = new SQLServerDB("sqlserver2017-sqljdbc") {
override def capabilities = profile.capabilities - JdbcCapabilities.createModel
}
lazy val Oracle = new ExternalJdbcTestDB("oracle") {
val profile = OracleProfile
import profile.api.actionBasedSQLInterpolation
override def canGetLocalTables = false
override def capabilities =
super.capabilities - TestDB.capabilities.jdbcMetaGetIndexInfo - TestDB.capabilities.transactionIsolation
override def localTables(implicit ec: ExecutionContext): DBIO[Vector[String]] = {
val tableNames = profile.defaultTables.map(_.map(_.name.name)).map(_.toVector)
tableNames
}
override def localSequences(implicit ec: ExecutionContext): DBIO[Vector[String]] = {
      // user_sequences is much quicker than going to the metadata if you don't know which schema the sequences are in
sql"select sequence_Name from user_sequences".as[String]
}
override def dropUserArtifacts(implicit session: profile.Backend#Session) =
blockingRunOnSession { implicit ec =>
for {
tables <- localTables
sequences <- localSequences
_ <- DBIO.seq(tables.map(t => sqlu"drop table #${profile.quoteIdentifier(t)} cascade constraints") ++
sequences.map(s => sqlu"drop sequence #${profile.quoteIdentifier(s)}"): _*)
} yield ()
}
}
}
abstract class H2TestDB(confName: String, keepAlive: Boolean) extends InternalJdbcTestDB(confName) {
val profile: Profile = H2Profile
val jdbcDriver = "org.h2.Driver"
override def capabilities = super.capabilities - TestDB.capabilities.jdbcMetaGetFunctions - TestDB.capabilities.jdbcMetaGetClientInfoProperties
override def createDB(): profile.Backend#Database = database.forURL(url, driver = jdbcDriver, keepAliveConnection = keepAlive)
}
class SQLiteTestDB(dburl: String, confName: String) extends InternalJdbcTestDB(confName) {
import profile.api.actionBasedSQLInterpolation
val profile = SQLiteProfile
val url = dburl
val jdbcDriver = "org.sqlite.JDBC"
override def localTables(implicit ec: ExecutionContext): DBIO[Vector[String]] =
super.localTables.map(_.filter(s => !s.toLowerCase.contains("sqlite_")))
override def dropUserArtifacts(implicit session: profile.Backend#Session) = blockingRunOnSession { implicit ec =>
for {
tables <- localTables
sequences <- localSequences
_ <- DBIO.seq((tables.map(t => sqlu"""drop table if exists #${profile.quoteIdentifier(t)}""") ++
sequences.map(t => sqlu"""drop sequence if exists #${profile.quoteIdentifier(t)}""")): _*)
} yield ()
}
}
abstract class DerbyDB(confName: String) extends InternalJdbcTestDB(confName) {
import profile.api.actionBasedSQLInterpolation
val profile = DerbyProfile
System.setProperty("derby.stream.error.method", classOf[DerbyDB].getName + ".DEV_NULL")
val jdbcDriver = "org.apache.derby.jdbc.EmbeddedDriver"
override def cleanUpBefore() = {
    // The default seems to be 1s and the CI environments can be a bit slow, so set a
    // conservative timeout to keep tests from failing intermittently.
DriverManager.setLoginTimeout(30)
}
override def localTables(implicit ec: ExecutionContext): DBIO[Vector[String]] =
ResultSetAction[(String, String, String, String)](_.conn.getMetaData().getTables(null, "APP", null, null)).map { ts =>
ts.map(_._3).sorted
}
override def dropUserArtifacts(implicit session: profile.Backend#Session) = try {
blockingRunOnSession { implicit ec =>
for {
_ <- sqlu"""create table "__derby_dummy"(x integer primary key)""".asTry
constraints <- sql"""select c.constraintname, t.tablename
from sys.sysconstraints c, sys.sysschemas s, sys.systables t
where c.schemaid = s.schemaid and c.tableid = t.tableid and s.schemaname = 'APP'
""".as[(String, String)]
_ <- DBIO.seq((for((c, t) <- constraints if !c.startsWith("SQL"))
yield sqlu"""alter table ${profile.quoteIdentifier(t)} drop constraint ${profile.quoteIdentifier(c)}"""): _*)
tables <- localTables
sequences <- localSequences
_ <- DBIO.seq((tables.map(t => sqlu"""drop table #${profile.quoteIdentifier(t)}""") ++
sequences.map(t => sqlu"""drop sequence #${profile.quoteIdentifier(t)}""")): _*)
} yield ()
}
} catch {
case e: Exception =>
println("[Caught Exception while dropping user artifacts in Derby: "+e+"]")
session.close()
cleanUpBefore()
}
}
object DerbyDB {
val DEV_NULL = new java.io.OutputStream { def write(b: Int): Unit = {} };
}
abstract class HsqlDB(confName: String) extends InternalJdbcTestDB(confName) {
val profile = HsqldbProfile
val jdbcDriver = "org.hsqldb.jdbcDriver"
// Hsqldb has valid "select for update" syntax, but in testing, it either takes a whole table lock or no exclusive
// lock at all, so exclude from ForUpdate testing
override def capabilities = super.capabilities - TestDB.capabilities.selectForUpdateRowLocking
override def localTables(implicit ec: ExecutionContext): DBIO[Vector[String]] =
ResultSetAction[(String,String,String, String)](_.conn.getMetaData().getTables(null, "PUBLIC", null, null)).map { ts =>
ts.map(_._3).sorted
}
override def createDB(): profile.Backend#Database = {
val db = super.createDB()
Await.result(db.run(SimpleJdbcAction(_ => ())), Duration.Inf)
Logger.getLogger("hsqldb.db").setLevel(Level.WARNING)
Logger.getLogger("org.hsqldb").setLevel(Level.WARNING)
db
}
}
| nafg/slick | slick-testkit/src/main/scala/com/typesafe/slick/testkit/util/StandardTestDBs.scala | Scala | bsd-2-clause | 14,546 |
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js Test Suite        **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013, LAMP/EPFL        **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.testsuite.javalib
import scala.reflect.{classTag, ClassTag}
import scala.scalajs.js
import org.scalajs.jasminetest.JasmineTest
object StringBufferTest extends JasmineTest {
def shouldThrow[T: ClassTag](fn: => Unit): Unit = {
try {
fn
expect("exception").toBe("thrown")
} catch {
case e: T =>
case x: Throwable => expect(x.toString).toBe(classTag[T].runtimeClass.getSimpleName)
}
}
describe("java.lang.StringBuffer") {
def newBuf: java.lang.StringBuffer =
new java.lang.StringBuffer
def initBuf(str: String): java.lang.StringBuffer =
new java.lang.StringBuffer(str)
it("should respond to `append`") {
expect(newBuf.append("asdf").toString).toEqual("asdf")
expect(newBuf.append(null: AnyRef).toString).toEqual("null")
expect(newBuf.append(null: String).toString).toEqual("null")
expect(newBuf.append(null: CharSequence,0,2).toString).toEqual("nu")
expect(newBuf.append(js.undefined).toString).toEqual("undefined")
expect(newBuf.append(true).toString).toEqual("true")
expect(newBuf.append('a').toString).toEqual("a")
expect(newBuf.append(Array('a','b','c','d')).toString).toEqual("abcd")
expect(newBuf.append(Array('a','b','c','d'), 1, 2).toString).toEqual("bc")
expect(newBuf.append(4.toByte).toString).toEqual("4")
expect(newBuf.append(304.toShort).toString).toEqual("304")
expect(newBuf.append(100000).toString).toEqual("100000")
expect(newBuf.append(2.5f).toString).toEqual("2.5")
expect(newBuf.append(3.5).toString).toEqual("3.5")
}
it("should respond to `insert`") {
expect(newBuf.insert(0, "asdf").toString).toEqual("asdf")
expect(newBuf.insert(0, null: AnyRef).toString).toEqual("null")
expect(newBuf.insert(0, null: String).toString).toEqual("null")
expect(newBuf.insert(0, null: CharSequence,0,2).toString).toEqual("nu")
expect(newBuf.insert(0, js.undefined).toString).toEqual("undefined")
expect(newBuf.insert(0, true).toString).toEqual("true")
expect(newBuf.insert(0, 'a').toString).toEqual("a")
expect(newBuf.insert(0, Array('a','b','c','d')).toString).toEqual("abcd")
expect(newBuf.insert(0, Array('a','b','c','d'), 1, 2).toString).toEqual("bc")
expect(newBuf.insert(0, 4.toByte).toString).toEqual("4")
expect(newBuf.insert(0, 304.toShort).toString).toEqual("304")
expect(newBuf.insert(0, 100000).toString).toEqual("100000")
expect(newBuf.insert(0, 2.5f).toString).toEqual("2.5")
expect(newBuf.insert(0, 3.5).toString).toEqual("3.5")
expect(initBuf("adef").insert(1, "bc")).toEqual("abcdef")
expect(initBuf("abcd").insert(4, "ef")).toEqual("abcdef")
expect(initBuf("adef").insert(1, Array('b','c'))).toEqual("abcdef")
expect(initBuf("adef").insert(1, initBuf("bc"))).toEqual("abcdef")
expect(initBuf("abef").insert(2, Array('a','b','c','d','e'), 2, 2)).toEqual("abcdef")
expect(initBuf("abef").insert(2, initBuf("abcde"), 2, 4)).toEqual("abcdef")
shouldThrow[StringIndexOutOfBoundsException](initBuf("abcd").insert(5, "whatever"))
shouldThrow[StringIndexOutOfBoundsException](initBuf("abcd").insert(-1, "whatever"))
}
it("should respond to `deleteCharAt`") {
expect(initBuf("0123").deleteCharAt(1).toString).toEqual("023")
expect(initBuf("0123").deleteCharAt(0).toString).toEqual("123")
expect(initBuf("0123").deleteCharAt(3).toString).toEqual("012")
shouldThrow[StringIndexOutOfBoundsException](initBuf("0123").deleteCharAt(-1))
shouldThrow[StringIndexOutOfBoundsException](initBuf("0123").deleteCharAt(4))
}
it("should respond to `replace`") {
expect(initBuf("0123").replace(1,3,"bc").toString).toEqual("0bc3")
expect(initBuf("0123").replace(0,4,"abcd").toString).toEqual("abcd")
expect(initBuf("0123").replace(0,10,"abcd").toString).toEqual("abcd")
expect(initBuf("0123").replace(3,10,"defg").toString).toEqual("012defg")
expect(initBuf("0123").replace(0,1,"xxxx").toString).toEqual("xxxx123")
expect(initBuf("0123").replace(1,1,"xxxx").toString).toEqual("0xxxx123")
shouldThrow[StringIndexOutOfBoundsException](initBuf("0123").replace(-1,3,"x"))
shouldThrow[StringIndexOutOfBoundsException](initBuf("0123").replace(4,5,"x"))
}
it("should respond to `setCharAt`") {
val buf = newBuf
buf.append("foobar")
buf.setCharAt(2, 'x')
expect(buf.toString).toEqual("foxbar")
buf.setCharAt(5, 'h')
expect(buf.toString).toEqual("foxbah")
expect(() => buf.setCharAt(-1, 'h')).toThrow
expect(() => buf.setCharAt(6, 'h')).toThrow
}
it("should properly setLength") {
val buf = newBuf
buf.append("foobar")
expect(() => buf.setLength(-3)).toThrow
expect({ buf.setLength(3); buf.toString }).toEqual("foo")
      expect({ buf.setLength(6); buf.toString }).toEqual("foo\u0000\u0000\u0000")
}
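    // Added note: code points above U+FFFF are stored as UTF-16 surrogate pairs,
    // e.g. U+10000 becomes U+D800 U+DC00 and U+10FFFF becomes U+DBFF U+DFFF,
    // which is what the expectations in the next test encode.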
it("should respond to `appendCodePoint`") {
val buf = newBuf
buf.appendCodePoint(0x61)
expect(buf.toString).toEqual("a")
buf.appendCodePoint(0x10000)
      expect(buf.toString).toEqual("a\uD800\uDC00")
      buf.append("fixture")
      buf.appendCodePoint(0x00010FFFF)
      expect(buf.toString).toEqual("a\uD800\uDC00fixture\uDBFF\uDFFF")
}
}
describe("java.lang.StringBuilder") {
def newBuilder: java.lang.StringBuilder =
new java.lang.StringBuilder
def initBuilder(str: String): java.lang.StringBuilder =
new java.lang.StringBuilder(str)
it("should respond to `append`") {
expect(newBuilder.append("asdf").toString).toEqual("asdf")
expect(newBuilder.append(null: AnyRef).toString).toEqual("null")
expect(newBuilder.append(null: String).toString).toEqual("null")
expect(newBuilder.append(null: CharSequence,0,2).toString).toEqual("nu")
expect(newBuilder.append(js.undefined).toString).toEqual("undefined")
expect(newBuilder.append(true).toString).toEqual("true")
expect(newBuilder.append('a').toString).toEqual("a")
expect(newBuilder.append(Array('a','b','c','d')).toString).toEqual("abcd")
expect(newBuilder.append(Array('a','b','c','d'), 1, 2).toString).toEqual("bc")
expect(newBuilder.append(4.toByte).toString).toEqual("4")
expect(newBuilder.append(304.toShort).toString).toEqual("304")
expect(newBuilder.append(100000).toString).toEqual("100000")
expect(newBuilder.append(2.5f).toString).toEqual("2.5")
expect(newBuilder.append(3.5).toString).toEqual("3.5")
}
it("should respond to `insert`") {
expect(newBuilder.insert(0, "asdf").toString).toEqual("asdf")
expect(newBuilder.insert(0, null: AnyRef).toString).toEqual("null")
expect(newBuilder.insert(0, null: String).toString).toEqual("null")
expect(newBuilder.insert(0, null: CharSequence,0,2).toString).toEqual("nu")
expect(newBuilder.insert(0, js.undefined).toString).toEqual("undefined")
expect(newBuilder.insert(0, true).toString).toEqual("true")
expect(newBuilder.insert(0, 'a').toString).toEqual("a")
expect(newBuilder.insert(0, Array('a','b','c','d')).toString).toEqual("abcd")
expect(newBuilder.insert(0, Array('a','b','c','d'), 1, 2).toString).toEqual("bc")
expect(newBuilder.insert(0, 4.toByte).toString).toEqual("4")
expect(newBuilder.insert(0, 304.toShort).toString).toEqual("304")
expect(newBuilder.insert(0, 100000).toString).toEqual("100000")
expect(newBuilder.insert(0, 2.5f).toString).toEqual("2.5")
expect(newBuilder.insert(0, 3.5).toString).toEqual("3.5")
expect(initBuilder("adef").insert(1, "bc")).toEqual("abcdef")
expect(initBuilder("abcd").insert(4, "ef")).toEqual("abcdef")
expect(initBuilder("adef").insert(1, Array('b','c'))).toEqual("abcdef")
expect(initBuilder("adef").insert(1, initBuilder("bc"))).toEqual("abcdef")
expect(initBuilder("abef").insert(2, Array('a','b','c','d','e'), 2, 2)).toEqual("abcdef")
expect(initBuilder("abef").insert(2, initBuilder("abcde"), 2, 4)).toEqual("abcdef")
shouldThrow[StringIndexOutOfBoundsException](initBuilder("abcd").insert(5, "whatever"))
shouldThrow[StringIndexOutOfBoundsException](initBuilder("abcd").insert(-1, "whatever"))
}
it("should allow string interpolation to survive `null` and `undefined`") {
expect(s"${null}").toEqual("null")
expect(s"${js.undefined}").toEqual("undefined")
}
it("should respond to `deleteCharAt`") {
expect(initBuilder("0123").deleteCharAt(1).toString).toEqual("023")
expect(initBuilder("0123").deleteCharAt(0).toString).toEqual("123")
expect(initBuilder("0123").deleteCharAt(3).toString).toEqual("012")
shouldThrow[StringIndexOutOfBoundsException](initBuilder("0123").deleteCharAt(-1))
shouldThrow[StringIndexOutOfBoundsException](initBuilder("0123").deleteCharAt(4))
}
it("should respond to `replace`") {
expect(initBuilder("0123").replace(1,3,"bc").toString).toEqual("0bc3")
expect(initBuilder("0123").replace(0,4,"abcd").toString).toEqual("abcd")
expect(initBuilder("0123").replace(0,10,"abcd").toString).toEqual("abcd")
expect(initBuilder("0123").replace(3,10,"defg").toString).toEqual("012defg")
expect(initBuilder("0123").replace(0,1,"xxxx").toString).toEqual("xxxx123")
expect(initBuilder("0123").replace(1,1,"xxxx").toString).toEqual("0xxxx123")
shouldThrow[StringIndexOutOfBoundsException](initBuilder("0123").replace(-1,3,"x"))
shouldThrow[StringIndexOutOfBoundsException](initBuilder("0123").replace(4,5,"x"))
}
it("should respond to `setCharAt`") {
val b = newBuilder
b.append("foobar")
b.setCharAt(2, 'x')
expect(b.toString).toEqual("foxbar")
b.setCharAt(5, 'h')
expect(b.toString).toEqual("foxbah")
expect(() => b.setCharAt(-1, 'h')).toThrow
expect(() => b.setCharAt(6, 'h')).toThrow
}
it("should properly setLength") {
val b = newBuilder
b.append("foobar")
expect(() => b.setLength(-3)).toThrow
expect({ b.setLength(3); b.toString }).toEqual("foo")
      expect({ b.setLength(6); b.toString }).toEqual("foo\u0000\u0000\u0000")
}
it("should respond to `appendCodePoint`") {
val b = newBuilder
b.appendCodePoint(0x61)
expect(b.toString).toEqual("a")
b.appendCodePoint(0x10000)
      expect(b.toString).toEqual("a\uD800\uDC00")
      b.append("fixture")
      b.appendCodePoint(0x00010FFFF)
      expect(b.toString).toEqual("a\uD800\uDC00fixture\uDBFF\uDFFF")
}
}
}
| doron123/scala-js | test-suite/src/test/scala/org/scalajs/testsuite/javalib/StringBufferTest.scala | Scala | bsd-3-clause | 11,211 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qsu
package minimizers
import slamdata.Predef._
import quasar.common.effect.NameGenerator
import quasar.qscript.MonadPlannerErr
import scalaz.Monad
trait Minimizer[T[_[_]]] extends QSUTTypes[T] {
import MinimizeAutoJoins.MinStateM
type P
def couldApplyTo(candidates: List[QSUGraph]): Boolean
/** Returns what is essentially a lens focused on the first ancestor of `qgraph`
* _not_ applicable to this `Minimizer`. The function component describes how
* to rebuild the semantic equivalent of `qgraph` given a (possibly) new ancestor
* and `FreeMap` to apply to its output.
*/
def extract[
G[_]: Monad: NameGenerator: MonadPlannerErr: RevIdxM: MinStateM[T, P, ?[_]]](
qgraph: QSUGraph)
: Option[(QSUGraph, (QSUGraph, FreeMap) => G[QSUGraph])]
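  // Added hedged usage sketch (not part of the original API; `g`, `identityFM` and the shape
  // of the driver are hypothetical): a caller such as MinimizeAutoJoins is expected to combine
  // the methods roughly as
  //
  //   if (couldApplyTo(candidates))
  //     extract[G](g) match {
  //       case Some((ancestor, rebuild)) => rebuild(ancestor, identityFM) // rebuilds `g` on the ancestor
  //       case None                      => ...                           // not applicable, leave `g` as-is
  //     }
  //
  // where `identityFM` stands for whatever identity FreeMap the codebase uses.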
/** The first component of the tuple is the rewrite target on any provenance
* association, i.e. the semantic equivalent of the input. The second component
* is the root of the resulting graph.
*/
def apply[
G[_]: Monad: NameGenerator: MonadPlannerErr: RevIdxM: MinStateM[T, P, ?[_]]](
qgraph: QSUGraph,
singleSource: QSUGraph,
candidates: List[QSUGraph],
fm: FreeMapA[Int])
: G[Option[(QSUGraph, QSUGraph)]]
}
object Minimizer {
type Aux[T[_[_]], P0] = Minimizer[T] { type P = P0 }
}
| djspiewak/quasar | qsu/src/main/scala/quasar/qsu/minimizers/Minimizer.scala | Scala | apache-2.0 | 1,942 |
package se.culvertsoft.mgen.compiler.components
import scala.Array.canBuildFrom
import scala.collection.JavaConversions.mapAsJavaMap
import scala.collection.JavaConversions.seqAsJavaList
import scala.collection.mutable.HashMap
import scala.xml.XML.loadFile
import se.culvertsoft.mgen.api.model.Project
import se.culvertsoft.mgen.api.plugins.Parser
import se.culvertsoft.mgen.compiler.util.EnvVarUtils
import se.culvertsoft.mgen.compiler.util.FileUtils
import se.culvertsoft.mgen.compiler.util.XmlUtils.RichXmlNode
import se.culvertsoft.mgen.idlparser.IdlParser
object ParseProject {
def apply(
settings: Map[String, String],
pluginFinder: PluginLoader): Project = {
val projectPath =
settings
.get("project")
.getOrElse(throw new RuntimeException("Missing '-project' cmd line argument"))
val paramPaths = settings.get("include_paths").getOrElse("").split(",")
val envPaths =
if (settings.getOrElse("use_env_vars", "true").toBoolean)
EnvVarUtils.getCommaSeparated("MGEN_INCLUDE_PATHS")
else
Array[String]()
val includePaths = (paramPaths ++ envPaths).distinct
FileUtils.checkiSsFileOrThrow(projectPath)
ParseProject(projectPath, settings, includePaths, null, new HashMap[String, Project], pluginFinder)
}
def apply(
filePath: String,
settings0: Map[String, String],
searchPaths0: Seq[String],
parent: Project,
alreadyParsed: HashMap[String, Project],
pluginFinder: PluginLoader): Project = {
val file = FileUtils.findFile(filePath, searchPaths0)
.getOrElse(throw new RuntimeException(s"Could not find referenced project file: ${filePath}"))
val absoluteFilePath = file.getCanonicalPath()
alreadyParsed.get(absoluteFilePath) match {
case Some(p) => p
case _ =>
println(s" parsing project: ${absoluteFilePath}")
val projectName = FileUtils.removeFileEnding(FileUtils.nameOf(absoluteFilePath))
val projectDir = FileUtils.directoryOf(absoluteFilePath)
val project = new Project(projectName, filePath, file.getAbsolutePath(), parent)
alreadyParsed.put(absoluteFilePath, project)
val searchPaths: Seq[String] = searchPaths0 ++ Seq(projectDir)
// Read in project xml source code
val projectXml = scala.xml.Utility.trim(loadFile(file))
if (projectXml.label.toLowerCase() != "project") {
throw new RuntimeException(s"Tried to load $filePath as project, but it was not a project file!")
}
// Parse settings
val settings = settings0 ++ projectXml.getSettings()
project.setSettings(settings)
// Parse Generators
val generators = (projectXml \\ "Generator") map ParseGenerator.apply
project.setGenerators(generators)
// Parse dependencies
val dependFilePaths = projectXml.getAllNodeContents("Dependency").map(_.toString)
val dependencies = dependFilePaths.map(ParseProject(_, settings, searchPaths, project, alreadyParsed, pluginFinder))
project.setDependencies(dependencies)
// Parse sources
val sourcesNodes = (projectXml \\ "Sources")
sourcesNodes foreach { sourcesNode =>
val sourcesSettings = settings ++ sourcesNode.getStringStringMap()
val parserName = sourcesNode.getAttribString("parser").getOrElse(classOf[IdlParser].getName())
val parser = pluginFinder.getThreadLocal[Parser](parserName).getOrElse(throw new RuntimeException(s"Parser not found: Unknown parser $parserName in project file $absoluteFilePath"))
val sourceFileNames = (sourcesNode \\ "Source") map (_.text)
val files = sourceFileNames.map { fileName =>
FileUtils.findFile(fileName, searchPaths)
.getOrElse(throw new RuntimeException(s"Could not find source file: ${fileName} specified for parser ${parserName} in project ${absoluteFilePath}"))
}
parser.parse(files, sourcesSettings, project)
}
project
}
}
} | culvertsoft/mgen | mgen-compiler/src/main/scala/se/culvertsoft/mgen/compiler/components/ParseProject.scala | Scala | mit | 4,046 |
package mesosphere.marathon
package integration
import mesosphere.AkkaIntegrationTest
import mesosphere.marathon.integration.setup._
import mesosphere.marathon.raml.App
import mesosphere.marathon.state.PathId._
import scala.concurrent.duration._
@SerialIntegrationTest
class GracefulTaskKillIntegrationTest extends AkkaIntegrationTest with EmbeddedMarathonTest {
before {
cleanUp()
}
  // This command simulates a 'long terminating' application.
  // Note: the integration test environment does not interpret symbolic signal names (SIGTERM = 15),
  // so the numeric signal 15 is used below.
val taskKillGraceDuration = 4
val taskKillGracePeriod = taskKillGraceDuration.seconds
  val appCommand: String = s"""trap \"sleep ${taskKillGraceDuration + 1}\" 15 && sleep 100000"""
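  // Added note: `trap "sleep N" 15` installs a SIGTERM (signal 15) handler that sleeps for
  // N = taskKillGraceDuration + 1 seconds, so once a kill is initiated the shell keeps running
  // for about one second longer than the configured task kill grace period.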
"GracefulTaskKilling" should {
"create a 'long terminating' app with custom taskKillGracePeriod duration" in {
Given("a new 'long terminating' app with taskKillGracePeriod set to 10 seconds")
val app = App(
(testBasePath / "app").toString,
cmd = Some(appCommand),
taskKillGracePeriodSeconds = Some(taskKillGracePeriod.toSeconds.toInt))
When("The app is deployed")
val result = marathon.createAppV2(app)
Then("The app is created")
result.code should be(201) //Created
waitForDeployment(result)
waitForTasks(app.id.toPath, 1)
      // make sure the app has really started
val taskId = marathon.tasks(app.id.toPath).value.head.id
When("a task of an app is killed")
val taskKillSentTimestamp = System.currentTimeMillis()
marathon.killTask(app.id.toPath, taskId).code should be(200)
waitForEventWith(
"status_update_event",
_.info("taskStatus") == "TASK_KILLED",
maxWait = taskKillGracePeriod.plus(2.seconds))
val taskKilledReceivedTimestamp = System.currentTimeMillis()
val waitedForTaskKilledEvent = (taskKilledReceivedTimestamp - taskKillSentTimestamp).milliseconds
      // the task_killed event should occur at least taskKillGracePeriod (4 seconds) after the kill was sent
waitedForTaskKilledEvent.toMillis should be >= taskKillGracePeriod.toMillis
}
"create a 'short terminating' app with custom taskKillGracePeriod duration" in {
Given("a new 'short terminating' app with taskKillGracePeriod set to 10 seconds")
val app = App(
(testBasePath / "app").toString,
cmd = Some("sleep 100000"),
taskKillGracePeriodSeconds = Some(taskKillGracePeriod.toSeconds.toInt))
When("The app is deployed")
val result = marathon.createAppV2(app)
Then("The app is created")
result.code should be(201) //Created
waitForDeployment(result)
waitForTasks(app.id.toPath, 1)
      // make sure the app has really started
val taskId = marathon.tasks(app.id.toPath).value.head.id
When("a task of an app is killed")
val taskKillSentTimestamp = System.currentTimeMillis()
marathon.killTask(app.id.toPath, taskId).code should be(200)
waitForEventWith(
"status_update_event",
_.info("taskStatus") == "TASK_KILLED",
maxWait = taskKillGracePeriod.plus(2.seconds))
val taskKilledReceivedTimestamp = System.currentTimeMillis()
val waitedForTaskKilledEvent = (taskKilledReceivedTimestamp - taskKillSentTimestamp).milliseconds
      // the task_killed event should arrive almost immediately, and in any case sooner than
      // taskKillGracePeriod, because the app terminates quickly
waitedForTaskKilledEvent.toMillis should be < taskKillGracePeriod.toMillis
}
}
}
| natemurthy/marathon | src/test/scala/mesosphere/marathon/integration/GracefulTaskKillIntegrationTest.scala | Scala | apache-2.0 | 3,556 |
/* __ *\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \ http://scala-js.org/ **
** /____/\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\* */
package org.scalajs.testsuite.compiler
import scala.scalajs.js
import org.junit.Test
import org.junit.Assert._
import org.junit.Assume._
import org.scalajs.testsuite.utils.Platform._
class OptimizerTest {
import OptimizerTest._
// Inlineable classes
@Test def must_update_fields_of_this_in_the_computation_of_other_fields_issue_1153(): Unit = {
val foo = new InlineClassDependentFields(5)
assertEquals(5, foo.x)
assertTrue(foo.b)
assertEquals(11, foo.y)
}
@Test def must_not_break_code_that_assigns_this_to_a_field(): Unit = {
val foo = new InlineClassThisAlias(5)
assertEquals(5, foo.z)
}
// Optimizer regression tests
@Test def `must_not_break_*_(-1)_for_Int_issue_1453`(): Unit = {
@noinline
def start0: Int = (() => 10) ()
val start = start0
val step = -1
val numRangeElements = start - 1
val lastElement = start + (numRangeElements - 1) * step
assertEquals(2, lastElement)
}
@Test def `must_not_break_*_(-1)_for_Float_and_Double_issue_1478`(): Unit = {
@noinline
def a: Float = (() => 5.0f) ()
assertEquals(-5.0f, a * -1.0f)
@noinline
def b: Double = (() => 7.0) ()
assertEquals(-7.0, b * -1.0)
}
@Test def must_not_break_foreach_on_downward_Range_issue_1453(): Unit = {
@noinline
def start0: Int = (() => 10) ()
val elements = js.Array[Int]()
for (i <- start0 to 2 by -1) {
if (i < 0)
sys.error("Going into infinite loop")
elements.push(i)
}
assertArrayEquals(Array(10, 9, 8, 7, 6, 5, 4, 3, 2), elements.toArray)
}
@Test def must_not_break_classOf_T_eqeq_classOf_U_issue_1658(): Unit = {
assertEquals(classOf[String], classOf[String])
assertEquals(classOf[Int], classOf[Int])
assertEquals(classOf[Array[Int]], classOf[Array[Int]])
assertEquals(classOf[Array[String]], classOf[Array[String]])
assertFalse(classOf[String] == classOf[Int])
assertFalse(classOf[Seq[_]] == classOf[List[_]])
assertFalse(classOf[Array[Int]] == classOf[Array[Integer]])
assertFalse(classOf[Array[Object]] == classOf[Array[Integer]])
assertFalse(classOf[String] == classOf[Array[String]])
assertFalse(classOf[Array[Array[Object]]] == classOf[Array[Object]])
}
// +[string] constant folding
@Test def must_not_break_when_folding_two_constant_strings(): Unit = {
@inline def str: String = "I am "
assertEquals("I am constant", str + "constant")
}
@Test def must_not_break_when_folding_the_empty_string_when_associated_with_a_string(): Unit = {
@noinline def str: String = "hello"
assertEquals("hello", str + "")
assertEquals("hello", "" + str)
}
@Test def `must_not_break_when_folding_1.4f_and_a_stringLit`(): Unit = {
assertEquals("1.399999976158142hello", 1.4f + "hello")
assertEquals("hello1.399999976158142", "hello" + 1.4f)
}
@Test def must_not_break_when_folding_cascading_+[string](): Unit = {
@noinline def str: String = "awesome! 10/10"
assertEquals("Scala.js is awesome! 10/10", "Scala.js" + (" is " + str))
assertEquals("awesome! 10/10 is Scala.js", (str + " is ") + "Scala.js")
}
@Test def must_not_break_when_folding_a_chain_of_+[string](): Unit = {
@inline def b: String = "b"
@inline def d: String = "d"
@inline def f: String = "f"
assertEquals("abcdefg", "a" + b + "c" + d + "e" + f + "g")
}
@Test def must_not_break_when_folding_integer_in_double_and_stringLit(): Unit = {
assertEquals("1hello", 1.0 + "hello")
assertEquals("hello1", "hello" + 1.0)
}
@Test def must_not_break_when_folding_zero_and_stringLit(): Unit = {
assertEquals("0hello", 0.0 + "hello")
assertEquals("hello0", "hello" + 0.0)
assertEquals("0hello", -0.0 + "hello")
assertEquals("hello0", "hello" + (-0.0))
}
@Test def must_not_break_when_folding_Infinities_and_stringLit(): Unit = {
assertEquals("Infinityhello", Double.PositiveInfinity + "hello")
assertEquals("helloInfinity", "hello" + Double.PositiveInfinity)
assertEquals("-Infinityhello", Double.NegativeInfinity + "hello")
assertEquals("hello-Infinity", "hello" + Double.NegativeInfinity)
}
@Test def must_not_break_when_folding_NaN_and_stringLit(): Unit = {
assertEquals("NaNhello", Double.NaN + "hello")
assertEquals("helloNaN", "hello" + Double.NaN)
}
@Test def must_not_break_when_folding_double_with_decimal_and_stringLit(): Unit = {
assumeFalse("Assumed not executing in FullOpt", isInFullOpt)
assertEquals("1.2323919403474454e+21hello", 1.2323919403474454E21 + "hello")
assertEquals("hello1.2323919403474454e+21", "hello" + 1.2323919403474454E21)
}
@Test def must_not_break_when_folding_double_that_JVM_would_print_in_scientific_notation_and_stringLit(): Unit = {
assumeFalse("Assumed not executing in FullOpt", isInFullOpt)
assertEquals("123456789012345hello", 123456789012345d + "hello")
assertEquals("hello123456789012345", "hello" + 123456789012345d)
}
@Test def must_not_break_when_folding_doubles_to_String(): Unit = {
assumeFalse("Assumed not executing in FullOpt", isInFullOpt)
@noinline def toStringNoInline(v: Double): String = v.toString
@inline def test(v: Double): Unit =
assertEquals(toStringNoInline(v), v.toString)
// Special cases
test(0.0)
test(-0.0)
test(Double.NaN)
test(Double.PositiveInfinity)
test(Double.NegativeInfinity)
// k <= n <= 21
test(1.0)
test(12.0)
test(123.0)
test(1234.0)
test(12345.0)
test(123456.0)
test(1234567.0)
test(12345678.0)
test(123456789.0)
test(1234567890.0)
test(12345678901.0)
test(123456789012.0)
test(1234567890123.0)
test(12345678901234.0)
test(123456789012345.0)
test(1234567890123456.0)
test(12345678901234657.0)
test(123456789012345678.0)
test(1234567890123456789.0)
test(12345678901234567890.0)
test(123456789012345678901.0)
// 0 < n <= 21
test(1.42)
test(12.42)
test(123.42)
test(1234.42)
test(12345.42)
test(123456.42)
test(1234567.42)
test(12345678.42)
test(123456789.42)
test(1234567890.42)
test(12345678901.42)
test(123456789012.42)
test(1234567890123.42)
test(12345678901234.42)
test(123456789012345.42)
test(1234567890123456.42)
test(12345678901234657.42)
test(123456789012345678.42)
test(1234567890123456789.42)
test(12345678901234567890.42)
test(123456789012345678901.42)
// -6 < n <= 0
test(0.1)
test(0.01)
test(0.001)
test(0.0001)
test(0.00001)
test(0.000001)
// k == 1
test(1e22)
test(2e25)
test(3e50)
test(4e100)
test(5e200)
test(6e300)
test(7e307)
test(1e-22)
test(2e-25)
test(3e-50)
test(4e-100)
test(5e-200)
test(6e-300)
test(7e-307)
// else
test(1.42e22)
test(2.42e25)
test(3.42e50)
test(4.42e100)
test(5.42e200)
test(6.42e300)
test(7.42e307)
test(1.42e-22)
test(2.42e-25)
test(3.42e-50)
test(4.42e-100)
test(5.42e-200)
test(6.42e-300)
test(7.42e-307)
// special cases when ulp > 1
test(18271179521433728.0)
test(1.15292150460684685E18)
test(1234567890123456770.0)
test(2234567890123456770.0)
test(4234567890123450000.0)
test(149170297077708820000.0)
test(296938164846899230000.0)
test(607681513323520000000.0)
}
@Test def must_not_break_when_folding_long_and_stringLit(): Unit = {
assertEquals("1hello", 1L + "hello")
assertEquals("hello1", "hello" + 1L)
}
@Test def must_not_break_when_folding_integer_and_stringLit(): Unit = {
assertEquals("42hello", 42 + "hello")
assertEquals("hello42", "hello" + 42)
}
@Test def must_not_break_when_folding_boolean_and_stringLit(): Unit = {
assertEquals("false is not true", false + " is not true")
assertEquals("false is not true", "false is not " + true)
}
@Test def must_not_break_when_folding_unit_and_stringLit(): Unit = {
assertEquals("undefined is undefined?", () + " is undefined?")
assertEquals("undefined is undefined", "undefined is " +())
}
@Test def must_not_break_when_folding_null_and_stringLit(): Unit = {
assertEquals("Damien is not null", "Damien is not " + null)
}
@Test def must_not_break_when_folding_char_and_stringLit(): Unit = {
assertEquals("Scala.js", 'S' + "cala.js")
assertEquals("Scala.js", "Scala.j" + 's')
}
}
object OptimizerTest {
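  // Added note: these classes are deliberately @inline so the optimizer inline-expands their
  // constructors; `InlineClassDependentFields` computes `b` and `y` from fields of `this` that
  // were assigned just before (issue #1153), and `InlineClassThisAlias` stores `this` in a
  // field and reads `y` back through that alias (see the corresponding tests above).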
@inline
class InlineClassDependentFields(val x: Int) {
val b = x > 3
val y = if (b) x + 6 else x-2
}
@inline
class InlineClassThisAlias(val x: Int) {
val t = this
val y = x
val z = t.y
}
}
| japgolly/scala-js | test-suite/js/src/test/scala/org/scalajs/testsuite/compiler/OptimizerTest.scala | Scala | bsd-3-clause | 9,236 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.execution.command.ExplainCommand
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.test.SQLTestData._
private case class FunctionResult(f1: String, f2: String)
class UDFSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("built-in fixed arity expressions") {
val df = spark.emptyDataFrame
df.selectExpr("rand()", "randn()", "rand(5)", "randn(50)")
}
test("built-in vararg expressions") {
val df = Seq((1, 2)).toDF("a", "b")
df.selectExpr("array(a, b)")
df.selectExpr("struct(a, b)")
}
test("built-in expressions with multiple constructors") {
val df = Seq(("abcd", 2)).toDF("a", "b")
df.selectExpr("substr(a, 2)", "substr(a, 2, 3)").collect()
}
test("count") {
val df = Seq(("abcd", 2)).toDF("a", "b")
df.selectExpr("count(a)")
}
test("count distinct") {
val df = Seq(("abcd", 2)).toDF("a", "b")
df.selectExpr("count(distinct a)")
}
test("SPARK-8003 spark_partition_id") {
val df = Seq((1, "Tearing down the walls that divide us")).toDF("id", "saying")
df.createOrReplaceTempView("tmp_table")
checkAnswer(sql("select spark_partition_id() from tmp_table").toDF(), Row(0))
spark.catalog.dropTempView("tmp_table")
}
test("SPARK-8005 input_file_name") {
withTempPath { dir =>
val data = sparkContext.parallelize(0 to 10, 2).toDF("id")
data.write.parquet(dir.getCanonicalPath)
spark.read.parquet(dir.getCanonicalPath).createOrReplaceTempView("test_table")
val answer = sql("select input_file_name() from test_table").head().getString(0)
assert(answer.contains(dir.toURI.getPath))
assert(sql("select input_file_name() from test_table").distinct().collect().length >= 2)
spark.catalog.dropTempView("test_table")
}
}
test("error reporting for incorrect number of arguments") {
val df = spark.emptyDataFrame
val e = intercept[AnalysisException] {
df.selectExpr("substr('abcd', 2, 3, 4)")
}
assert(e.getMessage.contains("arguments"))
}
test("error reporting for undefined functions") {
val df = spark.emptyDataFrame
val e = intercept[AnalysisException] {
df.selectExpr("a_function_that_does_not_exist()")
}
assert(e.getMessage.contains("Undefined function"))
assert(e.getMessage.contains("a_function_that_does_not_exist"))
}
test("Simple UDF") {
spark.udf.register("strLenScala", (_: String).length)
assert(sql("SELECT strLenScala('test')").head().getInt(0) === 4)
}
test("UDF defined using UserDefinedFunction") {
import functions.udf
val foo = udf((x: Int) => x + 1)
spark.udf.register("foo", foo)
assert(sql("select foo(5)").head().getInt(0) == 6)
}
test("ZeroArgument UDF") {
spark.udf.register("random0", () => { Math.random()})
assert(sql("SELECT random0()").head().getDouble(0) >= 0.0)
}
test("TwoArgument UDF") {
spark.udf.register("strLenScala", (_: String).length + (_: Int))
assert(sql("SELECT strLenScala('test', 1)").head().getInt(0) === 5)
}
test("UDF in a WHERE") {
spark.udf.register("oneArgFilter", (n: Int) => { n > 80 })
val df = sparkContext.parallelize(
(1 to 100).map(i => TestData(i, i.toString))).toDF()
df.createOrReplaceTempView("integerData")
val result =
sql("SELECT * FROM integerData WHERE oneArgFilter(key)")
assert(result.count() === 20)
}
test("UDF in a HAVING") {
spark.udf.register("havingFilter", (n: Long) => { n > 5 })
val df = Seq(("red", 1), ("red", 2), ("blue", 10),
("green", 100), ("green", 200)).toDF("g", "v")
df.createOrReplaceTempView("groupData")
val result =
sql(
"""
| SELECT g, SUM(v) as s
| FROM groupData
| GROUP BY g
| HAVING havingFilter(s)
""".stripMargin)
assert(result.count() === 2)
}
test("UDF in a GROUP BY") {
spark.udf.register("groupFunction", (n: Int) => { n > 10 })
val df = Seq(("red", 1), ("red", 2), ("blue", 10),
("green", 100), ("green", 200)).toDF("g", "v")
df.createOrReplaceTempView("groupData")
val result =
sql(
"""
| SELECT SUM(v)
| FROM groupData
| GROUP BY groupFunction(v)
""".stripMargin)
assert(result.count() === 2)
}
test("UDFs everywhere") {
spark.udf.register("groupFunction", (n: Int) => { n > 10 })
spark.udf.register("havingFilter", (n: Long) => { n > 2000 })
spark.udf.register("whereFilter", (n: Int) => { n < 150 })
spark.udf.register("timesHundred", (n: Long) => { n * 100 })
val df = Seq(("red", 1), ("red", 2), ("blue", 10),
("green", 100), ("green", 200)).toDF("g", "v")
df.createOrReplaceTempView("groupData")
val result =
sql(
"""
| SELECT timesHundred(SUM(v)) as v100
| FROM groupData
| WHERE whereFilter(v)
| GROUP BY groupFunction(v)
| HAVING havingFilter(v100)
""".stripMargin)
assert(result.count() === 1)
}
test("struct UDF") {
spark.udf.register("returnStruct", (f1: String, f2: String) => FunctionResult(f1, f2))
val result =
sql("SELECT returnStruct('test', 'test2') as ret")
.select($"ret.f1").head().getString(0)
assert(result === "test")
}
test("udf that is transformed") {
spark.udf.register("makeStruct", (x: Int, y: Int) => (x, y))
// 1 + 1 is constant folded causing a transformation.
assert(sql("SELECT makeStruct(1 + 1, 2)").first().getAs[Row](0) === Row(2, 2))
}
test("type coercion for udf inputs") {
spark.udf.register("intExpected", (x: Int) => x)
// pass a decimal to intExpected.
assert(sql("SELECT intExpected(1.0)").head().getInt(0) === 1)
}
test("udf in different types") {
spark.udf.register("testDataFunc", (n: Int, s: String) => { (n, s) })
spark.udf.register("decimalDataFunc",
(a: java.math.BigDecimal, b: java.math.BigDecimal) => { (a, b) })
spark.udf.register("binaryDataFunc", (a: Array[Byte], b: Int) => { (a, b) })
spark.udf.register("arrayDataFunc",
(data: Seq[Int], nestedData: Seq[Seq[Int]]) => { (data, nestedData) })
spark.udf.register("mapDataFunc",
(data: scala.collection.Map[Int, String]) => { data })
spark.udf.register("complexDataFunc",
(m: Map[String, Int], a: Seq[Int], b: Boolean) => { (m, a, b) } )
checkAnswer(
sql("SELECT tmp.t.* FROM (SELECT testDataFunc(key, value) AS t from testData) tmp").toDF(),
testData)
checkAnswer(
sql("""
| SELECT tmp.t.* FROM
| (SELECT decimalDataFunc(a, b) AS t FROM decimalData) tmp
""".stripMargin).toDF(), decimalData)
checkAnswer(
sql("""
| SELECT tmp.t.* FROM
| (SELECT binaryDataFunc(a, b) AS t FROM binaryData) tmp
""".stripMargin).toDF(), binaryData)
checkAnswer(
sql("""
| SELECT tmp.t.* FROM
| (SELECT arrayDataFunc(data, nestedData) AS t FROM arrayData) tmp
""".stripMargin).toDF(), arrayData.toDF())
checkAnswer(
sql("""
| SELECT mapDataFunc(data) AS t FROM mapData
""".stripMargin).toDF(), mapData.toDF())
checkAnswer(
sql("""
| SELECT tmp.t.* FROM
| (SELECT complexDataFunc(m, a, b) AS t FROM complexData) tmp
""".stripMargin).toDF(), complexData.select("m", "a", "b"))
}
test("SPARK-11716 UDFRegistration does not include the input data type in returned UDF") {
val myUDF = spark.udf.register("testDataFunc", (n: Int, s: String) => { (n, s.toInt) })
    // Without the fix, this will fail because we fail to cast the data type of b to string,
    // since myUDF does not know its input data type. With the fix, this query should not
    // fail.
checkAnswer(
testData2.select(myUDF($"a", $"b").as("t")),
testData2.selectExpr("struct(a, b)"))
checkAnswer(
sql("SELECT tmp.t.* FROM (SELECT testDataFunc(a, b) AS t from testData2) tmp").toDF(),
testData2)
}
test("SPARK-19338 Provide identical names for UDFs in the EXPLAIN output") {
def explainStr(df: DataFrame): String = {
val explain = ExplainCommand(df.queryExecution.logical, extended = false)
val sparkPlan = spark.sessionState.executePlan(explain).executedPlan
sparkPlan.executeCollect().map(_.getString(0).trim).headOption.getOrElse("")
}
val udf1 = "myUdf1"
val udf2 = "myUdf2"
spark.udf.register(udf1, (n: Int) => { n + 1 })
spark.udf.register(udf2, (n: Int) => { n * 1 })
assert(explainStr(sql("SELECT myUdf1(myUdf2(1))")).contains(s"UDF:$udf1(UDF:$udf2(1))"))
}
}
| wangyixiaohuihui/spark2-annotation | sql/core/src/test/scala/org/apache/spark/sql/UDFSuite.scala | Scala | apache-2.0 | 9,815 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qsu
import quasar.{Qspec, TreeMatchers}
import quasar.IdStatus.ExcludeId
import quasar.contrib.iota._
import quasar.contrib.matryoshka._
import quasar.contrib.pathy.AFile
import quasar.ejson.{EJson, Fixed}
import quasar.ejson.implicits._
import quasar.fp._
import quasar.qscript.construction
import matryoshka._
import matryoshka.data._
import pathy.Path._
object CoalesceUnaryMappableSpec extends Qspec with QSUTTypes[Fix] with TreeMatchers {
import QSUGraph.Extractors._
val qsu = QScriptUniform.DslT[Fix]
val rec = construction.RecFunc[Fix]
val mf = construction.Func[Fix]
val J = Fixed[Fix[EJson]]
val dataA: AFile = rootDir </> file("dataA")
val dataB: AFile = rootDir </> file("dataB")
val coalesce = CoalesceUnaryMappable[Fix] _
"coalescing mappable regions having a single root" should {
"be the identity for a Map applied to a non-mappable root" >> {
val g = QSUGraph.fromTree[Fix](
qsu.map(
qsu.read(dataA, ExcludeId),
rec.ProjectKeyS(rec.Hole, "A")))
coalesce(g) must beLike {
case Map(Read(p, ExcludeId), f) =>
val exp = mf.ProjectKeyS(mf.Hole, "A")
p must_= dataA
f.linearize must beTreeEqual(exp)
}
}
"compose the functions of adjacent Map nodes" >> {
val g = QSUGraph.fromTree[Fix](
qsu.map(
qsu.map(
qsu.map(
qsu.read(dataA, ExcludeId),
rec.ProjectKeyS(rec.Hole, "X")),
rec.ProjectKeyS(rec.Hole, "Y")),
rec.ProjectKeyS(rec.Hole, "Z")))
coalesce(g) must beLike {
case Map(Read(p, ExcludeId), f) =>
val exp =
mf.ProjectKeyS(mf.ProjectKeyS(mf.ProjectKeyS(mf.Hole, "X"), "Y"), "Z")
p must_= dataA
f.linearize must beTreeEqual(exp)
}
}
"coalesce map nodes in AutoJoin2" >> {
val g = QSUGraph.fromTree[Fix](
qsu._autojoin2(
qsu.map(
qsu.map(
qsu.read(dataA, ExcludeId),
rec.ProjectKeyS(rec.Hole, "X")),
rec.MakeMapS("A", rec.Hole)),
qsu.map(
qsu.map(
qsu.read(dataB, ExcludeId),
rec.ProjectKeyS(rec.Hole, "Y")),
rec.MakeMapS("B", rec.Hole)),
mf.ConcatMaps(mf.LeftSide, mf.RightSide)))
coalesce(g) must beLike {
case AutoJoin2(Read(l, ExcludeId), Read(r, ExcludeId), f) =>
val exp =
mf.ConcatMaps(
mf.MakeMapS("A", mf.ProjectKeyS(mf.LeftSide, "X")),
mf.MakeMapS("B", mf.ProjectKeyS(mf.RightSide, "Y")))
l must_= dataA
r must_= dataB
f must beTreeEqual(exp)
}
}
"coalesce map nodes in AutoJoin3" >> {
val g = QSUGraph.fromTree[Fix](
qsu._autojoin3(
qsu.map(
qsu.read(dataA, ExcludeId),
rec.ProjectKeyS(rec.Hole, "X")),
qsu.map(
qsu.read(dataB, ExcludeId),
rec.ProjectKeyS(rec.Hole, "Y")),
qsu.read(dataA, ExcludeId),
mf.Cond(mf.LeftSide3, mf.RightSide3, mf.Center)))
coalesce(g) must beLike {
case AutoJoin3(Read(l, ExcludeId), Read(c, ExcludeId), Read(r, ExcludeId), f) =>
val exp =
mf.Cond(
mf.ProjectKeyS(mf.LeftSide3, "X"),
mf.RightSide3,
mf.ProjectKeyS(mf.Center, "Y"))
l must_= dataA
c must_= dataB
r must_= dataA
f must beTreeEqual(exp)
}
}
}
}
| slamdata/quasar | qsu/src/test/scala/quasar/qsu/CoalesceUnaryMappableSpec.scala | Scala | apache-2.0 | 4,155 |