code (stringlengths, 5-1M) | repo_name (stringlengths, 5-109) | path (stringlengths, 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M) |
---|---|---|---|---|---|
package org.wandledi.scala
import org.wandledi.Scroll
import org.wandledi.Selector
trait SelectableElement extends org.wandledi.SelectableElement with Element with Selectable
object SelectableElement {
def apply(selector: Selector, parentScroll: Scroll, localScroll: Scroll) =
new SelectableElementImpl(selector, parentScroll, localScroll)
}
| machisuji/Wandledi | scala-lib/src/main/scala/org/wandledi/scala/SelectableElement.scala | Scala | mit | 351 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.orc
import org.apache.orc.TypeDescription
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.types.StructType
private[sql] object OrcFileFormat {
private def checkFieldName(name: String): Unit = {
try {
TypeDescription.fromString(s"struct<$name:int>")
} catch {
case _: IllegalArgumentException =>
throw new AnalysisException(
s"""Column name "$name" contains invalid character(s).
|Please use alias to rename it.
""".stripMargin.split("\\n").mkString(" ").trim)
}
}
def checkFieldNames(schema: StructType): StructType = {
schema.fieldNames.foreach(checkFieldName)
schema
}
}
| minixalpha/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/orc/OrcFileFormat.scala | Scala | apache-2.0 | 1,537 |
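For illustration only (not part of Spark), the following sketch shows what the field-name check above accepts and rejects. It assumes it is compiled inside the same `org.apache.spark.sql.execution.datasources.orc` package so that the `private[sql]` object is visible, and that a comma is one of the characters the ORC type parser rejects.

// Hypothetical usage sketch, not part of Spark.
package org.apache.spark.sql.execution.datasources.orc

import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}

object OrcFieldNameCheckExample {
  def main(args: Array[String]): Unit = {
    // A plain field name parses as struct<plain_name:int> and the schema is returned unchanged
    val ok = StructType(Seq(StructField("plain_name", IntegerType)))
    OrcFileFormat.checkFieldNames(ok)

    // A name that breaks the struct<...> syntax (assumed here: a comma) is rejected
    val bad = StructType(Seq(StructField("a,b", IntegerType)))
    try OrcFileFormat.checkFieldNames(bad) catch {
      case e: AnalysisException => println(e.getMessage)
    }
  }
}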
/******************************************************************************
* Copyright © 2016 Maxim Karpov *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); *
* you may not use this file except in compliance with the License. *
* You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
******************************************************************************/
package ru.makkarpov.scalingua.play
import play.api.mvc.RequestHeader
import play.twirl.api.Html
import ru.makkarpov.scalingua._
import scala.language.experimental.macros
object PlayUtils {
class StringInterpolator(val sc: StringContext) extends AnyVal {
def h(args: Any*)(implicit lang: Language, outputFormat: OutputFormat[Html]): Html =
macro Macros.interpolate[Html]
def lh(args: Any*)(implicit outputFormat: OutputFormat[Html]): LValue[Html] =
macro Macros.lazyInterpolate[Html]
def ph(args: Any*)(implicit lang: Language, outputFormat: OutputFormat[Html]): Html =
macro Macros.pluralInterpolate[Html]
def lph(args: Any*)(implicit outputFormat: OutputFormat[Html]): LValue[Html] =
macro Macros.lazyPluralInterpolate[Html]
}
// Modified to match only correct doubles
private val qPattern = ";\\s*q=((?:[0-9]+\\.)?[0-9]+)".r
def languageFromAccept(accept: String)(implicit msg: Messages): Language = {
val langs = (for {
value0 <- accept.split(',')
value = value0.trim
} yield {
qPattern.findFirstMatchIn(value) match {
case Some(m) => (BigDecimal(m.group(1)), m.before.toString)
case None => (BigDecimal(1.0), value) // "The default value is q=1."
}
}).sortBy(-_._1).iterator
while (langs.hasNext) {
val (_, id) = langs.next()
val lng = LanguageId.get(id)
if (lng.isDefined && msg.contains(lng.get))
return msg(lng.get)
}
Language.English
}
}
| makkarpov/scalingua | play/src/main/scala/ru/makkarpov/scalingua/play/PlayUtils.scala | Scala | apache-2.0 | 2,698 |
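As a rough sketch of how the helper above might be used from a Play action (not part of Scalingua), the snippet below resolves a `Language` from the request's Accept-Language header; the implicit `Messages` instance is assumed to be loaded elsewhere in the application.

// Sketch only: the Messages instance is an assumption, not defined in this file.
import play.api.mvc.RequestHeader
import ru.makkarpov.scalingua.{Language, Messages}
import ru.makkarpov.scalingua.play.PlayUtils

object LanguageResolution {
  // e.g. "ru,en;q=0.8" resolves to Russian if the loaded Messages contain it
  def resolve(request: RequestHeader)(implicit messages: Messages): Language =
    request.headers.get("Accept-Language") match {
      case Some(accept) => PlayUtils.languageFromAccept(accept)
      case None         => Language.English
    }
}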
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.filter
import scala.collection.GenTraversableOnce
/**
* Holds values extracted from a filter. Values may be empty, in which case nothing was extracted from
* the filter. May be marked as 'disjoint', which means that mutually exclusive values were extracted
* from the filter. This may be checked to short-circuit queries that will not result in any hits.
*
* @param values values extracted from the filter. If nothing was extracted, will be empty
* @param precise whether the values exactly match the filter; if false, they may include false positives
* @param disjoint mutually exclusive values were extracted, e.g. 'a < 1 && a > 2'
* @tparam T type parameter
*/
case class FilterValues[+T](values: Seq[T], precise: Boolean = true, disjoint: Boolean = false) {
def map[U](f: T => U): FilterValues[U] = FilterValues(values.map(f), precise, disjoint)
def flatMap[U](f: T => GenTraversableOnce[U]): FilterValues[U] = FilterValues(values.flatMap(f), precise, disjoint)
def foreach[U](f: T => U): Unit = values.foreach(f)
def forall(p: T => Boolean): Boolean = values.forall(p)
def exists(p: T => Boolean): Boolean = values.exists(p)
def filter(f: T => Boolean): FilterValues[T] = FilterValues(values.filter(f), precise, disjoint)
def nonEmpty: Boolean = values.nonEmpty || disjoint
def isEmpty: Boolean = !nonEmpty
}
object FilterValues {
def empty[T]: FilterValues[T] = FilterValues[T](Seq.empty)
def disjoint[T]: FilterValues[T] = FilterValues[T](Seq.empty, disjoint = true)
def or[T](join: (Seq[T], Seq[T]) => Seq[T])(left: FilterValues[T], right: FilterValues[T]): FilterValues[T] = {
(left.disjoint, right.disjoint) match {
case (false, false) => FilterValues(join(left.values, right.values), left.precise && right.precise)
case (false, true) => left
case (true, false) => right
case (true, true) => FilterValues.disjoint
}
}
def and[T](intersect: (T, T) => Option[T])(left: FilterValues[T], right: FilterValues[T]): FilterValues[T] = {
if (left.disjoint || right.disjoint) {
FilterValues.disjoint
} else {
val intersections = left.values.flatMap(v => right.values.flatMap(intersect(_, v)))
if (intersections.isEmpty) {
FilterValues.disjoint
} else {
FilterValues(intersections, left.precise && right.precise)
}
}
}
}
| aheyne/geomesa | geomesa-filter/src/main/scala/org/locationtech/geomesa/filter/FilterValues.scala | Scala | apache-2.0 | 2,840 |
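To make the `and`/`or` combinators above concrete, here is a small illustrative sketch (not GeoMesa code); the inclusive integer bounds and the `intersect` helper are assumptions chosen just for this example.

// Illustrative only: combining extracted bounds with FilterValues.and / FilterValues.or.
import org.locationtech.geomesa.filter.FilterValues

object FilterValuesExample {
  type Bounds = (Int, Int) // inclusive lower/upper bound, e.g. extracted from 'x >= 0 AND x <= 10'

  // intersect two bounds, or None if they are mutually exclusive
  private def intersect(l: Bounds, r: Bounds): Option[Bounds] = {
    val (lo, hi) = (math.max(l._1, r._1), math.min(l._2, r._2))
    if (lo <= hi) Some((lo, hi)) else None
  }

  def main(args: Array[String]): Unit = {
    val a = FilterValues(Seq((0, 10)))
    val b = FilterValues(Seq((5, 20)))

    println(FilterValues.and(intersect)(a, b).values)     // List((5,10))
    println(FilterValues.or[Bounds](_ ++ _)(a, b).values) // List((0,10), (5,20))

    // a contradiction like 'x < 1 AND x > 2' yields a disjoint result
    val none = FilterValues.and(intersect)(FilterValues(Seq((0, 1))), FilterValues(Seq((3, 4))))
    println((none.disjoint, none.nonEmpty)) // (true,true): marked disjoint rather than empty
  }
}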
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
import org.apache.spark.sql.connector.catalog.Identifier
import org.apache.spark.sql.types.StructType
/**
* Thrown by a catalog when an item already exists. The analyzer will rethrow the exception
* as an [[org.apache.spark.sql.AnalysisException]] with the correct position information.
*/
class DatabaseAlreadyExistsException(db: String)
extends NamespaceAlreadyExistsException(s"Database '$db' already exists")
class NamespaceAlreadyExistsException(message: String) extends AnalysisException(message) {
def this(namespace: Array[String]) = {
this(s"Namespace '${namespace.quoted}' already exists")
}
}
class TableAlreadyExistsException(message: String) extends AnalysisException(message) {
def this(db: String, table: String) = {
this(s"Table or view '$table' already exists in database '$db'")
}
def this(tableIdent: Identifier) = {
this(s"Table ${tableIdent.quoted} already exists")
}
}
class TempTableAlreadyExistsException(table: String)
extends TableAlreadyExistsException(s"Temporary view '$table' already exists")
class PartitionAlreadyExistsException(message: String) extends AnalysisException(message) {
def this(db: String, table: String, spec: TablePartitionSpec) = {
this(s"Partition already exists in table '$table' database '$db':\n" + spec.mkString("\n"))
}
def this(tableName: String, partitionIdent: InternalRow, partitionSchema: StructType) = {
this(s"Partition already exists in table $tableName:" +
partitionIdent.toSeq(partitionSchema).zip(partitionSchema.map(_.name))
.map( kv => s"${kv._1} -> ${kv._2}").mkString(","))
}
}
class PartitionsAlreadyExistException(message: String) extends AnalysisException(message) {
def this(db: String, table: String, specs: Seq[TablePartitionSpec]) = {
this(s"The following partitions already exist in table '$table' database '$db':\n"
+ specs.mkString("\n===\n"))
}
def this(tableName: String, partitionIdents: Seq[InternalRow], partitionSchema: StructType) = {
this(s"The following partitions already exist in table $tableName:" +
partitionIdents.map(_.toSeq(partitionSchema).zip(partitionSchema.map(_.name))
.map( kv => s"${kv._1} -> ${kv._2}").mkString(",")).mkString("\n===\n"))
}
}
class FunctionAlreadyExistsException(db: String, func: String)
extends AnalysisException(s"Function '$func' already exists in database '$db'")
| rednaxelafx/apache-spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/AlreadyExistException.scala | Scala | apache-2.0 | 3,488 |
/*
* Copyright (c) 2011 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
/**
* Conversions between `Tuples` and `HLists`.
*
* The implicit defined by this object enhances `Tuples` (currently up to arity 4) with an `hlisted` method which
* constructs an equivalently typed [[shapeless.HList]]. This object also provides higher ranked functions for
* conversion between `Tuples` and `HLists`.
*
* @author Miles Sabin
*/
object Tuples {
trait TupleOps[L <: HList] {
def hlisted : L
}
implicit def tupleOps[T <: Product](t : T)(implicit hlister : HLister[T]) = new TupleOps[hlister.Out] {
def hlisted = hlister(t)
}
/**
* Higher ranked function which converts `Tuples` to `HLists`.
*/
object hlisted {
def apply[T <: Product](t : T)(implicit hlister : HLister[T]) : hlister.Out = hlister(t)
}
implicit def hlisted1[T <: Product](implicit hlister : HLister[T]) =
new Case[hlisted.type, T => hlister.Out](hlister.apply(_))
/**
* Monomorphic instantiator for [[shapeless.Tuples.hlisted]].
*/
implicit def univInstHListed[F, G](h : hlisted.type)(implicit c : Case[hlisted.type, F => G]) : F => G = c.value
/**
* Higher ranked function which converts `HLists` to `Tuples`.
*/
object tupled {
def apply[L <: HList](l : L)(implicit tupler : Tupler[L]) : tupler.Out = tupler(l)
}
implicit def tupled1[L <: HList](implicit tupler : Tupler[L]) =
new Case[tupled.type, L => tupler.Out](tupler.apply(_))
/**
* Monomorphic instantiator for [[shapeless.Tuples.tupled]].
*/
implicit def univInstTupled[F, G](t : tupled.type)(implicit c : Case[tupled.type, F => G]) : F => G = c.value
}
/**
* Type class supporting conversion of `Tuples` to `HLists`.
*
* @author Miles Sabin
*/
trait HLister[-T <: Product] {
type Out <: HList
def apply(t : T) : Out
}
trait HListerAux[-T <: Product, Out <: HList] {
def apply(t : T) : Out
}
/**
* `HLister` type class instances.
*
* @author Miles Sabin
*/
object HLister {
implicit def hlister[T <: Product, Out0 <: HList](implicit hlister : HListerAux[T, Out0]) = new HLister[T] {
type Out = Out0
def apply(t : T) : Out = hlister(t)
}
}
object HListerAux extends HListerAuxInstances
/**
* Conversions between ordinary functions and `HList` functions.
*
* The implicits defined by this object enhance ordinary functions (resp. HList functions) with an `hlisted` (resp.
* `unhlisted`) method which creates an equivalently typed `HList` function (resp. ordinary function).
*
* @author Miles Sabin
*/
object Functions {
trait FnHListOps[HLFn] {
def hlisted : HLFn
}
implicit def fnHListOps[F](t : F)(implicit fnHLister : FnHLister[F]) = new FnHListOps[fnHLister.Out] {
def hlisted = fnHLister(t)
}
trait FnUnHListOps[F] {
def unhlisted : F
}
implicit def fnUnHListOps[F](t : F)(implicit fnUnHLister : FnUnHLister[F]) = new FnUnHListOps[fnUnHLister.Out] {
def unhlisted = fnUnHLister(t)
}
}
/**
* Type class supporting conversion of arbitrary functions (currently up to arity 4) to functions of a single `HList`
* argument.
*
* @author Miles Sabin
*/
trait FnHLister[F] {
type Out
def apply(f : F) : Out
}
trait FnHListerAux[F, Out] {
def apply(f : F) : Out
}
/**
* `FnHLister` type class instances.
*
* @author Miles Sabin
*/
object FnHLister {
implicit def fnHLister[F, Out0](implicit fnHLister : FnHListerAux[F, Out0]) = new FnHLister[F] {
type Out = Out0
def apply(f : F) : Out = fnHLister(f)
}
}
object FnHListerAux extends FnHListerAuxInstances
/**
* Type class supporting conversion of functions of a single `HList` argument to ordinary functions (currently up to
* arity 4).
*
* @author Miles Sabin
*/
trait FnUnHLister[F] {
type Out
def apply(f : F) : Out
}
trait FnUnHListerAux[F, Out] {
def apply(f : F) : Out
}
/**
* `FnUnHLister` type class instances.
*
* @author Miles Sabin
*/
object FnUnHLister {
implicit def fnUnHLister[F, Out0](implicit fnUnHLister : FnUnHListerAux[F, Out0]) = new FnUnHLister[F] {
type Out = Out0
def apply(f : F) : Out = fnUnHLister(f)
}
}
object FnUnHListerAux extends FnUnHListerAuxInstances
/**
* Conversions between `Traversables` and `HLists`.
*
* The implicit defined by this object enhances `Traversables` with a `toHList` method which constructs an equivalently
* typed [[shapeless.HList]] if possible.
*
* @author Miles Sabin
*/
object Traversables {
trait TraversableOps[T] {
def toHList[L <: HList](implicit fl : FromTraversable[T, L]) : Option[L]
}
implicit def traversableOps[T](l : Traversable[T]) = new TraversableOps[T] {
def toHList[L <: HList](implicit fl : FromTraversable[T, L]) = fl(l)
}
}
/**
* Type class supporting type safe conversion of `Traversables` to `HLists`.
*
* @author Miles Sabin
*/
trait FromTraversable[T, Out <: HList] {
def apply(l : Traversable[T]) : Option[Out]
}
/**
* `FromTraversable` type class instances.
*
* @author Miles Sabin
*/
object FromTraversable {
import Typeable._
implicit def hnilFromTraversable[T] = new FromTraversable[T, HNil] {
def apply(l : Traversable[T]) = l match {
case Nil => Some(HNil)
case _ => None
}
}
implicit def hlistFromTraversable[T, OutH, OutT <: HList]
(implicit flt : FromTraversable[T, OutT], oc : Typeable[OutH]) = new FromTraversable[T, OutH :: OutT] {
def apply(l : Traversable[T]) : Option[OutH :: OutT] =
for(e <- l.headOption; h <- e.cast[OutH]; t <- flt(l.tail)) yield h :: t
}
} | retronym/shapeless | src/main/scala/shapeless/conversions.scala | Scala | apache-2.0 | 6,124 |
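A minimal usage sketch of the conversions described in the comments above, written against this old (2011-era) shapeless API; it assumes the generated `HListerAux`/`Tupler` instances referenced in this file are available on the classpath, so treat it as an approximation rather than guaranteed-compiling code.

// Sketch only, for the pre-2.0 API defined in this file.
import shapeless._
import shapeless.Tuples._

object ConversionsExample extends App {
  // Tuple -> HList via the hlisted enhancement
  val hl = (23, "foo").hlisted               // an HList equivalent to the tuple: 23 :: "foo" :: HNil
  println(hl)

  // HList -> Tuple via the higher ranked tupled function
  val back = Tuples.tupled(1 :: "a" :: HNil) // (1, "a")
  println(back)
}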
package org.openapitools.client.model
case class GithubRepositories (
_class: Option[String],
_links: Option[GithubRepositorieslinks],
_items: Option[List[GithubRepository]],
_lastPage: Option[Integer],
_nextPage: Option[Integer],
_pageSize: Option[Integer]
)
object GithubRepositories {
def toStringBody(var_class: Object, var_links: Object, var_items: Object, var_lastPage: Object, var_nextPage: Object, var_pageSize: Object) =
s"""
| {
| "class":$var_class,"links":$var_links,"items":$var_items,"lastPage":$var_lastPage,"nextPage":$var_nextPage,"pageSize":$var_pageSize
| }
""".stripMargin
}
| cliffano/swaggy-jenkins | clients/scala-gatling/generated/src/gatling/scala/org/openapitools/client/model/GithubRepositories.scala | Scala | mit | 666 |
package spark
import java.io.{File, FileOutputStream}
import java.net.{URI, URL, URLClassLoader}
import java.util.concurrent._
import scala.actors.remote.RemoteActor
import scala.collection.mutable.ArrayBuffer
import com.google.protobuf.ByteString
import org.apache.mesos._
import org.apache.mesos.Protos._
import spark.broadcast._
/**
* The Mesos executor for Spark.
*/
class Executor extends org.apache.mesos.Executor with Logging {
var classLoader: ClassLoader = null
var threadPool: ExecutorService = null
var env: SparkEnv = null
initLogging()
override def init(d: ExecutorDriver, args: ExecutorArgs) {
// Read spark.* system properties from executor arg
val props = Utils.deserialize[Array[(String, String)]](args.getData.toByteArray)
for ((key, value) <- props) {
System.setProperty(key, value)
}
// Make sure an appropriate class loader is set for remote actors
RemoteActor.classLoader = getClass.getClassLoader
// Initialize Spark environment (using system properties read above)
env = SparkEnv.createFromSystemProperties(false)
SparkEnv.set(env)
// Old stuff that isn't yet using env
Broadcast.initialize(false)
// Create our ClassLoader (using spark properties) and set it on this thread
classLoader = createClassLoader()
Thread.currentThread.setContextClassLoader(classLoader)
// Start worker thread pool
threadPool = new ThreadPoolExecutor(
1, 128, 600, TimeUnit.SECONDS, new SynchronousQueue[Runnable])
}
override def launchTask(d: ExecutorDriver, task: TaskDescription) {
threadPool.execute(new TaskRunner(task, d))
}
class TaskRunner(desc: TaskDescription, d: ExecutorDriver)
extends Runnable {
override def run() = {
val tid = desc.getTaskId.getValue
SparkEnv.set(env)
Thread.currentThread.setContextClassLoader(classLoader)
val ser = SparkEnv.get.closureSerializer.newInstance()
logInfo("Running task ID " + tid)
d.sendStatusUpdate(TaskStatus.newBuilder()
.setTaskId(desc.getTaskId)
.setState(TaskState.TASK_RUNNING)
.build())
try {
Accumulators.clear
val task = ser.deserialize[Task[Any]](desc.getData.toByteArray, classLoader)
for (gen <- task.generation) {// Update generation if any is set
env.mapOutputTracker.updateGeneration(gen)
}
val value = task.run(tid.toInt)
val accumUpdates = Accumulators.values
val result = new TaskResult(value, accumUpdates)
d.sendStatusUpdate(TaskStatus.newBuilder()
.setTaskId(desc.getTaskId)
.setState(TaskState.TASK_FINISHED)
.setData(ByteString.copyFrom(ser.serialize(result)))
.build())
logInfo("Finished task ID " + tid)
} catch {
case ffe: FetchFailedException => {
val reason = ffe.toTaskEndReason
d.sendStatusUpdate(TaskStatus.newBuilder()
.setTaskId(desc.getTaskId)
.setState(TaskState.TASK_FAILED)
.setData(ByteString.copyFrom(ser.serialize(reason)))
.build())
}
case t: Throwable => {
val reason = ExceptionFailure(t)
d.sendStatusUpdate(TaskStatus.newBuilder()
.setTaskId(desc.getTaskId)
.setState(TaskState.TASK_FAILED)
.setData(ByteString.copyFrom(ser.serialize(reason)))
.build())
// TODO: Handle errors in tasks less dramatically
logError("Exception in task ID " + tid, t)
System.exit(1)
}
}
}
}
/**
* Create a ClassLoader for use in tasks, adding any JARs specified by the user or any classes
* created by the interpreter to the search path
*/
private def createClassLoader(): ClassLoader = {
var loader = this.getClass.getClassLoader
// If any JAR URIs are given through spark.jar.uris, fetch them to the
// current directory and put them all on the classpath. We assume that
// each URL has a unique file name so that no local filenames will clash
// in this process. This is guaranteed by MesosScheduler.
val uris = System.getProperty("spark.jar.uris", "")
val localFiles = ArrayBuffer[String]()
for (uri <- uris.split(",").filter(_.size > 0)) {
val url = new URL(uri)
val filename = url.getPath.split("/").last
downloadFile(url, filename)
localFiles += filename
}
if (localFiles.size > 0) {
val urls = localFiles.map(f => new File(f).toURI.toURL).toArray
loader = new URLClassLoader(urls, loader)
}
// If the REPL is in use, add another ClassLoader that will read
// new classes defined by the REPL as the user types code
val classUri = System.getProperty("spark.repl.class.uri")
if (classUri != null) {
logInfo("Using REPL class URI: " + classUri)
loader = {
try {
val klass = Class.forName("spark.repl.ExecutorClassLoader").asInstanceOf[Class[_ <: ClassLoader]]
val constructor = klass.getConstructor(classOf[String], classOf[ClassLoader])
constructor.newInstance(classUri, loader)
} catch {
case _: ClassNotFoundException => loader
}
}
}
return loader
}
// Download a file from a given URL to the local filesystem
private def downloadFile(url: URL, localPath: String) {
val in = url.openStream()
val out = new FileOutputStream(localPath)
Utils.copyStream(in, out, true)
}
override def error(d: ExecutorDriver, code: Int, message: String) {
logError("Error from Mesos: %s (code %d)".format(message, code))
}
override def killTask(d: ExecutorDriver, t: TaskID) {
logWarning("Mesos asked us to kill task " + t.getValue + "; ignoring (not yet implemented)")
}
override def shutdown(d: ExecutorDriver) {}
override def frameworkMessage(d: ExecutorDriver, data: Array[Byte]) {}
}
/**
* Executor entry point.
*/
object Executor extends Logging {
def main(args: Array[String]) {
System.loadLibrary("mesos")
// Create a new Executor and start it running
val exec = new Executor
new MesosExecutorDriver(exec).run()
}
}
| javelinjs/spark | core/src/main/scala/spark/Executor.scala | Scala | bsd-3-clause | 6,224 |
package uk.gov.hmrc.mongo.geospatial
import reactivemongo.api.ReadPreference
import uk.gov.hmrc.mongo.ReactiveRepository
trait Geospatial[A, ID] {
self: ReactiveRepository[A, ID] =>
import scala.concurrent.ExecutionContext
import play.api.libs.json.Json
import reactivemongo.api.indexes.Index
import reactivemongo.api.indexes.IndexType.Geo2DSpherical
lazy val LocationField = "loc"
lazy val geo2DSphericalIndex = Index(Seq((LocationField, Geo2DSpherical)), Some("geo2DSphericalIdx"))
def nearPoint(lon: Double, lat: Double, limit: Int = 100, readPreference: ReadPreference = ReadPreference.primaryPreferred)(implicit ec: ExecutionContext) = collection.find(
Json.obj(
LocationField -> Json.obj(
"$near" -> Json.obj(
"$geometry" -> Json.obj(
"type" -> "Point",
"coordinates" -> Json.arr(lon, lat)
)
)
)
)
).cursor[A](readPreference).collect[List](limit)
}
| mefellows/simple-reactivemongo | src/main/scala/uk/gov/hmrc/mongo/geospatial/Geospatial.scala | Scala | apache-2.0 | 960 |
package pl.bsulkowski.datagrafter
/** Contains data and computations organized in a mutable directed tree structure.
*
* Branch names allow unique identification of any tree element by path from root.
* Allows references between tree elements, which work like symbolic links for files.
* Tree branches and references together must form a DAG structure.
*
* Computations are represented by nodes with defined function and arguments passed in branches.
*
* Primitive data values can be stored only in leafs.
*/
abstract class Tree {
type Element <: ElementView
/**
* A view of a single tree element, exposing its handle, content and graft.
*/
trait ElementView {
/** Returns handle to this or None in case of a root.
*
* Handle consists of parent Element and branch name.
* For elements that define a Graft it is the parent of corresponding graft Element.
*/
def handle: Option[(Element, String)]
* Returns the Content of the Element that this Element eventually resolves to.
*/
def content: Content
/** Returns Graft used to define this graft Element.
*/
def graft: Option[Graft]
}
abstract class Content
case class NodeContent(branches: scala.collection.mutable.Map[String, Element]) extends Content
case class DataContent(data: String) extends Content
case class FunctionContent(function: (Element => Element)) extends Content
case class ErrorContent(error: String) extends Content
abstract class Graft
case class ReferenceGraft(target: Path) extends Graft
case class ApplicationGraft(function: Element, arguments: Element) extends Graft
case class FunctionGraft(arguments: Element, result: Element) extends Graft
/** The root Element of this Tree.
*
* There may exist roots of other subtrees not connected to the main tree,
* but they are not tracked explicitly.
*/
val root: Element
def createNodeElement(): Element
def createDataElement(data: String): Element
def createGraftElement(graft: Graft): Element
}
| bsulkowski/data-grafter | src/main/scala/pl/bsulkowski/datagrafter/Tree.scala | Scala | mit | 1,993 |
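Since no concrete implementation is provided in this file, here is a purely hypothetical sketch of how the API above is meant to be consumed, given some existing `Tree` instance supplied from elsewhere.

// Hypothetical: inspect the root of an existing Tree implementation `tree`.
object TreeUsageSketch {
  def describeRoot(tree: pl.bsulkowski.datagrafter.Tree): Unit = {
    import tree._
    root.content match {
      case NodeContent(branches) => println(s"node with branches: ${branches.keys.mkString(", ")}")
      case DataContent(data)     => println(s"leaf holding data: $data")
      case FunctionContent(_)    => println("function node")
      case ErrorContent(error)   => println(s"error: $error")
    }
  }
}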
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.hbase.data
import java.io._
import java.nio.charset.StandardCharsets
import com.esotericsoftware.kryo.io.{Input, Output}
import com.typesafe.scalalogging.LazyLogging
import org.apache.arrow.memory.{BufferAllocator, RootAllocator}
import org.apache.hadoop.hbase.client.{Put, Scan}
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, TableName}
import org.geotools.data.simple.SimpleFeatureSource
import org.geotools.data.{DataStoreFinder, DataUtilities, Query, Transaction}
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.arrow.io.SimpleFeatureArrowFileReader
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.opengis.feature.simple.SimpleFeature
import org.locationtech.geomesa.hbase.HBaseSystemProperties.TableAvailabilityTimeout
import org.locationtech.geomesa.hbase.data.HBaseDataStoreParams.{ConnectionParam, HBaseCatalogParam}
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.{FeatureUtils, SimpleFeatureTypes}
import org.locationtech.geomesa.utils.io.WithClose
import org.specs2.matcher.MatchResult
class HBaseBackCompatibilityTest extends HBaseTest with LazyLogging {
import scala.collection.JavaConverters._
val name = "BackCompatibilityTest"
val sft = SimpleFeatureTypes.createType(name, "name:String:index=true,age:Int,dtg:Date,*geom:Point:srid=4326")
val features = (0 until 10).map { i =>
ScalaSimpleFeature.create(sft, s"$i", s"name$i", s"${i % 5}", s"2015-01-01T0$i:01:00.000Z", s"POINT(-12$i 4$i)")
}
val queries = Seq(
("INCLUDE", Seq(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
("IN ('0', '5', '7')", Seq(0, 5, 7)),
("bbox(geom, -130, 45, -120, 50)", Seq(5, 6, 7, 8, 9)),
("bbox(geom, -130, 45, -120, 50) AND dtg DURING 2015-01-01T00:00:00.000Z/2015-01-01T07:59:59.999Z", Seq(5, 6, 7)),
("name = 'name5'", Seq(5)),
("name = 'name5' AND bbox(geom, -130, 45, -120, 50) AND dtg DURING 2015-01-01T00:00:00.000Z/2015-01-01T07:59:59.999Z", Seq(5)),
("name = 'name5' AND dtg DURING 2015-01-01T00:00:00.000Z/2015-01-01T07:59:59.999Z", Seq(5)),
("name = 'name5' AND bbox(geom, -130, 40, -120, 50)", Seq(5)),
("dtg DURING 2015-01-01T00:00:00.000Z/2015-01-01T07:59:59.999Z", Seq(0, 1, 2, 3, 4, 5, 6, 7))
)
val addQueries = Seq(
"IN ('10')",
"name = 'name10'",
"bbox(geom, -111, 44, -109, 46)",
"bbox(geom, -111, 44, -109, 46) AND dtg DURING 2016-01-01T00:00:00.000Z/2016-01-01T01:00:00.000Z"
)
val transforms = Seq(
null,
Array("geom"),
Array("geom", "name")
)
val path = "src/test/resources/data/" // note: if running through intellij, use an absolute path
implicit val allocator: BufferAllocator = new RootAllocator(Long.MaxValue)
lazy val params = Map(ConnectionParam.getName -> connection, HBaseCatalogParam.getName -> name).asJava
step {
logger.info("Starting HBase back-compatibility test")
}
"HBase data store" should {
"Support back-compatibility to version 2.0.2" in { runVersionTest("2.0.2") }
"Support back-compatibility to version 2.3.1" in { runVersionTest("2.3.1") }
"Write data to disk" in {
skipped("integration")
val version = "2.0.2"
val ds = DataStoreFinder.getDataStore(params).asInstanceOf[HBaseDataStore]
try {
ds.createSchema(sft)
WithClose(ds.getFeatureWriterAppend(sft.getTypeName, Transaction.AUTO_COMMIT)) { writer =>
features.foreach(FeatureUtils.write(writer, _, useProvidedFid = true))
}
} finally {
ds.dispose()
}
writeVersion(new File(s"$path/versioned-data-$version.kryo"))
ok
}
}
def runVersionTest(version: String): MatchResult[_] = {
restoreVersion(new File(s"$path/versioned-data-$version.kryo"))
val ds = DataStoreFinder.getDataStore(params).asInstanceOf[HBaseDataStore]
try {
val schema = ds.getSchema(name)
schema must not(beNull)
val fs = ds.getFeatureSource(name)
// test adding features
val featureToAdd = ScalaSimpleFeature.create(sft, "10", "name10", "10", "2016-01-01T00:30:00.000Z", "POINT(-110 45)")
val writer = ds.getFeatureWriterAppend(name, Transaction.AUTO_COMMIT)
FeatureUtils.write(writer, featureToAdd, useProvidedFid = true)
writer.close()
// make sure we can read it back
foreach(addQueries) { query =>
val filter = ECQL.toFilter(query)
foreach(transforms) { transform =>
doQuery(fs, new Query(name, filter, transform), Seq(featureToAdd))
}
}
// delete it
var remover = ds.getFeatureWriter(name, ECQL.toFilter("IN ('10')"), Transaction.AUTO_COMMIT)
remover.hasNext must beTrue
remover.next
remover.remove()
remover.hasNext must beFalse
remover.close()
// make sure that it no longer comes back
foreach(addQueries) { query =>
val filter = ECQL.toFilter(query)
foreach(transforms) { transform =>
doQuery(fs, new Query(name, filter, transform), Seq.empty)
}
}
// test queries
foreach(queries) { case (q, results) =>
val filter = ECQL.toFilter(q)
logger.debug(s"Running query $q")
foreach(transforms) { transform =>
doQuery(fs, new Query(name, filter, transform), results.map(features.apply))
}
doArrowQuery(fs, new Query(name, filter)) must containTheSameElementsAs(results)
}
// delete one of the old features
remover = ds.getFeatureWriter(name, ECQL.toFilter("IN ('5')"), Transaction.AUTO_COMMIT)
remover.hasNext must beTrue
remover.next
remover.remove()
remover.hasNext must beFalse
remover.close()
// make sure that it no longer comes back
foreach(queries) { case (q, results) =>
val filter = ECQL.toFilter(q)
logger.debug(s"Running query $q")
doQuery(fs, new Query(name, filter), results.filter(_ != 5).map(features.apply))
}
} finally {
ds.dispose()
}
ok
}
def doQuery(fs: SimpleFeatureSource, query: Query, expected: Seq[SimpleFeature]): MatchResult[_] = {
logger.debug(s"Running query ${ECQL.toCQL(query.getFilter)} :: " +
Option(query.getPropertyNames).map(_.mkString(",")).getOrElse("All"))
val results = SelfClosingIterator(fs.getFeatures(query).features).toList
if (logger.underlying.isDebugEnabled()) {
results.foreach(f => logger.debug(DataUtilities.encodeFeature(f)))
}
val transformed = {
val subtype = DataUtilities.createSubType(sft, query.getPropertyNames)
// note: we have to copy the SimpleFeatureImpl as its `equals` method checks for the implementing class
expected.map(e => ScalaSimpleFeature.copy(DataUtilities.reType(subtype, e)))
}
results must containTheSameElementsAs(transformed)
}
def doArrowQuery(fs: SimpleFeatureSource, query: Query): Seq[Int] = {
query.getHints.put(QueryHints.ARROW_ENCODE, java.lang.Boolean.TRUE)
val out = new ByteArrayOutputStream
val results = SelfClosingIterator(fs.getFeatures(query).features)
results.foreach(sf => out.write(sf.getAttribute(0).asInstanceOf[Array[Byte]]))
def in() = new ByteArrayInputStream(out.toByteArray)
WithClose(SimpleFeatureArrowFileReader.streaming(in)) { reader =>
SelfClosingIterator(reader.features()).map(_.getID.toInt).toSeq
}
}
def writeVersion(file: File): Unit = {
val fs = new FileOutputStream(file)
WithClose(new Output(fs)) { output =>
def writeBytes(value: Array[Byte]): Unit = writeBytesSubset(value, 0, value.length)
def writeBytesSubset(value: Array[Byte], offset: Int, length: Int): Unit = {
output.writeInt(length)
output.write(value, offset, length)
}
val tables = WithClose(connection.getAdmin)(_.listTableNames(s"$name.*"))
output.writeInt(tables.size)
tables.foreach { name =>
val table = connection.getTable(name)
val descriptor = table.getTableDescriptor
writeBytes(descriptor.getTableName.getName)
output.writeInt(descriptor.getColumnFamilies.length)
descriptor.getColumnFamilies.foreach(d => writeBytes(d.getName))
output.writeInt(descriptor.getCoprocessors.size())
descriptor.getCoprocessors.asScala.foreach(c => writeBytes(c.getBytes(StandardCharsets.UTF_8)))
WithClose(table.getScanner(new Scan())) { scanner =>
output.writeInt(scanner.iterator.asScala.length)
}
WithClose(table.getScanner(new Scan())) { scanner =>
scanner.iterator.asScala.foreach { result =>
val cell = result.rawCells()(0)
writeBytesSubset(cell.getRowArray, cell.getRowOffset, cell.getRowLength)
writeBytesSubset(cell.getFamilyArray, cell.getFamilyOffset, cell.getFamilyLength)
writeBytesSubset(cell.getQualifierArray, cell.getQualifierOffset, cell.getQualifierLength)
writeBytesSubset(cell.getValueArray, cell.getValueOffset, cell.getValueLength)
}
}
}
output.flush()
}
}
def restoreVersion(file: File): Unit = {
val input = new Input(new FileInputStream(file))
def readBytes: Array[Byte] = {
val bytes = Array.ofDim[Byte](input.readInt)
input.read(bytes)
bytes
}
val numTables = input.readInt
val tables = (0 until numTables).map { _ =>
val descriptor = new HTableDescriptor(TableName.valueOf(readBytes))
val numColumns = input.readInt
(0 until numColumns).foreach(_ => descriptor.addFamily(new HColumnDescriptor(readBytes)))
val numCoprocessors = input.readInt
// TODO jar path, etc?
(0 until numCoprocessors).foreach(_ => descriptor.addCoprocessor(new String(readBytes, StandardCharsets.UTF_8)))
val numMutations = input.readInt
val mutations = (0 until numMutations).map { _ =>
val row = readBytes
val cf = readBytes
val cq = readBytes
val value = readBytes
val mutation = new Put(row)
mutation.addColumn(cf, cq, value)
mutation
}
(descriptor, mutations)
}
// reload the tables
WithClose(connection.getAdmin) { admin =>
tables.foreach { case (descriptor, mutations) =>
if (admin.tableExists(descriptor.getTableName)) {
admin.disableTable(descriptor.getTableName)
admin.deleteTable(descriptor.getTableName)
}
admin.createTable(descriptor)
if (!admin.isTableAvailable(descriptor.getTableName)) {
val timeout = TableAvailabilityTimeout.toDuration.filter(_.isFinite())
logger.debug(s"Waiting for table '${descriptor.getTableName}' to become available with " +
s"${timeout.map(t => s"a timeout of $t").getOrElse("no timeout")}")
val stop = timeout.map(t => System.currentTimeMillis() + t.toMillis)
while (!admin.isTableAvailable(descriptor.getTableName) && stop.forall(_ > System.currentTimeMillis())) {
Thread.sleep(1000)
}
}
WithClose(connection.getBufferedMutator(descriptor.getTableName)) { mutator =>
mutations.foreach(mutator.mutate)
mutator.flush()
}
if (logger.underlying.isTraceEnabled()) {
logger.trace(s"restored ${descriptor.getTableName} ${admin.tableExists(descriptor.getTableName)}")
val scan = connection.getTable(descriptor.getTableName).getScanner(new Scan())
SelfClosingIterator(scan.iterator.asScala, scan.close()).foreach(r => logger.trace(r.toString))
}
}
}
}
step {
logger.info("Cleaning up HBase back-compatibility test")
}
}
| elahrvivaz/geomesa | geomesa-hbase/geomesa-hbase-datastore/src/test/scala/org/locationtech/geomesa/hbase/data/HBaseBackCompatibilityTest.scala | Scala | apache-2.0 | 12,277 |
package com.pwootage.metroidprime.formats.scly
import com.pwootage.metroidprime.formats.BinarySerializable
import com.pwootage.metroidprime.formats.io.PrimeDataFile
class ScriptObjectConnection extends BinarySerializable {
var state: Int = -1
var message: Int = -1
var targetObject: Int = -1
override def write(f: PrimeDataFile): Unit = {
f.write32(state)
f.write32(message)
f.write32(targetObject)
}
override def read(f: PrimeDataFile): Unit = {
state = f.read32()
message = f.read32()
targetObject = f.read32()
}
def stateEnum = Prime1ScriptObjectState.fromID(state)
def stateString = stateEnum.toString
def messageEnum = Prime1ScriptObjectMessage.fromID(message)
def messageString = messageEnum.toString
override def toString = s"ScriptObjectConnection($targetObject, $stateString, $messageString)"
}
| Pwootage/prime-patcher | src/main/scala/com/pwootage/metroidprime/formats/scly/ScriptObjectConnection.scala | Scala | gpl-3.0 | 859 |
/*
*************************************************************************************
* Copyright 2012 Normation SAS
*************************************************************************************
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU Affero GPL v3, the copyright holders add the following
* Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU Affero GPL v3
* licence, when you create a Related Module, this Related Module is
* not considered as a part of the work and may be distributed under the
* license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/agpl.html>.
*
*************************************************************************************
*/
package com.normation.rudder.batch
import net.liftweb.actor.{LiftActor, LAPinger}
import com.normation.rudder.services.system.DatabaseManager
import net.liftweb.common._
import org.joda.time._
import com.normation.rudder.domain.logger.ReportLogger
import com.normation.rudder.domain.reports._
/**
* A helper object designed to help build automatic reports cleaning
*/
object AutomaticReportsCleaning {
/*
* Default parameters and properties name
*/
val minParam = "rudder.batch.databasecleaner.runtime.minute"
val hourParam = "rudder.batch.databasecleaner.runtime.hour"
val dayParam = "rudder.batch.databasecleaner.runtime.day"
val freqParam = "rudder.batch.reportsCleaner.frequency"
val defaultMinute = 0
val defaultHour = 0
val defaultDay = "sunday"
val defaultArchiveTTL = 30
val defaultDeleteTTL = 90
/**
* Build a frequency depending on the value
*/
def buildFrequency(kind:String, min:Int, hour:Int, day:String):Box[CleanFrequency] = {
kind.toLowerCase() match {
case "hourly" => buildHourly(min)
case "daily" => buildDaily(min,hour)
case "weekly" => buildWeekly(min,hour,day)
case _ => Failure("%s is not correctly set, value is %s".format(freqParam,kind))
}
}
/**
* Build an hourly frequency
*/
private[this] def buildHourly(min:Int):Box[CleanFrequency] = {
if (min >= 0 && min <= 59)
Full(Hourly(min))
else
Failure("%s is not correctly set, value is %d, should be in [0-59]".format(minParam,min))
}
/**
* Build a daily frequency
*/
private[this] def buildDaily(min:Int,hour:Int):Box[CleanFrequency] = {
if (min >= 0 && min <= 59)
if(hour >= 0 && hour <= 23)
Full(Daily(hour,min))
else
Failure("%s is not correctly set, value is %d, should be in [0-23]".format(hourParam,hour))
else
Failure("%s is not correctly set, value is %d, should be in [0-59]".format(minParam,min))
}
/**
* Build a weekly frequency
*/
private[this] def buildWeekly(min:Int,hour:Int,day:String):Box[CleanFrequency] = {
if (min >= 0 && min <= 59)
if(hour >= 0 && hour <= 23)
day.toLowerCase() match {
case "monday" => Full(Weekly(DateTimeConstants.MONDAY,hour,min))
case "tuesday" => Full(Weekly(DateTimeConstants.TUESDAY,hour,min))
case "wednesday" => Full(Weekly(DateTimeConstants.WEDNESDAY,hour,min))
case "thursday" => Full(Weekly(DateTimeConstants.THURSDAY,hour,min))
case "friday" => Full(Weekly(DateTimeConstants.FRIDAY,hour,min))
case "saturday" => Full(Weekly(DateTimeConstants.SATURDAY,hour,min))
case "sunday" => Full(Weekly(DateTimeConstants.SUNDAY,hour,min))
case _ => Failure("%s is not correctly set, value is %s".format(dayParam,day))
}
else
Failure("%s is not correctly set, value is %d, should be in [0-23]".format(hourParam,hour))
else
Failure("%s is not correctly set, value is %d, should be in [0-59]".format(minParam,min))
}
}
/**
* Clean Frequency represents how often a report cleaning will be done.
*/
trait CleanFrequency {
/**
* Check if report cleaning has to be run
* Actually check every minute.
* TODO : check in a range of 5 minutes
*/
def check(date:DateTime):Boolean = {
val target = checker(date)
target.equals(date)
}
/**
* Compute the checker from now
*/
def checker(now: DateTime):DateTime
/**
* Compute the next cleaning time
*/
def next:DateTime
/**
* Display the frequency
*/
def displayFrequency : Option[String]
override def toString = displayFrequency match {
case Some(freq) => freq
case None => "Could not compute frequency"
}
}
/**
* An hourly frequency.
* It runs every hour past min minutes
*/
case class Hourly(min:Int) extends CleanFrequency{
def checker(date:DateTime):DateTime = date.withMinuteOfHour(min)
def next:DateTime = {
val now = DateTime.now()
if (now.isBefore(checker(now)))
checker(now)
else
checker(now).plusHours(1)
}
def displayFrequency = Some("Every hour past %d minutes".format(min))
}
/**
* A daily frequency.
* It runs every day at hour:min
*/
case class Daily(hour:Int,min:Int) extends CleanFrequency{
def checker(date:DateTime):DateTime = date.withMinuteOfHour(min).withHourOfDay(hour)
def next:DateTime = {
val now = DateTime.now()
if (now.isBefore(checker(now)))
checker(now)
else
checker(now).plusDays(1)
}
def displayFrequency = Some("Every day at %02d:%02d".format(hour,min))
}
/**
* A weekly frequency.
* It runs every week on day at hour:min
*/
case class Weekly(day:Int,hour:Int,min:Int) extends CleanFrequency{
def checker(date:DateTime):DateTime = date.withMinuteOfHour(min).withHourOfDay(hour).withDayOfWeek(day)
def next:DateTime = {
val now = DateTime.now()
if (now.isBefore(checker(now)))
checker(now)
else
checker(now).plusWeeks(1)
}
def displayFrequency = {
def expressWeekly(day:String) = Some("every %s at %02d:%02d".format(day,hour,min))
day match {
case DateTimeConstants.MONDAY => expressWeekly ("Monday")
case DateTimeConstants.TUESDAY => expressWeekly ("Tuesday")
case DateTimeConstants.WEDNESDAY => expressWeekly ("Wednesday")
case DateTimeConstants.THURSDAY => expressWeekly ("Thursday")
case DateTimeConstants.FRIDAY => expressWeekly ("Friday")
case DateTimeConstants.SATURDAY => expressWeekly ("Saturday")
case DateTimeConstants.SUNDAY => expressWeekly ("Sunday")
case _ => None
}
}
}
// States into which the cleaner process can be.
sealed trait CleanerState
// The process is idle.
case object IdleCleaner extends CleanerState
// An update is currently cleaning the databases.
case object ActiveCleaner extends CleanerState
sealed trait DatabaseCleanerMessage
// Messages the cleaner can receive.
// Ask to clean database (need to be in active state).
case object CleanDatabase extends DatabaseCleanerMessage
// Ask to check if cleaning has to be launched (need to be in idle state).
case object CheckLaunch extends DatabaseCleanerMessage
case class ManualLaunch(date:DateTime) extends DatabaseCleanerMessage
trait DatabaseCleanerActor extends LiftActor {
def isIdle : Boolean
}
/**
* A class that periodically check if the Database has to be cleaned.
*
* For now, archive and delete run at the same frequency.
* Delete and Archive TTL express the maximum age of reports.
* A negative or zero TTL means to not run the relative reports cleaner.
* Archive action doesn't run if its TTL is more than Delete TTL.
*/
case class AutomaticReportsCleaning(
dbManager : DatabaseManager
, deletettl : Int // in days
, archivettl : Int // in days
, freq : CleanFrequency
) extends Loggable {
val reportLogger = ReportLogger
// Check if automatic reports archiving has to be started
val archiver:DatabaseCleanerActor = if(archivettl < 1) {
val propertyName = "rudder.batch.reportsCleaner.archive.TTL"
reportLogger.info("Disable automatic database archive since property %s is 0 or negative".format(propertyName))
new LADatabaseCleaner(ArchiveAction(dbManager,this),-1)
} else {
// Don't launch automatic report archiving if reports would have already been deleted by automatic reports deleting
if ((archivettl < deletettl ) && (deletettl > 0)) {
logger.trace("***** starting Automatic Archive Reports batch *****")
new LADatabaseCleaner(ArchiveAction(dbManager,this),archivettl)
}
else {
reportLogger.info("Disable automatic archive since archive maximum age is older than delete maximum age")
new LADatabaseCleaner(ArchiveAction(dbManager,this),-1)
}
}
archiver ! CheckLaunch
val deleter:DatabaseCleanerActor = if(deletettl < 1) {
val propertyName = "rudder.batch.reportsCleaner.delete.TTL"
reportLogger.info("Disable automatic database deletion since property %s is 0 or negative".format(propertyName))
new LADatabaseCleaner(DeleteAction(dbManager,this),-1)
} else {
logger.trace("***** starting Automatic Delete Reports batch *****")
new LADatabaseCleaner(DeleteAction(dbManager,this),deletettl)
}
deleter ! CheckLaunch
////////////////////////////////////////////////////////////////
//////////////////// implementation details ////////////////////
////////////////////////////////////////////////////////////////
private case class LADatabaseCleaner(cleanaction:CleanReportAction,ttl:Int) extends DatabaseCleanerActor with Loggable {
updateManager =>
private[this] val reportLogger = ReportLogger
private[this] val automatic = ttl > 0
private[this] var currentState: CleanerState = IdleCleaner
private[this] var lastRun: DateTime = DateTime.now()
def isIdle : Boolean = currentState == IdleCleaner
private[this] def formatDate(date:DateTime) : String = date.toString("yyyy-MM-dd HH:mm")
private[this] def activeCleaning(date : DateTime, message : DatabaseCleanerMessage, kind:String) : Unit = {
val formattedDate = formatDate(date)
cleanaction.act(date) match {
case eb:EmptyBox =>
// Error while cleaning, should launch again
reportLogger.error("Reports database: Error while processing database %s, cause is: %s ".format(cleanaction.continue.toLowerCase(),eb))
reportLogger.error("Reports database: Relaunching %s %s process for all reports before %s".format(kind.toLowerCase,cleanaction.continue.toLowerCase(), formattedDate))
(this) ! message
case Full(res) =>
if (res==0)
reportLogger.info("Reports database: %s %s completed for all reports before %s, no reports to %s".format(kind,cleanaction.name.toLowerCase(), formattedDate,cleanaction.name.toLowerCase()))
else
reportLogger.info("Reports database: %s %s completed for all reports before %s, %d reports %s".format(kind,cleanaction.name.toLowerCase(),formattedDate,res,cleanaction.past.toLowerCase()))
lastRun=DateTime.now
currentState = IdleCleaner
}
}
override protected def messageHandler = {
/*
* Ask to check if need to be launched
* If idle => check
* If active => do nothing
* always register to LAPinger
*/
case CheckLaunch => {
// Schedule next check, every minute
if (automatic) {
LAPinger.schedule(this, CheckLaunch, 1000L*60)
currentState match {
case IdleCleaner =>
logger.trace("***** Check launch *****")
if(freq.check(DateTime.now)){
logger.trace("***** Automatic %s entering in active State *****".format(cleanaction.name.toLowerCase()))
currentState = ActiveCleaner
(this) ! CleanDatabase
}
else
logger.trace("***** Automatic %s will not be launched now, It is scheduled '%s'*****".format(cleanaction.name.toLowerCase(),freq.toString))
case ActiveCleaner => ()
}
}
else
logger.trace("***** Database %s is not automatic, it will not schedule its next launch *****".format(cleanaction.name))
}
/*
* Ask to clean Database
* If idle => do nothing
* If active => clean database
*/
case CleanDatabase => {
currentState match {
case ActiveCleaner =>
val now = DateTime.now
val target = now.minusDays(ttl)
val formattedDate = formatDate(target)
logger.trace("***** %s Database *****".format(cleanaction.name))
reportLogger.info("Reports database: Automatic %s started for all reports before %s".format(cleanaction.name.toLowerCase(),formattedDate))
activeCleaning(target,CleanDatabase,"automatic")
case IdleCleaner => ()
}
}
case ManualLaunch(date) => {
val formattedDate = formatDate(date)
logger.trace("***** Ask to launch manual database %s *****".format(cleanaction.name))
currentState match {
case IdleCleaner =>
currentState = ActiveCleaner
logger.trace("***** Start manual %s database *****".format(cleanaction.name))
reportLogger.info("Reports database: Manual %s started for all reports before %s ".format(cleanaction.name.toLowerCase(), formattedDate))
activeCleaning(date,ManualLaunch(date),"Manual")
case ActiveCleaner => reportLogger.info("Reports database: A database cleaning is already running, please try later")
}
}
case _ =>
reportLogger.error("Wrong message for automatic reports %s ".format(cleanaction.name.toLowerCase()))
}
}
}
| jooooooon/rudder | rudder-core/src/main/scala/com/normation/rudder/batch/AutomaticReportsCleaner.scala | Scala | agpl-3.0 | 14,600 |
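As a rough illustration of the frequency helpers above (not part of Rudder), this builds a weekly schedule from configuration-style values and inspects it; `buildFrequency` returns a Lift `Box`, so invalid values surface as a `Failure` rather than an exception.

// Illustrative only: building and inspecting a CleanFrequency.
import com.normation.rudder.batch.AutomaticReportsCleaning
import org.joda.time.DateTime

object FrequencyExample extends App {
  // equivalent to frequency=weekly, day=sunday, hour=3, minute=30
  val freq = AutomaticReportsCleaning.buildFrequency("weekly", 30, 3, "sunday")

  freq.foreach { f =>
    println(f)                      // "every Sunday at 03:30"
    println(f.next)                 // next DateTime the cleaner would run
    println(f.check(DateTime.now))  // true only when 'now' falls on the scheduled day, hour and minute
  }

  // out-of-range values come back as a Failure, e.g. minute = 75
  println(AutomaticReportsCleaning.buildFrequency("hourly", 75, 0, "sunday"))
}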
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the GenJournal entity.
*/
class GenJournalGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""
val httpConf = http
.baseURL(baseURL)
.inferHtmlResources()
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connection("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"X-CSRF-TOKEN" -> "${csrf_token}"
)
val scn = scenario("Test the GenJournal entity")
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*); [P,p]ath=/").saveAs("csrf_token"))).exitHereIfFailed
.pause(10)
.exec(http("Authentication")
.post("/api/authentication")
.headers(headers_http_authenticated)
.formParam("j_username", "admin")
.formParam("j_password", "admin")
.formParam("remember-me", "true")
.formParam("submit", "Login")).exitHereIfFailed
.pause(1)
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*); [P,p]ath=/").saveAs("csrf_token")))
.pause(10)
.repeat(2) {
exec(http("Get all genJournal entries")
.get("/api/gen-journal-entries")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10 seconds, 20 seconds)
.exec(http("Create new genJournal entry")
.post("/api/gen-journal-entries")
.headers(headers_http_authenticated)
.body(StringBody("""{"id":null, "entrydate":"2020-01-01T00:00:00.000Z", "transaction":"SAMPLE_TEXT", "dacctno":"SAMPLE_TEXT", "cacctno":"SAMPLE_TEXT", "dadebit":"SAMPLE_TEXT", "dacredit":"SAMPLE_TEXT", "cadebit":"SAMPLE_TEXT", "cacredit":"SAMPLE_TEXT", "notes":"SAMPLE_TEXT"}""")).asJSON
.check(status.is(201))
.check(headerRegex("Location", "(.*)").saveAs("new_genJournal_url"))).exitHereIfFailed
.pause(10)
.repeat(5) {
exec(http("Get created genJournal entry")
.get("${new_genJournal_url}")
.headers(headers_http_authenticated))
.pause(10)
}
.exec(http("Delete created genJournal entry")
.delete("${new_genJournal_url}")
.headers(headers_http_authenticated))
.pause(10)
}
val users = scenario("Users").exec(scn)
setUp(
users.inject(rampUsers(100) over (1 minutes))
).protocols(httpConf)
}
| dlwhitehurst/blackhole | src/test/gatling/simulations/GenJournalGatlingTest.scala | Scala | apache-2.0 | 3,636 |
package controller
import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.{RequestMapping, RequestMethod}
import us.hexcoder.twirl.view.TwirlView
/**
* User: 67726e
*/
@Controller
@RequestMapping(value = Array("/"))
class IndexController {
@RequestMapping(value = Array("/"), method = Array(RequestMethod.GET))
def index():TwirlView = {
TwirlView.empty()
}
@RequestMapping(value = Array("/test.html"), method = Array(RequestMethod.GET))
def getHtml():TwirlView = {
TwirlView.ok(html.index())
}
@RequestMapping(value = Array("/test.xml"), method = Array(RequestMethod.GET))
def getXml():TwirlView = {
TwirlView.ok(xml.test())
}
@RequestMapping(value = Array("/test.txt"), method = Array(RequestMethod.GET))
def getTxt():TwirlView = {
TwirlView.ok(txt.test())
}
@RequestMapping(value = Array("/test.js"), method = Array(RequestMethod.GET))
def getJs():TwirlView = {
TwirlView.ok(js.application())
}
@RequestMapping(value = Array("/redirect"), method = Array(RequestMethod.GET))
def redirect():TwirlView = {
TwirlView.redirect("/test.html")
}
}
| 67726e/Spring-MVC-Twirl | src/test/scala/controller/IndexController.scala | Scala | mit | 1,149 |
package me.heaton.profun.week4
/**
* Expression with Pattern Matching
*/
trait ExprWithPM {
/**
* A MatchError exception is thrown if no pattern matches the value of selector
*
* Patterns are constructed from:
* constructors
* variables (Always begin with a lowercase letter and no repetition)
* wildcard patterns _
* constants (Always begin with a Capital letter except null, true, false)
*/
def eval: Int = this match {
case NumberPM(n) => n
case SumPM(e1, e2) => e1.eval + e2.eval
}
def show: String = this match {
case NumberPM(n) => n.toString
case SumPM(e1, e2) => e1.show + " + " + e2.show
case VarPM(x) => x
case ProdPM(e1, e2) => Priority(e1).show + " * " + Priority(e2).show
}
}
/**
* A case class implicitly defines a companion object with an apply method, e.g.:
*
* object NumberPM { def apply(n: Int) = new NumberPM(n) }
*/
case class NumberPM(n: Int) extends ExprWithPM
case class SumPM(e1: ExprWithPM, e2: ExprWithPM) extends ExprWithPM
case class VarPM(x: String) extends ExprWithPM
case class ProdPM(e1: ExprWithPM, e2: ExprWithPM) extends ExprWithPM
case class Priority(e: ExprWithPM) extends ExprWithPM {
override def show: String = e match {
case e: SumPM => "(" + e.show + ")"
case _ => e.show
}
}
| SanCoder-Q/hello-scala | src/main/scala/me/heaton/profun/week4/ExprWithPM.scala | Scala | mit | 1,290 |
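A small usage sketch of the classes above (not from the original repo): `show` parenthesizes sums inside products via `Priority`, while `eval` only covers the numeric cases.

// Sketch only: building, printing and evaluating expressions with the pattern-matching variants above.
import me.heaton.profun.week4._

object ExprWithPMExample extends App {
  val expr = ProdPM(SumPM(NumberPM(1), NumberPM(2)), VarPM("x"))
  println(expr.show)                            // (1 + 2) * x

  println(SumPM(NumberPM(3), NumberPM(4)).eval) // 7

  // Note: eval is partial; evaluating a VarPM or ProdPM throws a MatchError.
}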
package amphip.model
import scala.annotation.implicitNotFound
object ops {
@implicitNotFound("Ref is not defined for ${A}")
trait RefOp[A, B, C] {
def apply(a: => A, expr: List[B]): C
}
@implicitNotFound("=== is not defined for ${A}, ${B}")
trait EqOp[A, B, C] {
def eq(lhe: A, rhe: B): C
}
@implicitNotFound("=!= is not defined for ${A}, ${B}")
trait NEqOp[A, B, C] {
def neq(lhe: A, rhe: B): C
}
@implicitNotFound("< is not defined for ${A}, ${B}")
trait LTOp[A, B, C] {
def lt(lhe: A, rhe: B): C
}
@implicitNotFound("<= is not defined for ${A}, ${B}")
trait LTEOp[A, B, C] {
def lte(lhe: A, rhe: B): C
}
@implicitNotFound("> is not defined for ${A}, ${B}")
trait GTOp[A, B, C] {
def gt(lhe: A, rhe: B): C
}
@implicitNotFound(">= is not defined for ${A}, ${B}")
trait GTEOp[A, B, C] {
def gte(lhe: A, rhe: B): C
}
@implicitNotFound("Dimen is not defined for ${A}")
trait DimenOp[A, B] {
def dimen(a: A, n: Int): B
}
@implicitNotFound("Within is not defined for ${A}, ${B}")
trait WithinOp[A, B, C] {
def within(lhe: A, rhe: B): C
}
@implicitNotFound("Assign is not defined for ${A}, ${B}")
trait AssignOp[A, B, C] {
def assign(lhe: A, rhe: B): C
}
@implicitNotFound("Default is not defined for ${A}, ${B}")
trait DefaultOp[A, B, C] {
def default(lhe: A, rhe: B): C
}
@implicitNotFound("In is not defined for ${A}, ${B}")
trait InOp[A, B, C] {
def in(lhe: A, rhe: B): C
}
@implicitNotFound("${A} can't be integer")
trait IntegerOp[A] {
def integer(a: A): A
}
@implicitNotFound("${A} can't be binary")
trait BinaryOp[A] {
def binary(a: A): A
}
@implicitNotFound("${A} can't be symbolic")
trait SymbolicOp[A] {
def symbolic(a: A): A
}
@implicitNotFound("Cond is not defined for ${A}, ${B}, ${C}")
trait CondOp[A, B, C, D] {
def cond(test: A)(ifTrue: B)(otherwise: C): D
}
@implicitNotFound("Cond1 is not defined for ${A}, ${B}")
trait Cond1Op[A, B, C] {
def cond1(test: A)(ifTrue: B): C
}
@implicitNotFound("Addition is not defined for ${A}, ${B}")
trait AddOp[A, B, C] {
def add(lhe: A, rhe: B): C
}
@implicitNotFound("Substraction is not defined for ${A}, ${B}")
trait SubOp[A, B, C] {
def sub(lhe: A, rhe: B): C
}
@implicitNotFound("Less is not defined for ${A}, ${B}")
trait LessOp[A, B, C] {
def less(lhe: A, rhe: B): C
}
@implicitNotFound("Sum is not defined for ${A}, ${B}")
trait SumOp[A, B, C] {
def sum(indexing: A, integrand: B): C
}
@implicitNotFound("Prod is not defined for ${A}, ${B}")
trait ProdOp[A, B, C] {
def prod(indexing: A, integrand: B): C
}
@implicitNotFound("Max is not defined for ${A}, ${B}")
trait MaxOp[A, B, C] {
def max(indexing: A, integrand: B): C
}
@implicitNotFound("Min is not defined for ${A}, ${B}")
trait MinOp[A, B, C] {
def min(indexing: A, integrand: B): C
}
@implicitNotFound("Multiplication is not defined for ${A}, ${B}")
trait MultOp[A, B, C] {
def mult(lhe: A, rhe: B): C
}
@implicitNotFound("Division is not defined for ${A}, ${B}")
trait DivOp[A, B, C] {
def div(lhe: A, rhe: B): C
}
@implicitNotFound("Exact division is not defined for ${A}, ${B}")
trait DivExactOp[A, B, C] {
def divExact(lhe: A, rhe: B): C
}
@implicitNotFound("Modulus is not defined for ${A}, ${B}")
trait ModOp[A, B, C] {
def mod(lhe: A, rhe: B): C
}
@implicitNotFound("Unary plus is not defined for ${A}")
trait UnaryPlusOp[A, B] {
def plus(a: A): B
}
@implicitNotFound("Unary minus is not defined for ${A}")
trait UnaryMinusOp[A, B] {
def minus(a: A): B
}
@implicitNotFound("Raise is not defined for ${A}, ${B}")
trait RaiseOp[A, B, C] {
def raise(lhe: A, rhe: B): C
}
@implicitNotFound("Pipe is not defined for ${A}, ${B}")
trait PipeOp[A, B, C] {
def pipe(lhe: A, rhe: B): C
}
@implicitNotFound("Diff is not defined for ${A}, ${B}")
trait DiffOp[A, B, C] {
def diff(lhe: A, rhe: B): C
}
@implicitNotFound("SymDiff is not defined for ${A}, ${B}")
trait SymDiffOp[A, B, C] {
def symDiff(lhe: A, rhe: B): C
}
@implicitNotFound("Inter is not defined for ${A}, ${B}")
trait InterOp[A, B, C] {
def inter(lhe: A, rhe: B): C
}
@implicitNotFound("SetOf is not defined for ${A}, ${B}")
trait SetOfOp[A, B, C] {
def setOf(indexing: A, integrand: B*): C
}
@implicitNotFound("To is not defined for ${A}, ${B}")
trait ToOp[A, B, C] {
def to(t0: A, tf: B): C
}
@implicitNotFound("By is not defined for ${A}, ${B}")
trait ByOp[A, B, C] {
def by(exp: A, deltaT: B): C
}
@implicitNotFound("Disj is not defined for ${A}, ${B}")
trait DisjOp[A, B, C] {
def disj(lhe: A, rhe: B): C
}
@implicitNotFound("Forall is not defined for ${A}, ${B}")
trait ForallOp[A, B, C] {
def forall(indexing: A, integrand: B): C
}
@implicitNotFound("Exists is not defined for ${A}, ${B}")
trait ExistsOp[A, B, C] {
def exists(indexing: A, integrand: B): C
}
@implicitNotFound("Conj is not defined for ${A}, ${B}")
trait ConjOp[A, B, C] {
def conj(lhe: A, rhe: B): C
}
@implicitNotFound("Negation is not defined for ${A}")
trait NegOp[A, B] {
def not(a: A): B
}
//// FUNCTIONS
@implicitNotFound("Size is not defined for ${A}")
trait SizeOp[A, B] {
def size(a: A): B
}
} | gerferra/amphip | core/src/main/scala/amphip/model/ops.scala | Scala | mpl-2.0 | 5,442 |
package omniauth.lib
import scala.xml.NodeSeq
import net.liftweb.http.S
import net.liftweb.common.Box
import net.liftweb.json.JsonParser
import net.liftweb.util.Helpers.tryo
import dispatch.classic.:/
import omniauth.Omniauth
import omniauth.AuthInfo
class VKProvider(appId: String, secret: String) extends OmniauthProvider{
val API_VERSION = "5.23"
override def providerName = VKProvider.providerName
implicit val formats = net.liftweb.json.DefaultFormats
override def signIn(): NodeSeq = {
var requestUrl = "https://oauth.vk.com/oauth/authorize?"
var urlParams = Map.empty[String, String]
urlParams += "client_id" -> appId
urlParams += "response_code" -> "code"
urlParams += "redirect_uri" -> (Omniauth.siteAuthBaseUrl + "auth/" + providerName + "/callback")
urlParams += "v" -> API_VERSION
urlParams += "scope" -> permissions
requestUrl += Omniauth.q_str(urlParams)
S.redirectTo(requestUrl)
}
override def callback(): NodeSeq = {
val code = S.param("code") openOr S.redirectTo("/")
logger.debug("GOT CODE" + code)
val callbackUrl = Omniauth.siteAuthBaseUrl + "auth/" + providerName + "/callback"
var urlParams = Map.empty[String, String]
urlParams += "client_id" -> appId
urlParams += "client_secret" -> secret
urlParams += "redirect_uri" -> (Omniauth.siteAuthBaseUrl + "auth/" + providerName + "/callback")
urlParams += "code" -> code
val tmpRequest = (:/("oauth.vk.com").secure / "access_token").POST <:<
Map("Content-Type" -> "application/x-www-form-urlencoded") << urlParams
val json = Omniauth.http(tmpRequest >- JsonParser.parse)
val accessToken = tryo {
AuthToken(
(json \\ "access_token").extract[String],
(json \\ "expires_in").extract[Option[Long]],
(json \\ "user_id").extract[Option[String]],
(json \\ "email").extract[Option[String]]
)
}
S.redirectTo((for {
t <- accessToken
if (validateToken(t))
} yield {
Omniauth.successRedirect
}) openOr Omniauth.failureRedirect)
}
override def validateToken(token: AuthToken): Boolean = {
token.refreshToken.map { uid =>
val email = token.secret.getOrElse("")
var urlParams = Map.empty[String, String]
urlParams += "user_id" -> token.refreshToken.getOrElse("")
urlParams += "v" -> API_VERSION
urlParams += "access_token" -> token.token
val tmpRequest = (:/("api.vk.com").secure / "method" / "users.get") <:<
Map("Content-Type" -> "application/x-www-form-urlencoded") << urlParams
try {
val json = Omniauth.http(tmpRequest >- JsonParser.parse)
val user = (json \\ "response")(0)
val firstName = (user \\ "first_name").extract[String]
val lastName = (user \\ "last_name").extract[String]
val name = (firstName + " " + lastName).trim()
val ai = AuthInfo(providerName, uid, name, token, None, Some(name), Some(email), Some(firstName), Some(lastName))
Omniauth.setAuthInfo(ai)
logger.debug(ai)
true
} catch {
case e: Throwable => false
}
} getOrElse false
}
override def tokenToId(token: AuthToken): Box[String] = {
token.refreshToken
}
def permissions = Properties.get(VKProvider.providerPropertyPermissions) openOr ""
}
object VKProvider {
val providerName = "vk"
val providerPropertyKey = "omniauth.vkkey"
val providerPropertySecret = "omniauth.vksecret"
val providerPropertyPermissions = "omniauth.vkpermissions"
}
| ghostm/lift-omniauth | src/main/scala/omniauth/lib/VKProvider.scala | Scala | apache-2.0 | 3,538 |
package edu.uci.eecs.spectralLDA
import breeze.linalg.sum
import org.apache.spark.{SparkConf, SparkContext}
import edu.uci.eecs.spectralLDA.algorithm._
import org.apache.spark.rdd._
import org.apache.spark.mllib.clustering._
import org.apache.spark.mllib.linalg._
object CVLogPerplexity {
def main(args: Array[String]) = {
val conf: SparkConf = new SparkConf().setAppName(s"Spectral LDA")
val sc: SparkContext = new SparkContext(conf)
val cv = args(0).toInt
val documentsPath = args(1)
val k = args(2).toInt
val alpha0 = args(3).toDouble
val maxIterations = args(4).toInt
val tol = args(5).toDouble
val minWords = args(6).toInt
val docs = sc.objectFile[(Long, breeze.linalg.SparseVector[Double])](documentsPath)
.filter {
case (_, tc) => sum(tc) >= minWords
}
for (i <- 0 until cv) {
val splits = docs.randomSplit(Array[Double](0.9, 0.1))
computeLogLikelihood(splits, k, alpha0, maxIterations, tol)
}
sc.stop()
}
def computeLogLikelihood(splits: Array[RDD[(Long, breeze.linalg.SparseVector[Double])]],
k: Int,
alpha0: Double,
maxIterations: Int,
tol: Double
): Unit = {
val numTestTokens = splits(1)
.map {
case (_, tc) => breeze.linalg.sum(tc)
}
.reduce(_ + _)
val tensorLDA = new TensorLDA(
dimK = k,
alpha0 = alpha0,
maxIterations = maxIterations,
tol = tol
)
val (beta, alpha, _, _, m1) = tensorLDA.fit(splits(0))
val augBeta = breeze.linalg.DenseMatrix.zeros[Double](beta.rows, k + 1)
val augAlpha = breeze.linalg.DenseVector.ones[Double](alpha.length + 1)
augBeta(::, 0 until k) := beta
val dummyTopic = m1 + 0.1 * breeze.linalg.DenseVector.ones[Double](beta.rows) / beta.rows.toDouble
augBeta(::, k) := dummyTopic / sum(dummyTopic)
augAlpha(0 until k) := alpha
val tensorLDAModel = new TensorLDAModel(augBeta, augAlpha)
val tensorLDALogL = tensorLDAModel.logLikelihood(splits(1), smoothing = 1e-6, maxIterations = 50)
println(s"Tensor LDA log-perplexity no extra smoothing: ${- tensorLDALogL / numTestTokens}")
val trainMapped: RDD[(Long, Vector)] = splits(0).map {
case (id, tc) =>
val (idx, v) = tc.activeIterator.toArray.unzip
(id, new SparseVector(tc.length, idx, v))
}
val testMapped: RDD[(Long, Vector)] = splits(1).map {
case (id, tc) =>
val (idx, v) = tc.activeIterator.toArray.unzip
(id, new SparseVector(tc.length, idx, v))
}
val ldaOptimizer = new OnlineLDAOptimizer()
.setMiniBatchFraction(0.05)
val lda = new LDA()
.setOptimizer(ldaOptimizer)
.setMaxIterations(80)
.setK(k)
.setDocConcentration(alpha0 / k.toDouble)
.setBeta(1.0)
val ldaModel: LDAModel = lda.run(trainMapped)
val ldaLogL = ldaModel.asInstanceOf[LocalLDAModel].logLikelihood(testMapped)
println(s"Variational Inference log-perplexity: ${- ldaLogL / numTestTokens}")
}
} | FurongHuang/SpectralLDA-TensorSpark | src/main/scala/edu/uci/eecs/spectralLDA/CVLogPerplexity.scala | Scala | apache-2.0 | 3,114 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex.cubeB
import akka.actor.{Actor, ActorLogging}
import org.squbs.lifecycle.{GracefulStop, GracefulStopHelper}
import org.squbs.unicomplex.Initialized
import org.squbs.unicomplex.Unicomplex.InitReport
import scala.util.Try
class InitCubeActorB extends Actor with ActorLogging with GracefulStopHelper {
// do initialization
def init: InitReport = {
log.info("initializing")
Try {
// do some tasks
Some("InitCubeActorB")
}
}
context.parent ! (Initialized(init))
def receive = {
case GracefulStop => defaultLeafActorStop
case other => sender ! other
}
}
| Harikiranvuyyuru/squbs | squbs-unicomplex/src/test/scala/org/squbs/unicomplex/cubeB/CubeB.scala | Scala | apache-2.0 | 1,227 |
package controllers.management
import javax.inject._
import auth.{AdminAction, AuthenticatedRequest}
import forms.UsersSamplesForm
import models.UserSample
import play.api.Logger
import play.api.db.Database
import play.api.libs.json._
import play.api.mvc._
import scala.util.{Failure, Success, Try}
@Singleton
class UsersSamplesController @Inject()(db: Database) extends Controller {
val AdminAction = new AdminAction(db)
import UsersSamplesController._
/**
* Add one or more users_samples rows to the database.
* Expects a JSON body of the type
* ```{ "users_samples": [{"sample": "S1", "username": "A"}, {"sample": "S2", "username": "B"}] }```
*/
def addUsersSamples() = AdminAction(parse.json) { implicit request =>
val appId = request.user.appId
val usersSamples = usersSamplesFromRequest
val usernames = usersSamples.map(_.username)
val sampleNames = usersSamples.map(_.sample)
val userIdsOpt = getUserIds(db, usernames)
val sampleIdsOpt = getSampleIds(db, sampleNames)
val verified = for {
userIds <- verifyIds(userIdsOpt, usernames)
sampleIds <- verifyIds(sampleIdsOpt, sampleNames)
} yield {
require(userIds.size == userIdsOpt.size)
require(userIds.size == sampleIds.size)
(userIds zip sampleIds)
}
verified match {
case Failure(err) =>
// Logger.debug(err.getMessage)
InternalServerError(err.getMessage +". Nothing was inserted.")
case Success(zipped) =>
val counts: Seq[Int] = zipped.map(us => findUsersSamples(db, us._1, us._2))
val notExistYet = zipped.zipWithIndex.filter(x => counts(x._2) == 0).map(_._1).zipWithIndex
if (notExistYet.nonEmpty) {
db.withConnection { conn =>
val unknowns: String = notExistYet.map(_ => "(?,?)").mkString(",")
val statement = conn.prepareStatement(s"""
INSERT INTO `users_samples`(`user_id`,`sample_id`) VALUES $unknowns ;
""")
notExistYet.foreach { case ((uid, sid), i) =>
statement.setInt(2 * i + 1, uid)
statement.setInt(2 * i + 2, sid)
}
statement.execute()
}
}
Ok(s"Inserted ${notExistYet.size} user-sample(s)")
}
}
/**
* Delete one or more users_samples rows from the database.
* Expects a JSON body of the type
* ```{ "users_samples": [{"sample": "S1", "username": "A"}, {"sample": "S2", "username": "B"}] }```
*/
def deleteUsersSamples() = AdminAction(parse.json) { implicit request =>
val appId = request.user.appId
val usersSamples = usersSamplesFromRequest
val usernames = usersSamples.map(_.username)
val sampleNames = usersSamples.map(_.sample)
val userIdsOpt = getUserIds(db, usernames)
val sampleIdsOpt = getSampleIds(db, sampleNames)
val verified = for {
userIds <- verifyIds(userIdsOpt, usernames)
sampleIds <- verifyIds(sampleIdsOpt, sampleNames)
} yield {
require(userIds.size == userIdsOpt.size)
require(userIds.size == sampleIds.size)
(userIds zip sampleIds)
}
verified match {
case Failure(err) =>
// Logger.debug(err.getMessage)
InternalServerError(err.getMessage +". Nothing was deleted.")
case Success(zipped) =>
val counts: Seq[Int] = zipped.map(us => findUsersSamples(db, us._1, us._2))
val found = zipped.zipWithIndex.filter(x => counts(x._2) > 0).map(_._1)
        // Deleting rows that don't exist is not an error in SQL, but we count the existing ones for the report
db.withConnection { conn =>
found.foreach { case (uid, sid) =>
val statement = conn.prepareStatement(s"""
DELETE FROM `users_samples` WHERE user_id = ? and sample_id = ? ;
""")
statement.setInt(1, uid)
statement.setInt(2, sid)
statement.execute()
}
Ok(s"Deleted ${found.size} user-sample(s)")
}
}
}
}
object UsersSamplesController {
/**
   * Search the request body for the key "users_samples" and create one `UserSample` object per entry.
*/
def usersSamplesFromRequest(implicit request: AuthenticatedRequest[JsValue]): Seq[UserSample] = {
val usersSamplesJs: JsArray = (request.body \ "users_samples").asOpt[JsArray] getOrElse {
throw new IllegalArgumentException("Could not cast users_samples array from request body to JsArray")
}
val users: Seq[UserSample] = usersSamplesJs.value map { userSampleJs =>
Try (UsersSamplesForm.fromJson(userSampleJs)) getOrElse {
throw new IllegalArgumentException("Could not cast request body to UserSample models")
}
}
users
}
/**
* Find in the database the userIds corresponding to the given *usernames*, if exist.
*/
def getUserIds(db: Database, usernames: Seq[String]): List[Option[Int]] = {
val unknowns: String = usernames.map(_ => "?").mkString(",")
db.withConnection { conn =>
val statement = conn.prepareStatement(s"SELECT username,id FROM users WHERE username IN ($unknowns);")
usernames.zipWithIndex.foreach {case (name, i) => statement.setString(i+1, name)}
val res = statement.executeQuery()
val usersMap = scala.collection.mutable.HashMap[String,Int]()
while (res.next()) {
usersMap += (res.getString("username") -> res.getInt("id"))
}
val userIds = usernames map (usersMap.get)
userIds.toList
}
}
/**
   * Find in the database the sampleIds corresponding to the given *sampleNames*, if they exist.
*/
def getSampleIds(db: Database, sampleNames: Seq[String]): List[Option[Int]] = {
val unknowns: String = sampleNames.map(_ => "?").mkString(",")
db.withConnection { conn =>
val statement = conn.prepareStatement(s"SELECT name,id FROM samples WHERE name IN ($unknowns);")
sampleNames.zipWithIndex.foreach {case (name, i) => statement.setString(i+1, name)}
val res = statement.executeQuery()
val samplesMap = scala.collection.mutable.HashMap[String,Int]()
while (res.next()) {
samplesMap += (res.getString("name") -> res.getInt("id"))
}
val sampleIds = sampleNames map (samplesMap.get)
sampleIds.toList
}
}
/**
* Check that we could find all of *names* in the database by looking at whether their
* corresponding ids (from `getUserIds`, `getSampleIds`) are None.
*/
def verifyIds(maybeIds: List[Option[Int]], names: Seq[String]): Try[List[Int]] = {
val missing = (maybeIds zip names) filter (_._1.isEmpty)
if (missing.isEmpty)
Success(maybeIds.map(_.get))
else {
val missingNames = missing.map(_._2).mkString("','")
Failure(new IllegalArgumentException(s"Could not find '$missingNames' in database."))
}
}
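  // Illustrative example (not part of the original source), assuming "bob" is missing from the database:
  //   verifyIds(List(Some(1), None), Seq("alice", "bob"))
  //   ==> Failure(IllegalArgumentException("Could not find 'bob' in database."))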
/**
   * Return the number of users_samples rows found for the given *userId* and *sampleId* in the database.
*/
def findUsersSamples(db: Database, userId: Int, sampleId: Int): Int = {
db.withConnection { conn =>
val statement = conn.prepareStatement("SELECT `id` FROM `users_samples` WHERE user_id = ? AND sample_id = ?;")
statement.setInt(1, userId)
statement.setInt(2, sampleId)
val result = statement.executeQuery()
var count = 0
while (result.next()) {
count += 1
}
count
}
}
}
| chuv-ssrc/bam-server-scala | app/controllers/management/UsersSamplesController.scala | Scala | gpl-3.0 | 7,378 |
import sbt._
import sbt.Keys._
object ScalamataBuild extends Build {
lazy val buildSettings = Project.defaultSettings ++ Seq(
// name := "Automata in Scala",
organization := "org.everpeace",
version := "0.1-SNAPSHOT",
scalaVersion := "2.9.1"
// add other settings here
)
lazy val root = Project(
id = "scalamata-root",
base = file("."),
settings = buildSettings,
aggregate = Seq(core, examples) )
lazy val core = Project(
id = "scalamata",
base = file("core"),
settings = buildSettings )
lazy val examples = Project(
id = "scalamata-examples",
base = file("examples"),
dependencies = Seq(core),
settings = buildSettings )
}
| everpeace/scalamata | project/Build.scala | Scala | mit | 715 |
package tw.edu.ntu.csie.liblinear
/**
* SolverType defines the names of different solvers.
*/
object SolverType extends Enumeration
{
type Solver = Value
val L2_LR = Value(0)
val L2_L2LOSS_SVC = Value(2)
val unknown = Value(-1)
def parse(id : Int) : Value =
{
if(id == L2_LR.id)
{
return L2_LR;
}
else if(id == L2_L2LOSS_SVC.id)
{
return L2_L2LOSS_SVC
}
else
{
return unknown
}
}
}
/**
* Parameter stores the type of solver and user-specified parameters.
*
* @param solverType the optimization solver
* @param eps used in stopping criteria
* @param C used to control the regularization and loss.
*/
class Parameter() extends Serializable
{
var solverType : SolverType.Value = SolverType.L2_LR
var eps : Double = 1e-2
var C : Double = 1.0
var numSlaves = -1
}
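// Illustrative example (not part of the original source): a minimal sketch of configuring
// a Parameter and mapping liblinear's integer solver ids back to SolverType values.
// The object name and the concrete values are assumptions added purely for this example.
object ParameterExample {
  def main(args: Array[String]): Unit = {
    val param = new Parameter()
    param.solverType = SolverType.L2_L2LOSS_SVC // default is L2_LR
    param.C = 4.0    // weaker regularization than the default 1.0
    param.eps = 1e-3 // tighter stopping tolerance than the default 1e-2
    assert(SolverType.parse(2) == SolverType.L2_L2LOSS_SVC)
    assert(SolverType.parse(7) == SolverType.unknown)
  }
}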
| Chieh-Yen/experimental | src/main/scala/tw/edu/ntu/csie/liblinear/Parameter.scala | Scala | apache-2.0 | 818 |
package org.psesd.srx.shared.core
import org.psesd.srx.shared.core.extensions.ExtendedEnumeration
/** Enumeration of supported SRX operations.
*
* @version 1.0
* @since 1.0
* @author Stephen Pugmire (iTrellis, LLC)
**/
object SrxOperation extends ExtendedEnumeration {
type SrxOperation = Value
val Diagnostic = Value("Diagnostic")
val DiagnosticEnvironment = Value("DiagnosticEnvironment")
val DiagnosticHeaders = Value("DiagnosticHeaders")
val Info = Value("Info")
val Messages = Value("Messages")
val MessagesGetAll = Value("MessagesGetAll")
val MessagesGetLatest = Value("MessagesGetLatest")
val None = Value("")
val Sre = Value("Sre")
val SrePost = Value("SrePost")
val Test = Value("Test")
val Xsre = Value("Xsre")
val XsreGet = Value("XsreGet")
val XsreRefresh = Value("XsreRefresh")
} | PSESD/srx-shared-core | src/main/scala/org/psesd/srx/shared/core/SrxOperation.scala | Scala | mit | 834 |
package stealthnet.scala.network.connection
/**
* Connection listener object.
*
* Defines messages that can be notified to listeners.
*/
object ConnectionListener {
  /* XXX - manage connection updates (state, etc.) and closing */
/** Message: new connection initiated. */
case class NewConnection(cnx: StealthNetConnection)
/** Message: connection closed. */
case class ClosedConnection(cnx: StealthNetConnection)
}
| suiryc/StealthNet | core/src/main/scala/stealthnet/scala/network/connection/ConnectionListener.scala | Scala | gpl-3.0 | 432 |
package connections.network
import java.io.IOException
import java.net.SocketException
import connections.{BaseDeviceManager, ConnectionManager}
import slide.SystemInfo
import enums.ConnectionMode
class NetworkDeviceManager extends BaseDeviceManager {
private var ndc: NetworkDeviceConnection = null
private var backgroundScannerRunning: Boolean = true
@throws(classOf[IOException])
override def connect(ip: String): Unit = {
ndc = new NetworkDeviceConnection(ip) {
override def onClientOutOfDate(): Unit = {
throwError("The client is out of date. Please upgrade it.")
}
}
ndc.connect()
}
override def throwError(message: String): Unit = {}
override def startBackgroundScanner(): Unit = {
val t: Thread = new Thread(new Runnable {
override def run(): Unit = {
var dcCount: Int = 0
var udpDiscovery: BroadcastManager = null
try {
udpDiscovery = new BroadcastManager
}
catch {
case e: SocketException =>
throwError("Another instance of Slide is already running.")
System.exit(1)
}
while (backgroundScannerRunning) {
if (!SystemInfo.isNetworkIsAvailable) {
stopBackgroundScanner()
}
device = udpDiscovery.search
if (device != null) {
dcCount = 0
if (!ConnectionManager.hasConnection(ConnectionMode.WIFI)) {
onWifiConnectionAdded()
}
} else {
dcCount += 1
if (dcCount >= 4) {
if (ConnectionManager.hasConnection(ConnectionMode.WIFI)) {
onWifiConnectionRemoved()
}
}
}
}
}
})
t.start()
}
override def stopBackgroundScanner(): Unit =
backgroundScannerRunning = false
def ip: String = device.ip
}
| LorenK96/slide-desktop | src/main/scala/connections/network/NetworkDeviceManager.scala | Scala | gpl-2.0 | 2,282 |
package com.intel.analytics.bigdl.apps.model.inference.flink.ImageClassification
import com.intel.analytics.bigdl.orca.inference.InferenceModel
class MobileNetInferenceModel(var concurrentNum: Int = 1, modelPath: String, modelType: String, inputs: Array[String], outputs: Array[String], intraOpParallelismThreads: Int, interOpParallelismThreads: Int, usePerSessionThreads: Boolean) extends InferenceModel(concurrentNum) with Serializable {
doLoadTensorflow(modelPath, modelType, inputs, outputs, intraOpParallelismThreads, interOpParallelismThreads, usePerSessionThreads)
}
| intel-analytics/BigDL | apps/model-inference-examples/model-inference-flink/src/main/scala/com/intel/analytics/bigdl/apps/model/inference/flink/ImageClassification/MobileNetInferenceModel.scala | Scala | apache-2.0 | 578 |
/*
* Copyright (C) 2017 LREN CHUV for Human Brain Project
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ch.chuv.lren.woken.service
import akka.actor.ActorSystem
import cats.effect._
import ch.chuv.lren.woken.JsonUtils
import ch.chuv.lren.woken.backends.faas.AlgorithmExecutor
import ch.chuv.lren.woken.backends.worker.WokenWorker
import ch.chuv.lren.woken.config.WokenConfiguration
import ch.chuv.lren.woken.core.model.database.FeaturesTableDescription
import ch.chuv.lren.woken.dao._
import ch.chuv.lren.woken.errors.ErrorReporter
import ch.chuv.lren.woken.messages.datasets.TableId
import ch.chuv.lren.woken.config.ConfigurationInstances._
import ch.chuv.lren.woken.Predefined.FeaturesDatabase._
import ch.chuv.lren.woken.Predefined.VariablesMetas._
import org.scalamock.scalatest.MockFactory
import scala.concurrent.ExecutionContext
import scala.language.higherKinds
object TestServices extends JsonUtils with MockFactory {
lazy val wokenRepository: WokenRepository[IO] = new WokenInMemoryRepository[IO]()
lazy val emptyVariablesMetaService: VariablesMetaRepository[IO] =
new MetadataInMemoryRepository[IO]().variablesMeta
lazy val localVariablesMetaService: VariablesMetaRepository[IO] = {
val metaService = new MetadataInMemoryRepository[IO]().variablesMeta
metaService.put(churnVariablesMeta)
metaService.put(sampleVariablesMeta)
metaService.put(featuresAVariablesMeta)
metaService.put(featuresBVariablesMeta)
metaService.put(featuresCVariablesMeta)
metaService.put(featuresMixedVariablesMeta)
metaService
}
lazy val algorithmLibraryService: AlgorithmLibraryService = AlgorithmLibraryService()
lazy val featuresService: FeaturesService[IO] = FeaturesService(
new FeaturesInMemoryRepository[IO](featuresDbConfiguration, tablesContent)
)
val emptyFeaturesTableId = TableId(featuresDb.code, "empty_table")
val emptyFeatureTable =
FeaturesTableDescription(emptyFeaturesTableId, Nil, None, validateSchema = false, None, 0.67)
lazy val emptyFeaturesTableService: FeaturesTableService[IO] = new FeaturesTableServiceImpl(
new FeaturesTableInMemoryRepository[IO](emptyFeatureTable, Nil, None, Nil)
)
implicit val ec: ExecutionContext = ExecutionContext.global
implicit lazy val defaultContextShift: ContextShift[IO] = IO.contextShift(ec)
implicit lazy val defaultTimer: Timer[IO] = cats.effect.IO.timer(ec)
def databaseServices(config: WokenConfiguration): DatabaseServices[IO] = {
val datasetService: DatasetService = ConfBasedDatasetService(config.config, config.jobs)
val queryToJobService = QueryToJobService(featuresService,
localVariablesMetaService,
config.jobs,
config.algorithmLookup)
DatabaseServices(
config,
featuresService,
wokenRepository,
localVariablesMetaService,
queryToJobService,
datasetService,
algorithmLibraryService
)
}
lazy val dispatcherService: DispatcherService = mock[DispatcherService]
lazy val wokenWorker: WokenWorker[IO] = mock[WokenWorker[IO]]
lazy val algorithmExecutor: AlgorithmExecutor[IO] = mock[AlgorithmExecutor[IO]]
lazy val errorReporter: ErrorReporter = mock[ErrorReporter]
def backendServices(system: ActorSystem): BackendServices[IO] =
BackendServices(
dispatcherService = dispatcherService,
algorithmExecutor = algorithmExecutor,
wokenWorker = wokenWorker,
errorReporter = errorReporter
)
}
| LREN-CHUV/workflow | src/test/scala/ch/chuv/lren/woken/service/TestServices.scala | Scala | apache-2.0 | 4,274 |
import de.qualitune.util.StringUtils
import org.scalatest.FunSuite
/**
* @author Max Leuthaeuser
* @since 05.05.12
*/
class StringUtilTests extends FunSuite {
test("Testing replaceAllWithPrefix") {
// single occurrences
val t1 = "a.bla()"
val t2 = "!a.bla()"
val t3 = "xyz.bla()|a.bla()"
val t4 = "xyz.bla()||a.bla()"
val t5 = "xyz.bla()&a.bla()"
val t6 = "xyz.bla()&&a.bla()"
val e1 = "b.bla()"
val e2 = "!b.bla()"
val e3 = "xyz.bla()|b.bla()"
val e4 = "xyz.bla()||b.bla()"
val e5 = "xyz.bla()&b.bla()"
val e6 = "xyz.bla()&&b.bla()"
// multiple occurrences
val t7 = "a.bla()|a.pla()"
val t8 = "!a.bla()|!a.pla()"
val t9 = "a.bla()|xyz.bla()|a.bla()"
val t10 = "xyz.bla()||a.bla()||a.bla()"
val t11 = "xyz.bla()&a.bla()&a.bla()"
val t12 = "xyz.bla()&&a.bla()&&a.bla()"
val e7 = "b.bla()|b.pla()"
val e8 = "!b.bla()|!b.pla()"
val e9 = "b.bla()|xyz.bla()|b.bla()"
val e10 = "xyz.bla()||b.bla()||b.bla()"
val e11 = "xyz.bla()&b.bla()&b.bla()"
val e12 = "xyz.bla()&&b.bla()&&b.bla()"
// opening bracket
val t13 = "(a.bla()&bla())"
val e13 = "(b.bla()&bla())"
val t14 = "(a.bla()&bla()||a.pla())"
val e14 = "(b.bla()&bla()||b.pla())"
// parameters
val prefix = "(\\\\{|!|\\\\||&|\\\\()"
val pattern = "a."
val replacment = "b."
assert(e1 === StringUtils.replaceAllWithPrefix(t1, prefix, pattern, replacment))
assert(e2 === StringUtils.replaceAllWithPrefix(t2, prefix, pattern, replacment))
assert(e3 === StringUtils.replaceAllWithPrefix(t3, prefix, pattern, replacment))
assert(e4 === StringUtils.replaceAllWithPrefix(t4, prefix, pattern, replacment))
assert(e5 === StringUtils.replaceAllWithPrefix(t5, prefix, pattern, replacment))
assert(e6 === StringUtils.replaceAllWithPrefix(t6, prefix, pattern, replacment))
assert(e7 === StringUtils.replaceAllWithPrefix(t7, prefix, pattern, replacment))
assert(e8 === StringUtils.replaceAllWithPrefix(t8, prefix, pattern, replacment))
assert(e9 === StringUtils.replaceAllWithPrefix(t9, prefix, pattern, replacment))
assert(e10 === StringUtils.replaceAllWithPrefix(t10, prefix, pattern, replacment))
assert(e11 === StringUtils.replaceAllWithPrefix(t11, prefix, pattern, replacment))
assert(e12 === StringUtils.replaceAllWithPrefix(t12, prefix, pattern, replacment))
assert(e13 === StringUtils.replaceAllWithPrefix(t13, prefix, pattern, replacment))
assert(e14 === StringUtils.replaceAllWithPrefix(t14, prefix, pattern, replacment))
}
}
| max-leuthaeuser/CPSTextInterpreter | src/test/scala/StringUtilTests.scala | Scala | gpl-3.0 | 2,641 |
/*
* Copyright 2014 Richard Friend.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package holophonor.weave
class Nike {
def doIt() {
}
}
object Nike {
def apply() = new Nike()
}
| rikf/Holophonor | src/test/scala/holophonor/weave/Nike.scala | Scala | apache-2.0 | 707 |
package com.github.akiomik.dispatch.dropbox.core
import dispatch._
trait Method {
def complete: Req => Req
def apply(req: Req): Req = complete(req)
}
trait Path {
val path: Req => Req
def base: Req => Req
def root: Req => Req
}
trait DropboxPath extends Path {
def root: Req => Req = _ / "dropbox"
}
trait SandboxPath extends Path {
def root: Req => Req = _ / "sandbox"
}
object Account {
def info(): Info = Info()
case class Info(params: Map[String, String] = Map())
extends Method with Param[Info] with LocaleParam[Info] {
def complete = _ / "account" / "info" <<? params
def param[A: Show](key: String)(value: A): Info =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
}
}
case class File(path: Req => Req, params: Map[String, String] = Map())
extends Method with DropboxPath with Param[File] with RevParam[File] {
def base: Req => Req = _ / "files"
def withParams: Req => Req = _ <<? params
def complete = base andThen root andThen path andThen withParams
def param[A: Show](key: String)(value: A): File =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
}
case class FilePut(path: Req => Req, params: Map[String, String] = Map())
extends Method with DropboxPath with Param[FilePut] with LocaleParam[FilePut] {
def base: Req => Req = _ / "files_put"
def withParams: Req => Req = _ << params
def complete = base andThen root andThen path andThen withParams
def param[A: Show](key: String)(value: A): FilePut =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
val overwrite = 'overwrite[Boolean]
val parentRev = 'parent_rev[String]
}
case class Metadata(path: Req => Req, params: Map[String, String] = Map())
extends Method with DropboxPath with Param[Metadata]
with RevParam[Metadata] with LocaleParam[Metadata]
with FileLimitParam[Metadata] with IncludeDeletedParam[Metadata] {
def base: Req => Req = _ / "metadata"
def withParams: Req => Req = _ <<? params
def complete = base andThen root andThen path andThen withParams
def param[A: Show](key: String)(value: A): Metadata =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
val hash = 'hash[String]
val list = 'list[Boolean]
}
case class Delta(params: Map[String, String] = Map())
extends Method with Param[Delta] with LocaleParam[Delta] {
def complete = _ / "delta" << params
def param[A: Show](key: String)(value: A): Delta =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
val cursor = 'cursor[String]
val pathPrefix = 'path_prefix[String]
}
case class Revision(path: Req => Req, params: Map[String, String] = Map())
extends Method with DropboxPath with Param[Revision] with LocaleParam[Revision] {
def base: Req => Req = _ / "revisions"
def withParams: Req => Req = _ <<? params
def complete = base andThen root andThen path andThen withParams
def param[A: Show](key: String)(value: A): Revision =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
val revLimit = 'rev_limit[Int]
}
case class Restore(path: Req => Req, params: Map[String, String] = Map())
extends Method with DropboxPath with Param[Restore]
with RevParam[Restore] with LocaleParam[Restore] {
def base: Req => Req = _ / "restore"
def withParams: Req => Req = _ << params
def complete = base andThen root andThen path andThen withParams
def param[A: Show](key: String)(value: A): Restore =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
}
// TODO
// want to turn this into a factory
case class Search(path: Req => Req, q: String, params: Map[String, String] = Map())
extends Method with DropboxPath with Param[Search] with FileLimitParam[Search]
with IncludeDeletedParam[Search] with LocaleParam[Search] {
val queryParams = params + ("query" -> q)
def base: Req => Req = _ / "search"
def withParams: Req => Req = _ <<? queryParams
def complete = base andThen root andThen path andThen withParams
def param[A: Show](key: String)(value: A): Search =
copy(params = queryParams + (key -> implicitly[Show[A]].shows(value)))
}
// object Search {
// def apply(path: Req => Req, q: String): Search = Search(path, Map("query" -> q))
// }
case class Share(path: Req => Req, params: Map[String, String] = Map())
extends Method with DropboxPath with Param[Share] with LocaleParam[Share] {
def base: Req => Req = _ / "shares"
def withParams: Req => Req = _ << params
def complete = base andThen root andThen path andThen withParams
def param[A: Show](key: String)(value: A): Share =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
val shortUrl = 'short_url[Boolean]
}
case class Media(path: Req => Req, params: Map[String, String] = Map())
extends Method with DropboxPath with Param[Media] with LocaleParam[Media] {
def base: Req => Req = _ / "media"
def withParams: Req => Req = _ << params
def complete = base andThen root andThen path andThen withParams
def param[A: Show](key: String)(value: A): Media =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
}
case class CopyRef(path: Req => Req) extends Method with DropboxPath {
def base: Req => Req = _ / "copy_ref"
def complete = base andThen root andThen path
}
case class Thumbnail(path: Req => Req, params: Map[String, String] = Map())
extends Method with DropboxPath with Param[Thumbnail] {
def base: Req => Req = _ / "thumbnails"
def withParams: Req => Req = _ <<? params
def complete = base andThen root andThen path andThen withParams
def param[A: Show](key: String)(value: A): Thumbnail =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
val format = 'format[String]
val size = 'size[String]
}
// TODO
// PUT&POST
case class ChunkedUpload(path: Req => Req, params: Map[String, String] = Map())
extends Method with DropboxPath {
def base: Req => Req = _ / "chunked_upload"
def withParams: Req => Req = _ << params
def complete = base andThen root andThen path andThen withParams
}
object FileOps {
def copy(to: String): Copy =
Copy(Map("root" -> "dropbox", "to_path" -> to))
case class Copy(params: Map[String, String] = Map())
extends Method with Param[Copy]
with TwoFileOpsParam[Copy] with LocaleParam[Copy] {
def complete = _ / "fileops" / "copy" << params
def param[A: Show](key: String)(value: A): Copy =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
val fromCopyRef = 'from_copy_ref[String]
}
def createFolder(path: String): CreateFolder =
CreateFolder(Map("root" -> "dropbox", "path" -> path))
case class CreateFolder(params: Map[String, String] = Map())
extends Method with Param[CreateFolder]
with OneFileOpsParam[CreateFolder] with LocaleParam[CreateFolder] {
def complete = _ / "fileops" / "create_folder" << params
def param[A: Show](key: String)(value: A): CreateFolder =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
}
def delete(path: String): Delete =
Delete(Map("root" -> "dropbox", "path" -> path))
case class Delete(params: Map[String, String] = Map())
extends Method with Param[Delete]
with OneFileOpsParam[Delete] with LocaleParam[Delete] {
def complete = _ / "fileops" / "delete" << params
def param[A: Show](key: String)(value: A): Delete =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
}
def move(to: String): Move =
Move(Map("root" -> "dropbox", "to_path" -> to))
case class Move(params: Map[String, String] = Map())
extends Method with Param[Move]
with TwoFileOpsParam[Move] with LocaleParam[Move] {
def complete = _ / "fileops" / "move" << params
def param[A: Show](key: String)(value: A): Move =
copy(params = params + (key -> implicitly[Show[A]].shows(value)))
}
}
| akiomik/dispatch-dropbox | src/main/scala/com/github/akiomik/dispatch/dropbox/core/requests.scala | Scala | mit | 7,931 |
//
// JsonGenerator.scala -- Scala object JsonGenerator
// Project OrcTests
//
// Created by jthywiss on Sep 5, 2017.
//
// Copyright (c) 2018 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.test.util
import scala.collection.JavaConverters.{ enumerationAsScalaIteratorConverter, iterableAsScalaIterableConverter, mapAsScalaMapConverter }
/** Generates JavaScript Object Notation (JSON) per RFC 7159 and ECMA-262.
*
* Strings and chars are written as strings, integer and floating point
* types are written as "numbers", booleans are written as booleans, and
* null is written as "null". Maps and Dictionaries are written as
* "objects", and other Traversables, Iterables, and Enumerations are
* written as "arrays". All other types cause an exception.
*
* @author jthywiss
*/
object JsonGenerator {
def apply(out: Appendable)(value: Any): Unit = {
serializeJsonProperty(out)(value, "", " ")
}
def serializeJsonProperty(out: Appendable)(value: Any, currIndent: String, addlIndent: String): Unit = {
value match {
//Maybe: case (object with toJson method) => object.toJson()
case null => out.append("null")
case true => out.append("true")
case false => out.append("false")
case s: String => writeQuotedJsonString(out)(s)
case c: Char => writeQuotedJsonString(out)(c.toString)
case _: Byte | _: Short | _: Int | _: Long | _: Float | _: Double => out.append(value.toString)
case n: Number => out.append(n.toString)
case o: scala.collection.Map[_,_] => serializeJsonObject(out)(o, currIndent, addlIndent)
case o: java.util.Map[_,_] => serializeJsonObject(out)(o.asScala, currIndent, addlIndent)
case a: TraversableOnce[_] => serializeJsonArray(out)(a, currIndent, addlIndent)
case a: java.lang.Iterable[_] => serializeJsonArray(out)(a.asScala, currIndent, addlIndent)
case a: java.util.Enumeration[_] => serializeJsonArray(out)(a.asScala, currIndent, addlIndent)
case _ => throw new IllegalArgumentException(s"Can't write a ${value.getClass.getName} as JSON: $value")
//Maybe: case _ => out.append(quoteJsonString(value.toString)) /* Fallback to toString */
}
}
def writeQuotedJsonString(out: Appendable)(str: String): Unit = {
out.append("\\"")
str.foreach(_ match {
case '"' => out.append("\\\\\\"")
case '\\\\' => out.append("\\\\\\\\")
case '\\b' => out.append("\\\\b")
case '\\f' => out.append("\\\\f")
case '\\n' => out.append("\\\\n")
case '\\r' => out.append("\\\\r")
case '\\t' => out.append("\\\\t")
case ch if ch < ' ' => {
out.append("\\\\u")
out.append(("0000" + ch.toHexString).takeRight(4))
}
case ch => out.append(ch)
})
out.append("\\"")
}
def serializeJsonObject(out: Appendable)(value: Iterable[(_,_)], currIndent: String, addlIndent: String): Unit = {
if (value.isEmpty) {
out.append("{}")
} else {
val newIndent = currIndent + addlIndent
var first = true
val sortedProperties = scala.collection.mutable.SortedMap[String,Any]()
value.foreach({ kv =>
sortedProperties.put(kv._1.toString, kv._2)
})
out.append("{\\n")
out.append(newIndent)
sortedProperties.foreach({ kv =>
if (first) {
first = false
} else {
out.append(",\\n" + newIndent)
}
writeQuotedJsonString(out)(kv._1)
out.append(": " )
serializeJsonProperty(out)(kv._2, newIndent, addlIndent)
})
out.append("\\n")
out.append(currIndent)
out.append("}")
}
}
def serializeJsonArray(out: Appendable)(value: TraversableOnce[Any], currIndent: String, addlIndent: String): Unit = {
if (value.isEmpty) {
out.append("[]")
} else {
val newIndent = currIndent + addlIndent
var first = true
out.append("[\\n" + newIndent)
value.foreach({ element =>
if (first) {
first = false
} else {
out.append(",\\n" + newIndent)
}
serializeJsonProperty(out)(element, newIndent, addlIndent)
})
out.append("\\n" + currIndent + "]")
}
}
}
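// Illustrative example (not part of the original source): a minimal sketch of driving
// JsonGenerator with a java.lang.StringBuilder (which implements Appendable). The sample
// map values are assumptions added purely for this example.
object JsonGeneratorExample {
  def main(args: Array[String]): Unit = {
    val out = new java.lang.StringBuilder()
    // Maps are written as JSON objects (keys sorted), Seqs as arrays, and
    // strings/numbers/booleans as their JSON counterparts.
    JsonGenerator(out)(Map("name" -> "orc", "stable" -> true, "versions" -> Seq(1, 2, 3)))
    println(out.toString)
  }
}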
| orc-lang/orc | OrcTests/src/orc/test/util/JsonGenerator.scala | Scala | bsd-3-clause | 4,401 |
package com.seanshubin.todo.sample
import java.io.{OutputStream, InputStream}
import scala.annotation.tailrec
object IoUtil {
def feedInputStreamToOutputStream(inputStream: InputStream, outputStream: OutputStream) {
@tailrec
def loop(byte: Int) {
if (byte != -1) {
outputStream.write(byte)
loop(inputStream.read())
}
}
loop(inputStream.read())
}
}
| SeanShubin/javascript-todo-samples | gui/src/main/scala/com/seanshubin/todo/sample/IoUtil.scala | Scala | unlicense | 398 |
import java.lang.reflect.Modifier
trait HaveFinalMethod {
final def finalMethod: String = "final"
}
class Child extends HaveFinalMethod
object Test {
def main(args: Array[String]): Unit = {
val meth = classOf[Child].getMethod("finalMethod")
assert(meth.isBridge)
val mods = meth.getModifiers
assert(!Modifier.isFinal(mods))
}
}
| som-snytt/dotty | tests/run/t11485.scala | Scala | apache-2.0 | 353 |
class A {
var x: Int = 10
def compare(c: Int = 5, a: A = this): Boolean = if (c == a.x) true else false
}
class B extends A {
def updateThenCompare(c: Int): Boolean = {
x = c
compare() // error
}
val result = updateThenCompare(5)
}
| dotty-staging/dotty | tests/init/neg/default-this.scala | Scala | apache-2.0 | 269 |
package intron
import org.apache.spark.SparkContext
import org.scalatest.{ BeforeAndAfterAll, FlatSpec }
class DataSpec extends FlatSpec with BeforeAndAfterAll {
var sc: SparkContext = _
val ExonsFilePath: String = getClass.getResource("/data/exons.sample").toString
val GenesFilePath: String = getClass.getResource("/data/genes.sample").toString
override def beforeAll() = {
sc = new SparkContext("local[4]", "intron-prediction-test")
}
override def afterAll() = {
if (sc != null) {
sc.stop()
}
}
"Data" should "get genes correctly" in {
assert(Data.getGenesWithoutExons(sc, GenesFilePath).count() == 5)
}
"Data" should "get exons correctly" in {
assert(Data.getExons(sc, ExonsFilePath).count() == 10)
}
"Data" should "be aligned between genes and exons" in {
val exons = Data.getExons(sc, ExonsFilePath)
assert(Data.getGenesWithoutExons(sc, GenesFilePath).takeSample(true, 10).forall { gene =>
val geneExons = exons.filter(_.geneId == gene.geneId).collect()
geneExons.forall(geneExon => gene.sequence.contains(geneExon.sequence))
})
}
}
| bbiletskyy/intron-prediction | src/test/scala/intron/DataSpec.scala | Scala | apache-2.0 | 1,123 |
/**
* Created by Administrator on 2015/1/4.
*/
println("哈哈哈,hello world scala...")
| fangguanya/study | Java/scala/src/greetcall.scala | Scala | mit | 91 |
package loader.core
/** A more complex implementation for core, where Element is extended through composition with
* an arbitrary Data object which can be built from the parent and Status object.
*/
trait ExtCore extends definition.Impl {
type Status = ExtCore.Status[Key]
type Dlg >: Null <: DlgBase
type Elt = EltBase
protected[this] val noStatus = new Status(noKey)
protected[this] type Data>:Null //some data used in association to the element
trait EltBase extends super.EltBase {
def data: Data
/** This trait is used to create copies of the current Elt for changing parser and userCtx.
* It must refer to the same fields as the copied item.
* It must be "transparent", i.e. must not make any new method call (otehrwise methods with side effects would pose problems.)
*/
protected[this] trait Copy {this:EltBase=>
override val data = EltBase.this.data //refer to the already computed data
override def copy[P<:BaseParser with Singleton](p:P#Parser,u:UCtx[P]) = EltBase.this.copy(p,u) //idempotent
}
}
trait DlgBase extends super.DlgBase {dlg:Dlg=>
def getData(e:Elt):Data //getting the data for the current element
def onName(e:Elt,key:Key) = new Status(key)
def apply[X<:BaseParser with Singleton](u:UCtx[X],cbks:Cbks*): X#Parser=>Element[X] = builder(_,u,null,noStatus,cbks:_*)
def apply[X<:BaseParser with Singleton](cbks:Cbks*): UCtx[X] => X#Parser=>Element[X] = apply(_,cbks:_*)
val builder = new EltBuilder {
def apply[X<:BaseParser with Singleton](parser:X#Parser, userCtx:UCtx[X], parent: Elt, s: Status):Element[X] = new Element(parser,userCtx,dlg,s.key,parent)
def apply[X<:BaseParser with Singleton](parser:X#Parser, userCtx:UCtx[X], parent: Elt, s: Status, cbks: Cbks*):Element[X] with WithCallbacks = new ElementCbks(parser,userCtx,dlg,s,parent, cbks:_*)
def apply[X<:BaseParser with Singleton](parser:X#Parser, userCtx:UCtx[X], parent: Elt, s: Status, cb:Cbk, cbks: Cbks*):Element[X] with WithCallback = new ElementCbk(parser,userCtx,dlg,s,parent, cb, cbks:_*)
}
}
def apply[X<:BaseParser with Singleton](u:UCtx[X],dlg:Dlg) :X#Parser=>Element[X] = dlg.builder(_,u,null,noStatus)
def apply[X<:BaseParser with Singleton](u:UCtx[X],dlg:Dlg,cbks:Cbks*):X#Parser=>Element[X] = dlg.builder(_,u,null,noStatus,cbks:_*)
protected class Element[X<:BaseParser with Singleton](parser:X#Parser, userCtx:UCtx[X], dlg:Dlg, key:Key, parent:Elt) extends ElementBase[X](parser,userCtx,dlg,key,parent) with EltBase {
val data:Data = dlg.getData(this)
def status = new Status(key)
protected class Copy[P<:BaseParser with Singleton](p:P#Parser,u:UCtx[P],val cb:Cbk,val cbks:Cbks*) extends Element[P](p,u,dlg,key,parent) with super.Copy
def copy[P<:BaseParser with Singleton](p:P#Parser,u:UCtx[P]):Elt { type Builder=P } = new Copy(p,u,null,null)
}
protected class ElementCbks[X<:BaseParser with Singleton](parser:X#Parser, userCtx:UCtx[X], dlg:Dlg, s:Status, parent:Elt, val cbks:Cbks*) extends Element(parser,userCtx,dlg,s.key,parent) with WithCallbacks {
override def copy[P<:BaseParser with Singleton](p:P#Parser,u:UCtx[P]):Elt { type Builder=P } = new Copy(p,u,null,cbks:_*) with WithCallbacks
}
protected class ElementCbk [X<:BaseParser with Singleton](parser:X#Parser, userCtx:UCtx[X], dlg:Dlg, s:Status, parent:Elt, val cb:Cbk, cbks:Cbks*) extends ElementCbks(parser,userCtx,dlg,s,parent,cbks:_*) with WithCallback {
override def copy[P<:BaseParser with Singleton](p:P#Parser,u:UCtx[P]):Elt { type Builder=P } = new Copy(p,u,cb,cbks:_*) with WithCallbacks
}
}
object ExtCore {
class Status[K>:Null](key:K) extends Core.Status(key)
//using Abstract prevents code bloating due to trait expansion
abstract class Abstract[+D>:Null] extends ExtCore { protected[this] type Data=D }
} | Y-P-/data-processing-binding | Core/src/loader/core/ExtCore.scala | Scala | gpl-3.0 | 3,986 |
package models.request
import java.util.Date
import play.api.data.validation.{Constraint, Invalid, Valid, ValidationError}
case class EventCreateRequest(
date: Date
, title: String
, country: String
, city: String
, url: String
) {
/**
   * Build a key string from the city and country, keeping only ASCII letters and digits and lowercasing them.
   *
   * @return Key string (e.g. "New York" + "US" yields "newyorkus")
   */
  def key =
    for (c <- city + country if (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9'))
yield c.toLower
}
object EventCreateRequest {
/**
   * Validate that the request yields a non-empty key.
   *
   * @return Constraint that rejects requests whose key is empty
*/
def keyCheckConstraint: Constraint[EventCreateRequest] = Constraint("constraints.keycheck")({
request =>
if (request.key.length == 0) {
Invalid(Seq(ValidationError("Invalid key")))
} else {
Valid
}
})
}
| yh1224/ingressevents | app/models/request/EventCreateRequest.scala | Scala | gpl-2.0 | 819 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sumologic.elasticsearch.util
import akka.http.scaladsl.model.headers.{Host, RawHeader}
import akka.http.scaladsl.model.{HttpEntity, HttpMethods, HttpRequest, Uri}
import com.amazonaws.auth.{AWSCredentials, AWSSessionCredentials}
import com.sumologic.elasticsearch.restlastic.dsl.Dsl._
import com.sumologic.elasticsearch.restlastic.dsl.V6
import org.junit.runner.RunWith
import org.scalatest.{Matchers, WordSpec}
import org.scalatestplus.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class AwsRequestSignerTest extends WordSpec with Matchers {
val dummyCredentials = new AWSCredentials {
override def getAWSAccessKeyId: String = "AKIDEXAMPLE"
override def getAWSSecretKey: String = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"
}
val region = "us-east-1"
val service = "host"
val dateStr = "20110909"
val dateTimeStr = "20110909T233600Z"
val signer = new TestSigner(dateStr, dateTimeStr, dummyCredentials, region, service)
"Handle a vanilla example" in {
// Example from http://docs.aws.amazon.com/general/latest/gr/signature-v4-test-suite.html
val req = HttpRequest(
method = HttpMethods.GET,
uri = Uri.from(
host = "host.foo.com",
path = "/"
).withQuery(Uri.Query("foo" -> "Zoo", "foo" -> "aha")),
headers = List(
RawHeader("Date", "Mon, 09 Sep 2011 23:36:00 GMT"),
RawHeader("Host", "host.foo.com")
)
)
val expectedCanonical =
"""|GET
|/
|foo=Zoo&foo=aha
|date:Mon, 09 Sep 2011 23:36:00 GMT
|host:host.foo.com
|
|date;host
|e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855""".stripMargin
signer.createCanonicalRequest(req) should be(expectedCanonical)
val expectedStringTosign = """AWS4-HMAC-SHA256
|20110909T233600Z
|20110909/us-east-1/host/aws4_request
|e25f777ba161a0f1baf778a87faf057187cf5987f17953320e3ca399feb5f00d""".stripMargin
signer.stringToSign(req, dateTimeStr, dateStr) should be (expectedStringTosign)
val withAuthHeader = signer.withAuthHeader(req)
val expected = "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20110909/us-east-1/host/aws4_request, SignedHeaders=date;host, Signature=be7148d34ebccdc6423b19085378aa0bee970bdc61d144bd1a8c48c33079ab09"
val actual = withAuthHeader.headers.filter(_.name == "Authorization").head.value
actual should be(expected)
}
"sign this create index request" in {
val realDateStr = "20151016"
val realDateTimeStr = "20151016T025745Z"
val signer = new TestSigner(realDateStr, realDateTimeStr, dummyCredentials, region, "es")
val req = HttpRequest(
method = HttpMethods.POST,
uri = Uri.from(
host = "search-kwan-metrics-es-l2fecxdxfit54aod5dgpqchndq.us-east-1.es.amazonaws.com",
path = "/metrics-catalog-index"
),
entity = HttpEntity(CreateIndex().toJsonStr(V6))
)
val expectedCanonical = "POST" +
"\\n/metrics-catalog-index" +
"\\n\\nhost:search-kwan-metrics-es-l2fecxdxfit54aod5dgpqchndq.us-east-1.es.amazonaws.com" +
"\\nx-amz-date:20151016T025745Z" +
"\\n\\nhost;x-amz-date" +
"\\n44136fa355b3678a1146ad16f7e8649e94fb4fc21fe77e8310c060f61caaff8a"
signer.createCanonicalRequest(signer.completedRequest(req, realDateTimeStr)) should be(expectedCanonical)
}
"sign with missing host header and specified host:port url creates valid host header" in {
val signer = new TestSigner("", "", dummyCredentials, region, "es")
val req = signer.withAuthHeader(HttpRequest(uri = Uri("http://0.0.0.0:9200/some/path"),headers = Nil))
req.headers.find(_.is("host")).map(_.value).getOrElse("") should be("0.0.0.0:9200")
}
"sign with missing host header and specified host, but no port, creates valid host header" in {
val signer = new TestSigner("", "", dummyCredentials, region, "es")
val req = signer.withAuthHeader(HttpRequest(uri = Uri("http://0.0.0.0/some/path"),headers = Nil))
req.headers.find(_.is("host")).map(_.value).getOrElse("") should be("0.0.0.0")
}
"sign with host header AND specified host:port url, host header wins" in {
val signer = new TestSigner("", "", dummyCredentials, region, "es")
val req = signer.withAuthHeader(HttpRequest(uri = Uri("http://somehost:9200/some/path"),headers = List(Host("0.0.0.0"))))
req.headers.find(_.is("host")).map(_.value).getOrElse("") should be("0.0.0.0")
}
"sign this put request" in {
val jsonBody = """{
| "key":"_3",
| "value":"if_octets",
| "value_auto":{
| "input":"if_octets"
| },
| "cust_id_str":"0000000000000138"
|}""".stripMargin
val expectedCanonical =
"POST\\n/metrics-catalog-index/kv/_3%3Aif_octets%3A0000000000000138\\n\\nhost:" +
"search-kwan-metrics-es-l2fecxdxfit54aod5dgpqchndq.us-east-1.es.amazonaws.com\\nx-amz-date:" +
"20151016T182449Z\\n\\nhost;x-amz-date" +
"\\n7acc8e40a1838b09f1db33569f74e1fda7fb919bdc65cb3eee1b4ee166088d19"
val expectedToSing = "AWS4-HMAC-SHA256\\n" +
"20151016T182449Z\\n" +
"20151016/us-east-1/es/aws4_request\\n" +
"326f6093970bae063fa26fb389fe85d806910df9b2edb1a99ae0cbf23889b8b2"
val realDateStr = "20151016"
val realDateTimeStr = "20151016T182449Z"
val signer = new TestSigner(realDateStr, realDateTimeStr, dummyCredentials, region, "es")
val req = {
val preReq = HttpRequest(
method = HttpMethods.POST,
uri = Uri.from(
host = "search-kwan-metrics-es-l2fecxdxfit54aod5dgpqchndq.us-east-1.es.amazonaws.com",
path = "/metrics-catalog-index/kv/_3:if_octets:0000000000000138"
),
entity = HttpEntity(jsonBody)
)
signer.completedRequest(preReq, realDateTimeStr)
}
signer.createCanonicalRequest(req) should be(expectedCanonical)
signer.stringToSign(req, realDateTimeStr, realDateStr) should be(expectedToSing)
}
"add a session key if AWSSessionCredentials are provided" in {
val dummySessionCredentials = new AWSSessionCredentials {
override def getAWSAccessKeyId: String = "AKIDEXAMPLE"
override def getAWSSecretKey: String = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY"
override def getSessionToken: String = "SESSIONTOKEN"
}
val sessionSigner = new TestSigner(dateStr, dateTimeStr, dummySessionCredentials, region, "es")
val req = sessionSigner.withAuthHeader(HttpRequest())
val containsHeader = req.headers.exists(header => {
header.name == "X-Amz-Security-Token" &&
header.value == "SESSIONTOKEN"
})
containsHeader should be(true)
}
}
class TestSigner(dateStr: String, datetimeStr: String, creds: AWSCredentials, region: String, service: String)
extends AwsRequestSigner(creds, region, service) {
override def currentDateStrings = (datetimeStr, dateStr)
}
| SumoLogic/elasticsearch-client | elasticsearch-aws/src/test/scala/com/sumologic/elasticsearch/util/AwsRequestSignerTest.scala | Scala | apache-2.0 | 7,862 |
package mesosphere.marathon.core.plugin
import scala.reflect.ClassTag
trait PluginManager {
def plugins[T](implicit ct: ClassTag[T]): Seq[T]
def definitions: PluginDefinitions
}
| ss75710541/marathon | src/main/scala/mesosphere/marathon/core/plugin/PluginManager.scala | Scala | apache-2.0 | 187 |
/*
* Copyright 2020 ABSA Group Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package za.co.absa.spline.producer.modelmapper
import za.co.absa.spline.producer.model.v1_2.{ExecutionEvent, ExecutionPlan}
trait ModelMapper[P, E] {
def fromDTO(plan: P): ExecutionPlan
def fromDTO(event: E): ExecutionEvent
}
| AbsaOSS/spline | producer-model-mapper/src/main/scala/za/co/absa/spline/producer/modelmapper/ModelMapper.scala | Scala | apache-2.0 | 835 |
package com.andbutso.poker
import com.andbutso.poker.fixtures.AllFixtures
import com.andbutso.poker.handhistory.ChipAmount
import org.specs2.mutable.SpecificationWithJUnit
import org.specs2.mock.Mockito
trait ParentSpec extends SpecificationWithJUnit with AllFixtures with Mockito {
args.execute(isolated = true, sequential = true)
def $(string: String) = ChipAmount.fromString(string)
def identityPF[A]: PartialFunction[A, A] = { case x => x }
} | marcel/texas | src/test/scala/com/andbutso/poker/ParentSpec.scala | Scala | mit | 456 |
package nat.traversal
import com.typesafe.scalalogging.StrictLogging
import nat.traversal.upnp.UPnPManager
import nat.traversal.upnp.ssdp.SSDPClientService
object Boot extends StrictLogging {
  /* It's useful to get a logger here, so that the logging system gets initialized.
   * Otherwise we are likely to get messages about loggers not working because they
   * are instantiated during the initialization phase.
* See: http://www.slf4j.org/codes.html#substituteLogger
*/
import UPnPManager.system
import UPnPManager.materializer
def main(args: Array[String]): Unit = {
UPnPManager.startServer()
SSDPClientService.discover
//Thread.sleep(10000)
//UPnPManager.stopServer()
}
}
| suiryc/nat-traversal | src/main/scala/nat/traversal/Boot.scala | Scala | gpl-3.0 | 700 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.batch.table
import java.sql.Timestamp
import org.apache.flink.api.java.typeutils.GenericTypeInfo
import org.apache.flink.api.scala._
import org.apache.flink.table.api.Types
import org.apache.flink.table.api.scala._
import org.apache.flink.table.expressions.Null
import org.apache.flink.table.runtime.utils.CommonTestData.NonPojo
import org.apache.flink.table.utils.TableTestBase
import org.apache.flink.table.utils.TableTestUtil._
import org.junit.Test
class SetOperatorsTest extends TableTestBase {
@Test
def testInWithFilter(): Unit = {
val util = batchTestUtil()
val t = util.addTable[((Int, Int), String, (Int, Int))]("A", 'a, 'b, 'c)
val elements = t.where('b === "two").select('a).as("a1")
val in = t.select("*").where('c.in(elements))
val expected = unaryNode(
"DataSetCalc",
binaryNode(
"DataSetJoin",
batchTableNode(0),
unaryNode(
"DataSetDistinct",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "a AS a1"),
term("where", "=(b, 'two')")
),
term("distinct", "a1")
),
term("where", "=(c, a1)"),
term("join", "a", "b", "c", "a1"),
term("joinType", "InnerJoin")
),
term("select", "a", "b", "c")
)
util.verifyTable(in, expected)
}
@Test
def testInWithProject(): Unit = {
val util = batchTestUtil()
val t = util.addTable[(Int, Timestamp, String)]("A", 'a, 'b, 'c)
val in = t.select('b.in(Timestamp.valueOf("1972-02-22 07:12:00.333"))).as("b2")
val expected = unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "IN(b, 1972-02-22 07:12:00.333) AS b2")
)
util.verifyTable(in, expected)
}
@Test
def testUnionNullableTypes(): Unit = {
val util = batchTestUtil()
val t = util.addTable[((Int, String), (Int, String), Int)]("A", 'a, 'b, 'c)
val in = t.select('a)
.unionAll(
t.select(('c > 0) ? ('b, Null(createTypeInformation[(Int, String)]))))
val expected = binaryNode(
"DataSetUnion",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "a")
),
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "CASE(>(c, 0), b, null) AS _c0")
),
term("union", "a")
)
util.verifyTable(in, expected)
}
@Test
def testUnionAnyType(): Unit = {
val util = batchTestUtil()
val typeInfo = Types.ROW(
new GenericTypeInfo(classOf[NonPojo]),
new GenericTypeInfo(classOf[NonPojo]))
val t = util.addJavaTable(typeInfo, "A", "a, b")
val in = t.select('a).unionAll(t.select('b))
val expected = binaryNode(
"DataSetUnion",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "a")
),
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "b")
),
term("union", "a")
)
util.verifyJavaTable(in, expected)
}
@Test
def testFilterUnionTranspose(): Unit = {
val util = batchTestUtil()
val left = util.addTable[(Int, Long, String)]("left", 'a, 'b, 'c)
val right = util.addTable[(Int, Long, String)]("right", 'a, 'b, 'c)
val result = left.unionAll(right)
.where('a > 0)
.groupBy('b)
.select('a.sum as 'a, 'b as 'b, 'c.count as 'c)
val expected = unaryNode(
"DataSetCalc",
unaryNode(
"DataSetAggregate",
binaryNode(
"DataSetUnion",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "a", "b", "c"),
term("where", ">(a, 0)")
),
unaryNode(
"DataSetCalc",
batchTableNode(1),
term("select", "a", "b", "c"),
term("where", ">(a, 0)")
),
term("union", "a", "b", "c")
),
term("groupBy", "b"),
term("select", "b", "SUM(a) AS TMP_0", "COUNT(c) AS TMP_1")
),
term("select", "TMP_0 AS a", "b", "TMP_1 AS c")
)
util.verifyTable(result, expected)
}
@Test
def testFilterMinusTranspose(): Unit = {
val util = batchTestUtil()
val left = util.addTable[(Int, Long, String)]("left", 'a, 'b, 'c)
val right = util.addTable[(Int, Long, String)]("right", 'a, 'b, 'c)
val result = left.minusAll(right)
.where('a > 0)
.groupBy('b)
.select('a.sum as 'a, 'b as 'b, 'c.count as 'c)
val expected = unaryNode(
"DataSetCalc",
unaryNode(
"DataSetAggregate",
binaryNode(
"DataSetMinus",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "a", "b", "c"),
term("where", ">(a, 0)")
),
unaryNode(
"DataSetCalc",
batchTableNode(1),
term("select", "a", "b", "c"),
term("where", ">(a, 0)")
),
term("minus", "a", "b", "c")
),
term("groupBy", "b"),
term("select", "b", "SUM(a) AS TMP_0", "COUNT(c) AS TMP_1")
),
term("select", "TMP_0 AS a", "b", "TMP_1 AS c")
)
util.verifyTable(result, expected)
}
@Test
def testProjectUnionTranspose(): Unit = {
val util = batchTestUtil()
val left = util.addTable[(Int, Long, String)]("left", 'a, 'b, 'c)
val right = util.addTable[(Int, Long, String)]("right", 'a, 'b, 'c)
val result = left.select('a, 'b, 'c)
.unionAll(right.select('a, 'b, 'c))
.select('b, 'c)
val expected = binaryNode(
"DataSetUnion",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "b", "c")
),
unaryNode(
"DataSetCalc",
batchTableNode(1),
term("select", "b", "c")
),
term("union", "b", "c")
)
util.verifyTable(result, expected)
}
@Test
def testProjectMinusTranspose(): Unit = {
val util = batchTestUtil()
val left = util.addTable[(Int, Long, String)]("left", 'a, 'b, 'c)
val right = util.addTable[(Int, Long, String)]("right", 'a, 'b, 'c)
val result = left.select('a, 'b, 'c)
.minusAll(right.select('a, 'b, 'c))
.select('b, 'c)
val expected = binaryNode(
"DataSetMinus",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "b", "c")
),
unaryNode(
"DataSetCalc",
batchTableNode(1),
term("select", "b", "c")
),
term("minus", "b", "c")
)
util.verifyTable(result, expected)
}
}
| zhangminglei/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/batch/table/SetOperatorsTest.scala | Scala | apache-2.0 | 7,522 |
package picasso.model.dbp
import picasso.utils.{LogCritical, LogError, LogWarning, LogNotice, LogInfo, LogDebug, Logger, Misc, MultiSet}
import picasso.math._
import picasso.math.WellPartialOrdering._
import picasso.graph._
import scala.collection.{GenSeq, GenIterable, GenMap}
class DepthBoundedProcess[P <: DBCT](trs: GenSeq[DepthBoundedTransition[P]])(implicit wpoConf: WellPartialOrdering[DepthBoundedConf[P]], wpoState: WellPartialOrdering[P#State]) extends WSTS with WADL {
type S = DepthBoundedConf[P]
implicit val ordering : WellPartialOrdering[S] = wpoConf
val stateOrdering = wpoState
/** copy constructor */
def this(dbp: DepthBoundedProcess[P]) = this(dbp.transitions)(dbp.ordering, dbp.stateOrdering)
def toGraphviz(name: String): scala.text.Document = {
import scala.text.Document._
var x = 0
val docOfTrs = trs map ( t => {
x = x + 1
t.toGraphviz("transition_"+x, "subgraph")
})
val oneDoc = docOfTrs.reduceRight(_ :/: _)
"digraph" :: " " :: name :: " {" :: nest(4, empty :/: oneDoc) :/: text("}")
}
type T = DepthBoundedTransition[P]
val trs2 = trs.par
def transitions = trs2
override def toString = {
transitions.mkString("\\n")
}
  /** acceleration/widening with witness:
* returns:
* - the result
* - the set of replicated nodes
* - the result before folding
* - the mapping used for the folding
*/
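  // Roughly: for each embedding of `smaller` into `bigger`, the nodes of `bigger` that are not in
  // the image of the embedding form a seed that gets replicated (widened); the largest widening
  // (w.r.t. the ordering) is kept and then folded, and the witness records the used seed, the
  // unfolded result and the folding morphism.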
def tryAcceleratePairWithWitness(smaller: S, bigger: S): Option[(S, WideningWitness[P])] = {
val ms = (smaller morphisms bigger).toSeq
val seeds = ms.map(m => bigger.vertices -- m.values)
val (widenedUnfolded, usedSeed) = ((bigger, Map[P#V,P#V]()) /: seeds)( (acc, seed) => {
val (w,m) = bigger widenWithWitness seed
//println("YY seed: " + seed)
//println("YY m: " + m)
//print("acc: " + acc._1)
//print("w: " + w)
ordering.tryCompare(acc._1, w) match {
case Some(c) =>
if (c >= 0) {
//println("XX keeping acc")
acc
} else {
//println("XX selecting w")
(w, m)
}
case None =>
Logger(
"DepthBoundedProcess",
LogWarning,"widening returned incomparable graphs. TODO rewrite DepthBoundedProcess.tryAcceleratePairWithWitness"// +
// "\\n smaller = " + smaller +
// "\\n bigger = " + bigger +
// "\\n acc = " + acc._1 +
// "\\n widened = " + w
)
if (scala.util.Random.nextBoolean()) (w, m)
else acc
}
})
val (widened, folding) = widenedUnfolded.foldWithWitness
//println("Acceleration:")
//print(smaller.toGraphviz("smaller"))
//print(bigger.toGraphviz("bigger"))
//print(widened.toGraphviz("widenend"))
//println("usedSeed:" + usedSeed)
if (seeds.isEmpty) None
else {
val witness = new WideningWitness
witness.smaller = smaller
witness.bigger = bigger
witness.result = widened
witness.replicated = usedSeed
witness.unfoldedResult = widenedUnfolded
witness.folding = folding
Some((widened, witness))
}
}
def tryAcceleratePair(smaller: S, bigger: S): Option[S] = {
tryAcceleratePairWithWitness(smaller, bigger).map(_._1)
}
def wideningWithWitness(smaller: S, bigger: S): (S, WideningWitness[P]) = {
val opt = tryAcceleratePairWithWitness(smaller, bigger)
if (opt.isDefined) opt.get
else Logger.logAndThrow("DepthBoundedProcess", LogError, "widening not defined for " + smaller + " and " + bigger)
}
lazy val affinityMap: GenMap[(T,T), Int] = {
val pairs = for (t1 <- transitions; t2 <- transitions) yield {
//as produced: look at the nodes in t1.rhs that are not in t1.lhs (as a multiset)
val same1 = t1.hr.filter{ case (a,b) => a.state == b.state }
val produced = (t1.rhs -- same1.values -- t1.hk.keys).vertices
val producedLabels = MultiSet[P#State](produced.toSeq.map(_.state): _*)
      //as consumed: look at the nodes in t2.lhs that are not in t2.rhs (as a multiset)
val same2 = t2.hr.filter{ case (a,b) => a.state == b.state }
val consummed = (t2.lhs -- t2.hk.values -- same2.keys).vertices
val consummedLabels = MultiSet[P#State](consummed.toSeq.map(_.state): _*)
//then return the cardinality of the intersection of the two multisets
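      // e.g. produced labels {A, A, B} and consumed labels {A, B, C} give an affinity of 2,
      // assuming the usual multiset-intersection semantics (minimum multiplicity per label)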
val aff = (producedLabels intersect consummedLabels).size
//Console.println("affinity of " + t1 + " => " + t2 + " is " + aff)
//Console.println("producedLabels = " + producedLabels)
//Console.println("consummedLabels = " + consummedLabels)
((t1, t2), aff)
}
////////////////////
    //TODO it might be a good idea to partition here
//val edges: Iterable[(T,T)] = pairs.filter(_._2 > 0).map(_._1).seq
//val trsGraph = DiGraph[GT.ULGT{type V = T}](edges)
//Console.println("|edges| = " + edges.size)
//Console.println("|graph| = " + trsGraph.vertices.size)
//val intGraph = trsGraph.morphFull[GT.ULGT{type V = Int}]((t => t.hashCode()), _ => (), _ => ())
//Console.println("graph = " + intGraph.toGraphviz("IG"))
//val sccs = intGraph.SCC
//Console.println("|scc| = " + sccs.size)
//Console.println("scc = \\n" + sccs.mkString("","\\n",""))
////////////////////
pairs.toMap
}
def transitionsAffinity(t1: T, t2: T): Int = affinityMap(t1 -> t2)
}
| dzufferey/picasso | core/src/main/scala/picasso/model/dbp/DepthBoundedProcess.scala | Scala | bsd-2-clause | 5,439 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.{File, IOException}
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong}
import com.yammer.metrics.core.Gauge
import kafka.api._
import kafka.cluster.{BrokerEndPoint, Partition, Replica}
import kafka.common._
import kafka.controller.KafkaController
import kafka.log.{LogAppendInfo, LogManager}
import kafka.message.{ByteBufferMessageSet, MessageSet}
import kafka.metrics.KafkaMetricsGroup
import kafka.utils._
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.utils.{Time => JTime}
import scala.collection._
/*
* Result metadata of a log append operation on the log
*/
case class LogAppendResult(info: LogAppendInfo, error: Option[Throwable] = None) {
def errorCode = error match {
case None => ErrorMapping.NoError
case Some(e) => ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
}
}
/*
* Result metadata of a log read operation on the log
* @param info @FetchDataInfo returned by the @Log read
* @param hw high watermark of the local replica
* @param readSize amount of data that was read from the log i.e. size of the fetch
* @param isReadFromLogEnd true if the request read up to the log end offset snapshot
* when the read was initiated, false otherwise
* @param error Exception if error encountered while reading from the log
*/
case class LogReadResult(info: FetchDataInfo,
hw: Long,
readSize: Int,
isReadFromLogEnd : Boolean,
error: Option[Throwable] = None) {
def errorCode = error match {
case None => ErrorMapping.NoError
case Some(e) => ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
}
override def toString = {
"Fetch Data: [%s], HW: [%d], readSize: [%d], isReadFromLogEnd: [%b], error: [%s]"
.format(info, hw, readSize, isReadFromLogEnd, error)
}
}
object LogReadResult {
val UnknownLogReadResult = LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata,
MessageSet.Empty),
-1L,
-1,
false)
}
case class BecomeLeaderOrFollowerResult(responseMap: collection.Map[(String, Int), Short], errorCode: Short) {
override def toString = {
"update results: [%s], global error: [%d]".format(responseMap, errorCode)
}
}
object ReplicaManager {
val HighWatermarkFilename = "replication-offset-checkpoint"
val IsrChangePropagationBlackOut = 5000L
val IsrChangePropagationInterval = 60000L
}
class ReplicaManager(val config: KafkaConfig,
metrics: Metrics,
time: Time,
jTime: JTime,
val zkUtils: ZkUtils,
scheduler: Scheduler,
val logManager: LogManager,
val isShuttingDown: AtomicBoolean,
threadNamePrefix: Option[String] = None) extends Logging with KafkaMetricsGroup {
/* epoch of the controller that last changed the leader */
@volatile var controllerEpoch: Int = KafkaController.InitialControllerEpoch - 1
private val localBrokerId = config.brokerId
private val allPartitions = new Pool[(String, Int), Partition]
private val replicaStateChangeLock = new Object
val replicaFetcherManager = new ReplicaFetcherManager(config, this, metrics, jTime, threadNamePrefix)
private val highWatermarkCheckPointThreadStarted = new AtomicBoolean(false)
val highWatermarkCheckpoints = config.logDirs.map(dir => (new File(dir).getAbsolutePath, new OffsetCheckpoint(new File(dir, ReplicaManager.HighWatermarkFilename)))).toMap
private var hwThreadInitialized = false
this.logIdent = "[Replica Manager on Broker " + localBrokerId + "]: "
val stateChangeLogger = KafkaController.stateChangeLogger
private val isrChangeSet: mutable.Set[TopicAndPartition] = new mutable.HashSet[TopicAndPartition]()
private val lastIsrChangeMs = new AtomicLong(System.currentTimeMillis())
private val lastIsrPropagationMs = new AtomicLong(System.currentTimeMillis())
val delayedProducePurgatory = new DelayedOperationPurgatory[DelayedProduce](
purgatoryName = "Produce", config.brokerId, config.producerPurgatoryPurgeIntervalRequests)
val delayedFetchPurgatory = new DelayedOperationPurgatory[DelayedFetch](
purgatoryName = "Fetch", config.brokerId, config.fetchPurgatoryPurgeIntervalRequests)
val leaderCount = newGauge(
"LeaderCount",
new Gauge[Int] {
def value = {
getLeaderPartitions().size
}
}
)
val partitionCount = newGauge(
"PartitionCount",
new Gauge[Int] {
def value = allPartitions.size
}
)
val underReplicatedPartitions = newGauge(
"UnderReplicatedPartitions",
new Gauge[Int] {
def value = underReplicatedPartitionCount()
}
)
val isrExpandRate = newMeter("IsrExpandsPerSec", "expands", TimeUnit.SECONDS)
val isrShrinkRate = newMeter("IsrShrinksPerSec", "shrinks", TimeUnit.SECONDS)
def underReplicatedPartitionCount(): Int = {
getLeaderPartitions().count(_.isUnderReplicated)
}
def startHighWaterMarksCheckPointThread() = {
if(highWatermarkCheckPointThreadStarted.compareAndSet(false, true))
scheduler.schedule("highwatermark-checkpoint", checkpointHighWatermarks, period = config.replicaHighWatermarkCheckpointIntervalMs, unit = TimeUnit.MILLISECONDS)
}
def recordIsrChange(topicAndPartition: TopicAndPartition) {
isrChangeSet synchronized {
isrChangeSet += topicAndPartition
lastIsrChangeMs.set(System.currentTimeMillis())
}
}
/**
   * This function is run periodically to check whether the ISR needs to be propagated. It propagates ISR changes when:
   * 1. There is an ISR change that has not been propagated yet.
   * 2. There has been no ISR change in the last five seconds, or it has been more than 60 seconds since the last ISR propagation.
   * This allows an occasional ISR change to be propagated within a few seconds, and avoids overwhelming the controller and
   * other brokers when a large number of ISR changes occur.
*/
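  // For example, with the constants above (5s black-out, 60s interval) and the 2.5s schedule in
  // startup(): a single ISR change at t=0 is propagated by the first check after t=5s, while a
  // continuous stream of changes is still flushed at least once every 60 seconds.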
def maybePropagateIsrChanges() {
val now = System.currentTimeMillis()
isrChangeSet synchronized {
if (isrChangeSet.nonEmpty &&
(lastIsrChangeMs.get() + ReplicaManager.IsrChangePropagationBlackOut < now ||
lastIsrPropagationMs.get() + ReplicaManager.IsrChangePropagationInterval < now)) {
ReplicationUtils.propagateIsrChanges(zkUtils, isrChangeSet)
isrChangeSet.clear()
lastIsrPropagationMs.set(now)
}
}
}
/**
* Try to complete some delayed produce requests with the request key;
* this can be triggered when:
*
* 1. The partition HW has changed (for acks = -1)
   * 2. A follower replica's fetch operation is received (for acks = -1)
*/
def tryCompleteDelayedProduce(key: DelayedOperationKey) {
val completed = delayedProducePurgatory.checkAndComplete(key)
debug("Request key %s unblocked %d producer requests.".format(key.keyLabel, completed))
}
/**
* Try to complete some delayed fetch requests with the request key;
* this can be triggered when:
*
* 1. The partition HW has changed (for regular fetch)
* 2. A new message set is appended to the local log (for follower fetch)
*/
def tryCompleteDelayedFetch(key: DelayedOperationKey) {
val completed = delayedFetchPurgatory.checkAndComplete(key)
debug("Request key %s unblocked %d fetch requests.".format(key.keyLabel, completed))
}
def startup() {
// start ISR expiration thread
scheduler.schedule("isr-expiration", maybeShrinkIsr, period = config.replicaLagTimeMaxMs, unit = TimeUnit.MILLISECONDS)
scheduler.schedule("isr-change-propagation", maybePropagateIsrChanges, period = 2500L, unit = TimeUnit.MILLISECONDS)
}
def stopReplica(topic: String, partitionId: Int, deletePartition: Boolean): Short = {
stateChangeLogger.trace("Broker %d handling stop replica (delete=%s) for partition [%s,%d]".format(localBrokerId,
deletePartition.toString, topic, partitionId))
val errorCode = ErrorMapping.NoError
getPartition(topic, partitionId) match {
case Some(partition) =>
if(deletePartition) {
val removedPartition = allPartitions.remove((topic, partitionId))
if (removedPartition != null)
removedPartition.delete() // this will delete the local log
}
case None =>
// Delete log and corresponding folders in case replica manager doesn't hold them anymore.
// This could happen when topic is being deleted while broker is down and recovers.
if(deletePartition) {
val topicAndPartition = TopicAndPartition(topic, partitionId)
if(logManager.getLog(topicAndPartition).isDefined) {
logManager.deleteLog(topicAndPartition)
}
}
stateChangeLogger.trace("Broker %d ignoring stop replica (delete=%s) for partition [%s,%d] as replica doesn't exist on broker"
.format(localBrokerId, deletePartition, topic, partitionId))
}
stateChangeLogger.trace("Broker %d finished handling stop replica (delete=%s) for partition [%s,%d]"
.format(localBrokerId, deletePartition, topic, partitionId))
errorCode
}
def stopReplicas(stopReplicaRequest: StopReplicaRequest): (mutable.Map[TopicAndPartition, Short], Short) = {
replicaStateChangeLock synchronized {
val responseMap = new collection.mutable.HashMap[TopicAndPartition, Short]
if(stopReplicaRequest.controllerEpoch < controllerEpoch) {
stateChangeLogger.warn("Broker %d received stop replica request from an old controller epoch %d."
.format(localBrokerId, stopReplicaRequest.controllerEpoch) +
" Latest known controller epoch is %d " + controllerEpoch)
(responseMap, ErrorMapping.StaleControllerEpochCode)
} else {
controllerEpoch = stopReplicaRequest.controllerEpoch
// First stop fetchers for all partitions, then stop the corresponding replicas
replicaFetcherManager.removeFetcherForPartitions(stopReplicaRequest.partitions.map(r => TopicAndPartition(r.topic, r.partition)))
for(topicAndPartition <- stopReplicaRequest.partitions){
val errorCode = stopReplica(topicAndPartition.topic, topicAndPartition.partition, stopReplicaRequest.deletePartitions)
responseMap.put(topicAndPartition, errorCode)
}
(responseMap, ErrorMapping.NoError)
}
}
}
def getOrCreatePartition(topic: String, partitionId: Int): Partition = {
var partition = allPartitions.get((topic, partitionId))
if (partition == null) {
allPartitions.putIfNotExists((topic, partitionId), new Partition(topic, partitionId, time, this))
partition = allPartitions.get((topic, partitionId))
}
partition
}
def getPartition(topic: String, partitionId: Int): Option[Partition] = {
val partition = allPartitions.get((topic, partitionId))
if (partition == null)
None
else
Some(partition)
}
def getReplicaOrException(topic: String, partition: Int): Replica = {
val replicaOpt = getReplica(topic, partition)
if(replicaOpt.isDefined)
replicaOpt.get
else
throw new ReplicaNotAvailableException("Replica %d is not available for partition [%s,%d]".format(config.brokerId, topic, partition))
}
def getLeaderReplicaIfLocal(topic: String, partitionId: Int): Replica = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case None =>
throw new UnknownTopicOrPartitionException("Partition [%s,%d] doesn't exist on %d".format(topic, partitionId, config.brokerId))
case Some(partition) =>
partition.leaderReplicaIfLocal match {
case Some(leaderReplica) => leaderReplica
case None =>
throw new NotLeaderForPartitionException("Leader not local for partition [%s,%d] on broker %d"
.format(topic, partitionId, config.brokerId))
}
}
}
def getReplica(topic: String, partitionId: Int, replicaId: Int = config.brokerId): Option[Replica] = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case None => None
case Some(partition) => partition.getReplica(replicaId)
}
}
/**
* Append messages to leader replicas of the partition, and wait for them to be replicated to other replicas;
   * the callback function will be triggered either when the timeout expires or the required acks are satisfied
*/
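  // A typical call site (names below are purely illustrative) passes the per-partition message
  // sets of a produce request together with a callback that sends the produce response, e.g.:
  //   replicaManager.appendMessages(timeout = 3000L, requiredAcks = -1, internalTopicsAllowed = false,
  //     messagesPerPartition = Map(TopicAndPartition("topic", 0) -> messageSet),
  //     responseCallback = statuses => sendProduceResponse(statuses))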
def appendMessages(timeout: Long,
requiredAcks: Short,
internalTopicsAllowed: Boolean,
messagesPerPartition: Map[TopicAndPartition, MessageSet],
responseCallback: Map[TopicAndPartition, ProducerResponseStatus] => Unit) {
if (isValidRequiredAcks(requiredAcks)) {
val sTime = SystemTime.milliseconds
val localProduceResults = appendToLocalLog(internalTopicsAllowed, messagesPerPartition, requiredAcks)
debug("Produce to local log in %d ms".format(SystemTime.milliseconds - sTime))
val produceStatus = localProduceResults.map { case (topicAndPartition, result) =>
topicAndPartition ->
ProducePartitionStatus(
result.info.lastOffset + 1, // required offset
ProducerResponseStatus(result.errorCode, result.info.firstOffset)) // response status
}
if (delayedRequestRequired(requiredAcks, messagesPerPartition, localProduceResults)) {
// create delayed produce operation
val produceMetadata = ProduceMetadata(requiredAcks, produceStatus)
val delayedProduce = new DelayedProduce(timeout, produceMetadata, this, responseCallback)
// create a list of (topic, partition) pairs to use as keys for this delayed produce operation
val producerRequestKeys = messagesPerPartition.keys.map(new TopicPartitionOperationKey(_)).toSeq
// try to complete the request immediately, otherwise put it into the purgatory
// this is because while the delayed produce operation is being created, new
// requests may arrive and hence make this operation completable.
delayedProducePurgatory.tryCompleteElseWatch(delayedProduce, producerRequestKeys)
} else {
// we can respond immediately
val produceResponseStatus = produceStatus.mapValues(status => status.responseStatus)
responseCallback(produceResponseStatus)
}
} else {
// If required.acks is outside accepted range, something is wrong with the client
// Just return an error and don't handle the request at all
val responseStatus = messagesPerPartition.map {
case (topicAndPartition, messageSet) =>
(topicAndPartition ->
ProducerResponseStatus(Errors.INVALID_REQUIRED_ACKS.code,
LogAppendInfo.UnknownLogAppendInfo.firstOffset))
}
responseCallback(responseStatus)
}
}
// If all the following conditions are true, we need to put a delayed produce request and wait for replication to complete
//
// 1. required acks = -1
// 2. there is data to append
// 3. at least one partition append was successful (fewer errors than partitions)
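  // For example, with acks = -1 and three partitions: if all three local appends fail, the error
  // responses are sent back immediately; if at least one append succeeds, a DelayedProduce is
  // created and completed once the in-sync replicas catch up or the timeout expires.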
private def delayedRequestRequired(requiredAcks: Short, messagesPerPartition: Map[TopicAndPartition, MessageSet],
localProduceResults: Map[TopicAndPartition, LogAppendResult]): Boolean = {
requiredAcks == -1 &&
messagesPerPartition.size > 0 &&
localProduceResults.values.count(_.error.isDefined) < messagesPerPartition.size
}
private def isValidRequiredAcks(requiredAcks: Short): Boolean = {
requiredAcks == -1 || requiredAcks == 1 || requiredAcks == 0
}
/**
* Append the messages to the local replica logs
*/
private def appendToLocalLog(internalTopicsAllowed: Boolean,
messagesPerPartition: Map[TopicAndPartition, MessageSet],
requiredAcks: Short): Map[TopicAndPartition, LogAppendResult] = {
trace("Append [%s] to local log ".format(messagesPerPartition))
messagesPerPartition.map { case (topicAndPartition, messages) =>
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).totalProduceRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().totalProduceRequestRate.mark()
// reject appending to internal topics if it is not allowed
if (Topic.InternalTopics.contains(topicAndPartition.topic) && !internalTopicsAllowed) {
(topicAndPartition, LogAppendResult(
LogAppendInfo.UnknownLogAppendInfo,
Some(new InvalidTopicException("Cannot append to internal topic %s".format(topicAndPartition.topic)))))
} else {
try {
val partitionOpt = getPartition(topicAndPartition.topic, topicAndPartition.partition)
val info = partitionOpt match {
case Some(partition) =>
partition.appendMessagesToLeader(messages.asInstanceOf[ByteBufferMessageSet], requiredAcks)
case None => throw new UnknownTopicOrPartitionException("Partition %s doesn't exist on %d"
.format(topicAndPartition, localBrokerId))
}
val numAppendedMessages =
if (info.firstOffset == -1L || info.lastOffset == -1L)
0
else
info.lastOffset - info.firstOffset + 1
// update stats for successfully appended bytes and messages as bytesInRate and messageInRate
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesInRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats.bytesInRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).messagesInRate.mark(numAppendedMessages)
BrokerTopicStats.getBrokerAllTopicsStats.messagesInRate.mark(numAppendedMessages)
trace("%d bytes written to log %s-%d beginning at offset %d and ending at offset %d"
.format(messages.sizeInBytes, topicAndPartition.topic, topicAndPartition.partition, info.firstOffset, info.lastOffset))
(topicAndPartition, LogAppendResult(info))
} catch {
// NOTE: Failed produce requests metric is not incremented for known exceptions
          // it is supposed to indicate unexpected failures of a broker in handling a produce request
case e: KafkaStorageException =>
fatal("Halting due to unrecoverable I/O error while handling produce request: ", e)
Runtime.getRuntime.halt(1)
(topicAndPartition, null)
case utpe: UnknownTopicOrPartitionException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(utpe)))
case nle: NotLeaderForPartitionException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(nle)))
case mtle: MessageSizeTooLargeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mtle)))
case mstle: MessageSetSizeTooLargeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mstle)))
case imse : InvalidMessageSizeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(imse)))
case t: Throwable =>
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).failedProduceRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats.failedProduceRequestRate.mark()
error("Error processing append operation on partition %s".format(topicAndPartition), t)
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(t)))
}
}
}
}
/**
* Fetch messages from the leader replica, and wait until enough data can be fetched and return;
   * the callback function will be triggered either when the timeout expires or the required fetch info is satisfied
*/
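  // A typical call site (names below are purely illustrative) requests data for one partition on
  // behalf of a consumer (replicaId = -1) and sends the fetch response from the callback, e.g.:
  //   replicaManager.fetchMessages(timeout = 500L, replicaId = -1, fetchMinBytes = 1,
  //     fetchInfo = Map(TopicAndPartition("topic", 0) -> PartitionFetchInfo(0L, 1048576)),
  //     responseCallback = data => sendFetchResponse(data))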
def fetchMessages(timeout: Long,
replicaId: Int,
fetchMinBytes: Int,
fetchInfo: immutable.Map[TopicAndPartition, PartitionFetchInfo],
responseCallback: Map[TopicAndPartition, FetchResponsePartitionData] => Unit) {
val isFromFollower = replicaId >= 0
val fetchOnlyFromLeader: Boolean = replicaId != Request.DebuggingConsumerId
val fetchOnlyCommitted: Boolean = ! Request.isValidBrokerId(replicaId)
// read from local logs
val logReadResults = readFromLocalLog(fetchOnlyFromLeader, fetchOnlyCommitted, fetchInfo)
// if the fetch comes from the follower,
// update its corresponding log end offset
if(Request.isValidBrokerId(replicaId))
updateFollowerLogReadResults(replicaId, logReadResults)
// check if this fetch request can be satisfied right away
val bytesReadable = logReadResults.values.map(_.info.messageSet.sizeInBytes).sum
val errorReadingData = logReadResults.values.foldLeft(false) ((errorIncurred, readResult) =>
errorIncurred || (readResult.errorCode != ErrorMapping.NoError))
// respond immediately if 1) fetch request does not want to wait
// 2) fetch request does not require any data
// 3) has enough data to respond
// 4) some error happens while reading data
if(timeout <= 0 || fetchInfo.size <= 0 || bytesReadable >= fetchMinBytes || errorReadingData) {
val fetchPartitionData = logReadResults.mapValues(result =>
FetchResponsePartitionData(result.errorCode, result.hw, result.info.messageSet))
responseCallback(fetchPartitionData)
} else {
// construct the fetch results from the read results
val fetchPartitionStatus = logReadResults.map { case (topicAndPartition, result) =>
(topicAndPartition, FetchPartitionStatus(result.info.fetchOffsetMetadata, fetchInfo.get(topicAndPartition).get))
}
val fetchMetadata = FetchMetadata(fetchMinBytes, fetchOnlyFromLeader, fetchOnlyCommitted, isFromFollower, fetchPartitionStatus)
val delayedFetch = new DelayedFetch(timeout, fetchMetadata, this, responseCallback)
// create a list of (topic, partition) pairs to use as keys for this delayed fetch operation
val delayedFetchKeys = fetchPartitionStatus.keys.map(new TopicPartitionOperationKey(_)).toSeq
// try to complete the request immediately, otherwise put it into the purgatory;
// this is because while the delayed fetch operation is being created, new requests
// may arrive and hence make this operation completable.
delayedFetchPurgatory.tryCompleteElseWatch(delayedFetch, delayedFetchKeys)
}
}
/**
   * Read from the given topic partitions, each at its requested offset and up to its requested fetch size in bytes
*/
def readFromLocalLog(fetchOnlyFromLeader: Boolean,
readOnlyCommitted: Boolean,
readPartitionInfo: Map[TopicAndPartition, PartitionFetchInfo]): Map[TopicAndPartition, LogReadResult] = {
readPartitionInfo.map { case (TopicAndPartition(topic, partition), PartitionFetchInfo(offset, fetchSize)) =>
BrokerTopicStats.getBrokerTopicStats(topic).totalFetchRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().totalFetchRequestRate.mark()
val partitionDataAndOffsetInfo =
try {
trace("Fetching log segment for topic %s, partition %d, offset %d, size %d".format(topic, partition, offset, fetchSize))
// decide whether to only fetch from leader
val localReplica = if (fetchOnlyFromLeader)
getLeaderReplicaIfLocal(topic, partition)
else
getReplicaOrException(topic, partition)
// decide whether to only fetch committed data (i.e. messages below high watermark)
val maxOffsetOpt = if (readOnlyCommitted)
Some(localReplica.highWatermark.messageOffset)
else
None
/* Read the LogOffsetMetadata prior to performing the read from the log.
* We use the LogOffsetMetadata to determine if a particular replica is in-sync or not.
* Using the log end offset after performing the read can lead to a race condition
           * where data gets appended to the log immediately after the replica has consumed from it.
* This can cause a replica to always be out of sync.
*/
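          // Concretely: if the log end offset were sampled after the read, a produce landing
          // between the read and the sample would make the follower look behind the new LEO on
          // every fetch, so it could never be re-added to the ISR.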
val initialLogEndOffset = localReplica.logEndOffset
val logReadInfo = localReplica.log match {
case Some(log) =>
log.read(offset, fetchSize, maxOffsetOpt)
case None =>
error("Leader for partition [%s,%d] does not have a local log".format(topic, partition))
FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty)
}
val readToEndOfLog = initialLogEndOffset.messageOffset - logReadInfo.fetchOffsetMetadata.messageOffset <= 0
LogReadResult(logReadInfo, localReplica.highWatermark.messageOffset, fetchSize, readToEndOfLog, None)
} catch {
// NOTE: Failed fetch requests metric is not incremented for known exceptions since it
          // is supposed to indicate unexpected failure of a broker in handling a fetch request
case utpe: UnknownTopicOrPartitionException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(utpe))
case nle: NotLeaderForPartitionException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(nle))
case rnae: ReplicaNotAvailableException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(rnae))
case oor : OffsetOutOfRangeException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(oor))
case e: Throwable =>
BrokerTopicStats.getBrokerTopicStats(topic).failedFetchRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().failedFetchRequestRate.mark()
error("Error processing fetch operation on partition [%s,%d] offset %d".format(topic, partition, offset))
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(e))
}
(TopicAndPartition(topic, partition), partitionDataAndOffsetInfo)
}
}
def maybeUpdateMetadataCache(updateMetadataRequest: UpdateMetadataRequest, metadataCache: MetadataCache) {
replicaStateChangeLock synchronized {
if(updateMetadataRequest.controllerEpoch < controllerEpoch) {
val stateControllerEpochErrorMessage = ("Broker %d received update metadata request with correlation id %d from an " +
"old controller %d with epoch %d. Latest known controller epoch is %d").format(localBrokerId,
updateMetadataRequest.correlationId, updateMetadataRequest.controllerId, updateMetadataRequest.controllerEpoch,
controllerEpoch)
stateChangeLogger.warn(stateControllerEpochErrorMessage)
throw new ControllerMovedException(stateControllerEpochErrorMessage)
} else {
metadataCache.updateCache(updateMetadataRequest, localBrokerId, stateChangeLogger)
controllerEpoch = updateMetadataRequest.controllerEpoch
}
}
}
def becomeLeaderOrFollower(leaderAndISRRequest: LeaderAndIsrRequest,
metadataCache: MetadataCache,
onLeadershipChange: (Iterable[Partition], Iterable[Partition]) => Unit): BecomeLeaderOrFollowerResult = {
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partition), stateInfo) =>
stateChangeLogger.trace("Broker %d received LeaderAndIsr request %s correlation id %d from controller %d epoch %d for partition [%s,%d]"
.format(localBrokerId, stateInfo, leaderAndISRRequest.correlationId,
leaderAndISRRequest.controllerId, leaderAndISRRequest.controllerEpoch, topic, partition))
}
replicaStateChangeLock synchronized {
val responseMap = new mutable.HashMap[(String, Int), Short]
if (leaderAndISRRequest.controllerEpoch < controllerEpoch) {
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partition), stateInfo) =>
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d since " +
"its controller epoch %d is old. Latest known controller epoch is %d").format(localBrokerId, leaderAndISRRequest.controllerId,
leaderAndISRRequest.correlationId, leaderAndISRRequest.controllerEpoch, controllerEpoch))
}
BecomeLeaderOrFollowerResult(responseMap, ErrorMapping.StaleControllerEpochCode)
} else {
val controllerId = leaderAndISRRequest.controllerId
val correlationId = leaderAndISRRequest.correlationId
controllerEpoch = leaderAndISRRequest.controllerEpoch
// First check partition's leader epoch
val partitionState = new mutable.HashMap[Partition, PartitionStateInfo]()
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partitionId), partitionStateInfo) =>
val partition = getOrCreatePartition(topic, partitionId)
val partitionLeaderEpoch = partition.getLeaderEpoch()
// If the leader epoch is valid record the epoch of the controller that made the leadership decision.
// This is useful while updating the isr to maintain the decision maker controller's epoch in the zookeeper path
if (partitionLeaderEpoch < partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch) {
if(partitionStateInfo.allReplicas.contains(config.brokerId))
partitionState.put(partition, partitionStateInfo)
else {
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d " +
"epoch %d for partition [%s,%d] as itself is not in assigned replica list %s")
.format(localBrokerId, controllerId, correlationId, leaderAndISRRequest.controllerEpoch,
topic, partition.partitionId, partitionStateInfo.allReplicas.mkString(",")))
responseMap.put((topic, partitionId), ErrorMapping.UnknownTopicOrPartitionCode)
}
} else {
// Otherwise record the error code in response
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d " +
"epoch %d for partition [%s,%d] since its associated leader epoch %d is old. Current leader epoch is %d")
.format(localBrokerId, controllerId, correlationId, leaderAndISRRequest.controllerEpoch,
topic, partition.partitionId, partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch, partitionLeaderEpoch))
responseMap.put((topic, partitionId), ErrorMapping.StaleLeaderEpochCode)
}
}
val partitionsTobeLeader = partitionState.filter { case (partition, partitionStateInfo) =>
partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader == config.brokerId
}
val partitionsToBeFollower = (partitionState -- partitionsTobeLeader.keys)
val partitionsBecomeLeader = if (!partitionsTobeLeader.isEmpty)
makeLeaders(controllerId, controllerEpoch, partitionsTobeLeader, leaderAndISRRequest.correlationId, responseMap)
else
Set.empty[Partition]
val partitionsBecomeFollower = if (!partitionsToBeFollower.isEmpty)
makeFollowers(controllerId, controllerEpoch, partitionsToBeFollower, leaderAndISRRequest.correlationId, responseMap, metadataCache)
else
Set.empty[Partition]
        // we initialize the highwatermark thread after the first LeaderAndIsr request. This ensures that all the partitions
        // have been completely populated before starting the checkpointing, thereby avoiding weird race conditions
if (!hwThreadInitialized) {
startHighWaterMarksCheckPointThread()
hwThreadInitialized = true
}
replicaFetcherManager.shutdownIdleFetcherThreads()
onLeadershipChange(partitionsBecomeLeader, partitionsBecomeFollower)
BecomeLeaderOrFollowerResult(responseMap, ErrorMapping.NoError)
}
}
}
/*
* Make the current broker to become leader for a given set of partitions by:
*
* 1. Stop fetchers for these partitions
* 2. Update the partition metadata in cache
* 3. Add these partitions to the leader partitions set
*
* If an unexpected error is thrown in this function, it will be propagated to KafkaApis where
* the error message will be set on each partition since we do not know which partition caused it. Otherwise,
* return the set of partitions that are made leader due to this method
*
* TODO: the above may need to be fixed later
*/
private def makeLeaders(controllerId: Int,
epoch: Int,
partitionState: Map[Partition, PartitionStateInfo],
correlationId: Int,
responseMap: mutable.Map[(String, Int), Short]): Set[Partition] = {
partitionState.foreach(state =>
stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"starting the become-leader transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId))))
for (partition <- partitionState.keys)
responseMap.put((partition.topic, partition.partitionId), ErrorMapping.NoError)
val partitionsToMakeLeaders: mutable.Set[Partition] = mutable.Set()
try {
// First stop fetchers for all the partitions
replicaFetcherManager.removeFetcherForPartitions(partitionState.keySet.map(new TopicAndPartition(_)))
// Update the partition information to be the leader
partitionState.foreach{ case (partition, partitionStateInfo) =>
if (partition.makeLeader(controllerId, partitionStateInfo, correlationId))
partitionsToMakeLeaders += partition
else
stateChangeLogger.info(("Broker %d skipped the become-leader state change after marking its partition as leader with correlation id %d from " +
"controller %d epoch %d for partition %s since it is already the leader for the partition.")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(partition.topic, partition.partitionId)));
}
partitionsToMakeLeaders.foreach { partition =>
stateChangeLogger.trace(("Broker %d stopped fetchers as part of become-leader request from controller " +
"%d epoch %d with correlation id %d for partition %s")
.format(localBrokerId, controllerId, epoch, correlationId, TopicAndPartition(partition.topic, partition.partitionId)))
}
} catch {
case e: Throwable =>
partitionState.foreach { state =>
val errorMsg = ("Error on broker %d while processing LeaderAndIsr request correlationId %d received from controller %d" +
" epoch %d for partition %s").format(localBrokerId, correlationId, controllerId, epoch,
TopicAndPartition(state._1.topic, state._1.partitionId))
stateChangeLogger.error(errorMsg, e)
}
// Re-throw the exception for it to be caught in KafkaApis
throw e
}
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d completed LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"for the become-leader transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
partitionsToMakeLeaders
}
/*
* Make the current broker to become follower for a given set of partitions by:
*
* 1. Remove these partitions from the leader partitions set.
* 2. Mark the replicas as followers so that no more data can be added from the producer clients.
* 3. Stop fetchers for these partitions so that no more data can be added by the replica fetcher threads.
* 4. Truncate the log and checkpoint offsets for these partitions.
* 5. If the broker is not shutting down, add the fetcher to the new leaders.
*
   * The ordering of these steps ensures that the replicas in transition will not
   * take any more messages before checkpointing offsets, so that all messages before the checkpoint
   * are guaranteed to be flushed to disk
*
* If an unexpected error is thrown in this function, it will be propagated to KafkaApis where
* the error message will be set on each partition since we do not know which partition caused it. Otherwise,
* return the set of partitions that are made follower due to this method
*/
private def makeFollowers(controllerId: Int,
epoch: Int,
partitionState: Map[Partition, PartitionStateInfo],
correlationId: Int,
responseMap: mutable.Map[(String, Int), Short],
metadataCache: MetadataCache) : Set[Partition] = {
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"starting the become-follower transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
for (partition <- partitionState.keys)
responseMap.put((partition.topic, partition.partitionId), ErrorMapping.NoError)
val partitionsToMakeFollower: mutable.Set[Partition] = mutable.Set()
try {
// TODO: Delete leaders from LeaderAndIsrRequest
partitionState.foreach{ case (partition, partitionStateInfo) =>
val leaderIsrAndControllerEpoch = partitionStateInfo.leaderIsrAndControllerEpoch
val newLeaderBrokerId = leaderIsrAndControllerEpoch.leaderAndIsr.leader
metadataCache.getAliveBrokers.find(_.id == newLeaderBrokerId) match {
// Only change partition state when the leader is available
case Some(leaderBroker) =>
if (partition.makeFollower(controllerId, partitionStateInfo, correlationId))
partitionsToMakeFollower += partition
else
stateChangeLogger.info(("Broker %d skipped the become-follower state change after marking its partition as follower with correlation id %d from " +
"controller %d epoch %d for partition [%s,%d] since the new leader %d is the same as the old leader")
.format(localBrokerId, correlationId, controllerId, leaderIsrAndControllerEpoch.controllerEpoch,
partition.topic, partition.partitionId, newLeaderBrokerId))
case None =>
// The leader broker should always be present in the metadata cache.
// If not, we should record the error message and abort the transition process for this partition
stateChangeLogger.error(("Broker %d received LeaderAndIsrRequest with correlation id %d from controller" +
" %d epoch %d for partition [%s,%d] but cannot become follower since the new leader %d is unavailable.")
.format(localBrokerId, correlationId, controllerId, leaderIsrAndControllerEpoch.controllerEpoch,
partition.topic, partition.partitionId, newLeaderBrokerId))
// Create the local replica even if the leader is unavailable. This is required to ensure that we include
// the partition's high watermark in the checkpoint file (see KAFKA-1647)
partition.getOrCreateReplica()
}
}
replicaFetcherManager.removeFetcherForPartitions(partitionsToMakeFollower.map(new TopicAndPartition(_)))
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d stopped fetchers as part of become-follower request from controller " +
"%d epoch %d with correlation id %d for partition %s")
.format(localBrokerId, controllerId, epoch, correlationId, TopicAndPartition(partition.topic, partition.partitionId)))
}
logManager.truncateTo(partitionsToMakeFollower.map(partition => (new TopicAndPartition(partition), partition.getOrCreateReplica().highWatermark.messageOffset)).toMap)
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d truncated logs and checkpointed recovery boundaries for partition [%s,%d] as part of " +
"become-follower request with correlation id %d from controller %d epoch %d").format(localBrokerId,
partition.topic, partition.partitionId, correlationId, controllerId, epoch))
}
if (isShuttingDown.get()) {
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d skipped the adding-fetcher step of the become-follower state change with correlation id %d from " +
"controller %d epoch %d for partition [%s,%d] since it is shutting down").format(localBrokerId, correlationId,
controllerId, epoch, partition.topic, partition.partitionId))
}
}
else {
// we do not need to check if the leader exists again since this has been done at the beginning of this process
val partitionsToMakeFollowerWithLeaderAndOffset = partitionsToMakeFollower.map(partition =>
new TopicAndPartition(partition) -> BrokerAndInitialOffset(
metadataCache.getAliveBrokers.find(_.id == partition.leaderReplicaIdOpt.get).get.getBrokerEndPoint(config.interBrokerSecurityProtocol),
partition.getReplica().get.logEndOffset.messageOffset)).toMap
replicaFetcherManager.addFetcherForPartitions(partitionsToMakeFollowerWithLeaderAndOffset)
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d started fetcher to new leader as part of become-follower request from controller " +
"%d epoch %d with correlation id %d for partition [%s,%d]")
.format(localBrokerId, controllerId, epoch, correlationId, partition.topic, partition.partitionId))
}
}
} catch {
case e: Throwable =>
val errorMsg = ("Error on broker %d while processing LeaderAndIsr request with correlationId %d received from controller %d " +
"epoch %d").format(localBrokerId, correlationId, controllerId, epoch)
stateChangeLogger.error(errorMsg, e)
// Re-throw the exception for it to be caught in KafkaApis
throw e
}
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d completed LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"for the become-follower transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
partitionsToMakeFollower
}
private def maybeShrinkIsr(): Unit = {
trace("Evaluating ISR list of partitions to see which replicas can be removed from the ISR")
allPartitions.values.foreach(partition => partition.maybeShrinkIsr(config.replicaLagTimeMaxMs))
}
private def updateFollowerLogReadResults(replicaId: Int, readResults: Map[TopicAndPartition, LogReadResult]) {
debug("Recording follower broker %d log read results: %s ".format(replicaId, readResults))
readResults.foreach { case (topicAndPartition, readResult) =>
getPartition(topicAndPartition.topic, topicAndPartition.partition) match {
case Some(partition) =>
partition.updateReplicaLogReadResult(replicaId, readResult)
          // for producer requests with acks = -1, we need to check
// if they can be unblocked after some follower's log end offsets have moved
tryCompleteDelayedProduce(new TopicPartitionOperationKey(topicAndPartition))
case None =>
warn("While recording the replica LEO, the partition %s hasn't been created.".format(topicAndPartition))
}
}
}
private def getLeaderPartitions() : List[Partition] = {
allPartitions.values.filter(_.leaderReplicaIfLocal().isDefined).toList
}
// Flushes the highwatermark value for all partitions to the highwatermark file
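  // One replication-offset-checkpoint file is written per log directory (see
  // highWatermarkCheckpoints above), mapping every topic partition hosted in that directory to
  // its current high watermark offset.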
def checkpointHighWatermarks() {
val replicas = allPartitions.values.map(_.getReplica(config.brokerId)).collect{case Some(replica) => replica}
val replicasByDir = replicas.filter(_.log.isDefined).groupBy(_.log.get.dir.getParentFile.getAbsolutePath)
for((dir, reps) <- replicasByDir) {
val hwms = reps.map(r => (new TopicAndPartition(r) -> r.highWatermark.messageOffset)).toMap
try {
highWatermarkCheckpoints(dir).write(hwms)
} catch {
case e: IOException =>
fatal("Error writing to highwatermark file: ", e)
Runtime.getRuntime().halt(1)
}
}
}
  // High watermarks need to be checkpointed, except when running under unit tests
def shutdown(checkpointHW: Boolean = true) {
info("Shutting down")
replicaFetcherManager.shutdown()
delayedFetchPurgatory.shutdown()
delayedProducePurgatory.shutdown()
if (checkpointHW)
checkpointHighWatermarks()
info("Shut down completely")
}
}
| eljefe6a/kafka | core/src/main/scala/kafka/server/ReplicaManager.scala | Scala | apache-2.0 | 47,068 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.tools.compact
import java.io.File
import com.typesafe.scalalogging.LazyLogging
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat
import org.geotools.util.factory.Hints
import org.locationtech.geomesa.fs.storage.api.FileSystemStorage
import org.locationtech.geomesa.fs.storage.api.StorageMetadata.PartitionMetadata
import org.locationtech.geomesa.fs.storage.common.jobs.StorageConfiguration
import org.locationtech.geomesa.fs.storage.common.utils.StorageUtils.FileType
import org.locationtech.geomesa.fs.storage.orc.jobs.OrcStorageConfiguration
import org.locationtech.geomesa.fs.tools.compact.FileSystemCompactionJob.CompactionMapper
import org.locationtech.geomesa.fs.tools.ingest.StorageJobUtils
import org.locationtech.geomesa.jobs.mapreduce.{GeoMesaOutputFormat, JobWithLibJars}
import org.locationtech.geomesa.parquet.jobs.ParquetStorageConfiguration
import org.locationtech.geomesa.tools.Command
import org.locationtech.geomesa.tools.utils.StatusCallback
import org.locationtech.geomesa.utils.text.TextTools
import org.opengis.feature.simple.SimpleFeature
trait FileSystemCompactionJob extends StorageConfiguration with JobWithLibJars {
def run(
storage: FileSystemStorage,
partitions: Seq[PartitionMetadata],
tempPath: Option[Path],
libjarsFiles: Seq[String],
libjarsPaths: Iterator[() => Seq[File]],
statusCallback: StatusCallback): (Long, Long) = {
val job = Job.getInstance(new Configuration(storage.context.conf), "GeoMesa Storage Compaction")
setLibJars(job, libjarsFiles, libjarsPaths)
job.setJarByClass(this.getClass)
// InputFormat and Mappers
job.setInputFormatClass(classOf[PartitionInputFormat])
job.setMapperClass(classOf[CompactionMapper])
// No reducers - Mapper will read/write its own things
job.setNumReduceTasks(0)
job.setMapOutputKeyClass(classOf[Void])
job.setMapOutputValueClass(classOf[SimpleFeature])
job.setOutputKeyClass(classOf[Void])
job.setOutputValueClass(classOf[SimpleFeature])
val qualifiedTempPath = tempPath.map(storage.context.fc.makeQualified)
StorageConfiguration.setRootPath(job.getConfiguration, storage.context.root)
StorageConfiguration.setPartitions(job.getConfiguration, partitions.map(_.name).toArray)
StorageConfiguration.setFileType(job.getConfiguration, FileType.Compacted)
FileOutputFormat.setOutputPath(job, qualifiedTempPath.getOrElse(storage.context.root))
// MapReduce options
job.getConfiguration.set("mapred.map.tasks.speculative.execution", "false")
job.getConfiguration.set("mapreduce.job.user.classpath.first", "true")
configureOutput(storage.metadata.sft, job)
// save the existing files so we can delete them afterwards
val existingDataFiles = partitions.map(p => (p, storage.getFilePaths(p.name).toList)).toList
Command.user.info("Submitting job - please wait...")
job.submit()
Command.user.info(s"Tracking available at ${job.getStatus.getTrackingUrl}")
def mapCounters = Seq(("mapped", written(job)), ("failed", failed(job)))
while (!job.isComplete) {
Thread.sleep(1000)
if (job.getStatus.getState != JobStatus.State.PREP) {
val mapProgress = job.mapProgress()
if (mapProgress < 1f) {
statusCallback("Map: ", mapProgress, mapCounters, done = false)
}
}
}
statusCallback("Map: ", job.mapProgress(), mapCounters, done = true)
statusCallback.reset()
val counterResult = (written(job), failed(job))
if (!job.isSuccessful) {
Command.user.error(s"Job failed with state ${job.getStatus.getState} due to: ${job.getStatus.getFailureInfo}")
} else {
val copied = qualifiedTempPath.forall { tp =>
StorageJobUtils.distCopy(tp, storage.context.root, statusCallback)
}
if (copied) {
Command.user.info("Removing old files")
existingDataFiles.foreach { case (partition, files) =>
files.foreach(f => storage.context.fc.delete(f.path, false))
storage.metadata.removePartition(partition)
Command.user.info(s"Removed ${TextTools.getPlural(files.size, "file")} in partition ${partition.name}")
}
Command.user.info("Compacting metadata")
storage.metadata.compact(None, 4)
}
}
counterResult
}
private def written(job: Job): Long =
job.getCounters.findCounter(GeoMesaOutputFormat.Counters.Group, GeoMesaOutputFormat.Counters.Written).getValue
private def failed(job: Job): Long =
job.getCounters.findCounter(GeoMesaOutputFormat.Counters.Group, GeoMesaOutputFormat.Counters.Failed).getValue
}
object FileSystemCompactionJob {
class ParquetCompactionJob extends FileSystemCompactionJob with ParquetStorageConfiguration
class OrcCompactionJob extends FileSystemCompactionJob with OrcStorageConfiguration
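  // Added note (not in the original source): a caller picks the concrete job to match the
  // storage encoding, e.g. something like
  //   new ParquetCompactionJob().run(storage, partitions, None, libjarsFiles, libjarsPaths, callback)
  // for Parquet-backed storage; the Orc variant is used the same way.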
/**
* Mapper that simply reads the input format and writes the output to the sample node. This mapper
* is paired with the PartitionRecordReader which will feed all the features into a single map task
*/
class CompactionMapper extends Mapper[Void, SimpleFeature, Void, SimpleFeature] with LazyLogging {
type Context = Mapper[Void, SimpleFeature, Void, SimpleFeature]#Context
private var written: Counter = _
private var mapped: Counter = _
override def setup(context: Context): Unit = {
super.setup(context)
written = context.getCounter(GeoMesaOutputFormat.Counters.Group, GeoMesaOutputFormat.Counters.Written)
mapped = context.getCounter("org.locationtech.geomesa.fs.compaction", "mapped")
}
override def map(key: Void, sf: SimpleFeature, context: Context): Unit = {
sf.getUserData.put(Hints.USE_PROVIDED_FID, java.lang.Boolean.TRUE)
mapped.increment(1)
context.write(null, sf)
written.increment(1)
}
}
}
| elahrvivaz/geomesa | geomesa-fs/geomesa-fs-tools/src/main/scala/org/locationtech/geomesa/fs/tools/compact/FileSystemCompactionJob.scala | Scala | apache-2.0 | 6,458 |
package com.socrata.soql.typed
trait Typable[Type] {
def typ: Type
}
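// Minimal usage sketch (added, illustrative only; the names below are hypothetical):
//   case class TypedColumn[Type](name: String, typ: Type) extends Typable[Type]
// Any consumer can then read the column's type through the shared `typ` member.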
| socrata-platform/soql-reference | soql-analyzer/src/main/scala/com/socrata/soql/typed/Typable.scala | Scala | apache-2.0 | 73 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import java.lang.{Double => JDouble, Float => JFloat, Long => JLong}
import java.math.{BigDecimal => JBigDecimal}
import scala.collection.mutable.ArrayBuffer
import scala.util.Try
import org.apache.hadoop.fs.Path
import org.apache.hadoop.util.Shell
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.expressions.{Cast, Literal}
import org.apache.spark.sql.types._
private[sql] case class Partition(values: Row, path: String)
private[sql] case class PartitionSpec(partitionColumns: StructType, partitions: Seq[Partition])
private[sql] object PartitionSpec {
val emptySpec = PartitionSpec(StructType(Seq.empty[StructField]), Seq.empty[Partition])
}
private[sql] object PartitioningUtils {
// This duplicates default value of Hive `ConfVars.DEFAULTPARTITIONNAME`, since sql/core doesn't
// depend on Hive.
private[sql] val DEFAULT_PARTITION_NAME = "__HIVE_DEFAULT_PARTITION__"
private[sql] case class PartitionValues(columnNames: Seq[String], literals: Seq[Literal]) {
require(columnNames.size == literals.size)
}
/**
* Given a group of qualified paths, tries to parse them and returns a partition specification.
* For example, given:
* {{{
* hdfs://<host>:<port>/path/to/partition/a=1/b=hello/c=3.14
* hdfs://<host>:<port>/path/to/partition/a=2/b=world/c=6.28
* }}}
* it returns:
* {{{
* PartitionSpec(
* partitionColumns = StructType(
* StructField(name = "a", dataType = IntegerType, nullable = true),
* StructField(name = "b", dataType = StringType, nullable = true),
* StructField(name = "c", dataType = DoubleType, nullable = true)),
* partitions = Seq(
* Partition(
* values = Row(1, "hello", 3.14),
* path = "hdfs://<host>:<port>/path/to/partition/a=1/b=hello/c=3.14"),
* Partition(
* values = Row(2, "world", 6.28),
* path = "hdfs://<host>:<port>/path/to/partition/a=2/b=world/c=6.28")))
* }}}
*/
private[sql] def parsePartitions(
paths: Seq[Path],
defaultPartitionName: String): PartitionSpec = {
// First, we need to parse every partition's path and see if we can find partition values.
val pathsWithPartitionValues = paths.flatMap { path =>
parsePartition(path, defaultPartitionName).map(path -> _)
}
if (pathsWithPartitionValues.isEmpty) {
// This dataset is not partitioned.
PartitionSpec.emptySpec
} else {
// This dataset is partitioned. We need to check whether all partitions have the same
// partition columns and resolve potential type conflicts.
val resolvedPartitionValues = resolvePartitions(pathsWithPartitionValues.map(_._2))
// Creates the StructType which represents the partition columns.
val fields = {
val PartitionValues(columnNames, literals) = resolvedPartitionValues.head
columnNames.zip(literals).map { case (name, Literal(_, dataType)) =>
// We always assume partition columns are nullable since we've no idea whether null values
// will be appended in the future.
StructField(name, dataType, nullable = true)
}
}
// Finally, we create `Partition`s based on paths and resolved partition values.
val partitions = resolvedPartitionValues.zip(pathsWithPartitionValues).map {
case (PartitionValues(_, literals), (path, _)) =>
Partition(Row.fromSeq(literals.map(_.value)), path.toString)
}
PartitionSpec(StructType(fields), partitions)
}
}
/**
* Parses a single partition, returns column names and values of each partition column. For
* example, given:
* {{{
* path = hdfs://<host>:<port>/path/to/partition/a=42/b=hello/c=3.14
* }}}
* it returns:
* {{{
* PartitionValues(
* Seq("a", "b", "c"),
* Seq(
* Literal.create(42, IntegerType),
* Literal.create("hello", StringType),
* Literal.create(3.14, FloatType)))
* }}}
*/
private[sql] def parsePartition(
path: Path,
defaultPartitionName: String): Option[PartitionValues] = {
val columns = ArrayBuffer.empty[(String, Literal)]
// Old Hadoop versions don't have `Path.isRoot`
var finished = path.getParent == null
var chopped = path
while (!finished) {
// Sometimes (e.g., when speculative task is enabled), temporary directories may be left
// uncleaned. Here we simply ignore them.
if (chopped.getName.toLowerCase == "_temporary") {
return None
}
val maybeColumn = parsePartitionColumn(chopped.getName, defaultPartitionName)
maybeColumn.foreach(columns += _)
chopped = chopped.getParent
finished = maybeColumn.isEmpty || chopped.getParent == null
}
if (columns.isEmpty) {
None
} else {
val (columnNames, values) = columns.reverse.unzip
Some(PartitionValues(columnNames, values))
}
}
private def parsePartitionColumn(
columnSpec: String,
defaultPartitionName: String): Option[(String, Literal)] = {
val equalSignIndex = columnSpec.indexOf('=')
if (equalSignIndex == -1) {
None
} else {
val columnName = columnSpec.take(equalSignIndex)
assert(columnName.nonEmpty, s"Empty partition column name in '$columnSpec'")
val rawColumnValue = columnSpec.drop(equalSignIndex + 1)
assert(rawColumnValue.nonEmpty, s"Empty partition column value in '$columnSpec'")
val literal = inferPartitionColumnValue(rawColumnValue, defaultPartitionName)
Some(columnName -> literal)
}
}
/**
* Resolves possible type conflicts between partitions by up-casting "lower" types. The up-
* casting order is:
* {{{
* NullType ->
* IntegerType -> LongType ->
* FloatType -> DoubleType -> DecimalType.Unlimited ->
* StringType
* }}}
*/
private[sql] def resolvePartitions(values: Seq[PartitionValues]): Seq[PartitionValues] = {
// Column names of all partitions must match
val distinctPartitionsColNames = values.map(_.columnNames).distinct
if (distinctPartitionsColNames.isEmpty) {
Seq.empty
} else {
assert(distinctPartitionsColNames.size == 1, {
        val list = distinctPartitionsColNames.mkString("\t", "\n\t", "")
        s"Conflicting partition column names detected:\n$list"
})
// Resolves possible type conflicts for each column
val columnCount = values.head.columnNames.size
val resolvedValues = (0 until columnCount).map { i =>
resolveTypeConflicts(values.map(_.literals(i)))
}
// Fills resolved literals back to each partition
values.zipWithIndex.map { case (d, index) =>
d.copy(literals = resolvedValues.map(_(index)))
}
}
}
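  // Illustrative example (added, not in the original source): given two partitions with values
  // a=1 (inferred IntegerType) and a=4.5 (inferred FloatType), resolvePartitions up-casts both
  // literals to FloatType, yielding 1.0f and 4.5f.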
/**
* Converts a string to a `Literal` with automatic type inference. Currently only supports
* [[IntegerType]], [[LongType]], [[FloatType]], [[DoubleType]], [[DecimalType.Unlimited]], and
* [[StringType]].
*/
private[sql] def inferPartitionColumnValue(
raw: String,
defaultPartitionName: String): Literal = {
// First tries integral types
Try(Literal.create(Integer.parseInt(raw), IntegerType))
.orElse(Try(Literal.create(JLong.parseLong(raw), LongType)))
// Then falls back to fractional types
.orElse(Try(Literal.create(JFloat.parseFloat(raw), FloatType)))
.orElse(Try(Literal.create(JDouble.parseDouble(raw), DoubleType)))
.orElse(Try(Literal.create(new JBigDecimal(raw), DecimalType.Unlimited)))
// Then falls back to string
.getOrElse {
if (raw == defaultPartitionName) Literal.create(null, NullType)
else Literal.create(unescapePathName(raw), StringType)
}
}
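  // Illustrative examples (added, not in the original source): "42" infers IntegerType,
  // "4.5" infers FloatType, "hello" falls back to StringType, and the default partition name
  // becomes a null literal of NullType.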
private val upCastingOrder: Seq[DataType] =
Seq(NullType, IntegerType, LongType, FloatType, DoubleType, DecimalType.Unlimited, StringType)
/**
* Given a collection of [[Literal]]s, resolves possible type conflicts by up-casting "lower"
* types.
*/
private def resolveTypeConflicts(literals: Seq[Literal]): Seq[Literal] = {
val desiredType = {
val topType = literals.map(_.dataType).maxBy(upCastingOrder.indexOf(_))
// Falls back to string if all values of this column are null or empty string
if (topType == NullType) StringType else topType
}
literals.map { case l @ Literal(_, dataType) =>
Literal.create(Cast(l, desiredType).eval(), desiredType)
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////
// The following string escaping code is mainly copied from Hive (o.a.h.h.common.FileUtils).
//////////////////////////////////////////////////////////////////////////////////////////////////
val charToEscape = {
val bitSet = new java.util.BitSet(128)
/**
* ASCII 01-1F are HTTP control characters that need to be escaped.
     * \u000A and \u000D are \n and \r, respectively.
*/
val clist = Array(
      '\u0001', '\u0002', '\u0003', '\u0004', '\u0005', '\u0006', '\u0007', '\u0008', '\u0009',
      '\n', '\u000B', '\u000C', '\r', '\u000E', '\u000F', '\u0010', '\u0011', '\u0012', '\u0013',
      '\u0014', '\u0015', '\u0016', '\u0017', '\u0018', '\u0019', '\u001A', '\u001B', '\u001C',
      '\u001D', '\u001E', '\u001F', '"', '#', '%', '\'', '*', '/', ':', '=', '?', '\\', '\u007F',
'{', '[', ']', '^')
clist.foreach(bitSet.set(_))
if (Shell.WINDOWS) {
Array(' ', '<', '>', '|').foreach(bitSet.set(_))
}
bitSet
}
def needsEscaping(c: Char): Boolean = {
c >= 0 && c < charToEscape.size() && charToEscape.get(c)
}
def escapePathName(path: String): String = {
val builder = new StringBuilder()
path.foreach { c =>
if (needsEscaping(c)) {
builder.append('%')
builder.append(f"${c.asInstanceOf[Int]}%02x")
} else {
builder.append(c)
}
}
builder.toString()
}
def unescapePathName(path: String): String = {
val sb = new StringBuilder
var i = 0
while (i < path.length) {
val c = path.charAt(i)
if (c == '%' && i + 2 < path.length) {
val code: Int = try {
Integer.valueOf(path.substring(i + 1, i + 3), 16)
} catch { case e: Exception =>
-1: Integer
}
if (code >= 0) {
sb.append(code.asInstanceOf[Char])
i += 3
} else {
sb.append(c)
i += 1
}
} else {
sb.append(c)
i += 1
}
}
sb.toString()
}
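  // Illustrative round trip (added, not in the original source): escapePathName("a=b") yields
  // "a%3db" because '=' is in charToEscape, and unescapePathName("a%3db") restores "a=b".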
}
| andrewor14/iolap | sql/core/src/main/scala/org/apache/spark/sql/sources/PartitioningUtils.scala | Scala | apache-2.0 | 11,416 |
package buck
object MainMixed extends App {
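  // Added note (not in the original source): the App arguments are upper-cased and comma-joined,
  // e.g. arguments `foo bar` become "FOO,BAR", before being handed to Class2.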
val argString = args map { _.toUpperCase } mkString ","
new Class2(argString).sayHello()
}
| LegNeato/buck | test/com/facebook/buck/jvm/scala/testdata/scala_binary/MainMixed.scala | Scala | apache-2.0 | 140 |
/*
* Demo of using by name implicits to resolve (hidden) divergence issues when
* traversing recursive generic structures.
*
* See https://stackoverflow.com/questions/25923974
*/
sealed trait HList
object HList {
implicit class Syntax[L <: HList](l: L) {
def ::[U](u: U): U :: L = new ::(u, l)
}
}
sealed trait HNil extends HList
object HNil extends HNil
case class ::[+H, +T <: HList](head : H, tail : T) extends HList
trait Generic[T] {
type Repr
def to(t: T): Repr
def from(r: Repr): T
}
object Generic {
type Aux[T, Repr0] = Generic[T] { type Repr = Repr0 }
}
trait DeepHLister[R] {
type Out
def apply(r: R): Out
}
object DeepHLister extends DeepHLister0 {
def apply[R](implicit dh: DeepHLister[R]): Aux[R, dh.Out] = dh
implicit def consDeepHLister[H, OutH <: HList, T <: HList, OutT <: HList](implicit
dhh: DeepHLister.Aux[H, OutH],
dht: DeepHLister.Aux[T, OutT]
): Aux[H :: T, OutH :: OutT] = new DeepHLister[H :: T] {
type Out = OutH :: OutT
def apply(r: H :: T) = dhh(r.head) :: dht(r.tail)
}
implicit object hnilDeepHLister extends DeepHLister[HNil] {
type Out = HNil
def apply(r: HNil) = HNil
}
}
trait DeepHLister0 extends DeepHLister1 {
implicit def genDeepHLister[T, R <: HList, OutR <: HList](implicit
gen: Generic.Aux[T, R],
dhr: => DeepHLister.Aux[R, OutR]
): Aux[T, OutR] = new DeepHLister[T] {
type Out = OutR
def apply(r: T) = dhr(gen.to(r))
}
}
trait DeepHLister1 {
type Aux[R, Out0] = DeepHLister[R] { type Out = Out0 }
implicit def default[T]: Aux[T, T] = new DeepHLister[T] {
type Out = T
def apply(r: T): T = r
}
}
object Test extends App {
}
object DeepHListerDemo extends App {
case class A(x: Int, y: String)
object A {
type ARepr = Int :: String :: HNil
implicit val aGen: Generic.Aux[A, ARepr] = new Generic[A] {
type Repr = ARepr
def to(t: A): Repr = t.x :: t.y :: HNil
def from(r: Repr): A = A(r.head, r.tail.head)
}
}
case class B(x: A, y: A)
object B {
type BRepr = A :: A :: HNil
implicit val bGen: Generic.Aux[B, BRepr] = new Generic[B] {
type Repr = BRepr
def to(t: B): Repr = t.x :: t.y :: HNil
def from(r: Repr): B = B(r.head, r.tail.head)
}
}
case class C(b: B, a: A)
object C {
type CRepr = B :: A :: HNil
implicit val cGen: Generic.Aux[C, CRepr] = new Generic[C] {
type Repr = CRepr
def to(t: C): Repr = t.b :: t.a :: HNil
def from(r: Repr): C = C(r.head, r.tail.head)
}
}
case class D(a: A, b: B)
object D {
type DRepr = A :: B :: HNil
implicit val dGen: Generic.Aux[D, DRepr] = new Generic[D] {
type Repr = DRepr
def to(t: D): Repr = t.a :: t.b :: HNil
def from(r: Repr): D = D(r.head, r.tail.head)
}
}
def typed[T](t : => T): Unit = {}
type ARepr = Int :: String :: HNil
type BRepr = ARepr :: ARepr :: HNil
type CRepr = BRepr :: ARepr :: HNil
type DRepr = ARepr :: BRepr :: HNil
val adhl = DeepHLister[A :: HNil]
typed[DeepHLister.Aux[A :: HNil, ARepr :: HNil]](adhl)
val bdhl = DeepHLister[B :: HNil]
typed[DeepHLister.Aux[B :: HNil, BRepr :: HNil]](bdhl)
val cdhl = DeepHLister[C :: HNil]
typed[DeepHLister.Aux[C :: HNil, CRepr :: HNil]](cdhl)
val ddhl = DeepHLister[D :: HNil]
typed[DeepHLister.Aux[D :: HNil, DRepr :: HNil]](ddhl)
}
| lrytz/scala | test/files/run/byname-implicits-6.scala | Scala | apache-2.0 | 3,369 |
package scalaSci.util
/*::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::*/
/**
* This trait is used to report errors showing the class and method within
* which the error or flaw occured.
*/
trait Error {
  /** Name of the class where the error occurred
*/
private val className = getClass.getSimpleName()
/*::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::*/
/**
* Show the flaw by printing the error message.
   * @param method the method where the error occurred
* @param message the error message
*/
def flaw(method: String, message: String) {
println("ERROR @ " + className + "." + method + ": " + message)
} // flaw
}
// Error trait
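// Usage sketch (added, illustrative only; `Solver` is a hypothetical class): a class
// `class Solver extends Error` can report a problem via flaw("solve", "matrix is singular"),
// which prints "ERROR @ Solver.solve: matrix is singular".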
| scalalab/scalalab | source/src/main/scala/scalaSci/util/Error.scala | Scala | mit | 728 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.executionplan.builders
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.neo4j.cypher.internal.compiler.v2_3._
import org.neo4j.cypher.internal.compiler.v2_3.commands._
import org.neo4j.cypher.internal.compiler.v2_3.commands.expressions._
import org.neo4j.cypher.internal.compiler.v2_3.commands.predicates.Equals
import org.neo4j.cypher.internal.compiler.v2_3.commands.values.TokenType.PropertyKey
import org.neo4j.cypher.internal.compiler.v2_3.executionplan.PartiallySolvedQuery
import org.neo4j.cypher.internal.compiler.v2_3.pipes.NodeStartPipe
import org.neo4j.cypher.internal.compiler.v2_3.spi.SchemaTypes.IndexDescriptor
import org.neo4j.cypher.internal.compiler.v2_3.spi.PlanContext
import org.neo4j.cypher.internal.frontend.v2_3.helpers.NonEmptyList
import org.neo4j.cypher.internal.frontend.v2_3.{ExclusiveBound, InclusiveBound, IndexHintException}
class StartPointBuilderTest extends BuilderTest {
context = mock[PlanContext]
val builder = new StartPointBuilder()
test("says_yes_to_node_by_id_queries") {
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(NodeByIndexQuery("s", "idx", Literal("foo")))))
assertAccepts(q)
}
test("plans index seek by prefix") {
val range = PrefixSeekRangeExpression(PrefixRange(Literal("prefix")))
val labelName = "Label"
val propertyName = "prop"
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(SchemaIndex("n", labelName, propertyName, AnyIndex, Some(RangeQueryExpression(range))))))
when(context.getIndexRule(labelName, propertyName)).thenReturn(Some(IndexDescriptor(123,456)))
assertAccepts(q)
}
test("plans unique index seek by prefix") {
val range = PrefixSeekRangeExpression(PrefixRange(Literal("prefix")))
val labelName = "Label"
val propertyName = "prop"
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(SchemaIndex("n", labelName, propertyName, UniqueIndex, Some(RangeQueryExpression(range))))))
when(context.getUniqueIndexRule(labelName, propertyName)).thenReturn(Some(IndexDescriptor(123,456)))
assertAccepts(q)
}
test("plans index seek for textual range query") {
val range = InequalitySeekRangeExpression(RangeLessThan(NonEmptyList(InclusiveBound(Literal("xxx")))))
val labelName = "Label"
val propertyName = "prop"
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(SchemaIndex("n", labelName, propertyName, AnyIndex, Some(RangeQueryExpression(range))))))
when(context.getIndexRule(labelName, propertyName)).thenReturn(Some(IndexDescriptor(123,456)))
assertAccepts(q)
}
test("plans index seek for textual range query with several ranges") {
val range = InequalitySeekRangeExpression(RangeBetween(RangeGreaterThan(NonEmptyList(InclusiveBound(Literal("xxx")), ExclusiveBound(Literal("yyy")))),
RangeLessThan(NonEmptyList(ExclusiveBound(Literal("@@@"))))))
val labelName = "Label"
val propertyName = "prop"
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(SchemaIndex("n", labelName, propertyName, AnyIndex, Some(RangeQueryExpression(range))))))
when(context.getIndexRule(labelName, propertyName)).thenReturn(Some(IndexDescriptor(123,456)))
assertAccepts(q)
}
test("plans index seek for numerical range query") {
val range = InequalitySeekRangeExpression(RangeGreaterThan(NonEmptyList(InclusiveBound(Literal(42)))))
val labelName = "Label"
val propertyName = "prop"
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(SchemaIndex("n", labelName, propertyName, UniqueIndex, Some(RangeQueryExpression(range))))))
when(context.getUniqueIndexRule(labelName, propertyName)).thenReturn(Some(IndexDescriptor(123,456)))
assertAccepts(q)
}
test("plans index seek for numerical range query with several ranges") {
val range = InequalitySeekRangeExpression(RangeBetween(RangeGreaterThan(NonEmptyList(InclusiveBound(Literal(Double.NaN)), ExclusiveBound(Literal(25.5)))),
RangeLessThan(NonEmptyList(ExclusiveBound(Literal(Double.NegativeInfinity))))))
val labelName = "Label"
val propertyName = "prop"
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(SchemaIndex("n", labelName, propertyName, UniqueIndex, Some(RangeQueryExpression(range))))))
when(context.getUniqueIndexRule(labelName, propertyName)).thenReturn(Some(IndexDescriptor(123,456)))
assertAccepts(q)
}
test("plans unique index seek for textual range query") {
val range = InequalitySeekRangeExpression(RangeLessThan(NonEmptyList(InclusiveBound(Literal("xxx")))))
val labelName = "Label"
val propertyName = "prop"
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(SchemaIndex("n", labelName, propertyName, UniqueIndex, Some(RangeQueryExpression(range))))))
when(context.getUniqueIndexRule(labelName, propertyName)).thenReturn(Some(IndexDescriptor(123,456)))
assertAccepts(q)
}
test("plans unique index seek for numerical range query") {
val range = InequalitySeekRangeExpression(RangeGreaterThan(NonEmptyList(InclusiveBound(Literal(42)))))
val labelName = "Label"
val propertyName = "prop"
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(SchemaIndex("n", labelName, propertyName, UniqueIndex, Some(RangeQueryExpression(range))))))
when(context.getUniqueIndexRule(labelName, propertyName)).thenReturn(Some(IndexDescriptor(123,456)))
assertAccepts(q)
}
test("only_takes_one_start_item_at_the_time") {
val q = PartiallySolvedQuery().
copy(start = Seq(
Unsolved(NodeByIndexQuery("s", "idx", Literal("foo"))),
Unsolved(NodeByIndexQuery("x", "idx", Literal("foo")))))
val remaining = assertAccepts(q).query
remaining.start.filter(_.solved) should have length 1
remaining.start.filterNot(_.solved) should have length 1
}
test("fixes_node_by_id_and_keeps_the_rest_around") {
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(NodeByIndexQuery("s", "idx", Literal("foo"))), Unsolved(RelationshipById("x", 1))))
val result = assertAccepts(q).query
val expected = Set(Solved(NodeByIndexQuery("s", "idx", Literal("foo"))), Unsolved(RelationshipById("x", 1)))
result.start.toSet should equal(expected)
}
test("says_no_to_already_solved_node_by_id_queries") {
val q = PartiallySolvedQuery().
copy(start = Seq(Solved(NodeByIndexQuery("s", "idx", Literal("foo")))))
assertRejects(q)
}
test("builds_a_nice_start_pipe") {
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(NodeByIndexQuery("s", "idx", Literal("foo")))))
val remainingQ = assertAccepts(q).query
remainingQ.start should equal(Seq(Solved(NodeByIndexQuery("s", "idx", Literal("foo")))))
}
test("does_not_offer_to_solve_empty_queries") {
//GIVEN WHEN THEN
assertRejects(PartiallySolvedQuery())
}
test("offers_to_solve_query_with_index_hints") {
val propertyKey= PropertyKey("name")
val labelName: String = "Person"
//GIVEN
val q = PartiallySolvedQuery().copy(
where = Seq(Unsolved(Equals(Property(Identifier("n"), propertyKey), Literal("Stefan")))),
start = Seq(Unsolved(SchemaIndex("n", labelName, propertyKey.name, AnyIndex, Some(SingleQueryExpression(Literal("a")))))))
when(context.getIndexRule(labelName, propertyKey.name)).thenReturn(Some(IndexDescriptor(123,456)))
//THEN
val producedPlan = assertAccepts(q)
producedPlan.pipe shouldBe a [NodeStartPipe]
}
test("throws_exception_if_no_index_is_found") {
//GIVEN
val propertyKey= PropertyKey("name")
val q = PartiallySolvedQuery().copy(
where = Seq(Unsolved(Equals(Property(Identifier("n"), propertyKey), Literal("Stefan")))),
start = Seq(Unsolved(SchemaIndex("n", "Person", "name", AnyIndex, None))))
when(context.getIndexRule(any(), any())).thenReturn(None)
//THEN
intercept[IndexHintException](assertAccepts(q))
}
test("says_yes_to_global_queries") {
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(AllNodes("s"))))
assertAccepts(q)
}
test("says_yes_to_global_rel_queries") {
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(AllRelationships("s"))))
assertAccepts(q)
}
test("only_takes_one_global_start_item_at_the_time") {
val q = PartiallySolvedQuery().
copy(start = Seq(Unsolved(AllNodes("s")), Unsolved(AllNodes("x"))))
val remaining = assertAccepts(q).query
remaining.start.filter(_.solved) should have length 1
remaining.start.filterNot(_.solved) should have length 1
}
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/test/scala/org/neo4j/cypher/internal/compiler/v2_3/executionplan/builders/StartPointBuilderTest.scala | Scala | apache-2.0 | 9,584 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.scala
import junit.framework.TestCase
import junit.framework.TestCase.assertEquals
import org.apache.camel.model._
/**
* Test using the low level processor definition API from Scala.
*
 * In previous versions, the use of generics in the model package caused compile errors in Scala
 * when trying to do things like this.
*/
class UsingModelTest extends TestCase {
def testUsingModel() {
val routes = new RoutesDefinition
val route = routes.route
route.from("seda:foo")
val bean = new BeanDefinition("myBean", "someMethod")
route.addOutput(bean)
assertEquals("Route[[From[seda:foo]] -> [Bean[ref:myBean method:someMethod]]]", route.toString)
}
}
| CandleCandle/camel | components/camel-scala/src/test/scala/org/apache/camel/scala/UsingModelTest.scala | Scala | apache-2.0 | 1,518 |
/*
* @author Philip Stutz
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.triplerush.sparql
import com.signalcollect.triplerush.TriplePattern
object VariableEncoding {
@inline def variableIdToDecodingIndex(variableId: Int): Int = -(variableId + 1)
@inline def requiredVariableBindingsSlots(query: Seq[TriplePattern]): Int = {
var minId = 0
query.foreach {
tp =>
minId = math.min(minId, tp.s)
minId = math.min(minId, tp.p)
minId = math.min(minId, tp.o)
}
-minId
}
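  // Illustrative check (added, not in the original source): for a query whose single pattern has
  // s = -1, p = 5 and o = -2, requiredVariableBindingsSlots returns 2, while
  // variableIdToDecodingIndex maps -1 -> 0 and -2 -> 1.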
}
| uzh/triplerush | src/main/scala/com/signalcollect/triplerush/sparql/VariableEncoding.scala | Scala | apache-2.0 | 1,123 |
package es.um.nosql.streaminginference.benchmark
import scala.collection.immutable.HashMap
import org.mongodb.scala.bson.collection.immutable.Document
class Aggregate(fields:Int, version:Int, depth:Int)
{
private lazy val primitives:HashMap[String, String] =
{
var prim = HashMap[String,String]()
for (i <- 1 to fields)
prim += "field-" + i -> "test"
prim
}
private lazy val aggregate:Option[(String, Aggregate)] =
{
if (depth > 0)
{
val agg = new Aggregate(fields=fields, version = 0, depth = depth-1)
Some("entity-0", agg)
}
else
None
}
def toDocument():Document = {
val document: Document = Document("_id" -> 1, "name" -> "test")
document
}
def toJsonStr(depth: Int):String = {
val indent = 6+(2*depth)
var str:String = primitives.map
{
      case (name, value) => " "*indent+"\""+name+"\": \"" + value + "\""
    }
    .mkString(",\n")
if (aggregate.isDefined)
{
val agg = aggregate.get
str += ",\\n"+
" "*indent +
"\\"" +
agg._1 +
"\\":\\n"+
" "*indent +
"{\\n" +
agg._2.toJsonStr(depth+1) +
"\\n" +
" "*indent +
"}"
}
if (version > 0)
{
      if (!str.isEmpty()) str += ",\n"
      str += " "*indent + "\"version-"+version+"\": true"
}
str
}
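  // Illustrative output (added, not in the original source): new Aggregate(fields = 1,
  // version = 0, depth = 0).toJsonStr(0) produces the single line `      "field-1": "test"`
  // (six-space indent, no nested entity and no version marker).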
}
| catedrasaes-umu/NoSQLDataEngineering | projects/es.um.nosql.streaminginference.benchmark/src/es/um/nosql/streaminginference/benchmark/Aggregate.scala | Scala | mit | 1,436 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.sinks
import org.apache.flink.streaming.api.datastream.{DataStream, DataStreamSink}
import org.apache.flink.table.api.Table
/**
* Defines an external [[TableSink]] to emit a streaming [[Table]] with insert, update, and delete
* changes.
*
* @tparam T Type of records that this [[TableSink]] expects and supports.
*/
trait BaseRetractStreamTableSink[T] extends StreamTableSink[T] {
/** Emits the DataStream. */
def emitDataStream(dataStream: DataStream[T]): DataStreamSink[_]
}
| ueshin/apache-flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/sinks/BaseRetractStreamTableSink.scala | Scala | apache-2.0 | 1,332 |
package bad.robot.temperature
import bad.robot.temperature.task.FixedTimeMeasurement
import scalaz.\/
trait TemperatureWriter {
def write(measurement: Measurement): Error \/ Unit
}
trait FixedTimeMeasurementWriter {
def write(measurement: FixedTimeMeasurement): Error \/ List[SensorReading]
}
| tobyweston/temperature-machine | src/main/scala/bad/robot/temperature/TemperatureWriter.scala | Scala | apache-2.0 | 301 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package base
package patterns
import com.intellij.psi.PsiElement
import com.intellij.psi.tree.TokenSet
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScGuard}
/**
* @author Alexander Podkhalyuzin
* Date: 28.02.2008
*/
trait ScCaseClause extends ScalaPsiElement {
def pattern: Option[ScPattern] = findChild(classOf[ScPattern])
def expr: Option[ScExpression] = findChild(classOf[ScExpression])
def guard: Option[ScGuard] = findChild(classOf[ScGuard])
def funType: Option[PsiElement] = {
val result = getNode.getChildren(TokenSet.create(ScalaTokenTypes.tFUNTYPE,
ScalaTokenTypes.tFUNTYPE_ASCII))
if (result.length != 1) None
else Some(result(0).getPsi)
}
override def accept(visitor: ScalaElementVisitor) {
visitor.visitCaseClause(this)
}
}
object ScCaseClause {
def unapply(e: ScCaseClause): Option[(Option[ScPattern], Option[ScGuard], Option[ScExpression])] =
Option(e).map(e => (e.pattern, e.guard, e.expr))
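  // Illustrative use of the extractor above (added, not in the original source):
  //   clause match { case ScCaseClause(Some(pattern), guard, expr) => /* ... */ }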
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/base/patterns/ScCaseClause.scala | Scala | apache-2.0 | 1,113 |
/*
* Distributed as part of Scalala, a linear algebra library.
*
* Copyright (C) 2008- Daniel Ramage
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 USA
*/
package scalala;
package tensor;
import domain._;
import generic._;
import scalala.operators._;
import scalala.scalar.{Scalar,ScalarDecimal};
import scalala.generic.collection._;
import scalala.generic.math.{CanMean,CanVariance,CanSqrt};
/**
* A Tensor is a map from keys K (with a domain) to numeric scalar values V.
* More specific operations are available on tensors indexed by a single key
* (Tensor1, Vector) or pair of keys (Tensor2, Matrix).
*
* Note that this trait does not support for comprehensions, although its
* two sub-traits: TensorSeqLike and TensorMapLike do. The difference is
* the way in which the collection is viewed. TensorSeqLike views the
* collection as one containing values. TensorMapLike views the collection
* as one containing key value pairs.
*
* @author dramage
*/
trait TensorLike
[@specialized(Int, Long) K,
@specialized(Int, Long, Float, Double, Boolean) V,
+D<:IterableDomain[K],
+This<:Tensor[K,V]]
extends DomainFunction[K, V, D]
with operators.NumericOps[This] with HasValuesMonadic[This,V] {
self =>
type Domain = D;
protected type Self = This;
/** Returns a pointer to this cast as an instance of This. */
def repr : This = this.asInstanceOf[This];
/** Provides information about the underlying scalar value type V. */
implicit val scalar : Scalar[V];
/**
* Returns a builder for constructing new instances like this one,
* on the given domain.
*/
def newBuilder[NK,NV](domain : IterableDomain[NK])(implicit scalar : Scalar[NV])
: TensorBuilder[NK,NV,Tensor[NK,NV]] = domain match {
case that : IndexDomain =>
mutable.Vector[NV](that)(scalar).asBuilder;
case that : Domain1[_] =>
mutable.Tensor1[NK,NV](that.asInstanceOf[Domain1[NK]])(scalar).asBuilder;
case that : TableDomain =>
mutable.Matrix[NV](that)(scalar).asBuilder;
case that : Domain2[_,_] =>
mutable.Tensor2[Any,Any,NV](that.asInstanceOf[Domain2[Any,Any]])(scalar).asBuilder.asInstanceOf[TensorBuilder[NK,NV,Tensor[NK,NV]]];
// TODO: add this in when we have a mutable.TensorN
// case that : ProductNDomain[_] =>
// mutable.TensorN(that)(implicitly[Scalar[NV]]).asBuilder;
case _ =>
mutable.Tensor[NK,NV](domain).asBuilder;
}
//
// Collection contents
//
/** The size of this collection. */
def size : Int;
/** An upper bound on the number of non zero values in this collection. */
def nonzeroSize =
size;
/**
* Returns the pairs of (K,V) that make up this tensor for use in
* for comprehensions. The returned object can be viewed as a
* Map[K,V].
*/
def pairs : TensorPairsMonadic[K,V,This] =
new TensorPairsMonadic[K,V,This] { override def repr = self.repr; }
/**
* Returns the keys that make up this tensor for use in
* for comprehensions. The returned object can be viewed as an
* Iterable[K].
*/
def keys : TensorKeysMonadic[K,V,This] =
new TensorKeysMonadic[K,V,This] { override def repr = self.repr; }
/**
* Returns the values that make up this tensor for use in
* for comprehensions. The returned object can be viewed as an
* Iterable[V].
*/
def values : TensorValuesMonadic[K,V,This] =
new TensorValuesMonadic[K,V,This] { override def repr = self.repr; }
/**
* Returns a monadic nonzero elements of this tensor. Then call one of
* .pairs .keys or .values for use in for comprehensions.
*/
def nonzero : TensorNonZeroMonadic[K,V,This] =
new TensorNonZeroMonadic[K,V,This] { override def repr = self.repr; }
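  // Example (added, illustrative only): `for ((k,v) <- t.nonzero.pairs) println(k + " -> " + v)`
  // is intended to visit only the (possibly) non-zero entries of a tensor t, per the monadic
  // accessors above.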
/**
* Applies the given function to each key and its corresponding value.
*/
def foreachPair[U](fn: (K,V) => U) : Unit =
foreachKey[U](k => fn(k,apply(k)));
/**
* Applies the given function to each key and its corresponding value
* if the value is non-zero (and possibly also some that are zeros).
*
* @return true if all elements in the tensor were visited.
*/
def foreachNonZeroPair[U](fn : ((K,V)=>U)) : Boolean = {
this.foreachPair[U](fn);
true;
}
/** Applies the given function to each key in the tensor. */
def foreachKey[U](fn: K => U) : Unit =
domain.foreach[U](fn);
/**
* Applies the given function to each key if its corresponding value
* is non-zero (and possibly some zero-valued keys as well).
*
   * @return true if all keys in the tensor were visited.
*/
def foreachNonZeroKey[U](fn : K => U) : Boolean = {
this.foreachKey[U](fn);
true;
}
/**
* Applies the given function to each value in the map (one for
* each element of the domain, including zeros).
*/
def foreachValue[U](fn : (V=>U)) =
foreachKey[U](k => fn(apply(k)));
/**
* Applies the given function to every non-zero value in the map
* (and possibly some zeros, too).
*
* @return true if all elements in the map were visited.
*/
def foreachNonZeroValue[U](fn : (V=>U)) = {
this.foreachValue[U](fn);
true;
}
/** Returns true if and only if the given predicate is true for all elements. */
def forallPairs(fn : (K,V) => Boolean) : Boolean = {
foreachPair((k,v) => if (!fn(k,v)) return false);
return true;
}
/** Returns true if and only if the given predicate is true for all elements. */
def forallValues(fn : V => Boolean) : Boolean = {
foreachValue(v => if (!fn(v)) return false);
return true;
}
/** Returns true if and only if the given predicate is true for all non-zero elements. */
def forallNonZeroPairs(fn : (K,V) => Boolean) : Boolean = {
foreachNonZeroPair((k,v) => if (!fn(k,v)) return false);
return true;
}
/** Returns true if and only if the given predicate is true for all non-zero elements. */
def forallNonZeroValues(fn : V => Boolean) : Boolean = {
foreachNonZeroValue(v => if (!fn(v)) return false);
return true;
}
/** Creates a new map containing a transformed copy of this map. */
def mapPairs[TT>:This,O,That](f : (K,V) => O)
(implicit bf : CanMapKeyValuePairs[TT, K, V, O, That]) : That =
bf.map(repr, f);
/** Maps all non-zero key-value pairs values. */
def mapNonZeroPairs[TT>:This,O,That](f : (K,V) => O)
(implicit bf : CanMapKeyValuePairs[TT, K, V, O, That]) : That =
bf.mapNonZero(repr.asInstanceOf[TT], f);
/** Creates a new map containing a transformed copy of this map. */
def mapValues[TT>:This,O,That](f : V => O)
(implicit bf : CanMapValues[TT, V, O, That]) : That =
bf.map(repr.asInstanceOf[TT], f);
/** Maps all non-zero values. */
def mapNonZeroValues[TT>:This,O,That](f : V => O)
(implicit bf : CanMapValues[TT, V, O, That]) : That =
bf.mapNonZero(repr.asInstanceOf[TT], f);
/** Iterates over the keys in the tensor. */
def keysIterator : Iterator[K] =
domain.iterator;
/** Iterates over the (possibly) non-zero keys in the tensor. */
def keysIteratorNonZero : Iterator[K] =
keysIterator;
/** Iterates over the values in the tensor. */
def valuesIterator : Iterator[V] =
keysIterator.map(apply);
/** Iterates over the (possibly) non-zero values in the tensor. */
def valuesIteratorNonZero : Iterator[V] =
valuesIterator;
/** Iterates over all elements in the domain and the corresponding value. */
def pairsIterator : Iterator[(K,V)] =
keysIterator.map(k => (k,this(k)));
/** Iterates over the (possibly) non-zero pairs in the tensor. */
def pairsIteratorNonZero : Iterator[(K,V)] =
pairsIterator;
/** Returns some key for which the given predicate is true. */
def find(p : V => Boolean) : Option[K] = {
foreachKey(k => if (p(apply(k))) return Some(k));
return None;
}
/** Returns the keys for which the given predicate is true. */
def findAll(p : V => Boolean) : Iterator[K] =
keysIterator.filter(this andThen p);
/**
* Constructs a view of this map on which calls to mapValues are
* chained together and lazily evaluated.
*/
def view[That](implicit bf : CanView[This,That]) : That =
bf.apply(repr);
/**
* Creates a new Tensor over the same domain using the given value
* function to create each return value in the map.
*/
def joinAll[TT>:This,V2,RV,That](tensor : Tensor[K,V2])(fn : (K,V,V2) => RV)
(implicit jj : CanJoin[TT, Tensor[K,V2], K, V, V2]) : Unit =
jj.joinAll[RV](repr.asInstanceOf[TT], tensor, (k,v1,v2) => fn(k,v1,v2));
/**
* Creates a new Tensor over the same domain using the given value
* function to create each return value in the map where keys in
* both this and m are non-zero.
*/
def joinBothNonZero[TT>:This,V2,RV,That](tensor : Tensor[K,V2])(fn : (K,V,V2) => RV)
(implicit jj : CanJoin[TT, Tensor[K,V2], K, V, V2]) : Unit =
jj.joinBothNonZero[RV](repr.asInstanceOf[TT], tensor, (k,v1,v2) => fn(k,v1,v2));
/**
* Creates a new Tensor over the same domain using the given value
* function to create each return value in the map where keys in
* either this or m are non-zero.
*/
def joinEitherNonZero[TT>:This,V2,RV,That](tensor : Tensor[K,V2])(fn : (K,V,V2) => RV)
(implicit jj : CanJoin[TT, Tensor[K,V2], K, V, V2]) : Unit =
jj.joinEitherNonZero[RV](repr.asInstanceOf[TT], tensor, (k,v1,v2) => fn(k,v1,v2));
//
// Slice construction
//
/** The value at the given key. Takes precedence over apply(keys : K*). */
def apply(key : K) : V;
/** Creates a view backed by the given keys, returning them as a sequence. */
def apply[That](keys : K*)
(implicit bf : CanSliceVector[This, K, That]) : That =
bf(repr, keys);
/** Creates a view backed by the "true" elements in selected. */
def apply[That](selected : Tensor[K,Boolean])
(implicit bf : CanSliceVector[This, K, That]) : That =
bf(repr, domain.filter(selected).toIndexedSeq);
/** Creates a view backed by the given keys, returning them as a sequence. */
def apply[That](keys : TraversableOnce[K])
(implicit bf : CanSliceVector[This, K, That]) : That =
bf(repr, keys.toIndexedSeq);
/** Creates a view for the given elements with new indexes I, backed by this map. */
def apply[I,That](keys : (I,K)*)
(implicit bf : CanSliceTensor[This, K, I, That]) : That =
apply[I,That](keys.toMap[I,K](Predef.conforms[(I, K)]));
/** Creates a view for the given elements with new indexes I, backed by this map. */
def apply[I,That](keys : TraversableOnce[(I,K)])
(implicit bf : CanSliceTensor[This, K, I, That]) : That =
apply[I,That](keys.toMap[I,K](Predef.conforms[(I, K)]));
/** Creates a view for the given elements with new indexes I, backed by this map. */
def apply[I,That](keys : scala.collection.Map[I,K])
(implicit bf : CanSliceTensor[This, K, I, That]) : That =
bf(repr, keys);
//
// Sorting
//
/**
* Returns the elements of this.domain ordered by their values in this map.
* Currently this method is not particularly efficient, as it creates several
* in-memory arrays the size of the domain.
*/
def argsort(implicit ord : Ordering[V]) : List[K] =
keys.toList.sortWith((i:K, j:K) => ord.lt(this(i), this(j)));
/**
* Returns a sorted view of the current map. Equivalent to calling
* <code>x(x.argsort)</code>. Changes to the sorted view are
* written-through to the underlying map.
*/
def sorted[That](implicit bf : CanSliceVector[This, K, That], ord : Ordering[V]) : That =
this.apply[That](this.argsort);
//
// Collection level queries
//
/** Returns a key associated with the largest value in the tensor. */
def argmax : K = {
if (!pairsIterator.hasNext) {
throw new UnsupportedOperationException("Empty .max");
}
var (arg,max) = pairsIterator.next;
foreachPair((k,v) => if (scalar.>(v, max)) { max = v; arg = k; });
arg;
}
/** Returns a key associated with the smallest value in the tensor. */
def argmin : K = {
if (!pairsIterator.hasNext) {
throw new UnsupportedOperationException("Empty .min");
}
var (arg,min) = pairsIterator.next;
foreachPair((k,v) => if (scalar.<(v,min)) { min = v; arg = k; });
arg;
}
/** Returns the max of the values in this tensor. */
def max : V = {
if (!valuesIterator.hasNext) {
throw new UnsupportedOperationException("Empty .max");
}
var max = valuesIterator.next;
if (foreachNonZeroValue(v => { max = scalar.max(max,v) })) {
return max;
} else {
return scalar.max(max, scalar.zero);
}
}
/** Returns the min of the values in this tensor. */
def min : V = {
if (!valuesIterator.hasNext) {
throw new UnsupportedOperationException("Empty .min");
}
var min = valuesIterator.next;
if (foreachNonZeroValue(v => { min = scalar.min(min,v); })) {
return min;
} else {
return scalar.min(min, scalar.zero)
}
}
/** Returns the sum of the values in this tensor. */
def sum : V = {
var sum = scalar.zero;
foreachNonZeroValue(v => sum = scalar.+(sum,v));
return sum;
}
/**
* Returns the mean of the values in this tensor.
* The returned type is a decimal version of the scalar type of this tensor.
*/
def mean[D](implicit calc : CanMean[This,D]) : D =
calc(repr);
/**
* Returns the variance of the values in this tensor.
* The returned type is a decimal version of the scalar type of this tensor.
*/
def variance[D](implicit calc : CanVariance[This,D]) : D =
calc(repr);
/**
* Returns the standard deviation of the values in this tensor.
* The returned type is a decimal version of the scalar type of this tensor.
*/
def stddev[D](implicit calc : CanVariance[This,D], sqrt : CanSqrt[D,D]) : D =
sqrt(calc(repr));
//
// Conversions
//
/** Returns an ordering over the domain based on the values in this map. */
def asOrdering(implicit ord : Ordering[V]) : Ordering[K] = new Ordering[K] {
override def compare(a : K, b : K) = ord.compare(self(a), self(b));
}
/** Returns an unmodifiable Map-like view of this Tensor. */
def asMap : scala.collection.Map[K,V] =
pairs;
/** Creates a new copy of this Tensor as a scala map. */
def toMap : Map[K,V] =
asMap.toMap;
protected[this] def buildMkValueString : V=>String = {
if (scalar == implicitly[Scalar[Double]])
(v : V) => String.format("% g".format(v.asInstanceOf[Double]));
else if (scalar == implicitly[Scalar[Float]])
(v : V) => String.format("% g".format(v.asInstanceOf[Float]));
else
(v : V) => v.toString;
}
protected[this] def buildMkKeyString : K=>String = {
(k : K) => k.toString;
}
/**
* Creates a string for the first n non-zero items using the given key string
* and value string builder.
*/
def toString(n : Int, mkKeyString : K=>String, mkValueString : V=>String) : String = {
val iter = keysIterator;
val keys = iter.take(n).toList;
if (keys.isEmpty)
return "";
val newline = System.getProperty("line.separator");
val keyWidth = keys.iterator.map(mkKeyString).map(_.length).max+1;
val rv = (for (key <- nonzero.keys.iterator) yield {
val ks = mkKeyString(key);
ks + (" " * (keyWidth-ks.length)) + mkValueString(apply(key));
}).mkString(newline);
if (iter.hasNext) {
rv + newline + "... ("+(domain.size) +" total)";
} else {
rv;
}
}
/**
* Creates a string representation of the first 10 (potentially) non-zero
* items in the tensor.
*/
override def toString : String = {
toString(10, buildMkKeyString, buildMkValueString);
}
//
// Equality
//
/**
* Default implementation iterates the full domain in order, checking
* that each function returns the same value.
*/
override def equals(other : Any) : Boolean = other match {
case that: Tensor[_,_] =>
(this eq that) ||
(that canEqual this) &&
(this.domain == that.domain) &&
({ val casted = that.asInstanceOf[Tensor[K,V]];
this.joinEitherNonZero(casted) { (k,v1,v2) => if (v1 != v2) return false; }
true; });
case _ => false;
}
/** From recipe in "Programming in Scala" section 28.4. */
protected def canEqual(other : Any) : Boolean = other match {
case that : Tensor[_,_] => true;
case _ => false;
}
override def hashCode() =
domain.hashCode + pairsIterator.foldLeft(1)((hash,v) => 41 * hash + v.hashCode);
}
/**
 * A Tensor is a map from keys K (with a domain) to numeric scalar values V.
* More specific operations are available on tensors indexed by a single key
* (Tensor1, Vector) or pair of keys (Tensor2, Matrix).
*
* @author dramage
*/
trait Tensor
[@specialized(Int,Long) K, @specialized(Int,Long,Float,Double,Boolean) V]
extends TensorLike[K, V, IterableDomain[K], Tensor[K, V]];
object Tensor {
/** Constructs a tensor for the given domain. */
def apply[K,V:Scalar](domain : IterableDomain[K]) =
mutable.Tensor.apply(domain);
implicit def canView[K, V:Scalar] =
new CanView[Tensor[K,V],TensorView.IdentityView[K,V,Tensor[K,V]]] {
override def apply(from : Tensor[K,V]) =
new TensorView.IdentityViewImpl[K,V,Tensor[K,V]](from);
}
implicit def canSliceTensor[K1, K2, V:Scalar] =
new CanSliceTensor[Tensor[K1,V], K1, K2, Tensor[K2,V]] {
override def apply(from : Tensor[K1,V], keymap : scala.collection.Map[K2,K1]) =
new TensorSlice.FromKeyMap[K1, K2, V, Tensor[K1,V]](from, keymap);
}
implicit def canSliceVectorCol[K, V:Scalar] =
new CanSliceVector[Tensor[K,V], K, VectorCol[V]] {
override def apply(from : Tensor[K,V], keys : Seq[K]) =
new VectorColSlice.FromKeySeq[K,V,Tensor[K,V]](from, keys);
}
implicit def canMapValues[K, V, RV, This, D, That]
(implicit view : This=>Tensor[K,V], d : CanGetDomain[This,D],
bf : CanBuildTensorFrom[This, D, K, RV, That],
s : Scalar[RV])
: CanMapValues[This,V,RV,That]
= new CanMapValues[This,V,RV,That] {
override def map(from : This, fn : (V=>RV)) = {
val builder = bf(from, from.domain.asInstanceOf[D]);
from.foreachPair((k,v) => builder(k) = fn(v));
builder.result;
}
override def mapNonZero(from : This, fn : (V=>RV)) = {
val builder = bf(from, from.domain.asInstanceOf[D]);
from.foreachNonZeroPair((k,v) => builder(k) = fn(v));
builder.result;
}
}
implicit def canMapKeyValuePairs[K, V, RV, This, D, That]
(implicit view : This=>Tensor[K,V], d : CanGetDomain[This,D],
bf : CanBuildTensorFrom[This, D, K, RV, That],
s : Scalar[RV])
: CanMapKeyValuePairs[This,K,V,RV,That]
= new CanMapKeyValuePairs[This,K,V,RV,That] {
override def map(from : This, fn : ((K,V)=>RV)) = {
val builder = bf(from, from.domain.asInstanceOf[D]);
from.foreachPair((k,v) => builder(k) = fn(k,v));
builder.result;
}
override def mapNonZero(from : This, fn : ((K,V)=>RV)) = {
val builder = bf(from, from.domain.asInstanceOf[D]);
from.foreachNonZeroPair((k,v) => builder(k) = fn(k,v));
builder.result;
}
}
implicit def canJoin[K, V1, V2, A, B]
(implicit viewA : A=>Tensor[K,V1], viewB : B=>Tensor[K,V2])
: CanJoin[A, B, K, V1, V2] =
new CanJoin[A, B, K, V1, V2] {
override def joinAll[RV](_a : A, _b : B, fn : (K,V1,V2)=>RV) = {
val a = viewA(_a);
val b = viewB(_b);
a.checkDomain(b.domain);
val visited = scala.collection.mutable.HashSet[K]()
a.foreachPair{(k,aV) =>
fn(k,aV,b(k))
visited += k
};
b.foreachPair{ (k,bV) =>
if(!visited(k)) {
fn(k,a(k),bV)
}
}
}
override def joinEitherNonZero[RV](_a : A, _b : B, fn : (K,V1,V2)=>RV) = {
val a = viewA(_a);
val b = viewB(_b);
val aZ = a.scalar.zero;
val bZ = b.scalar.zero;
a.checkDomain(b.domain);
// keys that are visited and are zero (after function application to
// catch mutations)
var visitedZeros : scala.collection.mutable.HashSet[K] = null;
val wholeDomainVisited = a.foreachNonZeroPair((k,aV) => {
val bV = b(k);
if (aV != aZ || bV != bZ) {
fn(k, aV, bV);
}
// if value in a at k is now zero (possibly after mutation), remember
// that we visited it
if (a(k) == aZ) {
if (visitedZeros == null) {
visitedZeros = scala.collection.mutable.HashSet[K]();
}
visitedZeros += k;
}
});
if (!wholeDomainVisited || a.domain != b.domain) {
b.foreachNonZeroPair((k,bV) => {
val aV = a(k);
if (aV == aZ && bV != bZ && (visitedZeros == null || !visitedZeros.contains(k))) {
fn(k, aV, bV);
}
});
}
}
override def joinBothNonZero[RV](_a : A, _b : B, fn : (K,V1,V2)=>RV) = {
val a = viewA(_a);
val b = viewB(_b);
a.checkDomain(b.domain);
val aZ = a.scalar.zero;
val bZ = b.scalar.zero;
if (a.nonzeroSize <= b.nonzeroSize) {
a.foreachNonZeroPair((k,aV) => {
val bV = b(k);
if (aV != aZ && bV != bZ) fn(k, aV, bV);
});
} else {
b.foreachNonZeroPair((k,bV) => {
val aV = a(k);
if (aV != aZ && bV != bZ) fn(k, aV, bV);
});
}
}
}
implicit def opTensorUnary[K,V,RV,Op<:OpType,This,That]
(implicit view : This=>Tensor[K,V],
op : UnaryOp[V,Op,RV],
bf : CanMapValues[This,V,RV,That])
: UnaryOp[This,Op,That] = new UnaryOp[This,Op,That] {
def opType = op.opType;
override def apply(from : This) =
bf.map(from, op.apply _);
}
implicit def opTensorTensor[K,D,V1,V2,Op<:OpType,RV,A,B,That]
(implicit v1 : A=>Tensor[K,V1], v2 : B=>Tensor[K,V2],
df : CanGetDomain[A,D],
op : BinaryOp[V1,V2,Op,RV],
jj : CanJoin[A,B,K,V1,V2],
bf : CanBuildTensorForBinaryOp[A,B,D,K,RV,Op,That])
: BinaryOp[A,B,Op,That]
= new BinaryOp[A,B,Op,That] {
override def opType = op.opType;
override def apply(a : A, b : B) = {
val builder = bf(a,b,(a.domain union b.domain).asInstanceOf[D]);
if (opType == OpMul) {
jj.joinBothNonZero(a,b,(k,v1,v2) => builder(k) = op(v1,v2));
} else if(opType == OpAdd || opType == OpSub) {
jj.joinEitherNonZero(a,b,(k,v1,v2) => builder(k) = op(v1,v2));
} else {
jj.joinAll(a,b,(k,v1,v2) => builder(k) = op(v1,v2));
}
builder.result;
}
}
implicit def opTensorScalar[K,V1,V2,Op<:OpType,RV,This,That]
(implicit view : This=>Tensor[K,V1],
op : BinaryOp[V1,V2,Op,RV],
bf : CanMapValues[This,V1,RV,That],
s : Scalar[V2])
: BinaryOp[This,V2,Op,That]
= new BinaryOp[This,V2,Op,That] {
override def opType = op.opType;
override def apply(a : This, b : V2) = {
if (opType == OpMul && !s.isNaN(b)) {
bf.mapNonZero(a, v => op(v, b));
} else {
bf.map(a, v => op(v, b));
}
}
}
implicit def opScalarTensor[K,V1,V2,Op<:OpType,RV,This,That]
(implicit view : This=>Tensor[K,V2],
op : BinaryOp[V1,V2,Op,RV],
bf : CanMapValues[This,V2,RV,That],
s : Scalar[V1])
: BinaryOp[V1,This,Op,That]
= new BinaryOp[V1,This,Op,That] {
override def opType = op.opType;
override def apply(a : V1, b : This) = {
if (opType == OpMul && !s.isNaN(a)) {
bf.mapNonZero(b, v => op(a, v));
} else {
bf.map(b, v => op(a, v));
}
}
}
implicit def opTensorInnerProduct[K,V1,V2,A,B,RV]
(implicit viewA : A=>Tensor[K,V1], viewB: B=>Tensor[K,V2],
mul : BinaryOp[V1,V2,OpMul,RV], add : BinaryOp[RV,RV,OpAdd,RV],
compatible : CompatibleShape[A,B], s : Scalar[RV])
: BinaryOp[A,B,OpMulInner,RV]
= new BinaryOp[A,B,OpMulInner,RV] {
override def opType = OpMulInner;
override def apply(a: A, b: B) = {
a.checkDomain(b.domain);
var sum = s.zero;
if (a.nonzeroSize <= b.nonzeroSize) {
a.foreachNonZeroPair((k,aV) => sum = add(sum, mul(aV, b(k))));
} else {
b.foreachNonZeroPair((k,bV) => sum = add(sum, mul(a(k), bV)));
}
sum;
}
}
}
| scalala/Scalala | src/main/scala/scalala/tensor/Tensor.scala | Scala | lgpl-2.1 | 24,870 |
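// A minimal, self-contained sketch (plain Maps rather than scalala tensors, written for
// this edit) of why the BinaryOp above routes OpMul through joinBothNonZero and
// OpAdd/OpSub through joinEitherNonZero: zero is absorbing for * but neutral for +.
object SparseJoinSketch {
  def joinBothNonZero(a: Map[Int, Double], b: Map[Int, Double])(f: (Double, Double) => Double): Map[Int, Double] =
    // under *, a key missing from either operand yields zero, so only the intersection matters
    (a.keySet intersect b.keySet).map(k => k -> f(a(k), b(k))).toMap

  def joinEitherNonZero(a: Map[Int, Double], b: Map[Int, Double])(f: (Double, Double) => Double): Map[Int, Double] =
    // under + and -, a key present in either operand can still produce a nonzero result
    (a.keySet union b.keySet).map(k => k -> f(a.getOrElse(k, 0.0), b.getOrElse(k, 0.0))).toMap

  def main(args: Array[String]): Unit = {
    val a = Map(1 -> 2.0, 2 -> 3.0)
    val b = Map(2 -> 4.0, 3 -> 5.0)
    println(joinBothNonZero(a, b)(_ * _))   // Map(2 -> 12.0)
    println(joinEitherNonZero(a, b)(_ + _)) // Map(1 -> 2.0, 2 -> 7.0, 3 -> 5.0)
  }
}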
package net.kaliber.basicAuthentication
import com.typesafe.config.ConfigValueType
import java.util.UUID
import org.apache.commons.codec.binary.Base64
import play.api.Configuration
import play.api.http.HeaderNames.AUTHORIZATION
import play.api.http.HeaderNames.WWW_AUTHENTICATE
import play.api.libs.Crypto
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import play.api.mvc.Cookie
import play.api.mvc.Filter
import play.api.mvc.RequestHeader
import play.api.mvc.Result
import play.api.mvc.Results.Unauthorized
import scala.collection.JavaConverters._
import scala.concurrent.Future
class BasicAuthenticationFilter(configurationFactory: => BasicAuthenticationFilterConfiguration) extends Filter {
def apply(next: RequestHeader => Future[Result])(requestHeader: RequestHeader): Future[Result] =
if (configuration.enabled && isNotExcluded(requestHeader))
checkAuthentication(requestHeader, next)
else next(requestHeader)
private def isNotExcluded(requestHeader: RequestHeader): Boolean =
!configuration.excluded.exists( requestHeader.path matches _ )
private def checkAuthentication(requestHeader: RequestHeader, next: RequestHeader => Future[Result]): Future[Result] =
if (isAuthorized(requestHeader)) addCookie(next(requestHeader))
else unauthorizedResult
private def isAuthorized(requestHeader: RequestHeader) = {
lazy val authorizedByHeader =
requestHeader.headers.get(AUTHORIZATION).exists(expectedHeaderValues)
lazy val authorizedByCookie =
requestHeader.cookies.get(COOKIE_NAME).exists(_.value == cookieValue)
authorizedByHeader || authorizedByCookie
}
private def addCookie(result: Future[Result]) =
result.map(_.withCookies(cookie))
private lazy val configuration = configurationFactory
private lazy val unauthorizedResult =
Future successful Unauthorized.withHeaders(WWW_AUTHENTICATE -> realm)
private lazy val COOKIE_NAME = "play-basic-authentication-filter"
private lazy val cookie = Cookie(COOKIE_NAME, cookieValue)
private lazy val cookieValue =
Crypto.sign(configuration.username + configuration.passwords)
private lazy val expectedHeaderValues =
configuration.passwords.map { password =>
val combined = configuration.username + ":" + password
val credentials = Base64.encodeBase64String(combined.getBytes)
basic(credentials)
}
private def realm = basic(s"""realm=\\"${configuration.realm}"""")
private def basic(content: String) = s"Basic $content"
}
object BasicAuthenticationFilter {
def apply() = new BasicAuthenticationFilter(
BasicAuthenticationFilterConfiguration.parse(
play.api.Play.current.configuration
)
)
def apply(configuration: => Configuration) = new BasicAuthenticationFilter(
BasicAuthenticationFilterConfiguration parse configuration
)
}
case class BasicAuthenticationFilterConfiguration(
realm: String,
enabled: Boolean,
username: String,
passwords: Set[String],
excluded: Set[String])
object BasicAuthenticationFilterConfiguration {
private val defaultRealm = "Application"
private def credentialsMissingRealm(realm: String) =
s"$realm: The username or password could not be found in the configuration."
def parse(configuration: Configuration) = {
val root = "basicAuthentication."
def boolean(key: String) = configuration.getBoolean(root + key)
def string(key: String) = configuration.getString(root + key)
def seq(key: String) =
Option(configuration.underlying getValue (root + key)).map { value =>
value.valueType match {
case ConfigValueType.LIST => value.unwrapped.asInstanceOf[java.util.List[String]].asScala
case ConfigValueType.STRING => Seq(value.unwrapped.asInstanceOf[String])
case _ => sys.error(s"Unexpected value at `${root + key}`, expected STRING or LIST")
}
}
val enabled = boolean("enabled").getOrElse(true)
val credentials: Option[(String, Set[String])] = for {
username <- string("username")
passwords <- seq("password")
} yield (username, passwords.toSet)
val (username, passwords) = {
def uuid = UUID.randomUUID.toString
credentials.getOrElse((uuid, Set(uuid)))
}
def realm(hasCredentials: Boolean) = {
val realm = string("realm").getOrElse(defaultRealm)
if (hasCredentials) realm
else credentialsMissingRealm(realm)
}
val excluded = configuration.getStringSeq(root + "excluded")
.getOrElse(Seq.empty)
.toSet
BasicAuthenticationFilterConfiguration(
realm(credentials.isDefined),
enabled,
username,
passwords,
excluded
)
}
}
| Kaliber/play-basic-authentication-filter | src/main/scala/net/kaliber/basicAuthentication/BasicAuthenticationFilter.scala | Scala | mit | 4,693 |
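// A hedged sketch (written for this edit, not part of the library) of the configuration
// shape that BasicAuthenticationFilterConfiguration.parse above expects; the key names
// come from parse(), the values are made up. Assumes Play 2.x, where Configuration wraps
// a Typesafe Config.
object BasicAuthConfigSketch {
  import com.typesafe.config.ConfigFactory
  import play.api.Configuration
  import net.kaliber.basicAuthentication.BasicAuthenticationFilterConfiguration

  def main(args: Array[String]): Unit = {
    val conf = Configuration(ConfigFactory.parseString(
      """
        |basicAuthentication.enabled = true
        |basicAuthentication.realm = "Admin area"
        |basicAuthentication.username = "admin"
        |basicAuthentication.password = ["s3cret", "backup-s3cret"]  # a single STRING is also accepted
        |basicAuthentication.excluded = ["/status", "/assets/.*"]    # request paths are matched as regexes
      """.stripMargin))
    val parsed = BasicAuthenticationFilterConfiguration.parse(conf)
    println(parsed.passwords) // Set(s3cret, backup-s3cret)
    println(parsed.realm)     // Admin area
  }
}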
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.util.Arrays
import java.security.MessageDigest
import java.nio.ByteBuffer
import kafka.utils._
import org.apache.kafka.common.utils.Utils
trait OffsetMap {
def slots: Int
def put(key: ByteBuffer, offset: Long)
def get(key: ByteBuffer): Long
def clear()
def size: Int
def utilization: Double = size.toDouble / slots
}
/**
 * A hash table used for deduplicating the log. This hash table uses a cryptographically secure hash of the key as a proxy for the key
 * for comparisons and to save space on object overhead. Collisions are resolved by probing. This hash table does not support deletes.
 * @param memory The amount of memory this map can use
 * @param hashAlgorithm The name of the hash algorithm to use: MD2, MD5, SHA-1, SHA-256, SHA-384, SHA-512
*/
@nonthreadsafe
class SkimpyOffsetMap(val memory: Int, val hashAlgorithm: String = "MD5") extends OffsetMap {
private val bytes = ByteBuffer.allocate(memory)
  /* the hash algorithm instance to use, default is MD5 */
private val digest = MessageDigest.getInstance(hashAlgorithm)
/* the number of bytes for this hash algorithm */
private val hashSize = digest.getDigestLength
/* create some hash buffers to avoid reallocating each time */
private val hash1 = new Array[Byte](hashSize)
private val hash2 = new Array[Byte](hashSize)
/* number of entries put into the map */
private var entries = 0
/* number of lookups on the map */
private var lookups = 0L
/* the number of probes for all lookups */
private var probes = 0L
/**
* The number of bytes of space each entry uses (the number of bytes in the hash plus an 8 byte offset)
*/
val bytesPerEntry = hashSize + 8
/**
* The maximum number of entries this map can contain
*/
val slots: Int = memory / bytesPerEntry
/**
* Associate this offset to the given key.
* @param key The key
* @param offset The offset
*/
override def put(key: ByteBuffer, offset: Long) {
require(entries < slots, "Attempt to add a new entry to a full offset map.")
lookups += 1
hashInto(key, hash1)
// probe until we find the first empty slot
var attempt = 0
var pos = positionOf(hash1, attempt)
while(!isEmpty(pos)) {
bytes.position(pos)
bytes.get(hash2)
if(Arrays.equals(hash1, hash2)) {
// we found an existing entry, overwrite it and return (size does not change)
bytes.putLong(offset)
return
}
attempt += 1
pos = positionOf(hash1, attempt)
}
// found an empty slot, update it--size grows by 1
bytes.position(pos)
bytes.put(hash1)
bytes.putLong(offset)
entries += 1
}
/**
* Check that there is no entry at the given position
*/
private def isEmpty(position: Int): Boolean =
bytes.getLong(position) == 0 && bytes.getLong(position + 8) == 0 && bytes.getLong(position + 16) == 0
/**
* Get the offset associated with this key.
* @param key The key
* @return The offset associated with this key or -1 if the key is not found
*/
override def get(key: ByteBuffer): Long = {
lookups += 1
hashInto(key, hash1)
// search for the hash of this key by repeated probing until we find the hash we are looking for or we find an empty slot
var attempt = 0
var pos = 0
do {
pos = positionOf(hash1, attempt)
bytes.position(pos)
if(isEmpty(pos))
return -1L
bytes.get(hash2)
attempt += 1
} while(!Arrays.equals(hash1, hash2))
bytes.getLong()
}
/**
   * Clear the map: reset the entry, lookup and probe counters and zero out the backing
   * array so that no previously added key can be found.
*/
override def clear() {
this.entries = 0
this.lookups = 0L
this.probes = 0L
Arrays.fill(bytes.array, bytes.arrayOffset, bytes.arrayOffset + bytes.limit, 0.toByte)
}
/**
* The number of entries put into the map (note that not all may remain)
*/
override def size: Int = entries
/**
* The rate of collisions in the lookups
*/
def collisionRate: Double =
(this.probes - this.lookups) / this.lookups.toDouble
/**
* Calculate the ith probe position. We first try reading successive integers from the hash itself
* then if all of those fail we degrade to linear probing.
* @param hash The hash of the key to find the position for
* @param attempt The ith probe
* @return The byte offset in the buffer at which the ith probing for the given hash would reside
*/
private def positionOf(hash: Array[Byte], attempt: Int): Int = {
val probe = CoreUtils.readInt(hash, math.min(attempt, hashSize - 4)) + math.max(0, attempt - hashSize + 4)
val slot = Utils.abs(probe) % slots
this.probes += 1
slot * bytesPerEntry
}
/**
   * Hash the given key into the provided buffer.
* @param key The key to hash
* @param buffer The buffer to store the hash into
*/
private def hashInto(key: ByteBuffer, buffer: Array[Byte]) {
key.mark()
digest.update(key)
key.reset()
digest.digest(buffer, 0, hashSize)
}
}
| dongjiaqiang/kafka | core/src/main/scala/kafka/log/OffsetMap.scala | Scala | apache-2.0 | 5,916 |
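// A small usage sketch for the SkimpyOffsetMap above (written for this edit, not part of
// the Kafka sources); keys and offsets are illustrative.
object SkimpyOffsetMapSketch {
  import java.nio.ByteBuffer
  import kafka.log.SkimpyOffsetMap

  def main(args: Array[String]): Unit = {
    val map = new SkimpyOffsetMap(memory = 4 * 1024)  // room for 4096 / (16 + 8) = 170 MD5 entries
    def key(s: String) = ByteBuffer.wrap(s.getBytes("UTF-8"))
    map.put(key("user-42"), 10L)
    map.put(key("user-42"), 25L)      // same key: offset overwritten, size stays 1
    println(map.get(key("user-42")))  // 25
    println(map.get(key("user-99")))  // -1, key never added
    println(map.utilization)          // fraction of the slots in use
  }
}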
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.json
import java.util.Comparator
import scala.util.control.Exception.allCatch
import com.fasterxml.jackson.core._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.analysis.TypeCoercion
import org.apache.spark.sql.catalyst.expressions.ExprUtils
import org.apache.spark.sql.catalyst.json.JacksonUtils.nextUntil
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.catalyst.util.LegacyDateFormats.FAST_DATE_FORMAT
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
private[sql] class JsonInferSchema(options: JSONOptions) extends Serializable {
private val decimalParser = ExprUtils.getDecimalParser(options.locale)
private val timestampFormatter = TimestampFormatter(
options.timestampFormatInRead,
options.zoneId,
options.locale,
legacyFormat = FAST_DATE_FORMAT,
isParsing = true)
/**
* Infer the type of a collection of json records in three stages:
* 1. Infer the type of each record
* 2. Merge types by choosing the lowest type necessary to cover equal keys
* 3. Replace any remaining null fields with string, the top type
*/
def infer[T](
json: RDD[T],
createParser: (JsonFactory, T) => JsonParser): StructType = {
val parseMode = options.parseMode
val columnNameOfCorruptRecord = options.columnNameOfCorruptRecord
// In each RDD partition, perform schema inference on each row and merge afterwards.
val typeMerger = JsonInferSchema.compatibleRootType(columnNameOfCorruptRecord, parseMode)
val mergedTypesFromPartitions = json.mapPartitions { iter =>
val factory = options.buildJsonFactory()
iter.flatMap { row =>
try {
Utils.tryWithResource(createParser(factory, row)) { parser =>
parser.nextToken()
Some(inferField(parser))
}
} catch {
case e @ (_: RuntimeException | _: JsonProcessingException) => parseMode match {
case PermissiveMode =>
Some(StructType(Seq(StructField(columnNameOfCorruptRecord, StringType))))
case DropMalformedMode =>
None
case FailFastMode =>
throw QueryExecutionErrors.malformedRecordsDetectedInSchemaInferenceError(e)
}
}
}.reduceOption(typeMerger).toIterator
}
// Here we manually submit a fold-like Spark job, so that we can set the SQLConf when running
// the fold functions in the scheduler event loop thread.
val existingConf = SQLConf.get
var rootType: DataType = StructType(Nil)
val foldPartition = (iter: Iterator[DataType]) => iter.fold(StructType(Nil))(typeMerger)
val mergeResult = (index: Int, taskResult: DataType) => {
rootType = SQLConf.withExistingConf(existingConf) {
typeMerger(rootType, taskResult)
}
}
json.sparkContext.runJob(mergedTypesFromPartitions, foldPartition, mergeResult)
canonicalizeType(rootType, options)
.find(_.isInstanceOf[StructType])
// canonicalizeType erases all empty structs, including the only one we want to keep
.getOrElse(StructType(Nil)).asInstanceOf[StructType]
}
/**
* Infer the type of a json document from the parser's token stream
*/
def inferField(parser: JsonParser): DataType = {
import com.fasterxml.jackson.core.JsonToken._
parser.getCurrentToken match {
case null | VALUE_NULL => NullType
case FIELD_NAME =>
parser.nextToken()
inferField(parser)
case VALUE_STRING if parser.getTextLength < 1 =>
// Zero length strings and nulls have special handling to deal
// with JSON generators that do not distinguish between the two.
// To accurately infer types for empty strings that are really
// meant to represent nulls we assume that the two are isomorphic
// but will defer treating null fields as strings until all the
// record fields' types have been combined.
NullType
case VALUE_STRING =>
val field = parser.getText
lazy val decimalTry = allCatch opt {
val bigDecimal = decimalParser(field)
DecimalType(bigDecimal.precision, bigDecimal.scale)
}
if (options.prefersDecimal && decimalTry.isDefined) {
decimalTry.get
} else if (options.inferTimestamp &&
(allCatch opt timestampFormatter.parse(field)).isDefined) {
TimestampType
} else {
StringType
}
case START_OBJECT =>
val builder = Array.newBuilder[StructField]
while (nextUntil(parser, END_OBJECT)) {
builder += StructField(
parser.getCurrentName,
inferField(parser),
nullable = true)
}
val fields: Array[StructField] = builder.result()
// Note: other code relies on this sorting for correctness, so don't remove it!
java.util.Arrays.sort(fields, JsonInferSchema.structFieldComparator)
StructType(fields)
case START_ARRAY =>
// If this JSON array is empty, we use NullType as a placeholder.
// If this array is not empty in other JSON objects, we can resolve
// the type as we pass through all JSON objects.
var elementType: DataType = NullType
while (nextUntil(parser, END_ARRAY)) {
elementType = JsonInferSchema.compatibleType(
elementType, inferField(parser))
}
ArrayType(elementType)
case (VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT) if options.primitivesAsString => StringType
case (VALUE_TRUE | VALUE_FALSE) if options.primitivesAsString => StringType
case VALUE_NUMBER_INT | VALUE_NUMBER_FLOAT =>
import JsonParser.NumberType._
parser.getNumberType match {
// For Integer values, use LongType by default.
case INT | LONG => LongType
// Since we do not have a data type backed by BigInteger,
// when we see a Java BigInteger, we use DecimalType.
case BIG_INTEGER | BIG_DECIMAL =>
val v = parser.getDecimalValue
if (Math.max(v.precision(), v.scale()) <= DecimalType.MAX_PRECISION) {
DecimalType(Math.max(v.precision(), v.scale()), v.scale())
} else {
DoubleType
}
case FLOAT | DOUBLE if options.prefersDecimal =>
val v = parser.getDecimalValue
if (Math.max(v.precision(), v.scale()) <= DecimalType.MAX_PRECISION) {
DecimalType(Math.max(v.precision(), v.scale()), v.scale())
} else {
DoubleType
}
case FLOAT | DOUBLE =>
DoubleType
}
case VALUE_TRUE | VALUE_FALSE => BooleanType
case _ =>
throw QueryExecutionErrors.malformedJSONError()
}
}
/**
* Recursively canonicalizes inferred types, e.g., removes StructTypes with no fields,
* drops NullTypes or converts them to StringType based on provided options.
*/
private[catalyst] def canonicalizeType(
tpe: DataType, options: JSONOptions): Option[DataType] = tpe match {
case at: ArrayType =>
canonicalizeType(at.elementType, options)
.map(t => at.copy(elementType = t))
case StructType(fields) =>
val canonicalFields = fields.filter(_.name.nonEmpty).flatMap { f =>
canonicalizeType(f.dataType, options)
.map(t => f.copy(dataType = t))
}
// SPARK-8093: empty structs should be deleted
if (canonicalFields.isEmpty) {
None
} else {
Some(StructType(canonicalFields))
}
case NullType =>
if (options.dropFieldIfAllNull) {
None
} else {
Some(StringType)
}
case other => Some(other)
}
}
object JsonInferSchema {
val structFieldComparator = new Comparator[StructField] {
override def compare(o1: StructField, o2: StructField): Int = {
o1.name.compareTo(o2.name)
}
}
def isSorted(arr: Array[StructField]): Boolean = {
var i: Int = 0
while (i < arr.length - 1) {
if (structFieldComparator.compare(arr(i), arr(i + 1)) > 0) {
return false
}
i += 1
}
true
}
def withCorruptField(
struct: StructType,
other: DataType,
columnNameOfCorruptRecords: String,
parseMode: ParseMode): StructType = parseMode match {
case PermissiveMode =>
// If we see any other data type at the root level, we get records that cannot be
// parsed. So, we use the struct as the data type and add the corrupt field to the schema.
if (!struct.fieldNames.contains(columnNameOfCorruptRecords)) {
// If this given struct does not have a column used for corrupt records,
// add this field.
val newFields: Array[StructField] =
StructField(columnNameOfCorruptRecords, StringType, nullable = true) +: struct.fields
// Note: other code relies on this sorting for correctness, so don't remove it!
java.util.Arrays.sort(newFields, structFieldComparator)
StructType(newFields)
} else {
// Otherwise, just return this struct.
struct
}
case DropMalformedMode =>
// If corrupt record handling is disabled we retain the valid schema and discard the other.
struct
case FailFastMode =>
// If `other` is not struct type, consider it as malformed one and throws an exception.
throw QueryExecutionErrors.malformedRecordsDetectedInSchemaInferenceError(other)
}
/**
* Remove top-level ArrayType wrappers and merge the remaining schemas
*/
def compatibleRootType(
columnNameOfCorruptRecords: String,
parseMode: ParseMode): (DataType, DataType) => DataType = {
// Since we support array of json objects at the top level,
// we need to check the element type and find the root level data type.
case (ArrayType(ty1, _), ty2) =>
compatibleRootType(columnNameOfCorruptRecords, parseMode)(ty1, ty2)
case (ty1, ArrayType(ty2, _)) =>
compatibleRootType(columnNameOfCorruptRecords, parseMode)(ty1, ty2)
// Discard null/empty documents
case (struct: StructType, NullType) => struct
case (NullType, struct: StructType) => struct
case (struct: StructType, o) if !o.isInstanceOf[StructType] =>
withCorruptField(struct, o, columnNameOfCorruptRecords, parseMode)
case (o, struct: StructType) if !o.isInstanceOf[StructType] =>
withCorruptField(struct, o, columnNameOfCorruptRecords, parseMode)
// If we get anything else, we call compatibleType.
// Usually, when we reach here, ty1 and ty2 are two StructTypes.
case (ty1, ty2) => compatibleType(ty1, ty2)
}
private[this] val emptyStructFieldArray = Array.empty[StructField]
/**
* Returns the most general data type for two given data types.
*/
def compatibleType(t1: DataType, t2: DataType): DataType = {
TypeCoercion.findTightestCommonType(t1, t2).getOrElse {
// t1 or t2 is a StructType, ArrayType, or an unexpected type.
(t1, t2) match {
// Double support larger range than fixed decimal, DecimalType.Maximum should be enough
// in most case, also have better precision.
case (DoubleType, _: DecimalType) | (_: DecimalType, DoubleType) =>
DoubleType
case (t1: DecimalType, t2: DecimalType) =>
val scale = math.max(t1.scale, t2.scale)
val range = math.max(t1.precision - t1.scale, t2.precision - t2.scale)
if (range + scale > 38) {
// DecimalType can't support precision > 38
DoubleType
} else {
DecimalType(range + scale, scale)
}
case (StructType(fields1), StructType(fields2)) =>
// Both fields1 and fields2 should be sorted by name, since inferField performs sorting.
// Therefore, we can take advantage of the fact that we're merging sorted lists and skip
// building a hash map or performing additional sorting.
assert(isSorted(fields1),
s"${StructType.simpleString}'s fields were not sorted: ${fields1.toSeq}")
assert(isSorted(fields2),
s"${StructType.simpleString}'s fields were not sorted: ${fields2.toSeq}")
val newFields = new java.util.ArrayList[StructField]()
var f1Idx = 0
var f2Idx = 0
while (f1Idx < fields1.length && f2Idx < fields2.length) {
val f1Name = fields1(f1Idx).name
val f2Name = fields2(f2Idx).name
val comp = f1Name.compareTo(f2Name)
if (comp == 0) {
val dataType = compatibleType(fields1(f1Idx).dataType, fields2(f2Idx).dataType)
newFields.add(StructField(f1Name, dataType, nullable = true))
f1Idx += 1
f2Idx += 1
} else if (comp < 0) { // f1Name < f2Name
newFields.add(fields1(f1Idx))
f1Idx += 1
} else { // f1Name > f2Name
newFields.add(fields2(f2Idx))
f2Idx += 1
}
}
while (f1Idx < fields1.length) {
newFields.add(fields1(f1Idx))
f1Idx += 1
}
while (f2Idx < fields2.length) {
newFields.add(fields2(f2Idx))
f2Idx += 1
}
StructType(newFields.toArray(emptyStructFieldArray))
case (ArrayType(elementType1, containsNull1), ArrayType(elementType2, containsNull2)) =>
ArrayType(compatibleType(elementType1, elementType2), containsNull1 || containsNull2)
// The case that given `DecimalType` is capable of given `IntegralType` is handled in
// `findTightestCommonType`. Both cases below will be executed only when the given
// `DecimalType` is not capable of the given `IntegralType`.
case (t1: IntegralType, t2: DecimalType) =>
compatibleType(DecimalType.forType(t1), t2)
case (t1: DecimalType, t2: IntegralType) =>
compatibleType(t1, DecimalType.forType(t2))
        // Fall back to StringType, the most general type: any JSON value can be read as a string.
case (_, _) => StringType
}
}
}
}
| chuckchen/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JsonInferSchema.scala | Scala | apache-2.0 | 15,109 |
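// The merge rules above (NullType falling back to StringType, integers widened to
// DoubleType, structs unioned field by field) are observable through the public JSON
// reader. A hedged sketch assuming a local SparkSession; written for this edit.
object JsonInferSketch {
  import org.apache.spark.sql.SparkSession

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("json-infer-sketch").getOrCreate()
    import spark.implicits._
    val records = Seq(
      """{"id": 1,   "tag": null}""",
      """{"id": 2.5, "tag": "a", "extra": {"nested": true}}"""
    ).toDS()
    spark.read.json(records).printSchema()
    // id    -> double  (LongType merged with DoubleType)
    // tag   -> string  (NullType promoted to the top type, StringType)
    // extra -> struct<nested: boolean>, present in only one of the records
    spark.stop()
  }
}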
package pl.touk.nussknacker.test
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.time.{Millis, Minutes, Span}
trait ExtremelyPatientScalaFutures extends ScalaFutures with Eventually {
final override implicit def patienceConfig: PatienceConfig = PatienceConfig(timeout = scaled(Span(5, Minutes)), interval = scaled(Span(100, Millis)))
}
| TouK/nussknacker | utils/test-utils/src/main/scala/pl/touk/nussknacker/test/ExtremelyPatientScalaFutures.scala | Scala | apache-2.0 | 374 |
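// A hypothetical spec (written for this edit; assumes ScalaTest 3.1+ style classes)
// showing what mixing in the trait buys: whenReady/eventually pick up the 5-minute
// timeout and 100 ms polling interval instead of ScalaTest's much shorter defaults.
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import pl.touk.nussknacker.test.ExtremelyPatientScalaFutures
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

class SlowServiceSpec extends AnyFlatSpec with Matchers with ExtremelyPatientScalaFutures {
  "a slow future" should "be awaited without an explicit timeout" in {
    val result = Future { Thread.sleep(2000); 42 }
    whenReady(result) { value => // patienceConfig comes from ExtremelyPatientScalaFutures
      value shouldBe 42
    }
  }
}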
/*
* Copyright 2014-2020 Rik van der Kleij
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package intellij.haskell.external.execution
import java.nio.charset.Charset
import com.intellij.execution.configurations.GeneralCommandLine
import com.intellij.execution.configurations.GeneralCommandLine.ParentEnvironmentType
import com.intellij.execution.process
import com.intellij.execution.process._
import com.intellij.openapi.progress.ProgressIndicator
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.Key
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.VfsUtil
import com.intellij.util.io.BaseOutputReader
import intellij.haskell.{GlobalInfo, HaskellNotificationGroup}
import org.jetbrains.jps.incremental.messages.BuildMessage
import org.jetbrains.jps.incremental.messages.BuildMessage.Kind
import scala.concurrent.duration._
import scala.jdk.CollectionConverters._
object CommandLine {
val DefaultTimeout: FiniteDuration = 60.seconds
val DefaultNotifyBalloonError = false
val DefaultIgnoreExitCode = false
val DefaultLogOutput = false
def run(project: Project, commandPath: String, arguments: Seq[String], timeoutInMillis: Long = DefaultTimeout.toMillis,
notifyBalloonError: Boolean = DefaultNotifyBalloonError, ignoreExitCode: Boolean = DefaultIgnoreExitCode,
logOutput: Boolean = DefaultLogOutput, charset: Option[Charset] = None): ProcessOutput = {
run3(Some(project), project.getBasePath, commandPath, arguments, timeoutInMillis, notifyBalloonError, ignoreExitCode,
logOutput, charset)
}
def runInWorkDir(project: Project, workDir: String, commandPath: String, arguments: Seq[String], timeoutInMillis: Long = DefaultTimeout.toMillis,
notifyBalloonError: Boolean = DefaultNotifyBalloonError, ignoreExitCode: Boolean = DefaultIgnoreExitCode,
logOutput: Boolean = DefaultLogOutput, charset: Option[Charset] = None): ProcessOutput = {
run3(Some(project), workDir, commandPath, arguments, timeoutInMillis, notifyBalloonError, ignoreExitCode,
logOutput, charset)
}
def runInHomeDir(commandPath: String, arguments: Seq[String], timeoutInMillis: Long = DefaultTimeout.toMillis,
notifyBalloonError: Boolean = DefaultNotifyBalloonError, ignoreExitCode: Boolean = DefaultIgnoreExitCode,
logOutput: Boolean = DefaultLogOutput, charset: Option[Charset] = None): ProcessOutput = {
run3(None, VfsUtil.getUserHomeDir.getPath, commandPath, arguments, timeoutInMillis, notifyBalloonError, ignoreExitCode,
logOutput, charset)
}
def runWithProgressIndicator(project: Project, workDir: Option[String], commandPath: String, arguments: Seq[String],
progressIndicator: Option[ProgressIndicator], charset: Option[Charset] = None): CapturingProcessHandler = {
val commandLine = createCommandLine(workDir.getOrElse(project.getBasePath), commandPath, arguments, charset)
new CapturingProcessHandler(commandLine) {
override protected def createProcessAdapter(processOutput: ProcessOutput): CapturingProcessAdapter = {
progressIndicator match {
case Some(pi) => new CapturingProcessToProgressIndicator(project, pi)
case None => super.createProcessAdapter(processOutput)
}
}
override def readerOptions(): BaseOutputReader.Options = {
BaseOutputReader.Options.forMostlySilentProcess()
}
}
}
private def run3(project: Option[Project], workDir: String, commandPath: String, arguments: Seq[String], timeoutInMillis: Long = DefaultTimeout.toMillis,
notifyBalloonError: Boolean = DefaultNotifyBalloonError, ignoreExitCode: Boolean = DefaultIgnoreExitCode,
logOutput: Boolean = DefaultLogOutput, charset: Option[Charset] = None): ProcessOutput = {
val commandLine = createCommandLine(workDir, commandPath, arguments, charset)
if (!logOutput) {
HaskellNotificationGroup.logInfoEvent(project, s"Executing: ${commandLine.getCommandLineString} ")
}
val processHandler = createProcessHandler(project, commandLine, logOutput)
val processOutput = processHandler.map(_.runProcess(timeoutInMillis.toInt, true)).getOrElse(new process.ProcessOutput(-1))
if (processOutput.isTimeout) {
val message = s"Timeout while executing `${commandLine.getCommandLineString}`"
if (notifyBalloonError) {
HaskellNotificationGroup.logErrorBalloonEvent(project, message)
} else {
HaskellNotificationGroup.logErrorEvent(project, message)
}
processOutput
} else if (!ignoreExitCode && processOutput.getExitCode != 0) {
val errorMessage = createLogMessage(commandLine, processOutput)
val message = s"Executing `${commandLine.getCommandLineString}` failed: $errorMessage"
if (notifyBalloonError) HaskellNotificationGroup.logErrorBalloonEvent(project, message) else HaskellNotificationGroup.logErrorEvent(project, message)
processOutput
} else {
processOutput
}
}
def createCommandLine(workDir: String, commandPath: String, arguments: Seq[String], charset: Option[Charset] = None): GeneralCommandLine = {
val commandLine = new GeneralCommandLine
commandLine.withWorkDirectory(workDir)
commandLine.setExePath(commandPath)
commandLine.addParameters(arguments.asJava)
commandLine.withParentEnvironmentType(ParentEnvironmentType.CONSOLE)
commandLine.withEnvironment(GlobalInfo.pathVariables)
charset.foreach(commandLine.setCharset)
commandLine
}
private def createProcessHandler(project: Option[Project], cmd: GeneralCommandLine, logOutput: Boolean): Option[CapturingProcessHandler] = {
try {
if (logOutput) {
Some(
new CapturingProcessHandler(cmd) {
override protected def createProcessAdapter(processOutput: ProcessOutput): CapturingProcessAdapter = new CapturingProcessToLog(project, cmd, processOutput)
})
} else {
Some(new CapturingProcessHandler(cmd))
}
} catch {
case e: ProcessNotCreatedException =>
HaskellNotificationGroup.logErrorBalloonEvent(project, e.getMessage)
None
}
}
private def createLogMessage(cmd: GeneralCommandLine, processOutput: ProcessOutput): String = {
s"${cmd.getCommandLineString}: ${processOutput.getStdoutLines.asScala.mkString("\\n")} \\n ${processOutput.getStderrLines.asScala.mkString("\\n")}"
}
}
object AnsiDecoder {
def decodeAnsiCommandsToString(ansi: String, outputType: Key[_], decoder: AnsiEscapeDecoder): String = {
val buffer = new StringBuilder()
decoder.escapeText(ansi, outputType, (text, _) => buffer.append(text))
buffer.result()
}
}
private class HaskellBuildMessage(message: String, kind: Kind) extends BuildMessage(message, kind)
private class CapturingProcessToLog(val project: Option[Project], val cmd: GeneralCommandLine, val output: ProcessOutput) extends CapturingProcessAdapter(output) {
override def onTextAvailable(event: ProcessEvent, outputType: Key[_]): Unit = {
super.onTextAvailable(event, outputType)
addToLog(event.getText, outputType)
}
private def addToLog(text: String, outputType: Key[_]): Unit = {
val trimmedText = text.trim
if (trimmedText.nonEmpty) {
HaskellNotificationGroup.logInfoEvent(project, s"${cmd.getCommandLineString}: $trimmedText")
}
}
}
private class CapturingProcessToProgressIndicator(project: Project, progressIndicator: ProgressIndicator) extends CapturingProcessAdapter() {
private val ansiEscapeDecoder = new AnsiEscapeDecoder()
override def onTextAvailable(event: ProcessEvent, outputType: Key[_]): Unit = {
val text = AnsiDecoder.decodeAnsiCommandsToString(event.getText, outputType, ansiEscapeDecoder)
if (!StringUtil.isEmptyOrSpaces(text)) {
progressIndicator.setText2(text)
HaskellNotificationGroup.logInfoEvent(project, text)
}
}
}
sealed trait CaptureOutput
object CaptureOutputToLog extends CaptureOutput
| rikvdkleij/intellij-haskell | src/main/scala/intellij/haskell/external/execution/CommandLine.scala | Scala | apache-2.0 | 8,587 |
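// A hedged usage sketch for the CommandLine helper above (written for this edit; the
// command and arguments are illustrative, and the call assumes it runs inside the
// IntelliJ platform where the process-handling classes are available).
object CommandLineSketch {
  import intellij.haskell.external.execution.CommandLine

  def stackVersion(): Option[String] = {
    val output = CommandLine.runInHomeDir("stack", Seq("--version"), ignoreExitCode = true)
    if (output.getExitCode == 0) Some(output.getStdout.trim) else None
  }
}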
/*
*
* /\\\\\\\\\\
* /\\\\\\///\\\\\\
* /\\\\\\/ \\///\\\\\\ /\\\\\\\\\\\\\\\\\\ /\\\\\\ /\\\\\\
* /\\\\\\ \\//\\\\\\ /\\\\\\/////\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\ \\/// /\\\\\\\\\\ /\\\\\\\\\\ /\\\\\\ /\\\\\\ /\\\\\\\\\\\\\\\\\\\\
* \\/\\\\\\ \\/\\\\\\ \\/\\\\\\\\\\\\\\\\\\\\ \\////\\\\\\//// /\\\\\\ /\\\\\\///\\\\\\\\\\///\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\//////
* \\//\\\\\\ /\\\\\\ \\/\\\\\\////// \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\//\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\\\\\\\\\\\\\\\
* \\///\\\\\\ /\\\\\\ \\/\\\\\\ \\/\\\\\\_/\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\////////\\\\\\
* \\///\\\\\\\\\\/ \\/\\\\\\ \\//\\\\\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\//\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\
* \\///// \\/// \\///// \\/// \\/// \\/// \\/// \\///////// \\//////////
*
* The mathematical programming library for Scala.
*
*/
package optimus.optimization.enums
import enumeratum._
import scala.collection.immutable._
sealed abstract class SolutionStatus(override val entryName: String) extends EnumEntry
object SolutionStatus extends Enum[SolutionStatus] {
val values: IndexedSeq[SolutionStatus] = findValues
case object NOT_SOLVED extends SolutionStatus("Not solved")
case object OPTIMAL extends SolutionStatus("Optimal")
case object SUBOPTIMAL extends SolutionStatus("Suboptimal")
case object UNBOUNDED extends SolutionStatus("Unbounded")
case object INFEASIBLE extends SolutionStatus("Infeasible")
}
| vagm/Optimus | core/src/main/scala/optimus/optimization/enums/SolutionStatus.scala | Scala | lgpl-3.0 | 1,352 |
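// A short usage sketch for the enumeratum enum above (written for this edit). Lookups
// go through the custom entryName ("Optimal"), not the Scala object name (OPTIMAL).
object SolutionStatusSketch {
  import optimus.optimization.enums.SolutionStatus

  def main(args: Array[String]): Unit = {
    println(SolutionStatus.withName("Optimal"))        // OPTIMAL
    println(SolutionStatus.withNameOption("optimal"))  // None: entryName matching is case-sensitive
    println(SolutionStatus.values.map(_.entryName))    // Vector(Not solved, Optimal, Suboptimal, Unbounded, Infeasible)
  }
}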
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.examples.similarproduct
import org.apache.predictionio.controller.PPreparator
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
class Preparator
extends PPreparator[TrainingData, PreparedData] {
override
def prepare(sc: SparkContext, trainingData: TrainingData): PreparedData = {
new PreparedData(
users = trainingData.users,
items = trainingData.items,
viewEvents = trainingData.viewEvents,
likeEvents = trainingData.likeEvents) // ADDED
}
}
class PreparedData(
val users: RDD[(String, User)],
val items: RDD[(String, Item)],
val viewEvents: RDD[ViewEvent],
val likeEvents: RDD[LikeEvent] // ADDED
) extends Serializable
| takezoe/incubator-predictionio | examples/scala-parallel-similarproduct/multi-events-multi-algos/src/main/scala/Preparator.scala | Scala | apache-2.0 | 1,563 |
def flatMap[A, B](sa: State[S, A])(k: A => State[S, B]): State[S, B] =
State { s =>
val (a, s1) = runState(sa)(s)
val sb = k(a)
runState(sb)(s1)
} | hmemcpy/milewski-ctfp-pdf | src/content/3.5/code/scala/snippet18.scala | Scala | gpl-3.0 | 162 |
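// The snippet above assumes the book's State and runState definitions; a minimal,
// self-contained version (written for this edit) so flatMap can actually be run.
object StateSketch {
  final case class State[S, A](run: S => (A, S))
  def runState[S, A](sa: State[S, A])(s: S): (A, S) = sa.run(s)

  def flatMap[S, A, B](sa: State[S, A])(k: A => State[S, B]): State[S, B] =
    State { s =>
      val (a, s1) = runState(sa)(s)
      runState(k(a))(s1)
    }

  // threading a counter through two stateful steps:
  val tick: State[Int, Int] = State((n: Int) => (n, n + 1))
  val twoTicks: State[Int, (Int, Int)] =
    flatMap(tick)((a: Int) => State((n: Int) => ((a, n), n + 1)))

  def main(args: Array[String]): Unit =
    println(runState(twoTicks)(0)) // ((0,1),2)
}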
package actors
import akka.testkit.TestActorRef
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import protocol._
@RunWith(classOf[JUnitRunner])
class ScribeSpec extends Specification {
"The Scribe" should {
"report winner" in new AkkaTestkitSpecs2 {
val actorRef = TestActorRef[Scribe]
actorRef ! Winner("me")
}
}
}
| lukaszbudnik/kardz | src/test/scala/actors/ScribeSpec.scala | Scala | apache-2.0 | 408 |
package com.avsystem.commons
package macros
trait ApplierUnapplier[T, F] {
def apply(f: F): T
def unapply(t: T): F
}
object ApplyUnapplyTest {
case class Empty()
case class Single(int: Int)
case class Multiple(int: Int, str: String)
case class Gadt[T](t: T, list: List[T], cos: String)
case class Generic[T](value: String)
trait Custom[T]
object Custom {
def apply[T](t: T): Custom[T] = null
def unapply[T](whatever: Custom[T]): Option[T] = None
}
def applierUnapplier[T, F]: ApplierUnapplier[T, F] = macro TestMacros.applierUnapplier[T, F]
applierUnapplier[Empty, Unit]
applierUnapplier[Single, Int]
applierUnapplier[Multiple, (Int, String)]
applierUnapplier[Gadt[Int], (Int, List[Int], String)]
applierUnapplier[Custom[String], String]
applierUnapplier[Generic[String], String]
}
| AVSystem/scala-commons | commons-core/src/test/scala/com/avsystem/commons/macros/ApplyUnapplyTest.scala | Scala | mit | 829 |
def factorizer[A, B, C](p: C => A)(q: C => B): (C => (A, B)) =
x => (p(x), q(x)) | hmemcpy/milewski-ctfp-pdf | src/content/3.2/code/scala/snippet07.scala | Scala | gpl-3.0 | 82 |
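// A short usage sketch (written for this edit): factorizer builds the unique mediating
// arrow C => (A, B) out of the two candidate projections p and q.
object FactorizerSketch {
  def factorizer[A, B, C](p: C => A)(q: C => B): C => (A, B) =
    x => (p(x), q(x))

  def main(args: Array[String]): Unit = {
    val m: Double => (Int, String) = factorizer((d: Double) => d.toInt)((d: Double) => f"$d%.2f")
    println(m(3.14159))                  // (3,3.14)
    println((m andThen (_._1))(3.14159)) // 3; composing with the first projection recovers p
  }
}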
package chandu0101.scalajs.react.components.util
/**
* Created by chandrasekharkode .
*/
object KeyLine {
object Desktop {
val GUTTER = 24
val GUTTER_LESS = 16
val INCREMENT = 64
val MENU_ITEM_HEIGHT = 32
}
def getIncrementalDim(dim : Double) = Math.ceil( dim / Desktop.INCREMENT) * Desktop.INCREMENT
}
| coreyauger/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/util/KeyLine.scala | Scala | apache-2.0 | 336 |
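// getIncrementalDim above rounds a dimension up to the next multiple of the 64 px
// desktop increment; a few concrete values (sketch written for this edit):
object KeyLineSketch {
  import chandu0101.scalajs.react.components.util.KeyLine

  def demo(): Unit = {
    println(KeyLine.getIncrementalDim(1))  // 64.0
    println(KeyLine.getIncrementalDim(64)) // 64.0
    println(KeyLine.getIncrementalDim(65)) // 128.0
  }
}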
/**
* Copyright 2014 Dropbox, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package djinni
import djinni.ast._
import djinni.generatorTools._
import djinni.meta._
import djinni.writer.IndentWriter
import scala.collection.mutable
class JNIGenerator(spec: Spec) extends Generator(spec) {
val jniMarshal = new JNIMarshal(spec)
val cppMarshal = new CppMarshal(spec)
val javaMarshal = new JavaMarshal(spec)
val jniBaseLibClassIdentStyle = IdentStyle.prefix("H", IdentStyle.camelUpper)
val jniBaseLibFileIdentStyle = jniBaseLibClassIdentStyle
val writeJniCppFile = writeCppFileGeneric(spec.jniOutFolder.get, spec.jniNamespace, spec.jniFileIdentStyle, spec.jniIncludePrefix) _
def writeJniHppFile(name: String, origin: String, includes: Iterable[String], fwds: Iterable[String], f: IndentWriter => Unit, f2: IndentWriter => Unit = (w => {})) =
writeHppFileGeneric(spec.jniHeaderOutFolder.get, spec.jniNamespace, spec.jniFileIdentStyle)(name, origin, includes, fwds, f, f2)
class JNIRefs(name: String) {
var jniHpp = mutable.TreeSet[String]()
var jniCpp = mutable.TreeSet[String]()
jniHpp.add("#include " + q(spec.jniIncludeCppPrefix + spec.cppFileIdentStyle(name) + "." + spec.cppHeaderExt))
jniHpp.add("#include " + q(spec.jniBaseLibIncludePrefix + "djinni_support.hpp"))
spec.cppNnHeader match {
case Some(nnHdr) => jniHpp.add("#include " + nnHdr)
case _ =>
}
def find(ty: TypeRef) { find(ty.resolved) }
def find(tm: MExpr) {
tm.args.foreach(find)
find(tm.base)
}
def find(m: Meta) = for(r <- jniMarshal.references(m, name)) r match {
case ImportRef(arg) => jniCpp.add("#include " + arg)
case _ =>
}
}
override def generateEnum(origin: String, ident: Ident, doc: Doc, e: Enum) {
val refs = new JNIRefs(ident.name)
val jniHelper = jniMarshal.helperClass(ident)
val cppSelf = cppMarshal.fqTypename(ident, e)
writeJniHppFile(ident, origin, Iterable.concat(refs.jniHpp, refs.jniCpp), Nil, w => {
w.w(s"class $jniHelper final : ::djinni::JniEnum").bracedSemi {
w.wlOutdent("public:")
w.wl(s"using CppType = $cppSelf;")
w.wl(s"using JniType = jobject;")
w.wl
w.wl(s"using Boxed = $jniHelper;")
w.wl
w.wl(s"static CppType toCpp(JNIEnv* jniEnv, JniType j) { return static_cast<CppType>(::djinni::JniClass<$jniHelper>::get().ordinal(jniEnv, j)); }")
w.wl(s"static ::djinni::LocalRef<JniType> fromCpp(JNIEnv* jniEnv, CppType c) { return ::djinni::JniClass<$jniHelper>::get().create(jniEnv, static_cast<jint>(c)); }")
w.wl
w.wlOutdent("private:")
val classLookup = q(jniMarshal.undecoratedTypename(ident, e))
w.wl(s"$jniHelper() : JniEnum($classLookup) {}")
w.wl(s"friend ::djinni::JniClass<$jniHelper>;")
}
})
}
override def generateRecord(origin: String, ident: Ident, doc: Doc, params: Seq[TypeParam], r: Record) {
val refs = new JNIRefs(ident.name)
r.fields.foreach(f => refs.find(f.ty))
val jniHelper = jniMarshal.helperClass(ident)
val cppSelf = cppMarshal.fqTypename(ident, r) + cppTypeArgs(params)
def writeJniPrototype(w: IndentWriter) {
writeJniTypeParams(w, params)
w.w(s"class $jniHelper final").bracedSemi {
w.wlOutdent("public:")
w.wl(s"using CppType = $cppSelf;")
w.wl(s"using JniType = jobject;")
w.wl
w.wl(s"using Boxed = $jniHelper;")
w.wl
w.wl(s"~$jniHelper();")
w.wl
w.wl(s"static CppType toCpp(JNIEnv* jniEnv, JniType j);")
w.wl(s"static ::djinni::LocalRef<JniType> fromCpp(JNIEnv* jniEnv, const CppType& c);")
w.wl
w.wlOutdent("private:")
w.wl(s"$jniHelper();")
w.wl(s"friend ::djinni::JniClass<$jniHelper>;")
w.wl
val classLookup = q(jniMarshal.undecoratedTypename(ident, r))
w.wl(s"const ::djinni::GlobalRef<jclass> clazz { ::djinni::jniFindClass($classLookup) };")
val constructorSig = q(jniMarshal.javaMethodSignature(r.fields, None))
w.wl(s"const jmethodID jconstructor { ::djinni::jniGetMethodID(clazz.get(), ${q("<init>")}, $constructorSig) };")
for (f <- r.fields) {
val javaFieldName = idJava.field(f.ident)
val javaSig = q(jniMarshal.fqTypename(f.ty))
w.wl(s"const jfieldID field_$javaFieldName { ::djinni::jniGetFieldID(clazz.get(), ${q(javaFieldName)}, $javaSig) };")
}
}
}
def writeJniBody(w: IndentWriter) {
val jniHelperWithParams = jniHelper + typeParamsSignature(params)
// Defining ctor/dtor in the cpp file reduces build times
writeJniTypeParams(w, params)
w.wl(s"$jniHelperWithParams::$jniHelper() = default;")
w.wl
writeJniTypeParams(w, params)
w.wl(s"$jniHelperWithParams::~$jniHelper() = default;")
w.wl
writeJniTypeParams(w, params)
w.w(s"auto $jniHelperWithParams::fromCpp(JNIEnv* jniEnv, const CppType& c) -> ::djinni::LocalRef<JniType>").braced{
//w.wl(s"::${spec.jniNamespace}::JniLocalScope jscope(jniEnv, 10);")
if(r.fields.isEmpty) w.wl("(void)c; // Suppress warnings in release builds for empty records")
w.wl(s"const auto& data = ::djinni::JniClass<$jniHelper>::get();")
val call = "auto r = ::djinni::LocalRef<JniType>{jniEnv->NewObject("
w.w(call + "data.clazz.get(), data.jconstructor")
if(r.fields.nonEmpty) {
w.wl(",")
writeAlignedCall(w, " " * call.length(), r.fields, ")}", f => {
val name = idCpp.field(f.ident)
val param = jniMarshal.fromCpp(f.ty, s"c.$name")
s"::djinni::get($param)"
})
}
else
w.w(")}")
w.wl(";")
w.wl(s"::djinni::jniExceptionCheck(jniEnv);")
w.wl(s"return r;")
}
w.wl
writeJniTypeParams(w, params)
w.w(s"auto $jniHelperWithParams::toCpp(JNIEnv* jniEnv, JniType j) -> CppType").braced {
w.wl(s"::djinni::JniLocalScope jscope(jniEnv, ${r.fields.size + 1});")
w.wl(s"assert(j != nullptr);")
if(r.fields.isEmpty)
w.wl("(void)j; // Suppress warnings in release builds for empty records")
else
w.wl(s"const auto& data = ::djinni::JniClass<$jniHelper>::get();")
writeAlignedCall(w, "return {", r.fields, "}", f => {
val fieldId = "data.field_" + idJava.field(f.ident)
val jniFieldAccess = toJniCall(f.ty, (jt: String) => s"jniEnv->Get${jt}Field(j, $fieldId)")
jniMarshal.toCpp(f.ty, jniFieldAccess)
})
w.wl(";")
}
}
writeJniFiles(origin, params.nonEmpty, ident, refs, writeJniPrototype, writeJniBody)
}
override def generateInterface(origin: String, ident: Ident, doc: Doc, typeParams: Seq[TypeParam], i: Interface) {
val refs = new JNIRefs(ident.name)
i.methods.foreach(m => {
m.params.foreach(p => refs.find(p.ty))
m.ret.foreach(refs.find)
})
i.consts.foreach(c => {
refs.find(c.ty)
})
val jniSelf = jniMarshal.helperClass(ident)
val cppSelf = cppMarshal.fqTypename(ident, i) + cppTypeArgs(typeParams)
val classLookup = jniMarshal.undecoratedTypename(ident, i)
val baseType = s"::djinni::JniInterface<$cppSelf, $jniSelf>"
def writeJniPrototype(w: IndentWriter) {
writeJniTypeParams(w, typeParams)
w.w(s"class $jniSelf final : $baseType").bracedSemi {
w.wlOutdent(s"public:")
spec.cppNnType match {
case Some(nnPtr) =>
w.wl(s"using CppType = ${nnPtr}<$cppSelf>;")
w.wl(s"using CppOptType = std::shared_ptr<$cppSelf>;")
case _ =>
w.wl(s"using CppType = std::shared_ptr<$cppSelf>;")
}
w.wl(s"using JniType = jobject;")
w.wl
w.wl(s"using Boxed = $jniSelf;")
w.wl
w.wl(s"~$jniSelf();")
w.wl
if (spec.cppNnType.nonEmpty) {
def nnCheck(expr: String): String = spec.cppNnCheckExpression.fold(expr)(check => s"$check($expr)")
w.w("static CppType toCpp(JNIEnv* jniEnv, JniType j)").bracedSemi {
w.wl(s"""DJINNI_ASSERT_MSG(j, jniEnv, "$jniSelf::fromCpp requires a non-null Java object");""")
w.wl(s"""return ${nnCheck(s"::djinni::JniClass<$jniSelf>::get()._fromJava(jniEnv, j)")};""")
}
w.wl(s"static ::djinni::LocalRef<JniType> fromCppOpt(JNIEnv* jniEnv, const CppOptType& c) { return {jniEnv, ::djinni::JniClass<$jniSelf>::get()._toJava(jniEnv, c)}; }")
w.wl(s"static ::djinni::LocalRef<JniType> fromCpp(JNIEnv* jniEnv, const CppType& c) { return fromCppOpt(jniEnv, c); }")
} else {
w.wl(s"static CppType toCpp(JNIEnv* jniEnv, JniType j) { return ::djinni::JniClass<$jniSelf>::get()._fromJava(jniEnv, j); }")
w.wl(s"static ::djinni::LocalRef<JniType> fromCpp(JNIEnv* jniEnv, const CppType& c) { return {jniEnv, ::djinni::JniClass<$jniSelf>::get()._toJava(jniEnv, c)}; }")
}
w.wl
w.wlOutdent("private:")
w.wl(s"$jniSelf();")
w.wl(s"friend ::djinni::JniClass<$jniSelf>;")
w.wl(s"friend $baseType;")
w.wl
if (i.ext.java) {
w.wl(s"class JavaProxy final : ::djinni::JavaProxyCacheEntry, public $cppSelf").bracedSemi {
w.wlOutdent(s"public:")
// w.wl(s"using JavaProxyCacheEntry::JavaProxyCacheEntry;")
w.wl(s"JavaProxy(JniType j);")
w.wl(s"~JavaProxy();")
w.wl
for (m <- i.methods) {
val ret = cppMarshal.fqReturnType(m.ret)
val params = m.params.map(p => cppMarshal.fqParamType(p.ty) + " " + idCpp.local(p.ident))
w.wl(s"$ret ${idCpp.method(m.ident)}${params.mkString("(", ", ", ")")} override;")
}
w.wl
w.wlOutdent(s"private:")
w.wl(s"friend ::djinni::JniInterface<$cppSelf, ${withNs(Some(spec.jniNamespace), jniSelf)}>;")
}
w.wl
w.wl(s"const ::djinni::GlobalRef<jclass> clazz { ::djinni::jniFindClass(${q(classLookup)}) };")
for (m <- i.methods) {
val javaMethodName = idJava.method(m.ident)
val javaMethodSig = q(jniMarshal.javaMethodSignature(m.params, m.ret))
w.wl(s"const jmethodID method_$javaMethodName { ::djinni::jniGetMethodID(clazz.get(), ${q(javaMethodName)}, $javaMethodSig) };")
}
}
}
}
def writeJniBody(w: IndentWriter) {
// Defining ctor/dtor in the cpp file reduces build times
val baseClassParam = if (i.ext.cpp) q(classLookup+"$CppProxy") else ""
val jniSelfWithParams = jniSelf + typeParamsSignature(typeParams)
writeJniTypeParams(w, typeParams)
w.wl(s"$jniSelfWithParams::$jniSelf() : $baseType($baseClassParam) {}")
w.wl
writeJniTypeParams(w, typeParams)
w.wl(s"$jniSelfWithParams::~$jniSelf() = default;")
w.wl
if (i.ext.java) {
writeJniTypeParams(w, typeParams)
w.wl(s"$jniSelfWithParams::JavaProxy::JavaProxy(JniType j) : Handle(::djinni::jniGetThreadEnv(), j) { }")
w.wl
writeJniTypeParams(w, typeParams)
w.wl(s"$jniSelfWithParams::JavaProxy::~JavaProxy() = default;")
w.wl
for (m <- i.methods) {
val ret = cppMarshal.fqReturnType(m.ret)
val params = m.params.map(p => cppMarshal.fqParamType(p.ty) + " c_" + idCpp.local(p.ident))
writeJniTypeParams(w, typeParams)
val methodNameAndSignature: String = s"${idCpp.method(m.ident)}${params.mkString("(", ", ", ")")}"
w.w(s"$ret $jniSelfWithParams::JavaProxy::$methodNameAndSignature").braced {
w.wl(s"auto jniEnv = ::djinni::jniGetThreadEnv();")
w.wl(s"::djinni::JniLocalScope jscope(jniEnv, 10);")
w.wl(s"const auto& data = ::djinni::JniClass<${withNs(Some(spec.jniNamespace), jniSelf)}>::get();")
val call = m.ret.fold("jniEnv->CallVoidMethod(")(r => "auto jret = " + toJniCall(r, (jt: String) => s"jniEnv->Call${jt}Method("))
w.w(call)
val javaMethodName = idJava.method(m.ident)
w.w(s"Handle::get().get(), data.method_$javaMethodName")
if(m.params.nonEmpty){
w.wl(",")
writeAlignedCall(w, " " * call.length(), m.params, ")", p => {
val param = jniMarshal.fromCpp(p.ty, "c_" + idCpp.local(p.ident))
s"::djinni::get($param)"
})
}
else
w.w(")")
w.wl(";")
w.wl(s"::djinni::jniExceptionCheck(jniEnv);")
m.ret.fold()(ty => {
(spec.cppNnCheckExpression, isInterface(ty.resolved)) match {
case (Some(check), true) => {
// We have a non-optional interface, assert that we're getting a non-null value
val javaParams = m.params.map(p => javaMarshal.fqParamType(p.ty) + " " + idJava.local(p.ident))
val javaParamsString: String = javaParams.mkString("(", ",", ")")
val functionString: String = s"${javaMarshal.fqTypename(ident, i)}#$javaMethodName$javaParamsString"
w.wl(s"""DJINNI_ASSERT_MSG(jret, jniEnv, "Got unexpected null return value from function $functionString");""")
w.wl(s"return ${jniMarshal.toCpp(ty, "jret")};")
}
case _ =>
}
w.wl(s"return ${jniMarshal.toCpp(ty, "jret")};")
})
}
}
}
if (i.ext.cpp) {
// Generate CEXPORT functions for JNI to call.
val classIdentMunged = javaMarshal.fqTypename(ident, i)
.replaceAllLiterally("_", "_1")
.replaceAllLiterally(".", "_")
val prefix = "Java_" + classIdentMunged
def nativeHook(name: String, static: Boolean, params: Iterable[Field], ret: Option[TypeRef], f: => Unit) = {
val paramList = params.map(p => jniMarshal.paramType(p.ty) + " j_" + idJava.local(p.ident)).mkString(", ")
val jniRetType = jniMarshal.fqReturnType(ret)
w.wl
val methodNameMunged = name.replaceAllLiterally("_", "_1")
val zero = ret.fold("")(s => "0 /* value doesn't matter */")
if (static) {
w.wl(s"CJNIEXPORT $jniRetType JNICALL ${prefix}_$methodNameMunged(JNIEnv* jniEnv, jobject /*this*/${preComma(paramList)})").braced {
w.w("try").bracedEnd(s" JNI_TRANSLATE_EXCEPTIONS_RETURN(jniEnv, $zero)") {
w.wl(s"DJINNI_FUNCTION_PROLOGUE0(jniEnv);")
f
}
}
}
else {
w.wl(s"CJNIEXPORT $jniRetType JNICALL ${prefix}_00024CppProxy_$methodNameMunged(JNIEnv* jniEnv, jobject /*this*/, jlong nativeRef${preComma(paramList)})").braced {
w.w("try").bracedEnd(s" JNI_TRANSLATE_EXCEPTIONS_RETURN(jniEnv, $zero)") {
w.wl(s"DJINNI_FUNCTION_PROLOGUE1(jniEnv, nativeRef);")
f
}
}
}
}
nativeHook("nativeDestroy", false, Seq.empty, None, {
w.wl(s"delete reinterpret_cast<djinni::CppProxyHandle<$cppSelf>*>(nativeRef);")
})
for (m <- i.methods) {
val nativeAddon = if (m.static) "" else "native_"
nativeHook(nativeAddon + idJava.method(m.ident), m.static, m.params, m.ret, {
//w.wl(s"::${spec.jniNamespace}::JniLocalScope jscope(jniEnv, 10);")
if (!m.static) w.wl(s"const auto& ref = ::djinni::objectFromHandleAddress<$cppSelf>(nativeRef);")
m.params.foreach(p => {
if (isInterface(p.ty.resolved) && spec.cppNnCheckExpression.nonEmpty) {
// We have a non-optional interface in nn mode, assert that we're getting a non-null value
val paramName = idJava.local(p.ident)
val javaMethodName = idJava.method(m.ident)
val javaParams = m.params.map(p => javaMarshal.fqParamType(p.ty) + " " + idJava.local(p.ident))
val javaParamsString: String = javaParams.mkString("(", ", ", ")")
val functionString: String = s"${javaMarshal.fqTypename(ident, i)}#$javaMethodName$javaParamsString"
w.wl( s"""DJINNI_ASSERT_MSG(j_$paramName, jniEnv, "Got unexpected null parameter '$paramName' to function $functionString");""")
}
})
val methodName = idCpp.method(m.ident)
val ret = m.ret.fold("")(r => "auto r = ")
val call = if (m.static) s"$cppSelf::$methodName(" else s"ref->$methodName("
writeAlignedCall(w, ret + call, m.params, ")", p => jniMarshal.toCpp(p.ty, "j_" + idJava.local(p.ident)))
w.wl(";")
m.ret.fold()(r => w.wl(s"return ::djinni::release(${jniMarshal.fromCpp(r, "r")});"))
})
}
}
}
writeJniFiles(origin, typeParams.nonEmpty, ident, refs, writeJniPrototype, writeJniBody)
}
def writeJniFiles(origin: String, allInHeader: Boolean, ident: Ident, refs: JNIRefs, writeProto: IndentWriter => Unit, writeBody: IndentWriter => Unit) {
if (allInHeader) {
// Template class. Write both parts to .hpp.
writeJniHppFile(ident, origin, Iterable.concat(refs.jniHpp, refs.jniCpp), Nil, w => {
writeProto(w)
w.wl
writeBody(w)
})
}
else {
// Write prototype to .hpp and body to .cpp
writeJniHppFile(ident, origin, refs.jniHpp, Nil, writeProto)
writeJniCppFile(ident, origin, refs.jniCpp, writeBody)
}
}
def writeJniTypeParams(w: IndentWriter, params: Seq[TypeParam]) {
if (params.isEmpty) return
w.wl("template " + params.map(p => "typename " + spec.jniClassIdentStyle(p.ident)).mkString("<", ", ", ">"))
}
def typeParamsSignature(params: Seq[TypeParam]) = if(params.isEmpty) "" else params.map(p => spec.jniClassIdentStyle(p.ident)).mkString("<", ", ", ">")
def toJniCall(ty: TypeRef, f: String => String): String = toJniCall(ty.resolved, f, false)
def toJniCall(m: MExpr, f: String => String, needRef: Boolean): String = m.base match {
case p: MPrimitive => f(if (needRef) "Object" else IdentStyle.camelUpper(p.jName))
case MString => "(jstring)" + f("Object")
case MOptional => toJniCall(m.args.head, f, true)
case MBinary => "(jbyteArray)" + f("Object")
case _ => f("Object")
}
def cppTypeArgs(params: Seq[TypeParam]): String =
if (params.isEmpty) "" else params.map(p => idCpp.typeParam(p.ident)).mkString("<", ", ", ">")
}
| jeremiahyan/djinni | src/source/JNIGenerator.scala | Scala | apache-2.0 | 19,145 |
package tastytest
object PathDep {
trait Foo {
type T
def foo: T
}
trait Goo {
type F <: Foo
val foo: F
}
class Bar {
def bar(member: Foo): member.T = {
member.foo
}
def baz(member: Goo): member.foo.T = {
member.foo.foo
}
def qux(member: Goo): member.foo.type = {
member.foo
}
}
}
| scala/scala | test/tasty/run/src-3/tastytest/PathDep.scala | Scala | apache-2.0 | 356 |
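// A small usage sketch (written for this edit) of the path-dependent result types above:
// the compiler tracks the concrete T of the particular Foo value passed in.
object PathDepSketch {
  import tastytest.PathDep._

  def main(args: Array[String]): Unit = {
    val intFoo: Foo { type T = Int } = new Foo { type T = Int; def foo = 42 }
    val i: Int = (new Bar).bar(intFoo) // bar's result type member.T resolves to Int here
    println(i + 1)                     // 43
  }
}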
package tyler.breakout
import levels.RedBrick
abstract class Level {
def blockCollection: Seq[RedBrick]
def initialBatPosition: ImmutableVector2f
def initialBatVelocity: ImmutableVector2f
def initialBallPosition: ImmutableVector2f
def initialBallVelocity: ImmutableVector2f
def initialLifeCount: Int
}
| DaveTCode/BreakoutGameScala | src/main/scala/tyler/breakout/Level.scala | Scala | mit | 321 |
package com.arcusys.valamis.web.servlet.scorm
import com.arcusys.valamis.lesson.scorm.model.tracking.{ActivityState, ActivityStateNode, ObjectiveState}
import com.arcusys.valamis.lesson.scorm.service.ActivityServiceContract
import com.arcusys.valamis.lesson.scorm.service.lms.DataModelService
import com.arcusys.valamis.util.serialization.JsonHelper
import com.arcusys.valamis.web.service.Sanitizer
import com.arcusys.valamis.web.servlet.base.BaseApiController
import com.arcusys.valamis.web.servlet.request.Parameter
import org.scalatra.{ScalatraBase, SinatraRouteMatcher}
class RteServlet extends BaseApiController {
lazy val sanitizer = inject[Sanitizer]
  // Use Sinatra-style route matching; without this override the routes below return 404.
implicit override def string2RouteMatcher(path: String) = new SinatraRouteMatcher(path)
private lazy val activityManager = inject[ActivityServiceContract]
implicit val scalatra: ScalatraBase = this
post("/rte/Initialize") {
val userID = getUserId.toInt
val packageID = Parameter("packageID").intRequired
val organizationID = Parameter("organizationID").required
val currentAttempt = activityManager.getActiveAttempt(userID, packageID, organizationID)
val stateTree = activityManager.getActivityStateTreeForAttemptOption(currentAttempt)
if (stateTree.isEmpty) {
activityManager.createActivityStateTreeForAttempt(currentAttempt)
JsonHelper.toJson("status" -> false)
} else {
JsonHelper.toJson("status" -> true)
}
}
get("/rte/GetValue/:key") {
val userID = getUserId.toInt
val packageID = Parameter("packageID").intRequired
val activityID = Parameter("activityID").required
val currentAttempt = activityManager.getLastAttempltOption(userID, packageID).getOrElse(halt(404, "Attempt not found for this SCO and user"))
val dataModel = new DataModelService(currentAttempt, activityID)
JsonHelper.toJson(dataModel.getValue(Parameter("key").required))
}
get("/rte/GetValues") {
val userID = getUserId.toInt
val packageID = Parameter("packageID").intRequired
val activityID = Parameter("activityID").required
val currentAttempt = activityManager.getLastAttempltOption(userID, packageID).getOrElse(halt(404, "Attempt not found for this SCO and user"))
val dataModel = new DataModelService(currentAttempt, activityID)
JsonHelper.toJson(dataModel.getValues)
}
post("/rte/SetValue") {
val userID = getUserId.toInt
val value = sanitizer.sanitize(Parameter("value").required)
val packageID = Parameter("packageID").intRequired
val activityID = Parameter("activityID").required
val currentAttempt = activityManager.getLastAttempltOption(userID, packageID).getOrElse(halt(404, "Attempt not found for this SCO and user"))
val dataModel = new DataModelService(currentAttempt, activityID)
dataModel.setValue(Parameter("key").required, value)
}
post("/rte/SetValues") {
val userID = getUserId.toInt
val packageID = Parameter("packageID").intRequired
val activityID = Parameter("activityID").required
val currentAttempt = activityManager.getLastAttempltOption(userID, packageID).getOrElse(halt(404, "Attempt not found for this SCO and user"))
val amount = Parameter("amount").intRequired
val dataModel = new DataModelService(currentAttempt, activityID)
(0 until amount).foreach(index => {
dataModel.setValue(Parameter("dataKey" + index).required, Parameter("dataValue" + index).required)
})
}
get("/rte/ActivityInformation/:activityID") {
def serializeObjective(id: Option[String], state: ObjectiveState) = {
Some(
Map("identifier" -> id,
"objectiveProgressStatus" -> state.getSatisfiedStatus.isDefined,
"objectiveSatisfiedStatus" -> state.getSatisfiedStatus,
"objectiveMeasureStatus" -> state.getNormalizedMeasure.isDefined,
"objectiveNormalizedMeasure" -> state.getNormalizedMeasure
)
)
}
def serializePrimaryObjective(activityState: ActivityState) = {
val primaryObjective = activityState.activity.sequencing.primaryObjective
if (primaryObjective.isDefined) {
val primaryObjectiveState = activityState.objectiveStates(None)
serializeObjective(primaryObjective.get.id, primaryObjectiveState)
} else {
None
}
}
val userID = getUserId.toInt
val packageID = Parameter("packageID").intRequired
val attempt = activityManager.getLastAttempltOption(userID, packageID).getOrElse(halt(404, "Attempt not found for this SCO and user"))
    val tree = activityManager.getActivityStateTreeForAttemptOption(attempt).getOrElse(throw new Exception("Activity tree should exist!"))
val activity = tree(Parameter("activityID").required)
JsonHelper.toJson(Map("attemptProgressStatus" -> activity.get.item.getCompletionStatus().isEmpty,
"attemptCompletionStatus" -> activity.get.item.getCompletionStatus(),
"attemptCompletionAmount" -> activity.get.item.attemptCompletionAmount,
"isActivitySuspended" -> activity.get.item.suspended,
"primaryObjective" -> serializePrimaryObjective(activity.get.item),
"activityObjectives" -> activity.get.item.activity.sequencing.nonPrimaryObjectives.map(objective =>
serializeObjective(objective.id, activity.get.item.objectiveStates(objective.id)))
))
}
post("/rte/ActivityInformation/:activityID") {
def deserializeObjective(base: String, state: ObjectiveState) {
state.setSatisfiedStatus(Parameter(base + "[objectiveProgressStatus]").booleanOption("null"))
state.setNormalizedMeasure(Parameter(base + "[objectiveNormalizedMeasure]").bigDecimalOption("null"))
}
def deserializePrimaryObjective(activity: ActivityStateNode) {
val activityState = activity.item
val primaryObjective = activityState.activity.sequencing.primaryObjective
if (primaryObjective.isDefined) {
val primaryObjectiveState = activityState.objectiveStates(None)
deserializeObjective("primaryObjective", primaryObjectiveState)
}
}
def deserializeNonPrimaryObjective(activity: ActivityStateNode, index: String) {
val activityState = activity.item
val id = Parameter("activityObjectives[" + index + "][identifier]").required
val objectiveState = activityState.objectiveStates(Some(id))
deserializeObjective("activityObjectives[" + index + "]", objectiveState)
}
val userID = getUserId.toInt
val packageID = Parameter("packageID").intRequired
val attempt = activityManager.getLastAttempltOption(userID, packageID).getOrElse(halt(404, "Attempt not found for this SCO and user"))
val tree = activityManager.getActivityStateTreeForAttemptOption(attempt).getOrElse(throw new Exception("Activity tree should exist!"))
val activity = tree(Parameter("activityID").required)
activity.get.item.setCompletionStatus(Parameter("attemptCompletionStatus").booleanOption("null"))
activity.get.item.attemptCompletionAmount = Parameter("attemptCompletionAmount").bigDecimalOption("null")
activity.get.item.suspended = Parameter("isActivitySuspended").booleanRequired
val activityObjectivesCount = Parameter("activityObjectivesCount").intRequired
deserializePrimaryObjective(activity.get)
(0 until activityObjectivesCount).foreach(index => deserializeNonPrimaryObjective(activity.get, index.toString))
activityManager.updateActivityStateTree(attempt.id.toInt, tree)
}
post("/rte/Commit") {
}
}
| igor-borisov/valamis | valamis-portlets/src/main/scala/com/arcusys/valamis/web/servlet/scorm/RteServlet.scala | Scala | gpl-3.0 | 7,492 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.runtime.java8
@FunctionalInterface trait JFunction2$mcDII$sp extends Function2[Any, Any, Any] with Serializable {
def apply$mcDII$sp(v1: Int, v2: Int): Double
override def apply(v1: Any, v2: Any): Any = scala.runtime.BoxesRunTime.boxToDouble(apply$mcDII$sp(scala.runtime.BoxesRunTime.unboxToInt(v1), scala.runtime.BoxesRunTime.unboxToInt(v2)))
}
| scala/scala | src/library/scala/runtime/java8/JFunction2$mcDII$sp.scala | Scala | apache-2.0 | 655 |
package org.jetbrains.plugins.scala
package lang.refactoring.changeSignature
import java.util
import com.intellij.psi.PsiElement
import com.intellij.refactoring.changeSignature.MethodDescriptor
import com.intellij.refactoring.changeSignature.MethodDescriptor.ReadWriteOption
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScMethodLike, ScPrimaryConstructor}
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import scala.collection.JavaConverters._
/**
* Nikolay.Tropin
* 2014-08-29
*/
class ScalaMethodDescriptor(val fun: ScMethodLike) extends MethodDescriptor[ScalaParameterInfo, String] {
override def getName: String = fun match {
case fun: ScFunction =>
if (fun.isConstructor) fun.containingClass.name
else fun.name
case pc: ScPrimaryConstructor => pc.containingClass.name
case _ => ""
}
override def canChangeName: Boolean = !fun.isConstructor
override def canChangeVisibility: Boolean = !fun.isLocal
val parameters = parametersInner
override def getParameters: util.List[ScalaParameterInfo] = parameters.flatten.asJava
override def getParametersCount: Int = parameters.flatten.size
override def canChangeReturnType: ReadWriteOption =
if (fun.isConstructor) ReadWriteOption.None else ReadWriteOption.ReadWrite
override def canChangeParameters: Boolean = true
override def getMethod: PsiElement = fun
override def getVisibility: String = fun.getModifierList.accessModifier.fold("")(_.getText)
def returnTypeText: String = fun match {
case f: ScFunction => f.returnType.getOrAny.presentableText
case _ => ""
}
protected def parametersInner: Seq[Seq[ScalaParameterInfo]] = ScalaParameterInfo.allForMethod(fun)
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/refactoring/changeSignature/ScalaMethodDescriptor.scala | Scala | apache-2.0 | 1,723 |
/*******************************************************************************
* Copyright (c) 2012-2013 CWI
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
*
* * Michael Steindorfer - [email protected] - CWI
******************************************************************************/
package org.eclipse.imp.pdb.facts.impl.persistent.scala
import org.eclipse.imp.pdb.facts.IList
import org.eclipse.imp.pdb.facts.IListWriter
import org.eclipse.imp.pdb.facts.IValue
import org.eclipse.imp.pdb.facts.`type`._
import org.eclipse.imp.pdb.facts.`type`.TypeFactory
import collection.mutable.ListBuffer
import collection.JavaConversions.iterableAsScalaIterable
sealed class ListWriter extends IListWriter {
val xs: ListBuffer[IValue] = ListBuffer[IValue]()
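  // Added note: insert/insertAll prepend (via `++=:`/`prependAll`), insertAt works at an index,
  // and append/appendAll add to the end. Illustrative: starting from [x], insert(a, b) gives
  // [a, b, x]; append(c) then gives [a, b, x, c].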
override def insert(ys: IValue*): Unit = ys ++=: xs
override def insert(ys: Array[IValue], i: Int, n: Int) = this insert ((ys slice(i, i + n)): _*)
override def insertAll(ys: java.lang.Iterable[_ <: org.eclipse.imp.pdb.facts.IValue]) = xs prependAll ys
override def insertAt(i: Int, ys: IValue*) = xs insertAll(i, ys)
override def insertAt(i: Int, ys: Array[IValue], j: Int, n: Int) = this insertAt(i, (ys slice(j, j + n)): _*)
override def replaceAt(i: Int, x: IValue) = xs update(i, x)
override def append(ys: IValue*): Unit = xs ++= ys
override def appendAll(ys: java.lang.Iterable[_ <: org.eclipse.imp.pdb.facts.IValue]) = xs appendAll ys
override def done: IList = {
val res = emptyList ++ xs.result
List(`type` lub res, res)
}
}
| msteindorfer/oopsla15-artifact | pdb.values.persistent.scala/src/main/scala/org/eclipse/imp/pdb/facts/impl/persistent/scala/ListWriter.scala | Scala | epl-1.0 | 1,754 |
package mesosphere.marathon.api.v2
import java.util
import java.util.concurrent.atomic.AtomicInteger
import akka.event.EventStream
import com.codahale.metrics.MetricRegistry
import mesosphere.marathon._
import mesosphere.marathon.api.{ TestGroupManagerFixture, JsonTestHelper, TaskKiller, TestAuthFixture }
import mesosphere.marathon.core.appinfo.AppInfo.Embed
import mesosphere.marathon.core.appinfo._
import mesosphere.marathon.core.base.ConstantClock
import mesosphere.marathon.core.task.tracker.TaskTracker
import mesosphere.marathon.health.HealthCheckManager
import mesosphere.marathon.io.storage.StorageProvider
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.state.AppDefinition.VersionInfo.OnlyVersion
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state._
import mesosphere.marathon.test.{ MarathonActorSupport, Mockito }
import mesosphere.marathon.upgrade.DeploymentPlan
import mesosphere.util.{ CapConcurrentExecutions, CapConcurrentExecutionsMetrics }
import org.scalatest.{ GivenWhenThen, Matchers }
import play.api.libs.json.{ JsNumber, JsObject, Json }
import scala.collection.immutable
import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
class AppsResourceTest extends MarathonSpec with MarathonActorSupport with Matchers with Mockito with GivenWhenThen {
import mesosphere.marathon.api.v2.json.Formats._
test("Create a new app successfully") {
Given("An app and group")
val app = AppDefinition(id = PathId("/app"), cmd = Some("cmd"), versionInfo = OnlyVersion(Timestamp.zero))
val group = Group(PathId("/"), Set(app))
val plan = DeploymentPlan(group, group)
val body = Json.stringify(Json.toJson(app)).getBytes("UTF-8")
groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
groupManager.rootGroup() returns Future.successful(group)
When("The create request is made")
clock += 5.seconds
val response = appsResource.create(body, force = false, auth.request)
Then("It is successful")
response.getStatus should be(201)
And("the JSON is as expected, including a newly generated version")
import mesosphere.marathon.api.v2.json.Formats._
val expected = AppInfo(
app.copy(versionInfo = AppDefinition.VersionInfo.OnlyVersion(clock.now())),
maybeTasks = Some(immutable.Seq.empty),
maybeCounts = Some(TaskCounts.zero),
maybeDeployments = Some(immutable.Seq(Identifiable(plan.id)))
)
JsonTestHelper.assertThatJsonString(response.getEntity.asInstanceOf[String]).correspondsToJsonOf(expected)
}
test("Create a new app successfully using ports instead of portDefinitions") {
Given("An app and group")
val app = AppDefinition(
id = PathId("/app"),
cmd = Some("cmd"),
portDefinitions = PortDefinitions(1000, 1001),
versionInfo = OnlyVersion(Timestamp.zero)
)
val group = Group(PathId("/"), Set(app))
val plan = DeploymentPlan(group, group)
val appJson = Json.toJson(app).as[JsObject]
val appJsonWithOnlyPorts = appJson - "portDefinitions" + ("ports" -> Json.parse("""[1000, 1001]"""))
val body = Json.stringify(appJsonWithOnlyPorts).getBytes("UTF-8")
groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
groupManager.rootGroup() returns Future.successful(group)
When("The create request is made")
clock += 5.seconds
val response = appsResource.create(body, force = false, auth.request)
Then("It is successful")
response.getStatus should be(201)
And("the JSON is as expected, including a newly generated version")
import mesosphere.marathon.api.v2.json.Formats._
val expected = AppInfo(
app.copy(versionInfo = AppDefinition.VersionInfo.OnlyVersion(clock.now())),
maybeTasks = Some(immutable.Seq.empty),
maybeCounts = Some(TaskCounts.zero),
maybeDeployments = Some(immutable.Seq(Identifiable(plan.id)))
)
JsonTestHelper.assertThatJsonString(response.getEntity.asInstanceOf[String]).correspondsToJsonOf(expected)
}
test("Create a new app fails with Validation errors") {
Given("An app with validation errors")
val app = AppDefinition(id = PathId("/app"))
val group = Group(PathId("/"), Set(app))
val plan = DeploymentPlan(group, group)
val body = Json.stringify(Json.toJson(app)).getBytes("UTF-8")
groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
Then("A constraint violation exception is thrown")
val response = appsResource.create(body, false, auth.request)
response.getStatus should be(422)
}
test("Create a new app with float instance count fails") {
Given("The json of an invalid application")
val invalidAppJson = Json.stringify(Json.obj("id" -> "/foo", "cmd" -> "cmd", "instances" -> 0.1))
val group = Group(PathId("/"), Set.empty)
val plan = DeploymentPlan(group, group)
groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
groupManager.rootGroup() returns Future.successful(group)
Then("A constraint violation exception is thrown")
val body = invalidAppJson.getBytes("UTF-8")
intercept[RuntimeException] { appsResource.create(body, false, auth.request) }
}
test("Replace an existing application") {
Given("An app and group")
val app = AppDefinition(id = PathId("/app"), cmd = Some("foo"))
val group = Group(PathId("/"), Set(app))
val plan = DeploymentPlan(group, group)
val body = """{ "cmd": "bla" }""".getBytes("UTF-8")
groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
groupManager.app(PathId("/app")) returns Future.successful(Some(app))
When("The application is updated")
val response = appsResource.replace(app.id.toString, body, false, auth.request)
Then("The application is updated")
response.getStatus should be(200)
}
test("Replace an existing application using ports instead of portDefinitions") {
Given("An app and group")
val app = AppDefinition(id = PathId("/app"), cmd = Some("foo"))
val group = Group(PathId("/"), Set(app))
val plan = DeploymentPlan(group, group)
groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
groupManager.app(PathId("/app")) returns Future.successful(Some(app))
val appJson = Json.toJson(app).as[JsObject]
val appJsonWithOnlyPorts = appJson - "uris" - "portDefinitions" - "version" +
("ports" -> Json.parse("""[1000, 1001]"""))
val body = Json.stringify(appJsonWithOnlyPorts).getBytes("UTF-8")
When("The application is updated")
val response = appsResource.replace(app.id.toString, body, false, auth.request)
Then("The application is updated")
response.getStatus should be(200)
}
test("Replace an existing application fails due to docker container validation") {
Given("An app update with an invalid container (missing docker field)")
val app = AppDefinition(id = PathId("/app"), cmd = Some("foo"))
val group = Group(PathId("/"), Set(app))
val plan = DeploymentPlan(group, group)
val body =
"""{
| "cmd": "sleep 1",
| "container": {
| "type": "DOCKER"
| }
|}""".stripMargin.getBytes("UTF-8")
groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
When("The application is updated")
val response = appsResource.replace(app.id.toString, body, force = false, auth.request)
Then("The return code indicates a validation error for container.docker")
response.getStatus should be(422)
response.getEntity.toString should include("/container/docker")
response.getEntity.toString should include("must not be empty")
}
test("Creating an app with broken volume definition fails with readable error message") {
Given("An app update with an invalid volume (wrong field name)")
val app = AppDefinition(id = PathId("/app"), cmd = Some("foo"))
val group = Group(PathId("/"), Set(app))
val plan = DeploymentPlan(group, group)
val body =
"""
|{
| "id": "resident1",
| "cmd": "sleep 100",
| "instances": 0,
| "container": {
| "type": "MESOS",
| "volumes": [{
| "containerPath": "/var",
| "persistent_WRONG_FIELD_NAME": {
| "size": 10
| },
| "mode": "RW"
| }]
| }
|}
""".stripMargin.getBytes("UTF-8")
groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
When("The request is processed")
val response = appsResource.create(body, false, auth.request)
Then("The return code indicates that the hostPath of volumes[0] is missing") // although the wrong field should fail
response.getStatus should be(422)
response.getEntity.toString should include("/container/volumes(0)/hostPath")
response.getEntity.toString should include("must not be empty")
}
test("Replace an existing application fails due to mesos container validation") {
Given("An app update with an invalid container (missing docker field)")
val app = AppDefinition(id = PathId("/app"), cmd = Some("foo"))
val group = Group(PathId("/"), Set(app))
val plan = DeploymentPlan(group, group)
val body =
"""{
| "cmd": "sleep 1",
| "container": {
| "type": "MESOS",
| "docker": {
| "image": "/test:latest"
| }
| }
|}""".stripMargin.getBytes("UTF-8")
groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
When("The application is updated")
val response = appsResource.replace(app.id.toString, body, force = false, auth.request)
Then("The return code indicates a validation error for container.docker")
response.getStatus should be(422)
response.getEntity.toString should include("/container/docker")
response.getEntity.toString should include("must be empty")
}
test("Restart an existing app") {
val app = AppDefinition(id = PathId("/app"))
val group = Group(PathId("/"), Set(app))
val plan = DeploymentPlan(group, group)
service.deploy(any, any) returns Future.successful(())
groupManager.app(PathId("/app")) returns Future.successful(Some(app))
groupManager.updateApp(any, any, any, any, any) returns Future.successful(plan)
val response = appsResource.restart(app.id.toString, force = true, auth.request)
response.getStatus should be(200)
}
test("Restart a non existing app will fail") {
val missing = PathId("/app")
groupManager.app(PathId("/app")) returns Future.successful(None)
groupManager.updateApp(any, any, any, any, any) returns Future.failed(new UnknownAppException(missing))
intercept[UnknownAppException] { appsResource.restart(missing.toString, force = true, auth.request) }
}
test("Index has counts and deployments by default (regression for #2171)") {
Given("An app and group")
val app = AppDefinition(id = PathId("/app"), cmd = Some("foo"))
val expectedEmbeds: Set[Embed] = Set(Embed.Counts, Embed.Deployments)
val appInfo = AppInfo(app, maybeDeployments = Some(Seq(Identifiable("deployment-123"))), maybeCounts = Some(TaskCounts(1, 2, 3, 4)))
appInfoService.selectAppsBy(any, eq(expectedEmbeds)) returns Future.successful(Seq(appInfo))
When("The the index is fetched without any filters")
val response = appsResource.index(null, null, null, new java.util.HashSet(), auth.request)
Then("The response holds counts and deployments")
val appJson = Json.parse(response.getEntity.asInstanceOf[String])
    (appJson \ "apps" \\ "deployments" head) should be (Json.arr(Json.obj("id" -> "deployment-123")))
    (appJson \ "apps" \\ "tasksStaged" head) should be (JsNumber(1))
}
test("Search apps can be filtered") {
val app1 = AppDefinition(id = PathId("/app/service-a"), cmd = Some("party hard"), labels = Map("a" -> "1", "b" -> "2"))
val app2 = AppDefinition(id = PathId("/app/service-b"), cmd = Some("work hard"), labels = Map("a" -> "1", "b" -> "3"))
val apps = Set(app1, app2)
def search(cmd: Option[String], id: Option[String], label: Option[String]): Set[AppDefinition] = {
val selector = appsResource.search(cmd, id, label)
apps.filter(selector.matches)
}
search(cmd = None, id = None, label = None) should be(Set(app1, app2))
search(cmd = Some(""), id = None, label = None) should be(Set(app1, app2))
search(cmd = Some("party"), id = None, label = None) should be(Set(app1))
search(cmd = Some("work"), id = None, label = None) should be(Set(app2))
search(cmd = Some("hard"), id = None, label = None) should be(Set(app1, app2))
search(cmd = Some("none"), id = None, label = None) should be(Set.empty)
search(cmd = None, id = Some("app"), label = None) should be(Set(app1, app2))
search(cmd = None, id = Some("service-a"), label = None) should be(Set(app1))
search(cmd = Some("party"), id = Some("app"), label = None) should be(Set(app1))
search(cmd = Some("work"), id = Some("app"), label = None) should be(Set(app2))
search(cmd = Some("hard"), id = Some("service-a"), label = None) should be(Set(app1))
search(cmd = Some(""), id = Some(""), label = None) should be(Set(app1, app2))
search(cmd = None, id = None, label = Some("b==2")) should be(Set(app1))
search(cmd = Some("party"), id = Some("app"), label = Some("a==1")) should be(Set(app1))
search(cmd = Some("work"), id = Some("app"), label = Some("a==1")) should be(Set(app2))
search(cmd = Some("hard"), id = Some("service-a"), label = Some("a==1")) should be(Set(app1))
search(cmd = Some(""), id = Some(""), label = Some("")) should be(Set(app1, app2))
}
test("access without authentication is denied") {
Given("An unauthenticated request")
auth.authenticated = false
val req = auth.request
val embed = new util.HashSet[String]()
val app = """{"id":"/a/b/c","cmd":"foo","ports":[]}"""
groupManager.rootGroup() returns Future.successful(Group.empty)
When("we try to fetch the list of apps")
val index = appsResource.index("", "", "", embed, req)
Then("we receive a NotAuthenticated response")
index.getStatus should be(auth.NotAuthenticatedStatus)
When("we try to add an app")
val create = appsResource.create(app.getBytes("UTF-8"), false, req)
Then("we receive a NotAuthenticated response")
create.getStatus should be(auth.NotAuthenticatedStatus)
When("we try to fetch an app")
val show = appsResource.show("", embed, req)
Then("we receive a NotAuthenticated response")
show.getStatus should be(auth.NotAuthenticatedStatus)
When("we try to update an app")
val replace = appsResource.replace("", app.getBytes("UTF-8"), false, req)
Then("we receive a NotAuthenticated response")
replace.getStatus should be(auth.NotAuthenticatedStatus)
When("we try to update multiple apps")
val replaceMultiple = appsResource.replaceMultiple(false, s"[$app]".getBytes("UTF-8"), req)
Then("we receive a NotAuthenticated response")
replaceMultiple.getStatus should be(auth.NotAuthenticatedStatus)
When("we try to delete an app")
val delete = appsResource.delete(false, "", req)
Then("we receive a NotAuthenticated response")
delete.getStatus should be(auth.NotAuthenticatedStatus)
When("we try to restart an app")
val restart = appsResource.restart("", false, req)
Then("we receive a NotAuthenticated response")
restart.getStatus should be(auth.NotAuthenticatedStatus)
}
test("access without authorization is denied") {
Given("A real Group Manager with one app")
useRealGroupManager()
val group = Group(PathId.empty, apps = Set(AppDefinition("/a".toRootPath)))
groupRepository.group(GroupRepository.zkRootName) returns Future.successful(Some(group))
groupRepository.rootGroup returns Future.successful(Some(group))
Given("An unauthorized request")
auth.authenticated = true
auth.authorized = false
val req = auth.request
val embed = new util.HashSet[String]()
val app = """{"id":"/a","cmd":"foo","ports":[]}"""
config.zkTimeoutDuration returns 5.seconds
When("we try to create an app")
val create = appsResource.create(app.getBytes("UTF-8"), false, req)
Then("we receive a NotAuthorized response")
create.getStatus should be(auth.UnauthorizedStatus)
When("we try to fetch an app")
val show = appsResource.show("*", embed, req)
Then("we receive a NotAuthorized response")
show.getStatus should be(auth.UnauthorizedStatus)
When("we try to update an app")
val replace = appsResource.replace("/a", app.getBytes("UTF-8"), false, req)
Then("we receive a NotAuthorized response")
replace.getStatus should be(auth.UnauthorizedStatus)
When("we try to update multiple apps")
val replaceMultiple = appsResource.replaceMultiple(false, s"[$app]".getBytes("UTF-8"), req)
Then("we receive a NotAuthorized response")
replaceMultiple.getStatus should be(auth.UnauthorizedStatus)
When("we try to remove an app")
val delete = appsResource.delete(false, "/a", req)
Then("we receive a NotAuthorized response")
delete.getStatus should be(auth.UnauthorizedStatus)
When("we try to restart an app")
val restart = appsResource.restart("/a", false, req)
Then("we receive a NotAuthorized response")
restart.getStatus should be(auth.UnauthorizedStatus)
}
test("access with limited authorization gives a filtered apps listing") {
Given("An authorized identity with limited ACL's")
auth.authFn = (resource: Any) => {
val id = resource match {
case app: AppDefinition => app.id.toString
case _ => resource.asInstanceOf[Group].id.toString
}
id.startsWith("/visible")
}
implicit val identity = auth.identity
val selector = appsResource.selectAuthorized(AppSelector.forall(Seq.empty))
val apps = Seq(
AppDefinition("/visible/app".toPath),
AppDefinition("/visible/other/foo/app".toPath),
AppDefinition("/secure/app".toPath),
AppDefinition("/root".toPath),
AppDefinition("/other/great/app".toPath)
)
When("The selector selects applications")
val filtered = apps.filter(selector.matches)
Then("The list of filtered apps only contains apps according to ACL's")
filtered should have size 2
filtered.head should be (AppDefinition("/visible/app".toPath))
filtered(1) should be (AppDefinition("/visible/other/foo/app".toPath))
}
test("delete with authorization gives a 404 if the app doesn't exist") {
Given("An authenticated identity with full access")
auth.authenticated = true
auth.authorized = false
val req = auth.request
When("We try to remove a non-existing application")
useRealGroupManager()
groupRepository.group(GroupRepository.zkRootName) returns Future.successful(Some(Group.empty))
groupRepository.rootGroup returns Future.successful(Some(Group.empty))
Then("A 404 is returned")
intercept[UnknownAppException] { appsResource.delete(false, "/foo", req) }
}
var clock: ConstantClock = _
var eventBus: EventStream = _
var service: MarathonSchedulerService = _
var taskTracker: TaskTracker = _
var taskKiller: TaskKiller = _
var healthCheckManager: HealthCheckManager = _
var taskFailureRepo: TaskFailureRepository = _
var config: MarathonConf = _
var groupManager: GroupManager = _
var appInfoService: AppInfoService = _
var appsResource: AppsResource = _
var auth: TestAuthFixture = _
var appRepository: AppRepository = _
var appTaskResource: AppTasksResource = _
var groupRepository: GroupRepository = _
before {
clock = ConstantClock()
auth = new TestAuthFixture
eventBus = mock[EventStream]
service = mock[MarathonSchedulerService]
taskTracker = mock[TaskTracker]
taskKiller = mock[TaskKiller]
healthCheckManager = mock[HealthCheckManager]
taskFailureRepo = mock[TaskFailureRepository]
config = mock[MarathonConf]
appInfoService = mock[AppInfoService]
groupManager = mock[GroupManager]
appRepository = mock[AppRepository]
appTaskResource = mock[AppTasksResource]
appsResource = new AppsResource(
clock,
eventBus,
appTaskResource,
service,
appInfoService,
config,
auth.auth,
auth.auth,
groupManager
)
}
private[this] def useRealGroupManager(): Unit = {
val f = new TestGroupManagerFixture()
service = f.service
config = f.config
appRepository = f.appRepository
groupManager = f.groupManager
groupRepository = f.groupRepository
appsResource = new AppsResource(
clock,
eventBus,
appTaskResource,
service,
appInfoService,
config,
auth.auth,
auth.auth,
groupManager
)
}
}
| pgkelley4/marathon | src/test/scala/mesosphere/marathon/api/v2/AppsResourceTest.scala | Scala | apache-2.0 | 21,112 |
package mllib.perf
import org.json4s.JsonAST._
import org.json4s.JsonDSL._
import org.apache.spark.SparkContext
import org.apache.spark.ml.PredictionModel
import org.apache.spark.ml.classification.{GBTClassificationModel, GBTClassifier, RandomForestClassificationModel, RandomForestClassifier, LogisticRegression}
import org.apache.spark.ml.regression.{GBTRegressionModel, GBTRegressor, RandomForestRegressionModel, RandomForestRegressor, LinearRegression}
import org.apache.spark.mllib.classification._
import org.apache.spark.mllib.clustering.{KMeans, KMeansModel}
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.mllib.regression._
import org.apache.spark.mllib.tree.{GradientBoostedTrees, RandomForest}
import org.apache.spark.mllib.tree.configuration.{Algo, BoostingStrategy, QuantileStrategy, Strategy}
import org.apache.spark.mllib.tree.impurity.Variance
import org.apache.spark.mllib.tree.loss.{LogLoss, SquaredError}
import org.apache.spark.mllib.tree.model.{GradientBoostedTreesModel, RandomForestModel}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.SQLContext
import mllib.perf.util.{DataGenerator, DataLoader}
/** Parent class for tests which run on a large dataset. */
abstract class RegressionAndClassificationTests[M](sc: SparkContext) extends PerfTest {
def runTest(rdd: RDD[LabeledPoint]): M
def validate(model: M, rdd: RDD[LabeledPoint]): Double
val NUM_EXAMPLES = ("num-examples", "number of examples for regression tests")
val NUM_FEATURES = ("num-features", "number of features of each example for regression tests")
intOptions = intOptions ++ Seq(NUM_FEATURES)
longOptions = Seq(NUM_EXAMPLES)
var rdd: RDD[LabeledPoint] = _
var testRdd: RDD[LabeledPoint] = _
override def run(): JValue = {
var start = System.currentTimeMillis()
val model = runTest(rdd)
val trainingTime = (System.currentTimeMillis() - start).toDouble / 1000.0
start = System.currentTimeMillis()
val trainingMetric = validate(model, rdd)
val testTime = (System.currentTimeMillis() - start).toDouble / 1000.0
val testMetric = validate(model, testRdd)
Map("trainingTime" -> trainingTime, "testTime" -> testTime,
"trainingMetric" -> trainingMetric, "testMetric" -> testMetric)
}
/**
* For classification
* @param predictions RDD over (prediction, truth) for each instance
* @return Percent correctly classified
*/
def calculateAccuracy(predictions: RDD[(Double, Double)], numExamples: Long): Double = {
predictions.map{case (pred, label) =>
if (pred == label) 1.0 else 0.0
}.sum() * 100.0 / numExamples
}
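  // Illustrative only: predictions of (1.0, 1.0) and (0.0, 1.0) with numExamples = 2 yield 50.0.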
/**
* For regression
* @param predictions RDD over (prediction, truth) for each instance
* @return Root mean squared error (RMSE)
*/
def calculateRMSE(predictions: RDD[(Double, Double)], numExamples: Long): Double = {
val error = predictions.map{ case (pred, label) =>
(pred - label) * (pred - label)
}.sum()
math.sqrt(error / numExamples)
}
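  // Illustrative only: predictions of (2.0, 0.0) and (0.0, 0.0) with numExamples = 2 yield sqrt(4 / 2) ≈ 1.41.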
}
/** Parent class for Generalized Linear Model (GLM) tests */
abstract class GLMTests(sc: SparkContext)
extends RegressionAndClassificationTests[GeneralizedLinearModel](sc) {
val STEP_SIZE = ("step-size", "step size for SGD")
val NUM_ITERATIONS = ("num-iterations", "number of iterations for the algorithm")
val REG_TYPE = ("reg-type", "type of regularization: none, l1, l2, elastic-net")
val ELASTIC_NET_PARAM = ("elastic-net-param", "elastic-net param, 0.0 for L2, and 1.0 for L1")
val REG_PARAM = ("reg-param", "the regularization parameter against overfitting")
val OPTIMIZER = ("optimizer", "optimization algorithm (elastic-net only supports lbfgs): sgd, lbfgs")
intOptions = intOptions ++ Seq(NUM_ITERATIONS)
doubleOptions = doubleOptions ++ Seq(ELASTIC_NET_PARAM, STEP_SIZE, REG_PARAM)
stringOptions = stringOptions ++ Seq(REG_TYPE, OPTIMIZER)
}
class GLMRegressionTest(sc: SparkContext) extends GLMTests(sc) {
val INTERCEPT = ("intercept", "intercept for random data generation")
val EPS = ("epsilon", "scale factor for the noise during data generation")
val LOSS = ("loss", "loss to minimize. Supported: l2 (squared error).")
doubleOptions = doubleOptions ++ Seq(INTERCEPT, EPS)
stringOptions = stringOptions ++ Seq(LOSS)
val options = intOptions ++ stringOptions ++ booleanOptions ++ doubleOptions ++ longOptions
addOptionsToParser()
override def createInputData(seed: Long) = {
val numExamples: Long = longOptionValue(NUM_EXAMPLES)
val numFeatures: Int = intOptionValue(NUM_FEATURES)
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val intercept: Double = doubleOptionValue(INTERCEPT)
val eps: Double = doubleOptionValue(EPS)
val data = DataGenerator.generateLabeledPoints(sc, math.ceil(numExamples * 1.25).toLong,
numFeatures, intercept, eps, numPartitions, seed)
val split = data.randomSplit(Array(0.8, 0.2), seed)
rdd = split(0).cache()
testRdd = split(1)
// Materialize rdd
println("Num Examples: " + rdd.count())
}
override def validate(model: GeneralizedLinearModel, rdd: RDD[LabeledPoint]): Double = {
val numExamples = rdd.count()
val predictions: RDD[(Double, Double)] = rdd.map { example =>
(model.predict(example.features), example.label)
}
calculateRMSE(predictions, numExamples)
}
override def runTest(rdd: RDD[LabeledPoint]): GeneralizedLinearModel = {
val stepSize = doubleOptionValue(STEP_SIZE)
val loss = stringOptionValue(LOSS)
val regType = stringOptionValue(REG_TYPE)
val regParam = doubleOptionValue(REG_PARAM)
val elasticNetParam = doubleOptionValue(ELASTIC_NET_PARAM)
val numIterations = intOptionValue(NUM_ITERATIONS)
val optimizer = stringOptionValue(OPTIMIZER)
// Linear Regression only supports squared loss for now.
if (!Array("l2").contains(loss)) {
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown loss ($loss). Supported values: l2.")
}
if (Array("sgd").contains(optimizer)) {
if (!Array("none", "l1", "l2").contains(regType)) {
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown regType ($regType) with sgd. Supported values: none, l1, l2.")
}
} else if (Array("lbfgs").contains(optimizer)) {
if (!Array("elastic-net").contains(regType)) {
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown regType ($regType) with lbfgs. Supported values: elastic-net.")
}
} else {
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown optimizer ($optimizer). Supported values: sgd, lbfgs.")
}
(loss, regType) match {
case ("l2", "none") =>
val lr = new LinearRegressionWithSGD().setIntercept(addIntercept = true)
lr.optimizer.setNumIterations(numIterations).setStepSize(stepSize)
lr.run(rdd)
case ("l2", "l1") =>
val lasso = new LassoWithSGD().setIntercept(addIntercept = true)
lasso.optimizer.setNumIterations(numIterations).setStepSize(stepSize).setRegParam(regParam)
lasso.run(rdd)
case ("l2", "l2") =>
val rr = new RidgeRegressionWithSGD().setIntercept(addIntercept = true)
rr.optimizer.setNumIterations(numIterations).setStepSize(stepSize).setRegParam(regParam)
rr.run(rdd)
case ("l2", "elastic-net") =>
println("WARNING: Linear Regression with elastic-net in ML package uses LBFGS/OWLQN for optimization" +
" which ignores stepSize and uses numIterations for maxIter in Spark 1.5.")
val rr = new LinearRegression().setElasticNetParam(elasticNetParam).setRegParam(regParam).setMaxIter(numIterations)
val sqlContext = new SQLContext(rdd.context)
import sqlContext.implicits._
val mlModel = rr.fit(rdd.toDF())
new LinearRegressionModel(mlModel.weights, mlModel.intercept)
case _ =>
throw new IllegalArgumentException(
s"GLMRegressionTest given incompatible (loss, regType) = ($loss, $regType)." +
s" Note the set of supported combinations increases in later Spark versions.")
}
}
}
class GLMClassificationTest(sc: SparkContext) extends GLMTests(sc) {
val THRESHOLD = ("per-negative", "probability for a negative label during data generation")
val SCALE = ("scale-factor", "scale factor for the noise during data generation")
val LOSS = ("loss", "loss to minimize. Supported: logistic, hinge (SVM).")
doubleOptions = doubleOptions ++ Seq(THRESHOLD, SCALE)
stringOptions = stringOptions ++ Seq(LOSS)
val options = intOptions ++ stringOptions ++ booleanOptions ++ doubleOptions ++ longOptions
addOptionsToParser()
override def validate(model: GeneralizedLinearModel, rdd: RDD[LabeledPoint]): Double = {
val numExamples = rdd.count()
val predictions: RDD[(Double, Double)] = rdd.map { example =>
(model.predict(example.features), example.label)
}
calculateAccuracy(predictions, numExamples)
}
override def createInputData(seed: Long) = {
val numExamples: Long = longOptionValue(NUM_EXAMPLES)
val numFeatures: Int = intOptionValue(NUM_FEATURES)
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val threshold: Double = doubleOptionValue(THRESHOLD)
val sf: Double = doubleOptionValue(SCALE)
val data = DataGenerator.generateClassificationLabeledPoints(sc,
math.ceil(numExamples * 1.25).toLong, numFeatures, threshold, sf, numPartitions, seed)
val split = data.randomSplit(Array(0.8, 0.2), seed)
rdd = split(0).cache()
testRdd = split(1)
// Materialize rdd
println("Num Examples: " + rdd.count())
}
override def runTest(rdd: RDD[LabeledPoint]): GeneralizedLinearModel = {
val stepSize = doubleOptionValue(STEP_SIZE)
val loss = stringOptionValue(LOSS)
val regType = stringOptionValue(REG_TYPE)
val regParam = doubleOptionValue(REG_PARAM)
val elasticNetParam = doubleOptionValue(ELASTIC_NET_PARAM)
val numIterations = intOptionValue(NUM_ITERATIONS)
val optimizer = stringOptionValue(OPTIMIZER)
// For classification problem in GLM, we currently support logistic loss and hinge loss.
if (!Array("logistic", "hinge").contains(loss)) {
throw new IllegalArgumentException(
s"GLMClassificationTest run with unknown loss ($loss). Supported values: logistic, hinge.")
}
if (Array("sgd").contains(optimizer)) {
if (!Array("none", "l1", "l2").contains(regType)) {
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown regType ($regType) with sgd. Supported values: none, l1, l2.")
}
} else if (Array("lbfgs").contains(optimizer)) {
if (!Array("logistic").contains(loss)) {
throw new IllegalArgumentException(
s"GLMRegressionTest with lbfgs only supports logistic loss.")
}
if (!Array("none", "elastic-net").contains(regType)) {
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown regType ($regType) with lbfgs. Supported values: none, elastic-net.")
}
} else {
throw new IllegalArgumentException(
s"GLMRegressionTest run with unknown optimizer ($optimizer). Supported values: sgd, lbfgs.")
}
(loss, regType, optimizer) match {
case ("logistic", "none", "sgd") =>
LogisticRegressionWithSGD.train(rdd, numIterations, stepSize)
case ("logistic", "none", "lbfgs") =>
println("WARNING: LogisticRegressionWithLBFGS ignores numIterations, stepSize" +
" in this Spark version.")
new LogisticRegressionWithLBFGS().run(rdd)
case ("logistic", "elastic-net", _) =>
println("WARNING: Logistic Regression with elastic-net in ML package uses LBFGS/OWLQN for optimization" +
" which ignores stepSize and uses numIterations for maxIter in Spark 1.5.")
val lor = new LogisticRegression().setElasticNetParam(elasticNetParam).setRegParam(regParam).setMaxIter(numIterations)
val sqlContext = new SQLContext(rdd.context)
import sqlContext.implicits._
val mlModel = lor.fit(rdd.toDF())
new LogisticRegressionModel(mlModel.weights, mlModel.intercept)
case ("hinge", "l2", "sgd") =>
SVMWithSGD.train(rdd, numIterations, stepSize, regParam)
case _ =>
throw new IllegalArgumentException(
s"GLMClassificationTest given incompatible (loss, regType) = ($loss, $regType)." +
s" Note the set of supported combinations increases in later Spark versions.")
}
}
}
abstract class RecommendationTests(sc: SparkContext) extends PerfTest {
def runTest(rdd: RDD[Rating]): MatrixFactorizationModel
val NUM_USERS = ("num-users", "number of users for recommendation tests")
val NUM_PRODUCTS = ("num-products", "number of features of each example for recommendation tests")
val NUM_RATINGS = ("num-ratings", "number of ratings for recommendation tests")
val RANK = ("rank", "rank of factorized matrices for recommendation tests")
val IMPLICIT = ("implicit-prefs", "use implicit ratings")
val NUM_ITERATIONS = ("num-iterations", "number of iterations for the algorithm")
val REG_PARAM = ("reg-param", "the regularization parameter against overfitting")
intOptions = intOptions ++ Seq(NUM_USERS, NUM_PRODUCTS, RANK, NUM_ITERATIONS)
longOptions = longOptions ++ Seq(NUM_RATINGS)
booleanOptions = booleanOptions ++ Seq(IMPLICIT)
doubleOptions = doubleOptions ++ Seq(REG_PARAM)
val options = intOptions ++ stringOptions ++ booleanOptions ++ longOptions ++ doubleOptions
addOptionsToParser()
var rdd: RDD[Rating] = _
var testRdd: RDD[Rating] = _
override def createInputData(seed: Long) = {
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val numUsers: Int = intOptionValue(NUM_USERS)
val numProducts: Int = intOptionValue(NUM_PRODUCTS)
val numRatings: Long = longOptionValue(NUM_RATINGS)
val implicitRatings: Boolean = booleanOptionValue(IMPLICIT)
val data = DataGenerator.generateRatings(sc, numUsers, numProducts,
      math.ceil(numRatings * 1.25).toLong, implicitRatings, numPartitions, seed)
rdd = data._1.cache()
testRdd = data._2
// Materialize rdd
println("Num Examples: " + rdd.count())
}
def validate(model: MatrixFactorizationModel,
data: RDD[Rating]): Double = {
val implicitPrefs: Boolean = booleanOptionValue(IMPLICIT)
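    // Descriptive note: for implicit preferences, predicted ratings are clamped to [0, 1] below
    // before being joined with the observed ratings for the RMSE computation.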
val predictions: RDD[Rating] = model.predict(data.map(x => (x.user, x.product)))
val predictionsAndRatings: RDD[(Double, Double)] = predictions.map{ x =>
def mapPredictedRating(r: Double) = if (implicitPrefs) math.max(math.min(r, 1.0), 0.0) else r
((x.user, x.product), mapPredictedRating(x.rating))
}.join(data.map(x => ((x.user, x.product), x.rating))).values
math.sqrt(predictionsAndRatings.map(x => (x._1 - x._2) * (x._1 - x._2)).mean())
}
override def run(): JValue = {
var start = System.currentTimeMillis()
val model = runTest(rdd)
val trainingTime = (System.currentTimeMillis() - start).toDouble / 1000.0
start = System.currentTimeMillis()
val trainingMetric = validate(model, rdd)
val testTime = (System.currentTimeMillis() - start).toDouble / 1000.0
val testMetric = validate(model, testRdd)
Map("trainingTime" -> trainingTime, "testTime" -> testTime,
"trainingMetric" -> trainingMetric, "testMetric" -> testMetric)
}
}
abstract class ClusteringTests(sc: SparkContext) extends PerfTest {
def runTest(rdd: RDD[Vector]): KMeansModel
val NUM_POINTS = ("num-points", "number of points for clustering tests")
val NUM_COLUMNS = ("num-columns", "number of columns for each point for clustering tests")
val NUM_CENTERS = ("num-centers", "number of centers for clustering tests")
val NUM_ITERATIONS = ("num-iterations", "number of iterations for the algorithm")
intOptions = intOptions ++ Seq(NUM_CENTERS, NUM_COLUMNS, NUM_ITERATIONS)
longOptions = longOptions ++ Seq(NUM_POINTS)
val options = intOptions ++ stringOptions ++ booleanOptions ++ longOptions ++ doubleOptions
addOptionsToParser()
var rdd: RDD[Vector] = _
var testRdd: RDD[Vector] = _
def validate(model: KMeansModel, rdd: RDD[Vector]): Double = {
val numPoints = rdd.cache().count()
val error = model.computeCost(rdd)
    math.sqrt(error / numPoints)
}
override def createInputData(seed: Long) = {
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val numPoints: Long = longOptionValue(NUM_POINTS)
val numColumns: Int = intOptionValue(NUM_COLUMNS)
val numCenters: Int = intOptionValue(NUM_CENTERS)
    val data = DataGenerator.generateKMeansVectors(sc, math.ceil(numPoints * 1.25).toLong, numColumns,
numCenters, numPartitions, seed)
val split = data.randomSplit(Array(0.8, 0.2), seed)
rdd = split(0).cache()
testRdd = split(1)
// Materialize rdd
println("Num Examples: " + rdd.count())
}
override def run(): JValue = {
var start = System.currentTimeMillis()
val model = runTest(rdd)
val trainingTime = (System.currentTimeMillis() - start).toDouble / 1000.0
start = System.currentTimeMillis()
val trainingMetric = validate(model, rdd)
val testTime = (System.currentTimeMillis() - start).toDouble / 1000.0
val testMetric = validate(model, testRdd)
Map("trainingTime" -> trainingTime, "testTime" -> testTime,
"trainingMetric" -> trainingMetric, "testMetric" -> testMetric)
}
}
// Classification Algorithms
class NaiveBayesTest(sc: SparkContext)
extends RegressionAndClassificationTests[NaiveBayesModel](sc) {
val THRESHOLD = ("per-negative", "probability for a negative label during data generation")
val SCALE = ("scale-factor", "scale factor for the noise during data generation")
val SMOOTHING = ("nb-lambda", "the smoothing parameter lambda for Naive Bayes")
val MODEL_TYPE = ("model-type", "either multinomial (default) or bernoulli")
doubleOptions = doubleOptions ++ Seq(THRESHOLD, SCALE, SMOOTHING)
stringOptions = stringOptions ++ Seq(MODEL_TYPE)
val options = intOptions ++ stringOptions ++ booleanOptions ++ doubleOptions ++ longOptions
addOptionsToParser()
  /** Note: uses the same data generation as GLMClassificationTest; this should eventually be replaced with a dedicated generator. */
override def createInputData(seed: Long) = {
val numExamples: Long = longOptionValue(NUM_EXAMPLES)
val numFeatures: Int = intOptionValue(NUM_FEATURES)
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val threshold: Double = doubleOptionValue(THRESHOLD)
val sf: Double = doubleOptionValue(SCALE)
val modelType = stringOptionValue(MODEL_TYPE)
val data = if (modelType == "bernoulli") {
DataGenerator.generateBinaryLabeledPoints(sc,
math.ceil(numExamples * 1.25).toLong, numFeatures, threshold, numPartitions, seed)
} else {
val negdata = DataGenerator.generateClassificationLabeledPoints(sc,
math.ceil(numExamples * 1.25).toLong, numFeatures, threshold, sf, numPartitions, seed)
val dataNonneg = negdata.map { lp =>
LabeledPoint(lp.label, Vectors.dense(lp.features.toArray.map(math.abs)))
}
dataNonneg
}
val split = data.randomSplit(Array(0.8, 0.2), seed)
rdd = split(0).cache()
testRdd = split(1)
// Materialize rdd
println("Num Examples: " + rdd.count())
}
override def validate(model: NaiveBayesModel, rdd: RDD[LabeledPoint]): Double = {
val numExamples = rdd.count()
val predictions: RDD[(Double, Double)] = rdd.map { example =>
(model.predict(example.features), example.label)
}
calculateAccuracy(predictions, numExamples)
}
override def runTest(rdd: RDD[LabeledPoint]): NaiveBayesModel = {
val lambda = doubleOptionValue(SMOOTHING)
val modelType = stringOptionValue(MODEL_TYPE)
NaiveBayes.train(rdd, lambda, modelType)
}
}
// Recommendation
class ALSTest(sc: SparkContext) extends RecommendationTests(sc) {
override def runTest(rdd: RDD[Rating]): MatrixFactorizationModel = {
val numIterations: Int = intOptionValue(NUM_ITERATIONS)
val rank: Int = intOptionValue(RANK)
val regParam = doubleOptionValue(REG_PARAM)
val seed = intOptionValue(RANDOM_SEED) + 12
new ALS().setIterations(numIterations).setRank(rank).setSeed(seed).setLambda(regParam)
.setBlocks(rdd.partitions.size).run(rdd)
}
}
// Clustering
// TODO: refactor into mllib.perf.clustering like the other clustering tests
class KMeansTest(sc: SparkContext) extends ClusteringTests(sc) {
override def runTest(rdd: RDD[Vector]): KMeansModel = {
val numIterations: Int = intOptionValue(NUM_ITERATIONS)
val k: Int = intOptionValue(NUM_CENTERS)
KMeans.train(rdd, k, numIterations)
}
}
// Decision-tree
sealed trait TreeBasedModel
case class MLlibRFModel(model: RandomForestModel) extends TreeBasedModel
case class MLlibGBTModel(model: GradientBoostedTreesModel) extends TreeBasedModel
case class MLRFRegressionModel(model: RandomForestRegressionModel) extends TreeBasedModel
case class MLRFClassificationModel(model: RandomForestClassificationModel) extends TreeBasedModel
case class MLGBTRegressionModel(model: GBTRegressionModel) extends TreeBasedModel
case class MLGBTClassificationModel(model: GBTClassificationModel) extends TreeBasedModel
/**
* Parent class for DecisionTree-based tests which run on a large dataset.
*/
abstract class DecisionTreeTests(sc: SparkContext)
extends RegressionAndClassificationTests[TreeBasedModel](sc) {
val TEST_DATA_FRACTION =
("test-data-fraction", "fraction of data to hold out for testing (ignored if given training and test dataset)")
val LABEL_TYPE =
("label-type", "Type of label: 0 indicates regression, 2+ indicates " +
"classification with this many classes")
val FRAC_CATEGORICAL_FEATURES = ("frac-categorical-features",
"Fraction of features which are categorical")
val FRAC_BINARY_FEATURES =
("frac-binary-features", "Fraction of categorical features which are binary. " +
"Others have 20 categories.")
val TREE_DEPTH = ("tree-depth", "Depth of true decision tree model used to label examples.")
val MAX_BINS = ("max-bins", "Maximum number of bins for the decision tree learning algorithm.")
val NUM_TREES = ("num-trees", "Number of trees to train. If 1, run DecisionTree. If >1, run an ensemble method (RandomForest).")
val FEATURE_SUBSET_STRATEGY =
("feature-subset-strategy", "Strategy for feature subset sampling. Supported: auto, all, sqrt, log2, onethird.")
intOptions = intOptions ++ Seq(LABEL_TYPE, TREE_DEPTH, MAX_BINS, NUM_TREES)
doubleOptions = doubleOptions ++ Seq(TEST_DATA_FRACTION, FRAC_CATEGORICAL_FEATURES, FRAC_BINARY_FEATURES)
stringOptions = stringOptions ++ Seq(FEATURE_SUBSET_STRATEGY)
addOptionalOptionToParser("training-data", "path to training dataset (if not given, use random data)", "", classOf[String])
addOptionalOptionToParser("test-data", "path to test dataset (only used if training dataset given)" +
" (if not given, hold out part of training data for validation)", "", classOf[String])
var categoricalFeaturesInfo: Map[Int, Int] = Map.empty
protected var labelType = -1
def validate(model: TreeBasedModel, rdd: RDD[LabeledPoint]): Double = {
val numExamples = rdd.count()
val predictions: RDD[(Double, Double)] = model match {
case MLlibRFModel(rfModel) => rfModel.predict(rdd.map(_.features)).zip(rdd.map(_.label))
case MLlibGBTModel(gbtModel) => gbtModel.predict(rdd.map(_.features)).zip(rdd.map(_.label))
case MLRFRegressionModel(rfModel) => makePredictions(rfModel, rdd)
case MLRFClassificationModel(rfModel) => makePredictions(rfModel, rdd)
case MLGBTRegressionModel(gbtModel) => makePredictions(gbtModel, rdd)
case MLGBTClassificationModel(gbtModel) => makePredictions(gbtModel, rdd)
}
val labelType: Int = intOptionValue(LABEL_TYPE)
if (labelType == 0) {
calculateRMSE(predictions, numExamples)
} else {
calculateAccuracy(predictions, numExamples)
}
}
// TODO: generate DataFrame outside of `runTest` so it is not included in timing results
private def makePredictions(
model: PredictionModel[Vector, _], rdd: RDD[LabeledPoint]): RDD[(Double, Double)] = {
val labelType: Int = intOptionValue(LABEL_TYPE)
val dataFrame = DataGenerator.setMetadata(rdd, categoricalFeaturesInfo, labelType)
val results = model.transform(dataFrame)
results
.select(model.getPredictionCol, model.getLabelCol)
.map { case Row(prediction: Double, label: Double) => (prediction, label) }
}
}
class DecisionTreeTest(sc: SparkContext) extends DecisionTreeTests(sc) {
val supportedTreeTypes = Array("RandomForest", "GradientBoostedTrees",
"ml.RandomForest", "ml.GradientBoostedTrees")
val ENSEMBLE_TYPE = ("ensemble-type", "Type of ensemble algorithm: " + supportedTreeTypes.mkString(" "))
stringOptions = stringOptions ++ Seq(ENSEMBLE_TYPE)
val options = intOptions ++ stringOptions ++ booleanOptions ++ doubleOptions ++ longOptions
addOptionsToParser()
private def getTestDataFraction: Double = {
val testDataFraction: Double = doubleOptionValue(TEST_DATA_FRACTION)
assert(testDataFraction >= 0 && testDataFraction <= 1, s"Bad testDataFraction: $testDataFraction")
testDataFraction
}
override def createInputData(seed: Long) = {
val trainingDataPath: String = optionValue[String]("training-data")
val (rdds, categoricalFeaturesInfo_, numClasses) = if (trainingDataPath != "") {
println(s"LOADING FILE: $trainingDataPath")
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val testDataPath: String = optionValue[String]("test-data")
val testDataFraction: Double = getTestDataFraction
DataLoader.loadLibSVMFiles(sc, numPartitions, trainingDataPath, testDataPath,
testDataFraction, seed)
} else {
createSyntheticInputData(seed)
}
assert(rdds.length == 2)
rdd = rdds(0).cache()
testRdd = rdds(1)
categoricalFeaturesInfo = categoricalFeaturesInfo_
this.labelType = numClasses
// Materialize rdd
println("Num Examples: " + rdd.count())
}
/**
* Create synthetic training and test datasets.
* @return (trainTestDatasets, categoricalFeaturesInfo, numClasses) where
* trainTestDatasets = Array(trainingData, testData),
* categoricalFeaturesInfo is a map of categorical feature arities, and
* numClasses = number of classes label can take.
*/
private def createSyntheticInputData(
seed: Long): (Array[RDD[LabeledPoint]], Map[Int, Int], Int) = {
// Generic test options
val numPartitions: Int = intOptionValue(NUM_PARTITIONS)
val testDataFraction: Double = getTestDataFraction
// Data dimensions and type
val numExamples: Long = longOptionValue(NUM_EXAMPLES)
val numFeatures: Int = intOptionValue(NUM_FEATURES)
val labelType: Int = intOptionValue(LABEL_TYPE)
val fracCategoricalFeatures: Double = doubleOptionValue(FRAC_CATEGORICAL_FEATURES)
val fracBinaryFeatures: Double = doubleOptionValue(FRAC_BINARY_FEATURES)
// Model specification
val treeDepth: Int = intOptionValue(TREE_DEPTH)
val (rdd_, categoricalFeaturesInfo_) =
DataGenerator.generateDecisionTreeLabeledPoints(sc, math.ceil(numExamples * 1.25).toLong,
numFeatures, numPartitions, labelType,
fracCategoricalFeatures, fracBinaryFeatures, treeDepth, seed)
val splits = rdd_.randomSplit(Array(1.0 - testDataFraction, testDataFraction), seed)
(splits, categoricalFeaturesInfo_, labelType)
}
// TODO: generate DataFrame outside of `runTest` so it is not included in timing results
override def runTest(rdd: RDD[LabeledPoint]): TreeBasedModel = {
val treeDepth: Int = intOptionValue(TREE_DEPTH)
val maxBins: Int = intOptionValue(MAX_BINS)
val numTrees: Int = intOptionValue(NUM_TREES)
val featureSubsetStrategy: String = stringOptionValue(FEATURE_SUBSET_STRATEGY)
val ensembleType: String = stringOptionValue(ENSEMBLE_TYPE)
if (!supportedTreeTypes.contains(ensembleType)) {
throw new IllegalArgumentException(
s"DecisionTreeTest given unknown ensembleType param: $ensembleType." +
" Supported values: " + supportedTreeTypes.mkString(" "))
}
if (labelType == 0) {
// Regression
ensembleType match {
case "RandomForest" =>
MLlibRFModel(RandomForest.trainRegressor(rdd, categoricalFeaturesInfo, numTrees,
featureSubsetStrategy, "variance", treeDepth, maxBins, this.getRandomSeed))
case "ml.RandomForest" =>
val labelType: Int = intOptionValue(LABEL_TYPE)
val dataset = DataGenerator.setMetadata(rdd, categoricalFeaturesInfo, labelType)
val model = new RandomForestRegressor()
.setImpurity("variance")
.setMaxDepth(treeDepth)
.setMaxBins(maxBins)
.setNumTrees(numTrees)
.setFeatureSubsetStrategy(featureSubsetStrategy)
.setSeed(this.getRandomSeed)
.fit(dataset)
MLRFRegressionModel(model)
case "GradientBoostedTrees" =>
val treeStrategy = new Strategy(Algo.Regression, Variance, treeDepth,
labelType, maxBins, QuantileStrategy.Sort, categoricalFeaturesInfo)
val boostingStrategy = BoostingStrategy(treeStrategy, SquaredError, numTrees,
learningRate = 0.1)
MLlibGBTModel(GradientBoostedTrees.train(rdd, boostingStrategy))
case "ml.GradientBoostedTrees" =>
val labelType: Int = intOptionValue(LABEL_TYPE)
val dataset = DataGenerator.setMetadata(rdd, categoricalFeaturesInfo, labelType)
val model = new GBTRegressor()
.setLossType("squared")
.setMaxBins(maxBins)
.setMaxDepth(treeDepth)
.setMaxIter(numTrees)
.setStepSize(0.1)
.setSeed(this.getRandomSeed)
.fit(dataset)
MLGBTRegressionModel(model)
}
} else if (labelType >= 2) {
// Classification
ensembleType match {
case "RandomForest" =>
MLlibRFModel(RandomForest.trainClassifier(rdd, labelType, categoricalFeaturesInfo, numTrees,
featureSubsetStrategy, "gini", treeDepth, maxBins, this.getRandomSeed))
case "ml.RandomForest" =>
val labelType: Int = intOptionValue(LABEL_TYPE)
val dataset = DataGenerator.setMetadata(rdd, categoricalFeaturesInfo, labelType)
val model = new RandomForestClassifier()
.setImpurity("gini")
.setMaxDepth(treeDepth)
.setMaxBins(maxBins)
.setNumTrees(numTrees)
.setFeatureSubsetStrategy(featureSubsetStrategy)
.setSeed(this.getRandomSeed)
.fit(dataset)
MLRFClassificationModel(model)
case "GradientBoostedTrees" =>
val treeStrategy = new Strategy(Algo.Classification, Variance, treeDepth,
labelType, maxBins, QuantileStrategy.Sort, categoricalFeaturesInfo)
val boostingStrategy = BoostingStrategy(treeStrategy, LogLoss, numTrees,
learningRate = 0.1)
MLlibGBTModel(GradientBoostedTrees.train(rdd, boostingStrategy))
case "ml.GradientBoostedTrees" =>
val labelType: Int = intOptionValue(LABEL_TYPE)
val dataset = DataGenerator.setMetadata(rdd, categoricalFeaturesInfo, labelType)
val model = new GBTClassifier()
.setLossType("logistic")
.setMaxBins(maxBins)
.setMaxDepth(treeDepth)
.setMaxIter(numTrees)
.setStepSize(0.1)
.setSeed(this.getRandomSeed)
.fit(dataset)
MLGBTClassificationModel(model)
}
} else {
throw new IllegalArgumentException(s"Bad label-type parameter " +
s"given to DecisionTreeTest: $labelType")
}
}
}
| Altiscale/spark-perf | mllib-tests/v1p5/src/main/scala/mllib/perf/MLAlgorithmTests.scala | Scala | apache-2.0 | 32,321 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.stats.assertion
object AssertionPathParts {
  implicit def string2PathParts(path: String): AssertionPathParts = AssertionPathParts(List(path))
}
case class AssertionPathParts(parts: List[String]) {
def /(part: String) = copy(parts = parts :+ part)
}
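
// A small usage sketch: the implicit conversion lifts the first string segment into
// AssertionPathParts and each `/` appends one more part. The segment names below are
// only illustrative.
object AssertionPathPartsExample {
  import AssertionPathParts._

  val path: AssertionPathParts = "details" / "requests" / "my-request"
  // path.parts == List("details", "requests", "my-request")
}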
| wiacekm/gatling | gatling-commons/src/main/scala/io/gatling/commons/stats/assertion/AssertionPathParts.scala | Scala | apache-2.0 | 878 |
package mr.merc.map.view
import org.scalatest.funsuite.AnyFunSuite
import mr.merc.unit.view.SoldierView
import org.mockito.Mockito._
import org.mockito.MockitoSugar
import org.scalatest.BeforeAndAfter
import scalafx.scene.canvas.GraphicsContext
import scalafx.geometry.Rectangle2D
class SoldierDrawerTest extends AnyFunSuite with MockitoSugar with BeforeAndAfter {
val soldier1: SoldierView = mock[SoldierView]
val soldier2: SoldierView = mock[SoldierView]
val soldier3: SoldierView = mock[SoldierView]
val gc: GraphicsContext = mock[GraphicsContext]
def addSoldiers(sd: SoldiersDrawer): Unit = {
sd.addSoldier(soldier1)
sd.addSoldier(soldier2)
sd.addSoldier(soldier3)
}
before {
when(soldier1.viewRect).thenReturn(new Rectangle2D(0, 0, 10, 10))
when(soldier2.viewRect).thenReturn(new Rectangle2D(0, 0, 10, 10))
when(soldier3.viewRect).thenReturn(new Rectangle2D(0, 0, 10, 10))
}
test("simple updating without movements") {
val soldiersDrawer = new SoldiersDrawer
addSoldiers(soldiersDrawer)
soldiersDrawer.update(50)
verify(soldier1, times(1)).updateTime(50)
verify(soldier2, times(1)).updateTime(50)
verify(soldier3, times(1)).updateTime(50)
}
after {
reset(soldier1, soldier2, soldier3, gc)
}
}
| RenualdMarch/merc | src/test/scala/mr/merc/map/view/SoldierDrawerTest.scala | Scala | gpl-3.0 | 1,282 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc.fsc
import scala.tools.nsc.CompilerCommand
import scala.reflect.io.Directory
import scala.util.Properties.isWin
/** A compiler command for the offline compiler.
*
* @author Martin Odersky and Lex Spoon
*/
class OfflineCompilerCommand(arguments: List[String], settings: FscSettings) extends CompilerCommand(arguments, settings) {
import settings.currentDir
def extraFscArgs = List(currentDir.name, currentDir.value)
locally {
// if -current-dir is unset, we're on the client and need to obtain it.
if (currentDir.isDefault) {
// Prefer env variable PWD to system property user.dir because the former
// deals better with paths not rooted at / (filesystem mounts.)
// ... except on windows, because under cygwin PWD involves "/cygdrive"
// instead of whatever it's supposed to be doing.
val baseDirectory = {
val pwd = System.getenv("PWD")
if (pwd == null || isWin) Directory.Current getOrElse Directory("/")
else Directory(pwd)
}
currentDir.value = baseDirectory.path
}
else {
// Otherwise we're on the server and will use it to absolutize the paths.
settings.absolutize()
}
}
override def cmdName = "fsc"
override def usageMsg = (
createUsageMsg("where possible fsc", explain = false)(x => x.isStandard && settings.isFscSpecific(x.name)) +
"\\n\\nStandard scalac options also available:" +
optionsMessage(x => x.isStandard && !settings.isFscSpecific(x.name))
)
}
| scala/scala | src/compiler/scala/tools/nsc/fsc/OfflineCompilerCommand.scala | Scala | apache-2.0 | 1,806 |
package BootstrapResampling
class Bootstrapper(data: SingleSample, statistic: SingleSample => N,
B_in: Int, seed: Int) {
def this(data: SingleSample, statistic: SingleSample => N, B_in: Int) =
this(data, statistic, B_in, default_seed)
def this(data: SingleSample, statistic: SingleSample => N) =
this(data, statistic, default_B)
  // The number of bootstrap samples B must be positive; otherwise fall back to the default
val B = if (B_in < 1) default_B
else B_in
val n = data.length: Int
val r = new RandState(seed)
protected def bootSample(data: SingleSample, r: RandState): Samples = {
def singleSample(data: SingleSample, r: RandState): SingleSample = {
val single_sample = (1 to this.n).map(b => {
val i = r.nextInt(this.n)
data(i)
})
toSample(single_sample)
}
val samples = (1 to this.B).map(b => {
singleSample(data, r)
})
toSample(samples)
}
protected def calcStatistics(samples: Samples,
statistic: SingleSample => N): SingleSample = {
// Use parallelism here for speedier computation on complex statistics
val statistics = samples.par.map(s => {
statistic(s)
})
toSample(statistics)
}
def CI(method: String): Either[String, ConfInt] = method match {
case "bca" => Right(ConfidenceInterval.bca( this.data
, this.t_tilde
, this.t_star
, this.statistic
, this.n
, this.B
))
case "percentile" => Right(ConfidenceInterval.percentile( this.t_tilde
, this.B
))
case _ => Left("Invalid bootstrapping method supplied.")
}
def CI(): Either[String, ConfInt] = CI("bca")
protected def bootBias(t_tilde: SingleSample, t_star: N, B: Int): N = {
t_tilde.foldLeft(0.0)((a, b) => a + b - t_star) / B
}
val samples = bootSample(data, r)
val t_tilde = calcStatistics(samples, statistic)
val t_star = statistic(data)
lazy val bias = bootBias(t_tilde, t_star, B)
}
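
// A usage sketch. It assumes, as the arithmetic in this file suggests, that the package
// object (not shown here) defines SingleSample as a sequence of Double, N as Double and
// ConfInt as the interval result type. The mean statistic and B = 2000 are only illustrative.
object BootstrapperExample {
  def meanCI(data: SingleSample): Either[String, ConfInt] = {
    val mean: SingleSample => N = xs => xs.sum / xs.length
    val boot = new Bootstrapper(data, mean, 2000)
    boot.CI("bca") // or boot.CI("percentile") for the simpler percentile interval
  }
}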
| rdtaylor/BootstrapResampling | src/main/scala/BootstrapResampling/BootstrapResampling.scala | Scala | mit | 2,376 |
/* Scala.js compiler
* Copyright 2013 LAMP/EPFL
* @author Sébastien Doeraene
*/
package org.scalajs.core.compiler
import scala.tools.nsc._
import scala.tools.nsc.plugins.{
Plugin => NscPlugin, PluginComponent => NscPluginComponent
}
import scala.collection.{ mutable, immutable }
import java.net.{ URI, URISyntaxException }
import org.scalajs.core.ir.Trees
/** Main entry point for the Scala.js compiler plugin
*
* @author Sébastien Doeraene
*/
class ScalaJSPlugin(val global: Global) extends NscPlugin {
import global._
val name = "scalajs"
val description = "Compile to JavaScript"
val components = {
if (global.forScaladoc)
List[NscPluginComponent](PrepInteropComponent)
else
List[NscPluginComponent](PrepInteropComponent, GenCodeComponent)
}
/** Called when the JS ASTs are generated. Override for testing */
def generatedJSAST(clDefs: List[Trees.Tree]): Unit = {}
/** Addons for JavaScript platform */
object jsAddons extends {
val global: ScalaJSPlugin.this.global.type = ScalaJSPlugin.this.global
} with JSGlobalAddons with Compat210Component
object scalaJSOpts extends ScalaJSOptions {
import ScalaJSOptions.URIMap
var fixClassOf: Boolean = false
lazy val sourceURIMaps: List[URIMap] = {
if (_sourceURIMaps.nonEmpty)
_sourceURIMaps.reverse
else
relSourceMap.toList.map(URIMap(_, absSourceMap))
}
var _sourceURIMaps: List[URIMap] = Nil
var relSourceMap: Option[URI] = None
var absSourceMap: Option[URI] = None
}
object PrepInteropComponent extends {
val global: ScalaJSPlugin.this.global.type = ScalaJSPlugin.this.global
val jsAddons: ScalaJSPlugin.this.jsAddons.type = ScalaJSPlugin.this.jsAddons
val scalaJSOpts = ScalaJSPlugin.this.scalaJSOpts
override val runsAfter = List("typer")
override val runsBefore = List("pickle")
} with PrepJSInterop
object GenCodeComponent extends {
val global: ScalaJSPlugin.this.global.type = ScalaJSPlugin.this.global
val jsAddons: ScalaJSPlugin.this.jsAddons.type = ScalaJSPlugin.this.jsAddons
val scalaJSOpts = ScalaJSPlugin.this.scalaJSOpts
override val runsAfter = List("mixin")
override val runsBefore = List("delambdafy", "cleanup", "terminal")
} with GenJSCode {
def generatedJSAST(clDefs: List[Trees.Tree]) =
ScalaJSPlugin.this.generatedJSAST(clDefs)
}
override def processOptions(options: List[String],
error: String => Unit): Unit = {
import ScalaJSOptions.URIMap
import scalaJSOpts._
for (option <- options) {
if (option == "fixClassOf") {
fixClassOf = true
} else if (option.startsWith("mapSourceURI:")) {
val uris = option.stripPrefix("mapSourceURI:").split("->")
if (uris.length != 1 && uris.length != 2) {
          error("mapSourceURI needs one or two URIs as argument.")
} else {
try {
val from = new URI(uris.head)
val to = uris.lift(1).map(str => new URI(str))
_sourceURIMaps ::= URIMap(from, to)
} catch {
case e: URISyntaxException =>
error(s"${e.getInput} is not a valid URI")
}
}
// The following options are deprecated (how do we show this to the user?)
} else if (option.startsWith("relSourceMap:")) {
val uriStr = option.stripPrefix("relSourceMap:")
try { relSourceMap = Some(new URI(uriStr)) }
catch {
case e: URISyntaxException => error(s"$uriStr is not a valid URI")
}
} else if (option.startsWith("absSourceMap:")) {
val uriStr = option.stripPrefix("absSourceMap:")
try { absSourceMap = Some(new URI(uriStr)) }
catch {
case e: URISyntaxException => error(s"$uriStr is not a valid URI")
}
} else {
error("Option not understood: " + option)
}
}
// Verify constraints
if (_sourceURIMaps.nonEmpty && relSourceMap.isDefined)
error("You may not use mapSourceURI and relSourceMap together. " +
"Use another mapSourceURI option without second URI.")
else if (_sourceURIMaps.nonEmpty && absSourceMap.isDefined)
error("You may not use mapSourceURI and absSourceMap together. " +
"Use another mapSourceURI option.")
else if (absSourceMap.isDefined && relSourceMap.isEmpty)
error("absSourceMap requires the use of relSourceMap")
}
override val optionsHelp: Option[String] = Some(s"""
| -P:$name:mapSourceURI:FROM_URI[->TO_URI]
| change the location the source URIs in the emitted IR point to
| - strips away the prefix FROM_URI (if it matches)
| - optionally prefixes the TO_URI, where stripping has been performed
      |     - any number of occurrences are allowed. Processing is done on a first match basis.
| -P:$name:fixClassOf repair calls to Predef.classOf that reach ScalaJS
| WARNING: This is a tremendous hack! Expect ugly errors if you use this option.
|Deprecated options
| -P:$name:relSourceMap:<URI> relativize emitted source maps with <URI>
| -P:$name:absSourceMap:<URI> absolutize emitted source maps with <URI>
| This option requires the use of relSourceMap
""".stripMargin)
}
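
// A hypothetical sketch of the scalac arguments that the options documented above map to;
// the plugin jar path and the GitHub URL are placeholders only.
object ScalaJSPluginOptionsSketch {
  val exampleScalacArgs: Seq[String] = Seq(
    "-Xplugin:scalajs-compiler.jar",
    "-P:scalajs:mapSourceURI:file:/home/me/project/->https://raw.githubusercontent.com/example/project/v0.1.0/",
    "-P:scalajs:fixClassOf"
  )
}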
| matthughes/scala-js | compiler/src/main/scala/org/scalajs/core/compiler/ScalaJSPlugin.scala | Scala | bsd-3-clause | 5,302 |
/*
* Copyright © 2011-2012 Sattvik Software & Technology Resources, Ltd. Co.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.sattvik.baitha.views
import android.widget.RadioGroup
/** Adds a convenience method for enabling or disabling all of the children of a
* radio group in one go.
*
* @author Daniel Solano Gómez */
class EnhancedRadioGroup(radioGroup: RadioGroup) {
def setChildrenEnabled(enabled: Boolean) {
for (i <- 0 until radioGroup.getChildCount) {
radioGroup.getChildAt(i).setEnabled(enabled)
}
}
}
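
// Hypothetical usage sketch. Baitha typically surfaces wrappers like this one through
// implicit conversions; the explicit wrapping below keeps the sketch self-contained.
object EnhancedRadioGroupExample {
  /** Greys out every button in the group, e.g. while a background task is running. */
  def disableChoices(group: RadioGroup): Unit =
    new EnhancedRadioGroup(group).setChildrenEnabled(false)
}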
| sattvik/baitha | src/main/scala/com/sattvik/baitha/views/EnhancedRadioGroup.scala | Scala | apache-2.0 | 1,085 |
package sttp.client3
import sttp.capabilities.Effect
import sttp.client3.monad.{FunctionK, MapEffect}
import sttp.monad.{EitherMonad, MonadError}
import scala.util.control.NonFatal
/** A synchronous backend that catches exceptions thrown by the delegate [[SttpBackend]] and
 * surfaces them as `Either[Throwable, *]` failures instead of letting them propagate.
 *
 * @param delegate
 *   A synchronous `SttpBackend` to which this backend forwards all requests
 * @tparam P
 *   The capabilities supported by the delegate backend, which this wrapper leaves unchanged
*/
class EitherBackend[P](delegate: SttpBackend[Identity, P]) extends SttpBackend[Either[Throwable, *], P] {
override def send[T, R >: P with Effect[Either[Throwable, *]]](
request: Request[T, R]
): Either[Throwable, Response[T]] =
doTry(
delegate.send(
MapEffect[Either[Throwable, *], Identity, Identity, T, P](
request: Request[T, P with Effect[Either[Throwable, *]]],
eitherToId,
idToEither,
responseMonad,
delegate.responseMonad
)
)
)
override def close(): Either[Throwable, Unit] = doTry(delegate.close())
private def doTry[T](t: => T): Either[Throwable, T] = {
try Right(t)
catch {
case NonFatal(e) => Left(e)
}
}
override def responseMonad: MonadError[Either[Throwable, *]] = EitherMonad
private val eitherToId: FunctionK[Either[Throwable, *], Identity] =
new FunctionK[Either[Throwable, *], Identity] {
override def apply[A](fa: Either[Throwable, A]): Identity[A] =
fa match {
case Left(e) => throw e
case Right(v) => v
}
}
private val idToEither: FunctionK[Identity, Either[Throwable, *]] =
new FunctionK[Identity, Either[Throwable, *]] {
override def apply[A](fa: Identity[A]): Either[Throwable, A] = Right(fa)
}
}
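
// A minimal usage sketch. It assumes the synchronous HttpURLConnectionBackend that ships
// with sttp's JVM core module; the URL is a placeholder. Failures come back as Left values
// instead of being thrown.
object EitherBackendExample extends App {
  val backend = new EitherBackend(HttpURLConnectionBackend())

  basicRequest.get(uri"https://example.org").send(backend) match {
    case Right(response) => println(s"status: ${response.code}")
    case Left(error)     => println(s"request failed: $error")
  }

  backend.close()
}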
| softwaremill/sttp | core/src/main/scala/sttp/client3/EitherBackend.scala | Scala | apache-2.0 | 1,739 |
package com.twitter.finagle.loadbalancer.roundrobin
import com.twitter.finagle.stats.{NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.{Address, NoBrokersAvailableException}
import com.twitter.finagle.loadbalancer.EndpointFactory
import com.twitter.util.{Activity, Var}
trait RoundRobinSuite {
// number of servers
val N: Int = 100
// number of reqs
val R: Int = 100000
// tolerated variance
val variance: Double = 0.0001 * R
trait RRServiceFactory extends EndpointFactory[Unit, Int] {
def remake() = {}
def address = Address.Failed(new Exception)
def meanLoad: Double
}
protected val noBrokers: NoBrokersAvailableException = new NoBrokersAvailableException
def newBal(
fs: Var[Vector[RRServiceFactory]],
sr: StatsReceiver = NullStatsReceiver
): RoundRobinBalancer[Unit, Int] = new RoundRobinBalancer(
Activity(fs.map(Activity.Ok(_))),
statsReceiver = sr,
emptyException = noBrokers,
maxEffort = 1
)
def assertEven(fs: Vector[RRServiceFactory]) {
val ml = fs.head.meanLoad
for (f <- fs) {
assert(
math.abs(f.meanLoad - ml) < variance,
"ml=%f; f.ml=%f; ε=%f".format(ml, f.meanLoad, variance)
)
}
}
}
| mkhq/finagle | finagle-core/src/test/scala/com/twitter/finagle/loadbalancer/roundrobin/RoundRobinSuite.scala | Scala | apache-2.0 | 1,222 |
package sky.dispatcher.example.CallingThreadDispatcher
import akka.actor.{Props, ActorSystem}
import akka.routing.RoundRobinRouter
import com.typesafe.config.ConfigFactory
import sky.MsgEchoActor
/**
* Created by szekai on 22/08/2014.
*/
object Example {
def main(args: Array[String]): Unit = {
    val _system = ActorSystem.create("callingThread-dispatcher", ConfigFactory.load().getConfig("MyDispatcherExample"))
val actor = _system.actorOf(Props[MsgEchoActor].withDispatcher("CallingThreadDispatcher").withRouter(
RoundRobinRouter(5)))
0 to 25 foreach {
i => actor ! i
}
Thread.sleep(3000)
_system.shutdown()
}
}
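
// The example above assumes that the "MyDispatcherExample" configuration section defines a
// dispatcher named "CallingThreadDispatcher". A hypothetical way to provide it inline
// (CallingThreadDispatcherConfigurator ships with akka-testkit):
object CallingThreadDispatcherConfigSketch {
  private val config = ConfigFactory.parseString(
    """MyDispatcherExample {
      |  CallingThreadDispatcher {
      |    type = akka.testkit.CallingThreadDispatcherConfigurator
      |  }
      |}""".stripMargin)

  def newSystem(): ActorSystem =
    ActorSystem.create("callingThread-dispatcher", config.getConfig("MyDispatcherExample"))
}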
| szekai/akka-example | AkkaDispatcherExample/src/main/scala/sky/dispatcher/example/CallingThreadDispatcher/Example.scala | Scala | apache-2.0 | 657 |
/*
* Copyright (c) 2017-2022 Lymia Alusyia <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package moe.lymia.princess.editor.scripting
import moe.lymia.lua._
import moe.lymia.princess.core.gamedata.{I18N, LuaLibrary}
final case class I18NLib(i18n: I18N) extends LuaLibrary {
override def open(L: LuaState, table: LuaTable): Unit = {
L.register(table, "i18n", ScalaLuaClosure { Ls =>
val L = new LuaState(Ls)
val key = L.value(1).as[String]
val args = L.valueRange(2).map(_.as[Any])
L.push(i18n.userLua(key, args : _*))
1
})
}
} | Lymia/PrincessEdit | modules/princess-edit/src/main/scala/moe/lymia/princess/editor/scripting/I18NLib.scala | Scala | mit | 1,626 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.{BufferedWriter, File, FileWriter}
import scala.util.Properties
import org.apache.commons.lang3.{JavaVersion, SystemUtils}
import org.apache.hadoop.fs.Path
import org.scalatest.{BeforeAndAfterEach, Matchers}
import org.scalatest.Assertions._
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.UI.UI_ENABLED
import org.apache.spark.sql.{QueryTest, Row, SparkSession}
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.hive.test.{HiveTestJars, TestHiveContext}
import org.apache.spark.sql.internal.SQLConf.SHUFFLE_PARTITIONS
import org.apache.spark.sql.internal.StaticSQLConf.WAREHOUSE_PATH
import org.apache.spark.sql.types.{DecimalType, StructType}
import org.apache.spark.tags.{ExtendedHiveTest, SlowHiveTest}
import org.apache.spark.util.{ResetSystemProperties, Utils}
/**
* This suite tests spark-submit with applications using HiveContext.
*/
@SlowHiveTest
@ExtendedHiveTest
class HiveSparkSubmitSuite
extends SparkSubmitTestUtils
with Matchers
with BeforeAndAfterEach
with ResetSystemProperties {
override protected val enableAutoThreadAudit = false
override def beforeEach(): Unit = {
super.beforeEach()
}
test("temporary Hive UDF: define a UDF and use it") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val jar1 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
val jar2 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassB"))
val jarsString = Seq(jar1, jar2).map(j => j.toString).mkString(",")
val args = Seq(
"--class", TemporaryHiveUDFTest.getClass.getName.stripSuffix("$"),
"--name", "TemporaryHiveUDFTest",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--driver-java-options", "-Dderby.system.durability=test",
"--jars", jarsString,
unusedJar.toString, "SparkSubmitClassA", "SparkSubmitClassB")
runSparkSubmit(args)
}
test("permanent Hive UDF: define a UDF and use it") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val jar1 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
val jar2 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassB"))
val jarsString = Seq(jar1, jar2).map(j => j.toString).mkString(",")
val args = Seq(
"--class", PermanentHiveUDFTest1.getClass.getName.stripSuffix("$"),
"--name", "PermanentHiveUDFTest1",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--driver-java-options", "-Dderby.system.durability=test",
"--jars", jarsString,
unusedJar.toString, "SparkSubmitClassA", "SparkSubmitClassB")
runSparkSubmit(args)
}
test("permanent Hive UDF: use a already defined permanent function") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val jar1 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
val jar2 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassB"))
val jarsString = Seq(jar1, jar2).map(j => j.toString).mkString(",")
val args = Seq(
"--class", PermanentHiveUDFTest2.getClass.getName.stripSuffix("$"),
"--name", "PermanentHiveUDFTest2",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--driver-java-options", "-Dderby.system.durability=test",
"--jars", jarsString,
unusedJar.toString, "SparkSubmitClassA", "SparkSubmitClassB")
runSparkSubmit(args)
}
test("SPARK-8368: includes jars passed in through --jars") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val jar1 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassA"))
val jar2 = TestUtils.createJarWithClasses(Seq("SparkSubmitClassB"))
val jar3 = HiveTestJars.getHiveContribJar().getCanonicalPath
val jar4 = HiveTestJars.getHiveHcatalogCoreJar().getCanonicalPath
val jarsString = Seq(jar1, jar2, jar3, jar4).map(j => j.toString).mkString(",")
val args = Seq(
"--class", SparkSubmitClassLoaderTest.getClass.getName.stripSuffix("$"),
"--name", "SparkSubmitClassLoaderTest",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--driver-java-options", "-Dderby.system.durability=test",
"--jars", jarsString,
unusedJar.toString, "SparkSubmitClassA", "SparkSubmitClassB")
runSparkSubmit(args)
}
test("SPARK-8020: set sql conf in spark conf") {
assume(!SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9))
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val args = Seq(
"--class", SparkSQLConfTest.getClass.getName.stripSuffix("$"),
"--name", "SparkSQLConfTest",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--conf", "spark.sql.hive.metastore.version=0.12",
"--conf", "spark.sql.hive.metastore.jars=maven",
"--driver-java-options", "-Dderby.system.durability=test",
unusedJar.toString)
runSparkSubmit(args)
}
test("SPARK-8489: MissingRequirementError during reflection") {
// This test uses a pre-built jar to test SPARK-8489. In a nutshell, this test creates
// a HiveContext and uses it to create a data frame from an RDD using reflection.
// Before the fix in SPARK-8470, this results in a MissingRequirementError because
// the HiveContext code mistakenly overrides the class loader that contains user classes.
// For more detail, see sql/hive/src/test/resources/regression-test-SPARK-8489/*scala.
// TODO: revisit for Scala 2.13 support
val version = Properties.versionNumberString match {
case v if v.startsWith("2.12") => v.substring(0, 4)
case x => throw new Exception(s"Unsupported Scala Version: $x")
}
val jarDir = getTestResourcePath("regression-test-SPARK-8489")
val testJar = s"$jarDir/test-$version.jar"
val args = Seq(
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--driver-java-options", "-Dderby.system.durability=test",
"--class", "Main",
testJar)
runSparkSubmit(args)
}
test("SPARK-9757 Persist Parquet relation with decimal column") {
assume(!SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9))
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val args = Seq(
"--class", SPARK_9757.getClass.getName.stripSuffix("$"),
"--name", "SparkSQLConfTest",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--driver-java-options", "-Dderby.system.durability=test",
unusedJar.toString)
runSparkSubmit(args)
}
test("SPARK-11009 fix wrong result of Window function in cluster mode") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val args = Seq(
"--class", SPARK_11009.getClass.getName.stripSuffix("$"),
"--name", "SparkSQLConfTest",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--driver-java-options", "-Dderby.system.durability=test",
unusedJar.toString)
runSparkSubmit(args)
}
test("SPARK-14244 fix window partition size attribute binding failure") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val args = Seq(
"--class", SPARK_14244.getClass.getName.stripSuffix("$"),
"--name", "SparkSQLConfTest",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--driver-java-options", "-Dderby.system.durability=test",
unusedJar.toString)
runSparkSubmit(args)
}
test("set spark.sql.warehouse.dir") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val args = Seq(
"--class", SetWarehouseLocationTest.getClass.getName.stripSuffix("$"),
"--name", "SetSparkWarehouseLocationTest",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--driver-java-options", "-Dderby.system.durability=test",
unusedJar.toString)
runSparkSubmit(args)
}
test("set hive.metastore.warehouse.dir") {
// In this test, we set hive.metastore.warehouse.dir in hive-site.xml but
// not set spark.sql.warehouse.dir. So, the warehouse dir should be
// the value of hive.metastore.warehouse.dir. Also, the value of
// spark.sql.warehouse.dir should be set to the value of hive.metastore.warehouse.dir.
val hiveWarehouseLocation = Utils.createTempDir()
hiveWarehouseLocation.delete()
val hiveSiteXmlContent =
s"""
|<configuration>
| <property>
| <name>hive.metastore.warehouse.dir</name>
| <value>$hiveWarehouseLocation</value>
| </property>
|</configuration>
""".stripMargin
// Write a hive-site.xml containing a setting of hive.metastore.warehouse.dir.
val hiveSiteDir = Utils.createTempDir()
val file = new File(hiveSiteDir.getCanonicalPath, "hive-site.xml")
val bw = new BufferedWriter(new FileWriter(file))
bw.write(hiveSiteXmlContent)
bw.close()
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val args = Seq(
"--class", SetWarehouseLocationTest.getClass.getName.stripSuffix("$"),
"--name", "SetHiveWarehouseLocationTest",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--conf", s"spark.sql.test.expectedWarehouseDir=$hiveWarehouseLocation",
"--conf", s"spark.driver.extraClassPath=${hiveSiteDir.getCanonicalPath}",
"--driver-java-options", "-Dderby.system.durability=test",
unusedJar.toString)
runSparkSubmit(args)
}
test("SPARK-16901: set javax.jdo.option.ConnectionURL") {
assume(!SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9))
// In this test, we set javax.jdo.option.ConnectionURL and set metastore version to
// 0.13. This test will make sure that javax.jdo.option.ConnectionURL will not be
// overridden by hive's default settings when we create a HiveConf object inside
// HiveClientImpl. Please see SPARK-16901 for more details.
val metastoreLocation = Utils.createTempDir()
metastoreLocation.delete()
val metastoreURL =
s"jdbc:derby:memory:;databaseName=${metastoreLocation.getAbsolutePath};create=true"
val hiveSiteXmlContent =
s"""
|<configuration>
| <property>
| <name>javax.jdo.option.ConnectionURL</name>
| <value>$metastoreURL</value>
| </property>
|</configuration>
""".stripMargin
    // Write a hive-site.xml containing a setting of javax.jdo.option.ConnectionURL.
val hiveSiteDir = Utils.createTempDir()
val file = new File(hiveSiteDir.getCanonicalPath, "hive-site.xml")
val bw = new BufferedWriter(new FileWriter(file))
bw.write(hiveSiteXmlContent)
bw.close()
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val args = Seq(
"--class", SetMetastoreURLTest.getClass.getName.stripSuffix("$"),
"--name", "SetMetastoreURLTest",
"--master", "local[1]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--conf", s"spark.sql.test.expectedMetastoreURL=$metastoreURL",
"--conf", s"spark.driver.extraClassPath=${hiveSiteDir.getCanonicalPath}",
"--driver-java-options", "-Dderby.system.durability=test",
unusedJar.toString)
runSparkSubmit(args)
}
test("SPARK-18360: default table path of tables in default database should depend on the " +
"location of default database") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val args = Seq(
"--class", SPARK_18360.getClass.getName.stripSuffix("$"),
"--name", "SPARK-18360",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--driver-java-options", "-Dderby.system.durability=test",
unusedJar.toString)
runSparkSubmit(args)
}
test("SPARK-18989: DESC TABLE should not fail with format class not found") {
val unusedJar = TestUtils.createJarWithClasses(Seq.empty)
val argsForCreateTable = Seq(
"--class", SPARK_18989_CREATE_TABLE.getClass.getName.stripSuffix("$"),
"--name", "SPARK-18947",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
"--jars", HiveTestJars.getHiveContribJar().getCanonicalPath,
unusedJar.toString)
runSparkSubmit(argsForCreateTable)
val argsForShowTables = Seq(
"--class", SPARK_18989_DESC_TABLE.getClass.getName.stripSuffix("$"),
"--name", "SPARK-18947",
"--master", "local-cluster[2,1,1024]",
"--conf", "spark.ui.enabled=false",
"--conf", "spark.master.rest.enabled=false",
unusedJar.toString)
runSparkSubmit(argsForShowTables)
}
}
object SetMetastoreURLTest extends Logging {
def main(args: Array[String]): Unit = {
TestUtils.configTestLog4j("INFO")
val sparkConf = new SparkConf(loadDefaults = true)
val builder = SparkSession.builder()
.config(sparkConf)
.config(UI_ENABLED.key, "false")
.config(HiveUtils.HIVE_METASTORE_VERSION.key, "0.13.1")
      // The issue described in SPARK-16901 only appears when
// spark.sql.hive.metastore.jars is not set to builtin.
.config(HiveUtils.HIVE_METASTORE_JARS.key, "maven")
.enableHiveSupport()
val spark = builder.getOrCreate()
val expectedMetastoreURL =
spark.conf.get("spark.sql.test.expectedMetastoreURL")
logInfo(s"spark.sql.test.expectedMetastoreURL is $expectedMetastoreURL")
if (expectedMetastoreURL == null) {
throw new Exception(
s"spark.sql.test.expectedMetastoreURL should be set.")
}
// HiveExternalCatalog is used when Hive support is enabled.
val actualMetastoreURL =
spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client
.getConf("javax.jdo.option.ConnectionURL", "this_is_a_wrong_URL")
logInfo(s"javax.jdo.option.ConnectionURL is $actualMetastoreURL")
if (actualMetastoreURL != expectedMetastoreURL) {
throw new Exception(
s"Expected value of javax.jdo.option.ConnectionURL is $expectedMetastoreURL. But, " +
s"the actual value is $actualMetastoreURL")
}
}
}
object SetWarehouseLocationTest extends Logging {
def main(args: Array[String]): Unit = {
TestUtils.configTestLog4j("INFO")
val sparkConf = new SparkConf(loadDefaults = true).set(UI_ENABLED, false)
val providedExpectedWarehouseLocation =
sparkConf.getOption("spark.sql.test.expectedWarehouseDir")
val (sparkSession, expectedWarehouseLocation) = providedExpectedWarehouseLocation match {
case Some(warehouseDir) =>
// If spark.sql.test.expectedWarehouseDir is set, the warehouse dir is set
        // through spark-submit. So, neither spark.sql.warehouse.dir nor
        // hive.metastore.warehouse.dir is set here.
(new TestHiveContext(new SparkContext(sparkConf)).sparkSession, warehouseDir)
case None =>
val warehouseLocation = Utils.createTempDir()
warehouseLocation.delete()
val hiveWarehouseLocation = Utils.createTempDir()
hiveWarehouseLocation.delete()
// If spark.sql.test.expectedWarehouseDir is not set, we will set
// spark.sql.warehouse.dir and hive.metastore.warehouse.dir.
// We are expecting that the value of spark.sql.warehouse.dir will override the
// value of hive.metastore.warehouse.dir.
val session = new TestHiveContext(new SparkContext(sparkConf
.set(WAREHOUSE_PATH.key, warehouseLocation.toString)
.set("hive.metastore.warehouse.dir", hiveWarehouseLocation.toString)))
.sparkSession
(session, warehouseLocation.toString)
}
if (sparkSession.conf.get(WAREHOUSE_PATH.key) != expectedWarehouseLocation) {
throw new Exception(
s"${WAREHOUSE_PATH.key} is not set to the expected warehouse location " +
s"$expectedWarehouseLocation.")
}
val catalog = sparkSession.sessionState.catalog
sparkSession.sql("drop table if exists testLocation")
sparkSession.sql("drop database if exists testLocationDB cascade")
{
sparkSession.sql("create table testLocation (a int)")
val tableMetadata =
catalog.getTableMetadata(TableIdentifier("testLocation", Some("default")))
val expectedLocation =
CatalogUtils.stringToURI(s"file:${expectedWarehouseLocation.toString}/testlocation")
val actualLocation = tableMetadata.location
if (actualLocation != expectedLocation) {
throw new Exception(
s"Expected table location is $expectedLocation. But, it is actually $actualLocation")
}
sparkSession.sql("drop table testLocation")
}
{
sparkSession.sql("create database testLocationDB")
sparkSession.sql("use testLocationDB")
sparkSession.sql("create table testLocation (a int)")
val tableMetadata =
catalog.getTableMetadata(TableIdentifier("testLocation", Some("testLocationDB")))
val expectedLocation = CatalogUtils.stringToURI(
s"file:${expectedWarehouseLocation.toString}/testlocationdb.db/testlocation")
val actualLocation = tableMetadata.location
if (actualLocation != expectedLocation) {
throw new Exception(
s"Expected table location is $expectedLocation. But, it is actually $actualLocation")
}
sparkSession.sql("drop table testLocation")
sparkSession.sql("use default")
sparkSession.sql("drop database testLocationDB")
}
}
}
// This application is used to test defining a new Hive UDF (with an associated jar)
// and use this UDF. We need to run this test in separate JVM to make sure we
// can load the jar defined with the function.
object TemporaryHiveUDFTest extends Logging {
def main(args: Array[String]): Unit = {
TestUtils.configTestLog4j("INFO")
val conf = new SparkConf()
conf.set(UI_ENABLED, false)
val sc = new SparkContext(conf)
val hiveContext = new TestHiveContext(sc)
// Load a Hive UDF from the jar.
logInfo("Registering a temporary Hive UDF provided in a jar.")
val jar = HiveTestJars.getHiveContribJar().getCanonicalPath
hiveContext.sql(
s"""
|CREATE TEMPORARY FUNCTION example_max
|AS 'org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax'
|USING JAR '$jar'
""".stripMargin)
val source =
hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
source.createOrReplaceTempView("sourceTable")
// Actually use the loaded UDF.
logInfo("Using the UDF.")
val result = hiveContext.sql(
"SELECT example_max(key) as key, val FROM sourceTable GROUP BY val")
logInfo("Running a simple query on the table.")
val count = result.orderBy("key", "val").count()
if (count != 10) {
throw new Exception(s"Result table should have 10 rows instead of $count rows")
}
hiveContext.sql("DROP temporary FUNCTION example_max")
logInfo("Test finishes.")
sc.stop()
}
}
// This application is used to test defining a new Hive UDF (with an associated jar)
// and use this UDF. We need to run this test in separate JVM to make sure we
// can load the jar defined with the function.
object PermanentHiveUDFTest1 extends Logging {
def main(args: Array[String]): Unit = {
TestUtils.configTestLog4j("INFO")
val conf = new SparkConf()
conf.set(UI_ENABLED, false)
val sc = new SparkContext(conf)
val hiveContext = new TestHiveContext(sc)
// Load a Hive UDF from the jar.
logInfo("Registering a permanent Hive UDF provided in a jar.")
val jar = HiveTestJars.getHiveContribJar().getCanonicalPath
hiveContext.sql(
s"""
|CREATE FUNCTION example_max
|AS 'org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax'
|USING JAR '$jar'
""".stripMargin)
val source =
hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
source.createOrReplaceTempView("sourceTable")
// Actually use the loaded UDF.
logInfo("Using the UDF.")
val result = hiveContext.sql(
"SELECT example_max(key) as key, val FROM sourceTable GROUP BY val")
logInfo("Running a simple query on the table.")
val count = result.orderBy("key", "val").count()
if (count != 10) {
throw new Exception(s"Result table should have 10 rows instead of $count rows")
}
hiveContext.sql("DROP FUNCTION example_max")
logInfo("Test finishes.")
sc.stop()
}
}
// This application is used to test that a pre-defined permanent function with a jar
// resources can be used. We need to run this test in separate JVM to make sure we
// can load the jar defined with the function.
object PermanentHiveUDFTest2 extends Logging {
def main(args: Array[String]): Unit = {
TestUtils.configTestLog4j("INFO")
val conf = new SparkConf()
conf.set(UI_ENABLED, false)
val sc = new SparkContext(conf)
val hiveContext = new TestHiveContext(sc)
// Load a Hive UDF from the jar.
logInfo("Write the metadata of a permanent Hive UDF into metastore.")
val jar = HiveTestJars.getHiveContribJar().getCanonicalPath
val function = CatalogFunction(
FunctionIdentifier("example_max"),
"org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax",
FunctionResource(JarResource, jar) :: Nil)
hiveContext.sessionState.catalog.createFunction(function, ignoreIfExists = false)
val source =
hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
source.createOrReplaceTempView("sourceTable")
// Actually use the loaded UDF.
logInfo("Using the UDF.")
val result = hiveContext.sql(
"SELECT example_max(key) as key, val FROM sourceTable GROUP BY val")
logInfo("Running a simple query on the table.")
val count = result.orderBy("key", "val").count()
if (count != 10) {
throw new Exception(s"Result table should have 10 rows instead of $count rows")
}
hiveContext.sql("DROP FUNCTION example_max")
logInfo("Test finishes.")
sc.stop()
}
}
// This object is used for testing SPARK-8368: https://issues.apache.org/jira/browse/SPARK-8368.
// We test if we can load user jars in both driver and executors when HiveContext is used.
object SparkSubmitClassLoaderTest extends Logging {
def main(args: Array[String]): Unit = {
TestUtils.configTestLog4j("INFO")
val conf = new SparkConf()
val hiveWarehouseLocation = Utils.createTempDir()
conf.set(UI_ENABLED, false)
conf.set(WAREHOUSE_PATH.key, hiveWarehouseLocation.toString)
val sc = new SparkContext(conf)
val hiveContext = new TestHiveContext(sc)
val df = hiveContext.createDataFrame((1 to 100).map(i => (i, i))).toDF("i", "j")
logInfo("Testing load classes at the driver side.")
// First, we load classes at driver side.
try {
Utils.classForName(args(0))
Utils.classForName(args(1))
} catch {
case t: Throwable =>
throw new Exception("Could not load user class from jar:\\n", t)
}
// Second, we load classes at the executor side.
logInfo("Testing load classes at the executor side.")
val result = df.rdd.mapPartitions { x =>
var exception: String = null
try {
Utils.classForName(args(0))
Utils.classForName(args(1))
} catch {
case t: Throwable =>
exception = t + "\\n" + Utils.exceptionString(t)
exception = exception.replaceAll("\\n", "\\n\\t")
}
Option(exception).toSeq.iterator
}.collect()
if (result.nonEmpty) {
throw new Exception("Could not load user class from jar:\\n" + result(0))
}
// Load a Hive UDF from the jar.
logInfo("Registering temporary Hive UDF provided in a jar.")
hiveContext.sql(
"""
|CREATE TEMPORARY FUNCTION example_max
|AS 'org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax'
""".stripMargin)
val source =
hiveContext.createDataFrame((1 to 10).map(i => (i, s"str$i"))).toDF("key", "val")
source.createOrReplaceTempView("sourceTable")
// Load a Hive SerDe from the jar.
logInfo("Creating a Hive table with a SerDe provided in a jar.")
hiveContext.sql(
"""
|CREATE TABLE t1(key int, val string)
|ROW FORMAT SERDE 'org.apache.hive.hcatalog.data.JsonSerDe'
""".stripMargin)
// Actually use the loaded UDF and SerDe.
logInfo("Writing data into the table.")
hiveContext.sql(
"INSERT INTO TABLE t1 SELECT example_max(key) as key, val FROM sourceTable GROUP BY val")
logInfo("Running a simple query on the table.")
val count = hiveContext.table("t1").orderBy("key", "val").count()
if (count != 10) {
throw new Exception(s"table t1 should have 10 rows instead of $count rows")
}
logInfo("Test finishes.")
sc.stop()
}
}
// This object is used for testing SPARK-8020: https://issues.apache.org/jira/browse/SPARK-8020.
// We test if we can correctly set spark sql configurations when HiveContext is used.
object SparkSQLConfTest extends Logging {
def main(args: Array[String]): Unit = {
TestUtils.configTestLog4j("INFO")
// We override the SparkConf to add spark.sql.hive.metastore.version and
// spark.sql.hive.metastore.jars to the beginning of the conf entry array.
// So, if metadataHive get initialized after we set spark.sql.hive.metastore.version but
// before spark.sql.hive.metastore.jars get set, we will see the following exception:
// Exception in thread "main" java.lang.IllegalArgumentException: Builtin jars can only
// be used when hive execution version == hive metastore version.
// Execution: 0.13.1 != Metastore: 0.12. Specify a valid path to the correct hive jars
// using $HIVE_METASTORE_JARS or change spark.sql.hive.metastore.version to 0.13.1.
val conf = new SparkConf() {
override def getAll: Array[(String, String)] = {
def isMetastoreSetting(conf: String): Boolean = {
conf == HiveUtils.HIVE_METASTORE_VERSION.key || conf == HiveUtils.HIVE_METASTORE_JARS.key
}
// If there is any metastore settings, remove them.
val filteredSettings = super.getAll.filterNot(e => isMetastoreSetting(e._1))
// Always add these two metastore settings at the beginning.
(HiveUtils.HIVE_METASTORE_VERSION.key -> "0.12") +:
(HiveUtils.HIVE_METASTORE_JARS.key -> "maven") +:
filteredSettings
}
// For this simple test, we do not really clone this object.
override def clone: SparkConf = this
}
conf.set(UI_ENABLED, false)
val sc = new SparkContext(conf)
val hiveContext = new TestHiveContext(sc)
// Run a simple command to make sure all lazy vals in hiveContext get instantiated.
hiveContext.tables().collect()
sc.stop()
}
}
object SPARK_9757 extends QueryTest {
import org.apache.spark.sql.functions._
protected var spark: SparkSession = _
def main(args: Array[String]): Unit = {
TestUtils.configTestLog4j("INFO")
val hiveWarehouseLocation = Utils.createTempDir()
val sparkContext = new SparkContext(
new SparkConf()
.set(HiveUtils.HIVE_METASTORE_VERSION.key, "0.13.1")
.set(HiveUtils.HIVE_METASTORE_JARS.key, "maven")
.set(UI_ENABLED, false)
.set(WAREHOUSE_PATH.key, hiveWarehouseLocation.toString))
val hiveContext = new TestHiveContext(sparkContext)
spark = hiveContext.sparkSession
import hiveContext.implicits._
val dir = Utils.createTempDir()
dir.delete()
try {
{
val df =
hiveContext
.range(10)
.select(($"id" + 0.1) cast DecimalType(10, 3) as "dec")
df.write.option("path", dir.getCanonicalPath).mode("overwrite").saveAsTable("t")
checkAnswer(hiveContext.table("t"), df)
}
{
val df =
hiveContext
.range(10)
.select(callUDF("struct", ($"id" + 0.2) cast DecimalType(10, 3)) as "dec_struct")
df.write.option("path", dir.getCanonicalPath).mode("overwrite").saveAsTable("t")
checkAnswer(hiveContext.table("t"), df)
}
} finally {
dir.delete()
hiveContext.sql("DROP TABLE t")
sparkContext.stop()
}
}
}
object SPARK_11009 extends QueryTest {
import org.apache.spark.sql.functions._
protected var spark: SparkSession = _
def main(args: Array[String]): Unit = {
TestUtils.configTestLog4j("INFO")
val sparkContext = new SparkContext(
new SparkConf()
.set(UI_ENABLED, false)
.set(SHUFFLE_PARTITIONS.key, "100"))
val hiveContext = new TestHiveContext(sparkContext)
spark = hiveContext.sparkSession
try {
val df = spark.range(1 << 20)
val df2 = df.select((df("id") % 1000).alias("A"), (df("id") / 1000).alias("B"))
val ws = Window.partitionBy(df2("A")).orderBy(df2("B"))
val df3 = df2.select(df2("A"), df2("B"), row_number().over(ws).alias("rn")).filter("rn < 0")
if (df3.rdd.count() != 0) {
throw new Exception("df3 should have 0 output row.")
}
} finally {
sparkContext.stop()
}
}
}
object SPARK_14244 extends QueryTest {
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
protected var spark: SparkSession = _
def main(args: Array[String]): Unit = {
TestUtils.configTestLog4j("INFO")
val sparkContext = new SparkContext(
new SparkConf()
.set(UI_ENABLED, false)
.set(SHUFFLE_PARTITIONS.key, "100"))
val hiveContext = new TestHiveContext(sparkContext)
spark = hiveContext.sparkSession
import hiveContext.implicits._
try {
val window = Window.orderBy("id")
val df = spark.range(2).select(cume_dist().over(window).as("cdist")).orderBy("cdist")
checkAnswer(df, Seq(Row(0.5D), Row(1.0D)))
} finally {
sparkContext.stop()
}
}
}
object SPARK_18360 {
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder()
.config(UI_ENABLED.key, "false")
.enableHiveSupport().getOrCreate()
val defaultDbLocation = spark.catalog.getDatabase("default").locationUri
assert(new Path(defaultDbLocation) == new Path(spark.conf.get(WAREHOUSE_PATH)))
val hiveClient =
spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client
try {
val tableMeta = CatalogTable(
identifier = TableIdentifier("test_tbl", Some("default")),
tableType = CatalogTableType.MANAGED,
storage = CatalogStorageFormat.empty,
schema = new StructType().add("i", "int"),
provider = Some(DDLUtils.HIVE_PROVIDER))
val newWarehousePath = Utils.createTempDir().getAbsolutePath
hiveClient.runSqlHive(s"SET hive.metastore.warehouse.dir=$newWarehousePath")
hiveClient.createTable(tableMeta, ignoreIfExists = false)
val rawTable = hiveClient.getTable("default", "test_tbl")
// Hive will use the value of `hive.metastore.warehouse.dir` to generate default table
// location for tables in default database.
assert(rawTable.storage.locationUri.map(
CatalogUtils.URIToString).get.contains(newWarehousePath))
hiveClient.dropTable("default", "test_tbl", ignoreIfNotExists = false, purge = false)
spark.sharedState.externalCatalog.createTable(tableMeta, ignoreIfExists = false)
val readBack = spark.sharedState.externalCatalog.getTable("default", "test_tbl")
// Spark SQL will use the location of default database to generate default table
// location for tables in default database.
assert(readBack.storage.locationUri.map(CatalogUtils.URIToString)
.get.contains(defaultDbLocation))
} finally {
hiveClient.dropTable("default", "test_tbl", ignoreIfNotExists = true, purge = false)
hiveClient.runSqlHive(s"SET hive.metastore.warehouse.dir=$defaultDbLocation")
}
}
}
object SPARK_18989_CREATE_TABLE {
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder().enableHiveSupport().getOrCreate()
spark.sql(
"""
|CREATE TABLE IF NOT EXISTS base64_tbl(val string) STORED AS
|INPUTFORMAT 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextInputFormat'
|OUTPUTFORMAT 'org.apache.hadoop.hive.contrib.fileformat.base64.Base64TextOutputFormat'
""".stripMargin)
}
}
object SPARK_18989_DESC_TABLE {
def main(args: Array[String]): Unit = {
val spark = SparkSession.builder().enableHiveSupport().getOrCreate()
try {
spark.sql("DESC base64_tbl")
} finally {
spark.sql("DROP TABLE IF EXISTS base64_tbl")
}
}
}
| spark-test/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/HiveSparkSubmitSuite.scala | Scala | apache-2.0 | 34,451 |
package is.hail.expr.ir.functions
import is.hail.expr.NatVariable
import is.hail.expr.ir._
import is.hail.types.coerce
import is.hail.types.virtual._
object NDArrayFunctions extends RegistryFunctions {
override def registerAll() {
for ((stringOp, argType, retType, irOp) <- ArrayFunctions.arrayOps) {
val nDimVar = NatVariable()
registerIR2(stringOp, TNDArray(argType, nDimVar), argType, TNDArray(retType, nDimVar)) { (_, a, c) =>
val i = genUID()
NDArrayMap(a, i, irOp(Ref(i, c.typ), c))
}
registerIR2(stringOp, argType, TNDArray(argType, nDimVar), TNDArray(retType, nDimVar)) { (_, c, a) =>
val i = genUID()
NDArrayMap(a, i, irOp(c, Ref(i, c.typ)))
}
registerIR2(stringOp, TNDArray(argType, nDimVar), TNDArray(argType, nDimVar), TNDArray(retType, nDimVar)) { (_, l, r) =>
val lid = genUID()
val rid = genUID()
val lElemRef = Ref(lid, coerce[TNDArray](l.typ).elementType)
val rElemRef = Ref(rid, coerce[TNDArray](r.typ).elementType)
NDArrayMap2(l, r, lid, rid, irOp(lElemRef, rElemRef))
}
}
}
}
| danking/hail | hail/src/main/scala/is/hail/expr/ir/functions/NDArrayFunctions.scala | Scala | mit | 1,127 |
package smtlib
package drivers
import parser.Terms._
import parser.Commands._
import parser.CommandsResponses._
import printer.RecursivePrinter
/** Provide standard-compliant behaviour for a sequence of commands.
*
* This driver will properly understand the whole SMT-LIB 2.5 language,
* maintaining proper state and forwarding work to an underlying solver.
* The behaviour of this driver is to perfectly follow the standard, and
* properly abstract the peculiarities of the black box solvers so that
* they can be used with SMT-LIB 2.5 language.
*
 * One difficulty in implementing the SMT-LIB standard is that the standard
 * is designed for process interaction, relying on both a standard output and
 * a standard error channel. Standard output and error are part of the interface
 * of the driver and by default are exposed through the driver's callbacks. If one
 * sets the :diagnostic-output-channel or :regular-output-channel options, this
 * redirects the corresponding output to a file.
 * Basically: stdout and stderr are the iterators attached to this driver.
*
* TODO: this is work in progress, not usable yet.
*/
class SemanticsDriver(
rawSolver: Interpreter,
onRegularOutput: (CommandResponse) => Unit,
onDiagnosticOutput: (String) => Unit
) {
import SemanticsDriver._
private var regularOutputChannel: (CommandResponse) => Unit = onRegularOutput
private var diagnosticOutputChannel: (String) => Unit = onDiagnosticOutput
private def fileRegularOutputChannel(name: String): (CommandResponse) => Unit = {
val f = new java.io.FileWriter(name)
(res: CommandResponse) => {
f.write(RecursivePrinter.toString(res))
f.write("\\n")
}
}
private def fileDiagnosticOutputChannel(name: String): (String) => Unit = {
val f = new java.io.FileWriter(name)
(info: String) => {
f.write(info)
f.write("\\n")
}
}
private var executionMode: ExecutionMode = StartMode
private var logic: Option[Logic] = None
private var printSuccess = true
private var globalDeclarations = false
private var produceModels = false
private def doPrintSuccess(): Unit = {
if(printSuccess)
onRegularOutput(Success)
}
protected def processSetOption(option: SMTOption): Unit = option match {
case DiagnosticOutputChannel(value) =>
//TODO: can we set stdout for diagnostic output channel and redirect it to regular ?
if(value == "stderr")
diagnosticOutputChannel = onDiagnosticOutput
else
diagnosticOutputChannel = fileDiagnosticOutputChannel(value)
doPrintSuccess()
case GlobalDeclarations(value) =>
if(executionMode != StartMode) {
        regularOutputChannel(Error("global-declarations can only be set in Start mode"))
} else {
globalDeclarations = value
rawSolver.eval(SetOption(option))
doPrintSuccess()
}
case InteractiveMode(value) =>
regularOutputChannel(Unsupported)
case PrintSuccess(value) =>
printSuccess = value
doPrintSuccess()
case ProduceAssertions(value) =>
regularOutputChannel(Unsupported)
case ProduceAssignments(value) =>
regularOutputChannel(Unsupported)
case ProduceModels(value) =>
if(executionMode != StartMode) {
regularOutputChannel(Error("produce-models can only be set in Start mode"))
} else {
produceModels = value
rawSolver.eval(SetOption(option))
doPrintSuccess()
}
case ProduceProofs(value) =>
regularOutputChannel(Unsupported)
case ProduceUnsatAssumptions(value) =>
regularOutputChannel(Unsupported)
case ProduceUnsatCores(value) =>
regularOutputChannel(Unsupported)
case RandomSeed(value) =>
regularOutputChannel(Unsupported)
case RegularOutputChannel(value) =>
if(value == "stdout")
regularOutputChannel = onRegularOutput
else
regularOutputChannel = fileRegularOutputChannel(value)
doPrintSuccess()
case ReproducibleResourceLimit(value) =>
regularOutputChannel(Unsupported)
case Verbosity(value) =>
regularOutputChannel(Unsupported)
case AttributeOption(attribute) =>
regularOutputChannel(Unsupported)
}
protected def processGetInfo(infoFlag: InfoFlag): Unit = infoFlag match {
case AllStatisticsInfoFlag =>
regularOutputChannel(Unsupported)
case AssertionStackLevelsInfoFlag =>
regularOutputChannel(Unsupported)
case AuthorsInfoFlag =>
regularOutputChannel(Unsupported)
case ErrorBehaviorInfoFlag =>
regularOutputChannel(Unsupported)
case NameInfoFlag =>
regularOutputChannel(Unsupported)
case ReasonUnknownInfoFlag =>
regularOutputChannel(Unsupported)
case VersionInfoFlag =>
regularOutputChannel(Unsupported)
case KeywordInfoFlag(keyword) =>
regularOutputChannel(Unsupported)
}
private class AssertionLevel {
var assertions: Set[Term] = Set()
private var sortSymbols: Map[SSymbol, Int] = Map()
private var sortAliases: Map[SSymbol, (Seq[SSymbol], Sort)] = Map()
def isSortDefined(name: SSymbol): Boolean =
sortSymbols.contains(name) || sortAliases.contains(name)
def newSortSymbol(name: SSymbol, arity: Int): Unit = {
require(!sortSymbols.contains(name))
sortSymbols += (name -> arity)
}
def newSortAlias(name: SSymbol, params: Seq[SSymbol], body: Sort): Unit = {
require(!sortAliases.contains(name))
sortAliases += (name -> ((params, body)))
}
var declareFuns: Set[DeclareFun] = Set()
var declareConsts: Set[DeclareConst] = Set()
}
private var assertionStack: List[AssertionLevel] = List(new AssertionLevel)
private def firstAssertionLevel: AssertionLevel = assertionStack.last
private def currentAssertionLevel: AssertionLevel = assertionStack.head
private def processPop(n: Int): Unit = {
if(executionMode == StartMode)
regularOutputChannel(Error("You cannot use pop in Start mode"))
else if(assertionStack.size - n <= 0)
regularOutputChannel(Error("You cannot pop more elements than was pushed"))
else {
assertionStack = assertionStack.drop(n)
executionMode = AssertMode
rawSolver.eval(Pop(n))
doPrintSuccess()
}
}
private def processPush(n: Int): Unit = {
if(executionMode == StartMode)
regularOutputChannel(Error("You cannot use push in Start mode"))
else {
for(i <- 1 to n)
assertionStack ::= new AssertionLevel
executionMode = AssertMode
rawSolver.eval(Push(n))
doPrintSuccess()
}
}
/* check that the sort is well defined: correct arity, symbol in context.
* TODO: how to check for built-in sorts?
*/
def checkSort(sort: Sort, params: Seq[SSymbol]): Unit = {
}
def eval(command: Command): Unit = {
if(executionMode == ExitMode) {
regularOutputChannel(Error("The solver has exited."))
} else {
command match {
case DeclareSort(name, arity) => {
//TODO: global definitions
if(assertionStack.exists(al => al.isSortDefined(name))) {
regularOutputChannel(Error("Sort " + name + " already defined"))
} else {
currentAssertionLevel.newSortSymbol(name, arity)
executionMode = AssertMode
rawSolver.eval(command)
doPrintSuccess()
}
}
case DefineSort(name, params, body) => {
//TODO: global definitions
//TODO: check well defined sort
if(assertionStack.exists(al => al.isSortDefined(name))) {
regularOutputChannel(Error("Sort " + name + " already defined"))
} else {
currentAssertionLevel.newSortAlias(name, params, body)
executionMode = AssertMode
rawSolver.eval(command)
doPrintSuccess()
}
}
case GetInfo(infoFlag) => {
processGetInfo(infoFlag)
}
case Exit() => {
executionMode = ExitMode
rawSolver.eval(command)
}
case Pop(n) => {
processPop(n)
}
case Push(n) => {
processPush(n)
}
case Reset() => {
regularOutputChannel = onRegularOutput
diagnosticOutputChannel = onDiagnosticOutput
printSuccess = true
logic = None
globalDeclarations = false
produceModels = false
//TODO: if supported, else just emulate by creating a fresh instance
rawSolver.eval(command)
}
case ResetAssertions() => {
//TODO: what exactly to do with global declarations and declarations at the top level
assertionStack = List(new AssertionLevel)
rawSolver.eval(command)
doPrintSuccess()
}
case SetLogic(log) => {
if(executionMode != StartMode) {
regularOutputChannel(Error("set-logic is only allowed while in Start mode"))
} else {
rawSolver.eval(command)
logic = Some(log)
executionMode = AssertMode
}
}
case SetOption(option) => {
processSetOption(option)
}
case _ => ???
}
}
}
//val regularOutput: Iterator[CommandResponse]
//val diagnosticOutput: Iterator[String]
}
object SemanticsDriver {
trait ExecutionMode
case object StartMode extends ExecutionMode
case object AssertMode extends ExecutionMode
case object SatMode extends ExecutionMode
case object UnsatMode extends ExecutionMode
case object ExitMode extends ExecutionMode
}
| manoskouk/scala-smtlib | src/main/scala/smtlib/drivers/SemanticsDriver.scala | Scala | mit | 9,676 |
package io.ddf.spark.content
import io.ddf.spark.ATestSuite
/**
* Created by huandao on 7/20/15.
*/
class CopyDDFSuite extends ATestSuite {
createTableMtcars()
createTableAirline()
test("copy ddf") {
val ddf1 = manager.sql2ddf("select * from mtcars", "SparkSQL")
Array("cyl", "hp", "vs", "am", "gear", "carb").foreach {
col => ddf1.getSchemaHandler.setAsFactor(col)
}
val ddf2 = ddf1.copy()
Array("cyl", "hp", "vs", "am", "gear", "carb").foreach {
col => assert(ddf2.getSchemaHandler.getColumn(col).getOptionalFactor != null)
}
assert(ddf1.getNumRows == ddf2.getNumRows)
assert(ddf1.getNumColumns == ddf2.getNumColumns)
}
}
| ubolonton/DDF | spark/src/test/scala/io/ddf/spark/content/CopyDDFSuite.scala | Scala | apache-2.0 | 680 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples
import scala.collection.JavaConverters._
import org.apache.spark.util.Utils
/**
* Prints out environmental information, sleeps, and then exits. Made to
* test driver submission in the standalone scheduler.
*/
object DriverSubmissionTest {
def main(args: Array[String]) {
if (args.length < 1) {
println("Usage: DriverSubmissionTest <seconds-to-sleep>")
System.exit(0)
}
val numSecondsToSleep = args(0).toInt
val env = System.getenv()
val properties = Utils.getSystemProperties
println("Environment variables containing SPARK_TEST:")
env.asScala.filter { case (k, _) => k.contains("SPARK_TEST")}.foreach(println)
println("System properties containing spark.test:")
properties.filter { case (k, _) => k.toString.contains("spark.test") }.foreach(println)
for (i <- 1 until numSecondsToSleep) {
println(s"Alive for $i out of $numSecondsToSleep seconds")
Thread.sleep(1000)
}
}
}
// scalastyle:on println
| mrchristine/spark-examples-dbc | src/main/scala/org/apache/spark/examples/DriverSubmissionTest.scala | Scala | apache-2.0 | 1,843 |
package com.github.jparkie.spark.elasticsearch.sql
import com.holdenkarau.spark.testing.SharedSparkContext
import org.apache.spark.sql.SQLContext
import org.scalatest.{ MustMatchers, WordSpec }
class PackageSpec extends WordSpec with MustMatchers with SharedSparkContext {
"Package com.github.jparkie.spark.elasticsearch.sql" must {
"lift DataFrame into SparkEsDataFrameFunctions" in {
val sqlContext = new SQLContext(sc)
val inputData = Seq(
("TEST_VALUE_1", 1),
("TEST_VALUE_2", 2),
("TEST_VALUE_3", 3)
)
val outputDataFrame = sqlContext.createDataFrame(inputData)
.toDF("key", "value")
// If sparkContext is available, DataFrame was lifted into SparkEsDataFrameFunctions.
outputDataFrame.sparkContext
}
}
}
| jparkie/Spark2Elasticsearch | src/test/scala/com/github/jparkie/spark/elasticsearch/sql/PackageSpec.scala | Scala | apache-2.0 | 796 |
import sbt._
import Keys._
object BuildSettings {
val paradiseVersion = "2.0.0-M3"
val buildSettings = Defaults.defaultSettings ++ Seq(
organization := "mixfix",
version := "0.1.0",
//scalacOptions ++= Seq("-Ymacro-debug-lite"),
scalaVersion := "2.10.3",
resolvers += Resolver.sonatypeRepo("snapshots"),
resolvers += Resolver.sonatypeRepo("releases"),
addCompilerPlugin("org.scalamacros" % "paradise" % paradiseVersion cross CrossVersion.full)
)
}
object MyBuild extends Build {
import BuildSettings._
lazy val root: Project = Project(
"root",
file("."),
settings = buildSettings ++ Seq(
run <<= run in Compile in test
)
) aggregate(mixfix, test)
lazy val mixfix: Project = Project(
"mixfix",
file("mixfix"),
settings = buildSettings ++ Seq(
libraryDependencies <+= (scalaVersion)("org.scala-lang" % "scala-reflect" % _),
libraryDependencies ++= (
if (scalaVersion.value.startsWith("2.10")) List("org.scalamacros" % "quasiquotes" % paradiseVersion cross CrossVersion.full)
else Nil
)
)
)
lazy val test: Project = Project(
"test",
file("test"),
settings = buildSettings ++ Seq(
libraryDependencies ++= Seq(
"org.scalatest" %% "scalatest" % "2.0" % "test",
"com.chuusai" % "shapeless" % "2.0.0-M1" % "test" cross CrossVersion.full
)
)
) dependsOn(mixfix)
}
| hunam/scala-mixfix | project/Build.scala | Scala | apache-2.0 | 1,424 |
package ch.descabato.core.util
import java.io.File
import java.nio.file.Files
import java.text.SimpleDateFormat
import java.util.Date
import java.util.stream.Collectors
import ch.descabato.core.config.BackupFolderConfiguration
import scala.collection.JavaConverters._
import scala.util.Try
object Constants {
val tempPrefix = "temp."
val filesEntry = "files.txt"
val indexSuffix = ".index"
def objectEntry(num: Option[Int] = None): String = {
val add = num.map(x => "_" + x).getOrElse("")
s"content$add.obj"
}
}
trait FileType {
def getFiles(): Seq[File]
def matches(file: File): Boolean
def isMetadata(): Boolean = true
def nextFile(): File
}
trait NumberedFileType extends FileType {
def numberOfFile(file: File): Int
def fileForNumber(number: Int): File
}
trait DatedFileType extends FileType {
def dateOfFile(file: File): Date
def getDates(): Seq[Date] = {
getFiles().map(dateOfFile).sorted
}
}
class FileManager(config: BackupFolderConfiguration) {
val volume = new StandardNumberedFileType("volume", "json.gz", config)
val volumeIndex = new StandardNumberedFileType("volumeIndex", "json.gz", config)
val metadata = new StandardNumberedFileType("metadata", "json.gz", config)
val backup = new StandardDatedFileType("backup", "json.gz", config)
private val filetypes = Seq(volume, volumeIndex, metadata, backup)
def allFiles(): Seq[File] = {
filetypes.flatMap(_.getFiles())
}
def fileTypeForFile(file: File): Option[FileType] = {
filetypes.find(_.matches(file))
}
}
/**
* Pattern here is:
* $name/$name_$number for all < 1000
* $name/$name_$range/$name_$number for all >= 1000
*/
class StandardNumberedFileType(name: String, suffix: String, config: BackupFolderConfiguration) extends NumberedFileType {
val mainFolder = new File(config.folder, name)
val regex = s"${name}_[0-9]+"
  val regexWithSuffix = s"${regex}\\.${suffix}"
val filesPerFolder = 1000
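  // Resulting layout, for illustration (hypothetical values: name = "volume", suffix = "json.gz"):
  //   fileForNumber(12)   -> <config.folder>/volume/volume_000012.json.gz
  //   fileForNumber(1234) -> <config.folder>/volume/volume_1/volume_001234.json.gz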
override def numberOfFile(file: File): Int = {
require(matches(file))
file.getName.drop(name.length + 1).takeWhile(_.isDigit).toInt
}
override def fileForNumber(number: Int): File = {
val subfolderNumber = number / filesPerFolder
val nameNumber = f"${number}%06d"
val firstFolder = if (subfolderNumber > 0) s"${name}_$subfolderNumber/" else ""
new File(mainFolder, s"$firstFolder${name}_${nameNumber}.${suffix}")
}
override def getFiles(): Seq[File] = {
if (mainFolder.exists()) {
val files = Files.walk(mainFolder.toPath).collect(Collectors.toList())
files.asScala.map(_.toFile).filter(matches)
} else {
Seq.empty
}
}
override def matches(file: File): Boolean = {
val nameMatches = file.getName.matches(regexWithSuffix)
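    // Scheme 1: the file lives in a numbered subfolder of the main folder
    // (the layout used once file numbers reach filesPerFolder).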
val scheme1 = {
val parentMatches = file.getParentFile.getName.matches(regex)
val parentsParentMatches = file.getParentFile.getParentFile.getName == name
nameMatches && parentMatches && parentsParentMatches
}
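    // Scheme 2: the file lives directly in the main folder
    // (the layout used for file numbers below filesPerFolder).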
val scheme2 = {
val parentMatches = file.getParentFile.getName == name
nameMatches && parentMatches
}
scheme1 || scheme2
}
override def nextFile(): File = {
if (mainFolder.exists()) {
val existing = getFiles().map(numberOfFile)
if (existing.nonEmpty) {
fileForNumber(existing.max + 1)
} else {
fileForNumber(0)
}
} else {
fileForNumber(0)
}
}
}
/**
* Pattern here is
* $name_$date.$suffix
*/
class StandardDatedFileType(name: String, suffix: String, config: BackupFolderConfiguration) extends DatedFileType {
private val dateFormat = "yyyy-MM-dd.HHmmss"
private val dateFormatter = new SimpleDateFormat(dateFormat)
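  // Example file name, for illustration (hypothetical values: name = "backup", suffix = "json.gz"):
  //   backup_2016-01-31.235959.json.gz, as produced by nextFile() and parsed back by dateOfFile().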
def newestFile(): Option[File] = {
getFiles().sortBy(_.getName).lastOption
}
def forDate(d: Date): File = {
val files = getFiles().filter(x => dateOfFile(x) == d)
require(files.size == 1)
files.head
}
override def dateOfFile(file: File): Date = {
require(matches(file))
val date = file.getName.drop(name.length + 1).take(dateFormat.length)
dateFormatter.parse(date)
}
override def getFiles(): Seq[File] = {
config.folder.listFiles().filter(_.isFile).filter(matches)
}
override def matches(file: File): Boolean = {
val fileName = file.getName
if (fileName.startsWith(name + "_") && fileName.endsWith("." + suffix)) {
val dateString = fileName.drop(name.length + 1).dropRight(suffix.length + 1)
Try(dateFormatter.parse(dateString)).isSuccess
} else {
false
}
}
override def nextFile(): File = {
val date = new Date()
new File(config.folder, s"${name}_${dateFormatter.format(date)}.$suffix")
}
}
| Stivo/DeScaBaTo | core/src/main/scala/ch/descabato/core/util/FileManager.scala | Scala | gpl-3.0 | 4,703 |