code | repo_name | path | language | license | size
---|---|---|---|---|---|
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
import Relation._
object Relation
{
/** Constructs a new immutable, finite relation that is initially empty. */
def empty[A,B]: Relation[A,B] = make(Map.empty, Map.empty)
def make[A,B](forward: Map[A,Set[B]], reverse: Map[B, Set[A]]): Relation[A,B] = new MRelation(forward, reverse)
def reconstruct[A,B](forward: Map[A, Set[B]]): Relation[A,B] =
{
val reversePairs = for( (a,bs) <- forward.view; b <- bs.view) yield (b, a)
val reverse = (Map.empty[B,Set[A]] /: reversePairs) { case (m, (b, a)) => add(m, b, a :: Nil) }
make(forward, reverse)
}
private[sbt] def remove[X,Y](map: M[X,Y], from: X, to: Y): M[X,Y] =
map.get(from) match {
case Some(tos) =>
val newSet = tos - to
if(newSet.isEmpty) map - from else map.updated(from, newSet)
case None => map
}
private[sbt] def combine[X,Y](a: M[X,Y], b: M[X,Y]): M[X,Y] =
(a /: b) { (map, mapping) => add(map, mapping._1, mapping._2) }
private[sbt] def add[X,Y](map: M[X,Y], from: X, to: Traversable[Y]): M[X,Y] =
map.updated(from, get(map, from) ++ to)
private[sbt] def get[X,Y](map: M[X,Y], t: X): Set[Y] = map.getOrElse(t, Set.empty[Y])
private[sbt] type M[X,Y] = Map[X, Set[Y]]
}
/** Binary relation between A and B. It is a set of pairs (_1, _2) for _1 in A, _2 in B. */
trait Relation[A,B]
{
/** Returns the set of all _2s such that (_1, _2) is in this relation. */
def forward(_1: A): Set[B]
/** Returns the set of all _1s such that (_1, _2) is in this relation. */
def reverse(_2: B): Set[A]
/** Includes the relation given by `pair`. */
def +(pair: (A, B)): Relation[A,B]
/** Includes the relation (a, b). */
def +(a: A, b: B): Relation[A,B]
/** Includes the relations (a, b) for all b in bs. */
def +(a: A, bs: Traversable[B]): Relation[A,B]
/** Returns the union of the relation r with this relation. */
def ++(r: Relation[A,B]): Relation[A,B]
/** Includes the given relations. */
def ++(rs: Traversable[(A,B)]): Relation[A,B]
/** Removes all relations (_1, _2) for all _1 in _1s. */
def --(_1s: Traversable[A]): Relation[A,B]
/** Removes all `pairs` from this relation. */
def --(pairs: TraversableOnce[(A,B)]): Relation[A,B]
/** Removes all pairs (_1, _2) with the given _1 from this relation. */
def -(_1: A): Relation[A,B]
/** Removes `pair` from this relation. */
def -(pair: (A,B)): Relation[A,B]
/** Returns the set of all _1s such that (_1, _2) is in this relation. */
def _1s: collection.Set[A]
/** Returns the set of all _2s such that (_1, _2) is in this relation. */
def _2s: collection.Set[B]
/** Returns the number of pairs in this relation. */
def size: Int
/** Returns true iff (a, b) is in this relation. */
def contains(a: A, b: B): Boolean
/** Returns a relation with only pairs (a,b) for which f(a,b) is true.*/
def filter(f: (A,B) => Boolean): Relation[A,B]
/** Partitions this relation into a map of relations according to some discriminator function. */
def groupBy[K](f: ((A,B)) => K): Map[K, Relation[A,B]]
/** Returns all pairs in this relation.*/
def all: Traversable[(A,B)]
def forwardMap: Map[A, Set[B]]
def reverseMap: Map[B, Set[A]]
}
private final class MRelation[A,B](fwd: Map[A, Set[B]], rev: Map[B, Set[A]]) extends Relation[A,B]
{
def forwardMap = fwd
def reverseMap = rev
def forward(t: A) = get(fwd, t)
def reverse(t: B) = get(rev, t)
def _1s = fwd.keySet
def _2s = rev.keySet
def size = fwd.size
def all: Traversable[(A,B)] = fwd.iterator.flatMap { case (a, bs) => bs.iterator.map( b => (a,b) ) }.toTraversable
def +(pair: (A,B)) = this + (pair._1, Set(pair._2))
def +(from: A, to: B) = this + (from, to :: Nil)
def +(from: A, to: Traversable[B]) =
new MRelation( add(fwd, from, to), (rev /: to) { (map, t) => add(map, t, from :: Nil) })
def ++(rs: Traversable[(A,B)]) = ((this: Relation[A,B]) /: rs) { _ + _ }
def ++(other: Relation[A,B]) = new MRelation[A,B]( combine(fwd, other.forwardMap), combine(rev, other.reverseMap) )
def --(ts: Traversable[A]): Relation[A,B] = ((this: Relation[A,B]) /: ts) { _ - _ }
def --(pairs: TraversableOnce[(A,B)]): Relation[A,B] = ((this: Relation[A,B]) /: pairs) { _ - _ }
def -(pair: (A,B)): Relation[A,B] =
new MRelation( remove(fwd, pair._1, pair._2), remove(rev, pair._2, pair._1) )
def -(t: A): Relation[A,B] =
fwd.get(t) match {
case Some(rs) =>
val upRev = (rev /: rs) { (map, r) => remove(map, r, t) }
new MRelation(fwd - t, upRev)
case None => this
}
def filter(f: (A,B) => Boolean): Relation[A,B] = Relation.empty[A,B] ++ all.filter(f.tupled)
def groupBy[K](f: ((A,B)) => K): Map[K, Relation[A,B]] = all.groupBy(f) mapValues { Relation.empty[A,B] ++ _ }
def contains(a: A, b: B): Boolean = forward(a)(b)
override def toString = all.map { case (a,b) => a + " -> " + b }.mkString("Relation [", ", ", "]")
}
| olove/xsbt | util/relation/src/main/scala/sbt/Relation.scala | Scala | bsd-3-clause | 4,847 |
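// Illustrative usage sketch (not part of the original file above): a quick tour of the
// Relation API it defines. The file/class names here are invented purely for demonstration,
// and this assumes the sbt.Relation sources above are on the classpath.
import sbt.Relation

object RelationExample {
  def main(args: Array[String]): Unit = {
    // Relate source files to the classes they produce.
    val r = Relation.empty[String, String] + ("A.scala", "A") + ("A.scala", "A$") + ("B.scala", "B")
    assert(r.forward("A.scala") == Set("A", "A$")) // all _2s for a given _1
    assert(r.reverse("B") == Set("B.scala"))       // all _1s for a given _2
    assert(r.contains("A.scala", "A"))
    // Removing a _1 drops every pair that starts with it.
    assert((r - "A.scala")._1s == Set("B.scala"))
  }
}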
package nozzle.webresult
import spray.json._
import nozzle.jsend._
import spray.httpx.marshalling._
import spray.httpx.SprayJsonSupport
trait JSendMarshallingSupport extends MarshallingSupport with nozzle.jsend.JSendSupport {
import JSendJsonProtocol._
private implicit def okRootJsonFormat[T](implicit jsendable: JSendable[T], jsonFormat: RootJsonFormat[T]) =
new RootJsonFormat[Ok[T]] {
def write(t: Ok[T]) = jsendable.toJSendSuccess(t.value).toJson
def read(json: spray.json.JsValue): Ok[T] = Ok(JSendSuccessFormat(jsonFormat).read(json).data)
}
private implicit def okPluralRootJsonFormat[T](implicit jsendable: JSendable[List[T]], jsonFormat: RootJsonFormat[List[T]]) =
new RootJsonFormat[Ok[List[T]]] {
def write(t: Ok[List[T]]) = jsendable.toJSendSuccess(t.value).toJson
def read(json: spray.json.JsValue): Ok[List[T]] = Ok(JSendSuccessFormat(jsonFormat).read(json).data)
}
private implicit def okUnitRootJsonFormat[T] = new RootJsonFormat[Ok[Unit]] {
def write(t: Ok[Unit]) = JSendEmptySuccess.toJson
def read(json: spray.json.JsValue): Ok[Unit] = {
JSendEmptySuccessFormat.read(json)
Ok(())
}
}
private implicit def webErrorRootJsonFormat(implicit toMessageStr: WebError => String) =
new RootJsonWriter[WebError] {
def write(webError: WebError) = JSendError(message = toMessageStr(webError)).toJson
}
implicit def okMarshaller[T](implicit jsendable: JSendable[T], jsonFormat: RootJsonFormat[T]) =
SprayJsonSupport.sprayJsonMarshaller[Ok[T]]
implicit val okUnitMarshaller = SprayJsonSupport.sprayJsonMarshaller[Ok[Unit]]
implicit val webErrorMarshaller = SprayJsonSupport.sprayJsonMarshaller[WebError]
}
| utaal/nozzle | src/main/scala/webresult/JSendMarshallingSupport.scala | Scala | mit | 1,711 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.optim.parameters
import java.nio.ByteBuffer
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import scala.reflect._
class FP16SplitsCompressedTensor[T: ClassTag](buffers: Array[Array[Byte]], size: Int)
extends CompressedTensor[T] {
def this(tensor: Tensor[T], splitsNum: Int) {
this(new Array[Array[Byte]](splitsNum), tensor.nElement())
compress(tensor)
}
def this(length: Int, splitsNum: Int) {
this(new Array[Array[Byte]](splitsNum), length)
}
@inline
private def overlap(splitOffset: Int, splitLength: Int, offset: Int,
length: Int): Option[(Int, Int)] = {
if ((splitOffset > offset + length || splitOffset + splitLength < offset)) {
None
} else {
Some(math.max(offset - splitOffset, 0),
math.min(splitOffset + splitLength, offset + length) - math.max(splitOffset, offset))
}
}
override def compress(offset: Int, src: Tensor[T], srcOffset: Int,
length: Int): FP16SplitsCompressedTensor.this.type = {
require(src.isContiguous() && offset >= 0 && srcOffset >= 0 &&
srcOffset + length <= src.nElement())
require(offset + length <= size)
val tOffset = src.storageOffset() - 1 + srcOffset
val splitSize = size / buffers.length
val extraSize = size % buffers.length
var i = 0
while (i < buffers.length) {
val start = splitSize * i + math.min(extraSize, i)
val curLength = splitSize + (if (i < extraSize) 1 else 0)
overlap(start, curLength, offset, length) match {
case Some((splitOffset, overlapLength)) =>
if (buffers(i) == null) {
buffers(i) = new Array[Byte](curLength * 2)
}
if (classTag[T] == classTag[Double]) {
FP16CompressedTensor.toFP16(src.storage().array().asInstanceOf[Array[Double]],
tOffset + start, buffers(i), splitOffset, overlapLength)
} else if (classTag[T] == classTag[Float]) {
FP16CompressedTensor.toFP16(src.storage().array().asInstanceOf[Array[Float]],
tOffset + start, buffers(i), splitOffset, overlapLength)
} else {
throw new IllegalArgumentException
}
case _ =>
}
i += 1
}
this
}
override def compress(tensor: Tensor[T]): FP16SplitsCompressedTensor.this.type =
compress(0, tensor, 0, tensor.nElement())
override def deCompress(srcOffset: Int, tensor: Tensor[T], tgtOffset: Int, length: Int): Unit = {
require(srcOffset >= 0 && length > 0 && srcOffset + length <= size &&
tgtOffset >= 0 && tgtOffset + length <= tensor.nElement())
require(tensor.isContiguous())
val splitSize = size / buffers.length
val extraSize = size % buffers.length
var i = 0
while (i < buffers.length) {
val start = splitSize * i + math.min(extraSize, i)
val curLength = splitSize + (if (i < extraSize) 1 else 0)
overlap(start, curLength, srcOffset, length) match {
case Some((splitOffset, overlapLength)) =>
if (classTag[T] == classTag[Double]) {
val tdata = tensor.storage().array().asInstanceOf[Array[Double]]
val toffset = tensor.storageOffset() - 1 + tgtOffset
FP16CompressedTensor.fromFP16(buffers(i), splitOffset * 2, overlapLength * 2,
tdata, toffset + start)
} else if (classTag[T] == classTag[Float]) {
val tdata = tensor.storage().array().asInstanceOf[Array[Float]]
val toffset = tensor.storageOffset() - 1 + tgtOffset
FP16CompressedTensor.fromFP16(buffers(i), splitOffset * 2, overlapLength * 2,
tdata, toffset + start)
} else {
throw new IllegalArgumentException
}
case _ =>
}
i += 1
}
}
override def deCompress(tensor: Tensor[T]): Unit = deCompress(0, tensor, 0, tensor.nElement())
override def bytes(offset: Int, length: Int): ByteBuffer = {
val splitSize = size / buffers.length
val extraSize = size % buffers.length
var i = 0
while (i < buffers.length) {
val start = splitSize * i + math.min(extraSize, i)
val curLength = splitSize + (if (i < extraSize) 1 else 0)
if (start == offset && curLength == length) {
require(buffers(i) != null, "split has not been inited")
return ByteBuffer.wrap(buffers(i))
}
i += 1
}
throw new IllegalArgumentException("Offset and length not match")
}
override def bytes(): ByteBuffer = bytes(0, size)
// scalastyle:off
override def add(data: ByteBuffer, offset: Int,
length: Int): FP16SplitsCompressedTensor.this.type = ???
override def add(data: ByteBuffer): FP16SplitsCompressedTensor.this.type = ???
override def parAdd(data: ByteBuffer, offset: Int,
length: Int): FP16SplitsCompressedTensor.this.type = ???
override def parAdd(data: ByteBuffer): FP16SplitsCompressedTensor.this.type = ???
// scalastyle:on
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/optim/parameters/FP16SplitsCompressedTensor.scala | Scala | apache-2.0 | 5,578 |
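// Illustrative sketch (not part of the original file above): the split layout used by
// FP16SplitsCompressedTensor distributes `size` elements across `splitsNum` buffers, giving
// the first (size % splitsNum) splits one extra element. This standalone snippet only mirrors
// that arithmetic and prints the resulting (start, length) ranges for a small, made-up case.
object SplitLayoutExample {
  def splitRanges(size: Int, splitsNum: Int): Seq[(Int, Int)] = {
    val splitSize = size / splitsNum
    val extraSize = size % splitsNum
    (0 until splitsNum).map { i =>
      val start = splitSize * i + math.min(extraSize, i)
      val curLength = splitSize + (if (i < extraSize) 1 else 0)
      (start, curLength)
    }
  }

  def main(args: Array[String]): Unit = {
    // For size = 10 and 3 splits: (0,4), (4,3), (7,3) -- the ranges cover 0 until 10 with no overlap.
    println(splitRanges(10, 3))
  }
}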
package monocle.generic
import monocle.law.discipline.IsoTests
import monocle.law.discipline.function.EachTests
import monocle.generic.all._
import org.scalacheck.Arbitrary
import cats.Eq
import munit.DisciplineSuite
import scala.annotation.nowarn
@nowarn
class ProductSpec extends DisciplineSuite {
case class Person(name: String, age: Int)
implicit val personEq: Eq[Person] = Eq.fromUniversalEquals
implicit val personArb: Arbitrary[Person] = Arbitrary(for {
n <- Arbitrary.arbitrary[String]
a <- Arbitrary.arbitrary[Int]
} yield Person(n, a))
case class Permissions(read: Boolean, write: Boolean, execute: Boolean)
implicit val nameEq: Eq[Permissions] = Eq.fromUniversalEquals
implicit val nameArb: Arbitrary[Permissions] = Arbitrary(for {
f <- Arbitrary.arbitrary[Boolean]
l <- Arbitrary.arbitrary[Boolean]
i <- Arbitrary.arbitrary[Boolean]
} yield Permissions(f, l, i))
checkAll("toTuple", IsoTests(product.productToTuple[Person]))
checkAll("eachTuple2", EachTests[(String, String), String])
checkAll("eachTuple4", EachTests[(Int, Int, Int, Int), Int])
checkAll("eachCaseClass", EachTests[Permissions, Boolean])
}
| julien-truffaut/Monocle | generic/src/test/scala/monocle/generic/ProductSpec.scala | Scala | mit | 1,171 |
package org.allenai.common
import org.allenai.common.testkit.UnitSpec
class MathUtilsSpec extends UnitSpec {
"MathUtils" should "correctly perform math operations" in {
MathUtils.round(3.1415, 2) should be(3.14)
MathUtils.round(3.1415, 3) should be(3.142)
}
it should "correctly incorporate rounding mode" in {
MathUtils.round(3.1415, 3, BigDecimal.RoundingMode.HALF_DOWN) should be(3.141)
MathUtils.round(3.1415, 3, BigDecimal.RoundingMode.HALF_UP) should be(3.142)
}
}
| allenai/common | core/src/test/scala/org/allenai/common/MathUtilsSpec.scala | Scala | apache-2.0 | 499 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \ | _ \ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \_\ |_____| \____| /__/ \____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
//package com.precog
//package ragnarok
//package test
//
//object ClicksTestSuite extends ClicksLikePerfTestSuite {
// val data = "//clicks"
//
// "simple" := simpleQueries()
// "grouping" := groupingQueries()
// "advanced grouping" := advancedGroupingQueries()
//}
//
//
| precog/platform | ragnarok/src/main/scala/com/precog/ragnarok/test/ClicksTestSuite.scala | Scala | agpl-3.0 | 1,351 |
package org.scalatra
import org.scalatest.{Matchers, FunSuite}
import org.scalatest.matchers.MustMatchers
class RailsLikeUrlGeneratorTest extends FunSuite with Matchers {
def url(path: String, params: Tuple2[String, String]*): String =
url(path, params.toMap)
def url(path: String, splat: String, moreSplats: String*): String =
url(path, Map[String, String](), splat +: moreSplats)
def url(path: String, params: Map[String, String] = Map(), splats: Iterable[String] = Seq()): String =
new RailsRouteMatcher(path).reverse(params, splats.toList)
test("static string") {
url("/foo") should equal ("/foo")
}
test("dynamic segment") {
url(":foo.example.com", "foo" -> "vanilla") should equal ("vanilla.example.com")
}
test("dynamic segment with leading underscore") {
url(":_foo.example.com", "_foo" -> "vanilla") should equal ("vanilla.example.com")
}
test("skip invalid group names: 123") {
url(":123.example.com") should equal (":123.example.com")
}
test("skip invalid group names: $") {
url(":$.example.com") should equal (":$.example.com")
}
test("escaped dynamic segment") {
url("""\:foo.example.com""") should equal (":foo.example.com")
url("""bar.\:foo.com""") should equal ("bar.:foo.com")
}
test("dynamic segment inside optional segment") {
url("foo(.:extension)", "extension" -> "json") should equal ("foo.json")
url("foo(.:extension)") should equal ("foo")
}
test("static string and dynamic segment inside optional segment") {
url("foo(/bar.:extension)", "extension" -> "json") should equal ("foo/bar.json")
url("foo(/bar.:extension)") should equal ("foo")
}
test("glob segment") {
url("src/*files", "files" -> "a/b/c.txt") should equal ("src/a/b/c.txt")
}
test("glob segment at the beginning") {
url("*files/foo.txt", "files" -> "/home/thib") should equal ("/home/thib/foo.txt")
}
test("glob segment in the middle") {
url("src/*files/foo.txt", "files" -> "a/b/c") should equal ("src/a/b/c/foo.txt")
}
test("multiple glob segments") {
url("src/*files/dir/*morefiles/foo.txt", "files" -> "a/b", "morefiles" -> "c/d") should equal ("src/a/b/dir/c/d/foo.txt")
}
test("escaped glob segment") {
url("""src/\*files""") should equal ("src/*files")
}
test("glob segment inside optional segment") {
url("src(/*files)", "files" -> "a/b/c.txt") should equal ("src/a/b/c.txt")
url("src(/*files)") should equal ("src")
}
test("optional segment") {
url("/foo(/bar)") should equal ("/foo/bar")
}
test("optional segment on first position") {
url("(/foo)/bar") should equal ("/foo/bar")
}
test("consecutive optional segments") {
url("/foo(/bar)(/baz)") should equal ("/foo/bar/baz")
}
test("separated optional segments") {
url("/foo(/bar)/buz(/baz)") should equal ("/foo/bar/buz/baz")
}
test("multiple optional segments") {
url("(/foo)(/bar)(/baz)") should equal ("/foo/bar/baz")
}
test("escapes optional segment parentheses") {
url("""/foo\(/bar\)""") should equal ("/foo(/bar)")
}
test("escapes one optional segment parenthesis") {
url("""/foo\((/bar)""") should equal ("/foo(/bar")
}
}
| etorreborre/scalatra | core/src/test/scala/org/scalatra/RailsLikeUrlGeneratorTest.scala | Scala | bsd-2-clause | 3,207 |
/*
* Copyright 2016 Coral realtime streaming analytics (http://coral-streaming.github.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.coral.utils
object NameSuggester {
/**
* Suggest a name based on a list of pre-existing names.
* If the name does not appear in the list, return the name itself.
* If it does appear in the list, add a number to it until it does
* not appear in the list any more, starting with 2.
* @param name The name to check.
* @param list The list to find the name in.
*/
def suggestName(name: String, list: List[String]): String = {
if (!list.contains(name)) {
name
} else {
val (first, last) = splitNumberAtEnd(name)
val proposed = first + (last.toInt + 1).toString
if (list.contains(proposed)) {
suggestName(proposed, list)
} else {
proposed
}
}
}
/**
* Splits a string with a number at the end in the
* non-numeric and the numeric part.
* For example: "abcd123" => ("abcd", 123)
* @param name The actor name to split
* @return A tuple with a string and an int.
* If no number is present at the end, the number
* that is returned is 1.
*/
private def splitNumberAtEnd(name: String): (String, Int) = {
val first = name.takeWhile(c => !c.isDigit)
val last = {
val numbers = name.reverse.takeWhile(c => c.isDigit).reverse
if (numbers.length > 0) numbers.toInt else 1
}
(first, last)
}
}
| coral-streaming/coral | src/main/scala/io/coral/utils/NameSuggester.scala | Scala | apache-2.0 | 1,939 |
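// Illustrative usage sketch (not part of the original file above) for the NameSuggester object:
// it appends or increments a trailing number until the suggested name no longer clashes with
// the existing list. The example names are made up.
import io.coral.utils.NameSuggester

object NameSuggesterExample {
  def main(args: Array[String]): Unit = {
    assert(NameSuggester.suggestName("generator", Nil) == "generator")
    assert(NameSuggester.suggestName("generator", List("generator")) == "generator2")
    assert(NameSuggester.suggestName("generator", List("generator", "generator2")) == "generator3")
    assert(NameSuggester.suggestName("stats7", List("stats7")) == "stats8")
  }
}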
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import scala.util.control.NonFatal
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hive.service.cli.SessionHandle
import org.apache.hive.service.cli.session.SessionManager
import org.apache.hive.service.rpc.thrift.TProtocolVersion
import org.apache.hive.service.server.HiveServer2
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
import org.apache.spark.sql.hive.thriftserver.server.SparkSQLOperationManager
import org.apache.spark.sql.internal.SQLConf
private[hive] class SparkSQLSessionManager(hiveServer: HiveServer2, sqlContext: SQLContext)
extends SessionManager(hiveServer)
with ReflectedCompositeService with Logging {
private lazy val sparkSqlOperationManager = new SparkSQLOperationManager()
override def init(hiveConf: HiveConf): Unit = {
setSuperField(this, "operationManager", sparkSqlOperationManager)
super.init(hiveConf)
}
override def openSession(
protocol: TProtocolVersion,
username: String,
passwd: String,
ipAddress: String,
sessionConf: java.util.Map[String, String],
withImpersonation: Boolean,
delegationToken: String): SessionHandle = {
val sessionHandle =
super.openSession(protocol, username, passwd, ipAddress, sessionConf, withImpersonation,
delegationToken)
try {
val session = super.getSession(sessionHandle)
HiveThriftServer2.eventManager.onSessionCreated(
session.getIpAddress, sessionHandle.getSessionId.toString, session.getUsername)
val ctx = if (sqlContext.conf.hiveThriftServerSingleSession) {
sqlContext
} else {
sqlContext.newSession()
}
ctx.setConf(SQLConf.DATETIME_JAVA8API_ENABLED, true)
val hiveSessionState = session.getSessionState
setConfMap(ctx, hiveSessionState.getOverriddenConfigurations)
setConfMap(ctx, hiveSessionState.getHiveVariables)
if (sessionConf != null && sessionConf.containsKey("use:database")) {
ctx.sql(s"use ${sessionConf.get("use:database")}")
}
sparkSqlOperationManager.sessionToContexts.put(sessionHandle, ctx)
sessionHandle
} catch {
case NonFatal(e) =>
try {
closeSession(sessionHandle)
} catch {
case NonFatal(inner) =>
logWarning("Error closing session", inner)
}
throw HiveThriftServerErrors.failedToOpenNewSessionError(e)
}
}
override def closeSession(sessionHandle: SessionHandle): Unit = {
HiveThriftServer2.eventManager.onSessionClosed(sessionHandle.getSessionId.toString)
val ctx = sparkSqlOperationManager.sessionToContexts.getOrDefault(sessionHandle, sqlContext)
ctx.sparkSession.sessionState.catalog.getTempViewNames().foreach(ctx.uncacheTable)
super.closeSession(sessionHandle)
sparkSqlOperationManager.sessionToContexts.remove(sessionHandle)
}
def setConfMap(conf: SQLContext, confMap: java.util.Map[String, String]): Unit = {
val iterator = confMap.entrySet().iterator()
while (iterator.hasNext) {
val kv = iterator.next()
conf.setConf(kv.getKey, kv.getValue)
}
}
}
| maropu/spark | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLSessionManager.scala | Scala | apache-2.0 | 4,047 |
/**
* Copyright (C) 2016 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr.persistence.relational.search.part
import org.orbeon.oxf.fr.persistence.relational.Provider
import org.orbeon.oxf.fr.persistence.relational.Statement._
import org.orbeon.oxf.fr.persistence.relational.search.adt.{Column, FilterType, SearchRequest}
object columnFilterPart {
def apply(request: SearchRequest): StatementPart =
if (! request.columns.exists(_.filterType != FilterType.None))
NilPart
else
StatementPart(
sql =
request.columns
// Only consider columns with a filter
.filter(_.filterType != FilterType.None)
// Add index, used to refer to the appropriate tf table
.zipWithIndex
.flatMap { case (column, i) =>
val dataControlWhere =
s"""AND tf$i.data_id = c.data_id
|AND tf$i.control = ?
|""".stripMargin
val valueWhere =
column.filterType match {
case FilterType.None => List.empty
case FilterType.Exact (_) => List("AND " + Provider.textEquals (request.provider, s"tf$i.val"))
case FilterType.Substring(_) => List("AND " + Provider.textContains(request.provider, s"tf$i.val"))
case FilterType.Token (tokens) =>
tokens.map { _ =>
"AND " + Provider.textContains(request.provider, s"concat(' ', tf$i.val, ' ')")
}
}
dataControlWhere :: valueWhere
}
.mkString(" "),
setters = {
val values =
request.columns.flatMap { case Column(path, matchType) =>
matchType match {
case FilterType.None => List.empty
case FilterType.Exact(filter) => path :: List(filter)
case FilterType.Substring(filter) => path :: List(s"%${filter.toLowerCase}%")
case FilterType.Token(tokens) => path :: tokens.map(token => s"% $token %")
}
}
values.map(value => (_.setString(_, value)): Setter)
}
)
}
| orbeon/orbeon-forms | form-runner/jvm/src/main/scala/org/orbeon/oxf/fr/persistence/relational/search/part/columnFilterPart.scala | Scala | lgpl-2.1 | 2,924 |
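// Illustrative, self-contained sketch (not part of the original project above): it only mimics
// the shape of the statement part built by columnFilterPart -- one tf<i> alias per filtered
// column, the data_id/control conditions, and setter values binding the control path followed
// by the filter value. The "lower(...) LIKE ?" expression is a stand-in for the
// provider-specific Provider.textContains, and the paths/values are invented.
object ColumnFilterSketch {
  sealed trait Filter
  case object NoFilter extends Filter
  final case class Substring(value: String) extends Filter

  def fragment(columns: List[(String, Filter)]): (String, List[String]) = {
    val filtered = columns.filter(_._2 != NoFilter).zipWithIndex
    val sql = filtered.map { case (_, i) =>
      s"AND tf$i.data_id = c.data_id AND tf$i.control = ? AND lower(tf$i.val) LIKE ?"
    }.mkString(" ")
    val setterValues = filtered.flatMap {
      case ((path, Substring(v)), _) => List(path, s"%${v.toLowerCase}%")
      case ((path, _), _)            => List(path)
    }
    (sql, setterValues)
  }

  def main(args: Array[String]): Unit =
    // e.g. ("AND tf0.data_id = ...", List("section-1/control-1", "%alice%"))
    println(fragment(List("section-1/control-1" -> Substring("Alice"))))
}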
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.Path
import org.apache.spark.sql.{CarbonEnv, CarbonSession, SparkSession}
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.carbondata.core.cache.dictionary.ManageDictionaryAndBTree
import org.apache.carbondata.core.datamap.DataMapStoreManager
import org.apache.carbondata.core.datastore.block.SegmentPropertiesAndSchemaHolder
import org.apache.carbondata.core.metadata.{schema, AbsoluteTableIdentifier, CarbonMetadata, CarbonTableIdentifier}
import org.apache.carbondata.core.metadata.converter.ThriftWrapperSchemaConverterImpl
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.util.CarbonUtil
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.format
import org.apache.carbondata.format.SchemaEvolutionEntry
import org.apache.carbondata.spark.util.CarbonSparkUtil
/**
* Metastore to store the carbon schema in Hive
*/
class CarbonHiveMetaStore extends CarbonFileMetastore {
override def isReadFromHiveMetaStore: Boolean = true
/**
* Creates a carbon relation from the given parameters.
*
* @param parameters
* @param absIdentifier
* @param sparkSession
*/
override def createCarbonRelation(parameters: Map[String, String],
absIdentifier: AbsoluteTableIdentifier,
sparkSession: SparkSession): CarbonRelation = {
val info = CarbonUtil.convertGsonToTableInfo(parameters.asJava)
val carbonRelation = if (info != null) {
val table = CarbonTable.buildFromTableInfo(info)
CarbonRelation(info.getDatabaseName, info.getFactTable.getTableName,
CarbonSparkUtil.createSparkMeta(table), table)
} else {
super.createCarbonRelation(parameters, absIdentifier, sparkSession)
}
carbonRelation.refresh()
carbonRelation
}
override def isTablePathExists(tableIdentifier: TableIdentifier)
(sparkSession: SparkSession): Boolean = {
tableExists(tableIdentifier)(sparkSession)
}
override def dropTable(absoluteTableIdentifier: AbsoluteTableIdentifier)
(sparkSession: SparkSession): Unit = {
val dbName = absoluteTableIdentifier.getCarbonTableIdentifier.getDatabaseName
val tableName = absoluteTableIdentifier.getCarbonTableIdentifier.getTableName
val carbonTable = CarbonMetadata.getInstance.getCarbonTable(dbName, tableName)
if (null != carbonTable) {
// clear driver B-tree and dictionary cache
ManageDictionaryAndBTree.clearBTreeAndDictionaryLRUCache(carbonTable)
}
checkSchemasModifiedTimeAndReloadTable(TableIdentifier(tableName, Some(dbName)))
removeTableFromMetadata(dbName, tableName)
CarbonHiveMetadataUtil.invalidateAndDropTable(dbName, tableName, sparkSession)
// discard cached table info in cachedDataSourceTables
val tableIdentifier = TableIdentifier(tableName, Option(dbName))
sparkSession.sessionState.catalog.refreshTable(tableIdentifier)
DataMapStoreManager.getInstance().clearDataMaps(absoluteTableIdentifier)
SegmentPropertiesAndSchemaHolder.getInstance().invalidate(absoluteTableIdentifier)
}
override def checkSchemasModifiedTimeAndReloadTable(tableIdentifier: TableIdentifier): Boolean = {
// do nothing
false
}
override def listAllTables(sparkSession: SparkSession): Seq[CarbonTable] = {
// Todo
Seq()
}
override def getThriftTableInfo(carbonTable: CarbonTable): format.TableInfo = {
val schemaConverter = new ThriftWrapperSchemaConverterImpl
schemaConverter.fromWrapperToExternalTableInfo(carbonTable.getTableInfo,
carbonTable.getDatabaseName,
carbonTable.getTableName)
}
/**
* This method will overwrite the existing schema and update it with the given details
*
* @param newTableIdentifier
* @param thriftTableInfo
* @param schemaEvolutionEntry
* @param sparkSession
*/
override def updateTableSchemaForAlter(newTableIdentifier: CarbonTableIdentifier,
oldTableIdentifier: CarbonTableIdentifier,
thriftTableInfo: format.TableInfo,
schemaEvolutionEntry: SchemaEvolutionEntry,
tablePath: String)
(sparkSession: SparkSession): String = {
val schemaConverter = new ThriftWrapperSchemaConverterImpl
if (schemaEvolutionEntry != null) {
thriftTableInfo.fact_table.schema_evolution.schema_evolution_history.add(schemaEvolutionEntry)
}
updateHiveMetaStoreForAlter(newTableIdentifier,
oldTableIdentifier,
thriftTableInfo,
tablePath,
sparkSession,
schemaConverter)
}
/**
* This method will overwrite the existing schema and update it with the given details
*
* @param newTableIdentifier
* @param thriftTableInfo
* @param carbonTablePath
* @param sparkSession
*/
override def updateTableSchemaForDataMap(newTableIdentifier: CarbonTableIdentifier,
oldTableIdentifier: CarbonTableIdentifier,
thriftTableInfo: org.apache.carbondata.format.TableInfo,
carbonTablePath: String)(sparkSession: SparkSession): String = {
val schemaConverter = new ThriftWrapperSchemaConverterImpl
updateHiveMetaStoreForAlter(
newTableIdentifier,
oldTableIdentifier,
thriftTableInfo,
carbonTablePath,
sparkSession,
schemaConverter)
}
private def updateHiveMetaStoreForAlter(newTableIdentifier: CarbonTableIdentifier,
oldTableIdentifier: CarbonTableIdentifier,
thriftTableInfo: format.TableInfo,
tablePath: String,
sparkSession: SparkSession,
schemaConverter: ThriftWrapperSchemaConverterImpl) = {
val wrapperTableInfo = schemaConverter.fromExternalToWrapperTableInfo(
thriftTableInfo,
newTableIdentifier.getDatabaseName,
newTableIdentifier.getTableName,
tablePath)
val dbName = newTableIdentifier.getDatabaseName
val tableName = newTableIdentifier.getTableName
val schemaParts = CarbonUtil.convertToMultiGsonStrings(wrapperTableInfo, "=", "'", "")
val hiveClient = sparkSession.sessionState.catalog.asInstanceOf[CarbonSessionCatalog]
.getClient()
hiveClient.runSqlHive(s"ALTER TABLE $dbName.$tableName SET SERDEPROPERTIES($schemaParts)")
sparkSession.catalog.refreshTable(TableIdentifier(tableName, Some(dbName)).quotedString)
removeTableFromMetadata(dbName, tableName)
CarbonMetadata.getInstance().loadTableMetadata(wrapperTableInfo)
tablePath
}
/**
* Generates schema string from TableInfo
*/
override def generateTableSchemaString(
tableInfo: schema.table.TableInfo,
absoluteTableIdentifier: AbsoluteTableIdentifier): String = {
val schemaEvolutionEntry = new schema.SchemaEvolutionEntry
schemaEvolutionEntry.setTimeStamp(tableInfo.getLastUpdatedTime)
tableInfo.getFactTable.getSchemaEvolution.getSchemaEvolutionEntryList.add(schemaEvolutionEntry)
CarbonUtil.convertToMultiGsonStrings(tableInfo, " ", "", ",")
}
/**
* This method is used to remove the evolution entry in case of failure.
*
* @param carbonTableIdentifier
* @param thriftTableInfo
* @param sparkSession
*/
override def revertTableSchemaInAlterFailure(carbonTableIdentifier: CarbonTableIdentifier,
thriftTableInfo: format.TableInfo,
identifier: AbsoluteTableIdentifier)
(sparkSession: SparkSession): String = {
val schemaConverter = new ThriftWrapperSchemaConverterImpl
val evolutionEntries = thriftTableInfo.fact_table.schema_evolution.schema_evolution_history
evolutionEntries.remove(evolutionEntries.size() - 1)
updateHiveMetaStoreForAlter(carbonTableIdentifier,
carbonTableIdentifier,
thriftTableInfo,
identifier.getTablePath,
sparkSession,
schemaConverter)
}
override def revertTableSchemaForPreAggCreationFailure(absoluteTableIdentifier:
AbsoluteTableIdentifier,
thriftTableInfo: org.apache.carbondata.format.TableInfo)
(sparkSession: SparkSession): String = {
val schemaConverter = new ThriftWrapperSchemaConverterImpl
val childSchemas = thriftTableInfo.dataMapSchemas
childSchemas.remove(childSchemas.size() - 1)
val carbonTableIdentifier = absoluteTableIdentifier.getCarbonTableIdentifier
updateHiveMetaStoreForAlter(carbonTableIdentifier,
carbonTableIdentifier,
thriftTableInfo,
absoluteTableIdentifier.getTablePath,
sparkSession,
schemaConverter)
}
}
| sgururajshetty/carbondata | integration/spark2/src/main/scala/org/apache/spark/sql/hive/CarbonHiveMetaStore.scala | Scala | apache-2.0 | 9,226 |
package pl.touk.nussknacker.engine.flink.util.transformer
import com.typesafe.config.Config
import com.typesafe.config.ConfigValueFactory.fromAnyRef
import pl.touk.nussknacker.engine.api.component.{ComponentDefinition, ComponentProvider, NussknackerVersion}
import pl.touk.nussknacker.engine.api.process.ProcessObjectDependencies
import pl.touk.nussknacker.engine.avro.schemaregistry.SchemaRegistryProvider
import pl.touk.nussknacker.engine.avro.schemaregistry.confluent.ConfluentSchemaRegistryProvider
import pl.touk.nussknacker.engine.avro.schemaregistry.confluent.client.CachedConfluentSchemaRegistryClientFactory
import pl.touk.nussknacker.engine.avro.sink.flink.FlinkKafkaAvroSinkImplFactory
import pl.touk.nussknacker.engine.avro.sink.{KafkaAvroSinkFactory, KafkaAvroSinkFactoryWithEditor}
import pl.touk.nussknacker.engine.avro.source.KafkaAvroSourceFactory
import pl.touk.nussknacker.engine.kafka.KafkaConfig
import pl.touk.nussknacker.engine.kafka.generic.sinks.GenericKafkaJsonSinkFactory
import pl.touk.nussknacker.engine.kafka.generic.sources.{GenericJsonSourceFactory, GenericTypedJsonSourceFactory}
import pl.touk.nussknacker.engine.kafka.source.flink.FlinkKafkaSourceImplFactory
import pl.touk.nussknacker.engine.util.config.DocsConfig
class FlinkKafkaComponentProvider extends ComponentProvider {
protected val avroSerializingSchemaRegistryProvider: SchemaRegistryProvider = createAvroSchemaRegistryProvider
protected val jsonSerializingSchemaRegistryProvider: SchemaRegistryProvider = createJsonSchemaRegistryProvider
protected def createAvroSchemaRegistryProvider: SchemaRegistryProvider = ConfluentSchemaRegistryProvider()
protected def createJsonSchemaRegistryProvider: SchemaRegistryProvider = ConfluentSchemaRegistryProvider.jsonPayload(CachedConfluentSchemaRegistryClientFactory())
override def providerName: String = "kafka"
override def resolveConfigForExecution(config: Config): Config = config
override def create(config: Config, dependencies: ProcessObjectDependencies): List[ComponentDefinition] = {
val overriddenDependencies = TemporaryKafkaConfigMapping.prepareDependencies(config, dependencies)
val docsConfig: DocsConfig = new DocsConfig(config)
import docsConfig._
val avro = "DataSourcesAndSinks#schema-registry--avro-serialization"
val schemaRegistryTypedJson = "DataSourcesAndSinks#schema-registry--json-serialization"
val noTypeInfo = "DataSourcesAndSinks#no-type-information--json-serialization"
List(
ComponentDefinition("kafka-json", new GenericKafkaJsonSinkFactory(overriddenDependencies)).withRelativeDocs(noTypeInfo),
ComponentDefinition("kafka-json", new GenericJsonSourceFactory(overriddenDependencies)).withRelativeDocs(noTypeInfo),
ComponentDefinition("kafka-typed-json", new GenericTypedJsonSourceFactory(overriddenDependencies)).withRelativeDocs("DataSourcesAndSinks#manually-typed--json-serialization"),
ComponentDefinition("kafka-avro", new KafkaAvroSourceFactory(avroSerializingSchemaRegistryProvider, overriddenDependencies, new FlinkKafkaSourceImplFactory(None))).withRelativeDocs(avro),
ComponentDefinition("kafka-avro", new KafkaAvroSinkFactoryWithEditor(avroSerializingSchemaRegistryProvider, overriddenDependencies, FlinkKafkaAvroSinkImplFactory)).withRelativeDocs(avro),
ComponentDefinition("kafka-registry-typed-json", new KafkaAvroSourceFactory(jsonSerializingSchemaRegistryProvider, overriddenDependencies, new FlinkKafkaSourceImplFactory(None))).withRelativeDocs(schemaRegistryTypedJson),
ComponentDefinition("kafka-registry-typed-json", new KafkaAvroSinkFactoryWithEditor(jsonSerializingSchemaRegistryProvider, overriddenDependencies, FlinkKafkaAvroSinkImplFactory)).withRelativeDocs(schemaRegistryTypedJson),
ComponentDefinition("kafka-registry-typed-json-raw", new KafkaAvroSinkFactory(jsonSerializingSchemaRegistryProvider, overriddenDependencies, FlinkKafkaAvroSinkImplFactory)).withRelativeDocs(schemaRegistryTypedJson),
ComponentDefinition("kafka-avro-raw", new KafkaAvroSinkFactory(avroSerializingSchemaRegistryProvider, overriddenDependencies, FlinkKafkaAvroSinkImplFactory)).withRelativeDocs(avro)
)
}
override def isCompatible(version: NussknackerVersion): Boolean = true
override def isAutoLoaded: Boolean = false
}
//FIXME: Kafka components should not depend directly on ProcessObjectDependencies, only on
//appropriate config. This class is a temporary solution, where we pass modified dependencies.
private[transformer] object TemporaryKafkaConfigMapping {
def prepareDependencies(config: Config, dependencies: ProcessObjectDependencies): ProcessObjectDependencies = {
val kafkaConfig = config.getConfig("config")
val kafkaConfigMergedWithGlobalConfig = dependencies.config.withValue(KafkaConfig.defaultGlobalKafkaConfigPath, fromAnyRef(kafkaConfig.root()))
dependencies.copy(config = kafkaConfigMergedWithGlobalConfig)
}
}
| TouK/nussknacker | engine/flink/components/kafka/src/main/scala/pl/touk/nussknacker/engine/flink/util/transformer/FlinkKafkaComponentProvider.scala | Scala | apache-2.0 | 4,937 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical.statsEstimation
import org.apache.spark.sql.catalyst.CatalystConf
import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, AttributeMap}
import org.apache.spark.sql.catalyst.plans.logical.{Project, Statistics}
object ProjectEstimation {
import EstimationUtils._
def estimate(conf: CatalystConf, project: Project): Option[Statistics] = {
if (rowCountsExist(conf, project.child)) {
val childStats = project.child.stats(conf)
val inputAttrStats = childStats.attributeStats
// Match alias with its child's column stat
val aliasStats = project.expressions.collect {
case alias @ Alias(attr: Attribute, _) if inputAttrStats.contains(attr) =>
alias.toAttribute -> inputAttrStats(attr)
}
val outputAttrStats =
getOutputMap(AttributeMap(inputAttrStats.toSeq ++ aliasStats), project.output)
Some(childStats.copy(
sizeInBytes = getOutputSize(project.output, childStats.rowCount.get, outputAttrStats),
attributeStats = outputAttrStats))
} else {
None
}
}
}
| jianran/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/ProjectEstimation.scala | Scala | apache-2.0 | 1,921 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka010
import java.{ util => ju }
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.mutable
import org.apache.kafka.clients.consumer._
import org.apache.kafka.common.{ PartitionInfo, TopicPartition }
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{StreamingContext, Time}
import org.apache.spark.streaming.dstream._
import org.apache.spark.streaming.scheduler.{RateController, StreamInputInfo}
import org.apache.spark.streaming.scheduler.rate.RateEstimator
/**
* A DStream where
* each given Kafka topic/partition corresponds to an RDD partition.
* The spark configuration spark.streaming.kafka.maxRatePerPartition gives the maximum number
* of messages
* per second that each '''partition''' will accept.
* @param locationStrategy In most cases, pass in [[LocationStrategies.PreferConsistent]],
* see [[LocationStrategy]] for more details.
* @param consumerStrategy In most cases, pass in [[ConsumerStrategies.Subscribe]],
* see [[ConsumerStrategy]] for more details
* @param ppc configuration of settings such as max rate on a per-partition basis.
* see [[PerPartitionConfig]] for more details.
* @tparam K type of Kafka message key
* @tparam V type of Kafka message value
*/
private[spark] class DirectKafkaInputDStream[K, V](
_ssc: StreamingContext,
locationStrategy: LocationStrategy,
consumerStrategy: ConsumerStrategy[K, V],
ppc: PerPartitionConfig
) extends InputDStream[ConsumerRecord[K, V]](_ssc) with Logging with CanCommitOffsets {
val executorKafkaParams = {
val ekp = new ju.HashMap[String, Object](consumerStrategy.executorKafkaParams)
KafkaUtils.fixKafkaParams(ekp)
ekp
}
protected var currentOffsets = Map[TopicPartition, Long]()
@transient private var kc: Consumer[K, V] = null
def consumer(): Consumer[K, V] = this.synchronized {
if (null == kc) {
kc = consumerStrategy.onStart(currentOffsets.mapValues(l => new java.lang.Long(l)).asJava)
}
kc
}
override def persist(newLevel: StorageLevel): DStream[ConsumerRecord[K, V]] = {
logError("Kafka ConsumerRecord is not serializable. " +
"Use .map to extract fields before calling .persist or .window")
super.persist(newLevel)
}
protected def getBrokers = {
val c = consumer
val result = new ju.HashMap[TopicPartition, String]()
val hosts = new ju.HashMap[TopicPartition, String]()
val assignments = c.assignment().iterator()
while (assignments.hasNext()) {
val tp: TopicPartition = assignments.next()
if (null == hosts.get(tp)) {
val infos = c.partitionsFor(tp.topic).iterator()
while (infos.hasNext()) {
val i = infos.next()
hosts.put(new TopicPartition(i.topic(), i.partition()), i.leader.host())
}
}
result.put(tp, hosts.get(tp))
}
result
}
protected def getPreferredHosts: ju.Map[TopicPartition, String] = {
locationStrategy match {
case PreferBrokers => getBrokers
case PreferConsistent => ju.Collections.emptyMap[TopicPartition, String]()
case PreferFixed(hostMap) => hostMap
}
}
// Keep this consistent with how other streams are named (e.g. "Flume polling stream [2]")
private[streaming] override def name: String = s"Kafka 0.10 direct stream [$id]"
protected[streaming] override val checkpointData =
new DirectKafkaInputDStreamCheckpointData
/**
* Asynchronously maintains & sends new rate limits to the receiver through the receiver tracker.
*/
override protected[streaming] val rateController: Option[RateController] = {
if (RateController.isBackPressureEnabled(ssc.conf)) {
Some(new DirectKafkaRateController(id,
RateEstimator.create(ssc.conf, context.graph.batchDuration)))
} else {
None
}
}
protected[streaming] def maxMessagesPerPartition(
offsets: Map[TopicPartition, Long]): Option[Map[TopicPartition, Long]] = {
val estimatedRateLimit = rateController.map(_.getLatestRate())
// calculate a per-partition rate limit based on current lag
val effectiveRateLimitPerPartition = estimatedRateLimit.filter(_ > 0) match {
case Some(rate) =>
val lagPerPartition = offsets.map { case (tp, offset) =>
tp -> Math.max(offset - currentOffsets(tp), 0)
}
val totalLag = lagPerPartition.values.sum
lagPerPartition.map { case (tp, lag) =>
val maxRateLimitPerPartition = ppc.maxRatePerPartition(tp)
val backpressureRate = Math.round(lag / totalLag.toFloat * rate)
tp -> (if (maxRateLimitPerPartition > 0) {
Math.min(backpressureRate, maxRateLimitPerPartition)} else backpressureRate)
}
case None => offsets.map { case (tp, offset) => tp -> ppc.maxRatePerPartition(tp) }
}
if (effectiveRateLimitPerPartition.values.sum > 0) {
val secsPerBatch = context.graph.batchDuration.milliseconds.toDouble / 1000
Some(effectiveRateLimitPerPartition.map {
case (tp, limit) => tp -> (secsPerBatch * limit).toLong
})
} else {
None
}
}
/**
* The concern here is that poll might consume messages despite being paused,
* which would throw off consumer position. Fix position if this happens.
*/
private def paranoidPoll(c: Consumer[K, V]): Unit = {
val msgs = c.poll(0)
if (!msgs.isEmpty) {
// position should be minimum offset per topicpartition
msgs.asScala.foldLeft(Map[TopicPartition, Long]()) { (acc, m) =>
val tp = new TopicPartition(m.topic, m.partition)
val off = acc.get(tp).map(o => Math.min(o, m.offset)).getOrElse(m.offset)
acc + (tp -> off)
}.foreach { case (tp, off) =>
logInfo(s"poll(0) returned messages, seeking $tp to $off to compensate")
c.seek(tp, off)
}
}
}
/**
* Returns the latest (highest) available offsets, taking new partitions into account.
*/
protected def latestOffsets(): Map[TopicPartition, Long] = {
val c = consumer
paranoidPoll(c)
val parts = c.assignment().asScala
// make sure new partitions are reflected in currentOffsets
val newPartitions = parts.diff(currentOffsets.keySet)
// position for new partitions determined by auto.offset.reset if no commit
currentOffsets = currentOffsets ++ newPartitions.map(tp => tp -> c.position(tp)).toMap
// don't want to consume messages, so pause
c.pause(newPartitions.asJava)
// find latest available offsets
c.seekToEnd(currentOffsets.keySet.asJava)
parts.map(tp => tp -> c.position(tp)).toMap
}
// limits the maximum number of messages per partition
protected def clamp(
offsets: Map[TopicPartition, Long]): Map[TopicPartition, Long] = {
maxMessagesPerPartition(offsets).map { mmp =>
mmp.map { case (tp, messages) =>
val uo = offsets(tp)
tp -> Math.min(currentOffsets(tp) + messages, uo)
}
}.getOrElse(offsets)
}
override def compute(validTime: Time): Option[KafkaRDD[K, V]] = {
val untilOffsets = clamp(latestOffsets())
val offsetRanges = untilOffsets.map { case (tp, uo) =>
val fo = currentOffsets(tp)
OffsetRange(tp.topic, tp.partition, fo, uo)
}
val rdd = new KafkaRDD[K, V](
context.sparkContext, executorKafkaParams, offsetRanges.toArray, getPreferredHosts, true)
// Report the record number and metadata of this batch interval to InputInfoTracker.
val description = offsetRanges.filter { offsetRange =>
// Don't display empty ranges.
offsetRange.fromOffset != offsetRange.untilOffset
}.map { offsetRange =>
s"topic: ${offsetRange.topic}\tpartition: ${offsetRange.partition}\t" +
s"offsets: ${offsetRange.fromOffset} to ${offsetRange.untilOffset}"
}.mkString("\n")
// Copy offsetRanges to immutable.List to prevent it from being modified by the user
val metadata = Map(
"offsets" -> offsetRanges.toList,
StreamInputInfo.METADATA_KEY_DESCRIPTION -> description)
val inputInfo = StreamInputInfo(id, rdd.count, metadata)
ssc.scheduler.inputInfoTracker.reportInfo(validTime, inputInfo)
currentOffsets = untilOffsets
commitAll()
Some(rdd)
}
override def start(): Unit = {
val c = consumer
paranoidPoll(c)
if (currentOffsets.isEmpty) {
currentOffsets = c.assignment().asScala.map { tp =>
tp -> c.position(tp)
}.toMap
}
// don't actually want to consume any messages, so pause all partitions
c.pause(currentOffsets.keySet.asJava)
}
override def stop(): Unit = this.synchronized {
if (kc != null) {
kc.close()
}
}
protected val commitQueue = new ConcurrentLinkedQueue[OffsetRange]
protected val commitCallback = new AtomicReference[OffsetCommitCallback]
/**
* Queue up offset ranges for commit to Kafka at a future time. Threadsafe.
* @param offsetRanges The maximum untilOffset for a given partition will be used at commit.
*/
def commitAsync(offsetRanges: Array[OffsetRange]): Unit = {
commitAsync(offsetRanges, null)
}
/**
* Queue up offset ranges for commit to Kafka at a future time. Threadsafe.
* @param offsetRanges The maximum untilOffset for a given partition will be used at commit.
* @param callback Only the most recently provided callback will be used at commit.
*/
def commitAsync(offsetRanges: Array[OffsetRange], callback: OffsetCommitCallback): Unit = {
commitCallback.set(callback)
commitQueue.addAll(ju.Arrays.asList(offsetRanges: _*))
}
protected def commitAll(): Unit = {
val m = new ju.HashMap[TopicPartition, OffsetAndMetadata]()
var osr = commitQueue.poll()
while (null != osr) {
val tp = osr.topicPartition
val x = m.get(tp)
val offset = if (null == x) { osr.untilOffset } else { Math.max(x.offset, osr.untilOffset) }
m.put(tp, new OffsetAndMetadata(offset))
osr = commitQueue.poll()
}
if (!m.isEmpty) {
consumer.commitAsync(m, commitCallback.get)
}
}
private[streaming]
class DirectKafkaInputDStreamCheckpointData extends DStreamCheckpointData(this) {
def batchForTime: mutable.HashMap[Time, Array[(String, Int, Long, Long)]] = {
data.asInstanceOf[mutable.HashMap[Time, Array[OffsetRange.OffsetRangeTuple]]]
}
override def update(time: Time): Unit = {
batchForTime.clear()
generatedRDDs.foreach { kv =>
val a = kv._2.asInstanceOf[KafkaRDD[K, V]].offsetRanges.map(_.toTuple).toArray
batchForTime += kv._1 -> a
}
}
override def cleanup(time: Time): Unit = { }
override def restore(): Unit = {
batchForTime.toSeq.sortBy(_._1)(Time.ordering).foreach { case (t, b) =>
logInfo(s"Restoring KafkaRDD for time $t ${b.mkString("[", ", ", "]")}")
generatedRDDs += t -> new KafkaRDD[K, V](
context.sparkContext,
executorKafkaParams,
b.map(OffsetRange(_)),
getPreferredHosts,
// during restore, it's possible the same partition will be consumed from multiple
// threads, so don't use the cache
false
)
}
}
}
/**
* A RateController to retrieve the rate from RateEstimator.
*/
private[streaming] class DirectKafkaRateController(id: Int, estimator: RateEstimator)
extends RateController(id, estimator) {
override def publish(rate: Long): Unit = ()
}
}
| bOOm-X/spark | external/kafka-0-10/src/main/scala/org/apache/spark/streaming/kafka010/DirectKafkaInputDStream.scala | Scala | apache-2.0 | 12,482 |
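// Illustrative sketch (not part of the original file above): the backpressure logic in
// maxMessagesPerPartition splits the estimated total rate across partitions in proportion to
// their current lag, caps each share at the per-partition max rate when that is positive, and
// converts the per-second limit into a per-batch message count. This standalone snippet mirrors
// that arithmetic with a single static cap for simplicity; all numbers are made up.
object BackpressureMathExample {
  def perBatchLimits(
      lags: Map[String, Long],     // partition -> (latest offset - current offset)
      estimatedRate: Long,         // records/sec suggested by the rate estimator
      maxRatePerPartition: Long,   // 0 or negative means "no static cap"
      batchDurationMs: Long): Map[String, Long] = {
    val totalLag = lags.values.sum
    val secsPerBatch = batchDurationMs.toDouble / 1000
    lags.map { case (tp, lag) =>
      val backpressureRate = Math.round(lag / totalLag.toFloat * estimatedRate)
      val limit =
        if (maxRatePerPartition > 0) Math.min(backpressureRate, maxRatePerPartition)
        else backpressureRate
      tp -> (secsPerBatch * limit).toLong
    }
  }

  def main(args: Array[String]): Unit = {
    // A partition with twice the lag gets twice the share of the estimated rate.
    // Expected: t-0 capped at 150 rec/s -> 750 per batch, t-1 gets 100 rec/s -> 500 per batch.
    println(perBatchLimits(Map("t-0" -> 2000L, "t-1" -> 1000L),
      estimatedRate = 300, maxRatePerPartition = 150, batchDurationMs = 5000))
  }
}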
/*
* Copyright 2016 Carlo Micieli
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.hascalator
package dst
import Prelude._
class SplayTreeSpec extends AbstractTestSpec with SampleSplayTrees {
describe("A SplayTree") {
describe("empty") {
it("should create an empty Splay tree") {
SplayTree.empty[Int].isEmpty shouldEqual true
}
}
describe("fromList") {
it("should create an empty Splay tree from the empty list") {
val tree = SplayTree.fromList(List.empty[Int])
tree.isEmpty shouldEqual true
}
it("should create a Splay tree from a list") {
val tree = SplayTree.fromList(List(42, 15, 67, 109, 4, 55))
tree.size shouldEqual 6
tree.isEmpty shouldEqual false
tree.top shouldEqual Maybe.just(4)
}
}
describe("pop") {
it("should remove min element from a Splay tree") {
val (m, tree) = splayTree.pop
m shouldEqual 4
tree.top shouldEqual Maybe.just(15)
}
}
describe("merge") {
it("should merge two empty Splay trees") {
val tree = emptySplayTree merge emptySplayTree
tree.isEmpty shouldEqual true
}
it("should merge two Splay trees") {
val tree = splayTree merge emptySplayTree
tree.top shouldEqual splayTree.top
}
}
describe("insert") {
it("should create a singleton Splay tree adding a key to the empty tree") {
val tree = emptySplayTree insert 42
tree.isEmpty shouldEqual false
}
it("should increase the size") {
val tree = emptySplayTree insert 42
tree.size shouldEqual 1
}
}
describe("isEmpty") {
it("should return true for empty Splay trees") {
emptySplayTree.isEmpty shouldEqual true
}
}
}
}
trait SampleSplayTrees {
val emptySplayTree: SplayTree[Int] = SplayTree.empty[Int]
def singleton(x: Int): SplayTree[Int] = SplayTree.fromList(List(x))
val splayTree: SplayTree[Int] = SplayTree.fromList(List(42, 15, 67, 109, 4, 55))
} | CarloMicieli/hascalator | core/src/test/scala/io/hascalator/dst/SplayTreeSpec.scala | Scala | apache-2.0 | 2,583 |
package text.kanji
/**
* @author K.Sakamoto
* Created on 2016/07/26
*/
object JISLevel2KanjiCharacter extends KanjiCharacter {
override val kanji: Seq[String] = readKanjiCSV("jis_level_2")
}
| ktr-skmt/FelisCatusZero | src/main/scala/text/kanji/JISLevel2KanjiCharacter.scala | Scala | apache-2.0 | 208 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.scala
import org.apache.flink.annotation.Public
import org.apache.flink.streaming.api.datastream.{ SplitStream => SplitJavaStream }
/**
* The SplitStream represents an operator that has been split using an
* [[org.apache.flink.streaming.api.collector.selector.OutputSelector]].
* Named outputs can be selected using the [[SplitStream#select()]] function.
* To apply a transformation on the whole output simply call
* the appropriate method on this stream.
*/
@Public
class SplitStream[T](javaStream: SplitJavaStream[T]) extends DataStream[T](javaStream){
/**
* Sets the output names for which the next operator will receive values.
*/
def select(outputNames: String*): DataStream[T] =
asScalaStream(javaStream.select(outputNames: _*))
}
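// Hedged usage sketch, not part of the Flink sources: shows how a SplitStream is typically
// obtained and consumed from the Scala API. It assumes the DataStream.split overload that
// takes a `T => TraversableOnce[String]` selector function; the names below are illustrative.
object SplitStreamUsageSketch {
  def selectEvens(numbers: DataStream[Int]): DataStream[Int] = {
    val split: SplitStream[Int] = numbers.split((n: Int) => if (n % 2 == 0) Seq("even") else Seq("odd"))
    // Keep only elements routed to the "even" named output; select("even", "odd") would
    // re-merge both outputs into a single stream.
    split.select("even")
  }
}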
| yew1eb/flink | flink-streaming-scala/src/main/scala/org/apache/flink/streaming/api/scala/SplitStream.scala | Scala | apache-2.0 | 1,608 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.concurrent.Semaphore
import scala.concurrent.TimeoutException
import scala.concurrent.duration._
import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkException, SparkFunSuite,
TestUtils}
import org.apache.spark.internal.config
import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
import org.apache.spark.util.{RpcUtils, SerializableBuffer, ThreadUtils}
class WorkerDecommissionSuite extends SparkFunSuite with LocalSparkContext {
override def beforeEach(): Unit = {
val conf = new SparkConf().setAppName("test").setMaster("local")
.set(config.Worker.WORKER_DECOMMISSION_ENABLED, true)
sc = new SparkContext("local-cluster[2, 1, 1024]", "test", conf)
}
test("verify task with no decommissioning works as expected") {
val input = sc.parallelize(1 to 10)
input.count()
val sleepyRdd = input.mapPartitions{ x =>
Thread.sleep(100)
x
}
assert(sleepyRdd.count() === 10)
}
test("verify a task with all workers decommissioned succeeds") {
val input = sc.parallelize(1 to 10)
// Listen for the job
val sem = new Semaphore(0)
sc.addSparkListener(new SparkListener {
override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = {
sem.release()
}
})
TestUtils.waitUntilExecutorsUp(sc = sc,
numExecutors = 2,
timeout = 10000) // 10s
val sleepyRdd = input.mapPartitions{ x =>
Thread.sleep(5000) // 5s
x
}
// Start the task.
val asyncCount = sleepyRdd.countAsync()
// Wait for the job to have started
sem.acquire(1)
// Give it time to make it to the worker otherwise we'll block
Thread.sleep(2000) // 2s
// Decommission all the executors, this should not halt the current task.
// decom.sh message passing is tested manually.
val sched = sc.schedulerBackend.asInstanceOf[StandaloneSchedulerBackend]
val execs = sched.getExecutorIds()
execs.foreach(execId => sched.decommissionExecutor(execId))
val asyncCountResult = ThreadUtils.awaitResult(asyncCount, 20.seconds)
assert(asyncCountResult === 10)
// Try and launch task after decommissioning, this should fail
val postDecommissioned = input.map(x => x)
val postDecomAsyncCount = postDecommissioned.countAsync()
    intercept[java.util.concurrent.TimeoutException] {
      ThreadUtils.awaitResult(postDecomAsyncCount, 20.seconds)
    }
assert(postDecomAsyncCount.isCompleted === false,
"After exec decommission new task could not launch")
}
}
| spark-test/spark | core/src/test/scala/org/apache/spark/scheduler/WorkerDecommissionSuite.scala | Scala | apache-2.0 | 3,417 |
package org.jetbrains.sbt
import java.io.File
import com.intellij.openapi.module.Module
import com.intellij.openapi.roots.ModuleRootModificationUtil
import com.intellij.openapi.vfs.VfsUtil
import org.jetbrains.plugins.scala.base.libraryLoaders.ScalaLibraryLoader.{ScalaCompilerLoader, ScalaLibraryLoaderAdapter, ScalaReflectLoader, ScalaRuntimeLoader}
import org.jetbrains.plugins.scala.base.libraryLoaders.{IvyLibraryLoader, IvyLibraryLoaderAdapter}
import org.jetbrains.plugins.scala.debugger._
import org.jetbrains.sbt.MockSbt._
import scala.collection.JavaConverters._
/**
* @author Nikolay Obedin
* @since 7/27/15.
*/
trait MockSbtBase extends ScalaSdkOwner {
implicit val sbtVersion: String
protected def scalaLoaders = Seq(ScalaCompilerLoader(), ScalaRuntimeLoader(), ScalaReflectLoader())
override protected def librariesLoaders: Seq[IvyLibraryLoader]
override protected def setUpLibraries(): Unit = {
val classPath = librariesLoaders.map(urlForLibraryRoot)
ModuleRootModificationUtil.addModuleLibrary(module, "sbt", classPath.asJava, List.empty[String].asJava)
}
}
trait MockSbt_0_12 extends MockSbtBase {
override implicit val version: ScalaVersion = Scala_2_9
private val sbt_0_12_modules =
Seq("sbt","collections","interface","io","ivy","logging","main","process")
override protected def librariesLoaders: Seq[ScalaLibraryLoaderAdapter] =
Seq(ScalaCompilerLoader(), ScalaRuntimeLoader()) ++ sbt_0_12_modules.map(sbtLoader)
}
trait MockSbt_0_13 extends MockSbtBase {
override implicit val version: ScalaVersion = Scala_2_10
private val sbt_0_13_modules =
Seq("sbt", "collections", "interface", "io", "ivy", "logging", "main", "main-settings", "process")
override protected def librariesLoaders: Seq[ScalaLibraryLoaderAdapter] =
scalaLoaders ++ sbt_0_13_modules.map(sbtLoader)
}
trait MockSbt_1_0 extends MockSbtBase {
override implicit val version: ScalaVersion = Scala_2_12
// https://github.com/sbt/sbt/blob/1.x/project/Dependencies.scala
// update them when updating versions.sbtVersion
// TODO find a way to automatically get the dependencies via transitive deps from org.scala-sbt:sbt artifact
private val ioVersion = "1.0.2"
private val utilVersion = "1.0.2"
private val lmVersion = "1.0.3"
private val zincVersion = "1.0.3"
private val sbt_1_0_modules = Seq("sbt", "test-agent")
private val util_cross = Seq("util-cache","util-control","util-logging","util-position","util-relation","util-tracking")
private val lm_cross = Seq("librarymanagement-core","librarymanagement-ivy")
private val sbt_1_0_modules_cross = Seq(
"main","logic","collections","actions","completion",
"run","task-system","tasks","testing","main-settings",
"command","protocol","core-macros")
private def compilerInterfaceLoader(implicit module: Module) = new SbtBaseLoader() {
override val name: String = "compiler-interface"
override val version: String = zincVersion
}
private def utilInterfaceLoader = new SbtBaseLoader() {
override val name: String = "util-interface"
override val version: String = utilVersion
}
private def ioLoader = new SbtBaseLoader_Cross() {
override val name: String = "io"
override val version: String = ioVersion
}
override protected def librariesLoaders: Seq[IvyLibraryLoader] =
scalaLoaders ++
sbt_1_0_modules.map(sbtLoader) ++
sbt_1_0_modules_cross.map(sbtLoader_cross) ++
util_cross.map(sbtLoader_cross(_)(utilVersion)) ++
lm_cross.map(sbtLoader_cross(_)(lmVersion)) ++
Seq(compilerInterfaceLoader, ioLoader, utilInterfaceLoader)
}
private[sbt] object MockSbt {
def urlForLibraryRoot(loader: IvyLibraryLoader)
(implicit version: ScalaVersion): String = {
val file = new File(loader.path)
assert(file.exists(), s"library root for ${loader.name} does not exist at $file")
VfsUtil.getUrlForLibraryRoot(file)
}
abstract class SbtBaseLoader(implicit val version: String) extends ScalaLibraryLoaderAdapter {
override val vendor: String = "org.scala-sbt"
override def fileName(implicit version: ScalaVersion): String =
s"$name-${this.version}"
}
/** Loads library with cross-versioning. */
abstract class SbtBaseLoader_Cross(implicit val version: String) extends IvyLibraryLoaderAdapter {
override val vendor: String = "org.scala-sbt"
}
def sbtLoader(libraryName: String)(implicit version: String): SbtBaseLoader =
new SbtBaseLoader() {
override val name: String = libraryName
}
def sbtLoader_cross(libraryName: String)(implicit version: String): SbtBaseLoader_Cross =
new SbtBaseLoader_Cross() {
override val name: String = libraryName
}
}
| triplequote/intellij-scala | scala/scala-impl/test/org/jetbrains/sbt/MockSbt.scala | Scala | apache-2.0 | 4,747 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ksmpartners.ernie.server
import com.ksmpartners.ernie.model.ModelObject
import com.ksmpartners.ernie.server.RestGenerator._
import net.liftweb.http._
import com.ksmpartners.ernie.server.service.ServiceRegistry
import net.liftweb.json._
import JsonDSL._
import net.liftweb.common.Box
import com.ksmpartners.ernie.server.filter.AuthUtil._
import com.ksmpartners.ernie.server.filter.SAMLConstants._
import com.ksmpartners.ernie.server.RestGenerator.Parameter
import com.ksmpartners.ernie.server.RestGenerator.Filter
import com.ksmpartners.ernie.server.RestGenerator.RequestTemplate
import net.liftweb.http.ResponseWithReason
import net.liftweb.common.Full
import scala.Some
import net.liftweb.http.BadResponse
import com.ksmpartners.ernie.server.RestGenerator.ErnieError
import ErnieFilters._
import net.liftweb.http.auth.userRoles
/**
* Contains filters that pre-process ernie requests
*/
object ErnieFilters {
/**
* Method that verifies that the requesting user is in the given role
* @param req - The request being handled
* @param role - The role to verify
* @param f - The function to be called if the user is in the role
* @return the function f, or a ForbiddenResponse if the user is not in the specified role
*/
private def authFilter(req: Req, role: String*)(f: () => Box[LiftResponse]): () => Box[LiftResponse] = {
if (role.foldLeft(false)((b, s) => b || isUserInRole(req, s)) ||
(if ((!userRoles.isEmpty) && (userRoles.is.size > 0)) {
role.foldLeft(true)((r: Boolean, theRole: String) => r &&
(userRoles.is.find(f => f.name == theRole).isDefined ||
userRoles.is.foldLeft(true)((res: Boolean, userRole: net.liftweb.http.auth.Role) => res && (userRole.name == theRole))))
} else false)) f
else () => {
log.debug("Response: Forbidden Response. Reason: User is not authorized to perform that action")
Full(ForbiddenResponse("User is not authorized to perform that action"))
}
}
val readAuthFilter = Filter("Read Authorization Filter", authFilter(_: Req, readRole)_, None, ErnieError(ResponseWithReason(ForbiddenResponse(), "User is not authorized to perform that action"), None), readRole)
val writeAuthFilter = Filter("Write Authorization Filter", authFilter(_: Req, writeRole)_, None, ErnieError(ResponseWithReason(ForbiddenResponse(), "User is not authorized to perform that action"), None), writeRole)
val writeRunAuthFilter = Filter("Write Authorization Filter", authFilter(_: Req, runRole, writeRole)_, None, ErnieError(ResponseWithReason(ForbiddenResponse(), "User is not authorized to perform that action"), None), writeRole, runRole)
val authFilters: List[Filter] = List(readAuthFilter, writeAuthFilter, writeRunAuthFilter)
/**
* Return true if the given request accepts an ernie response as defined in ModelObject
*/
def acceptsErnieJson(req: Req): Boolean = req.weightedAccept.find(_.matches(ModelObject.TYPE_PREFIX -> ModelObject.TYPE_POSTFIX)).isDefined
/**
* Method that verifies that the requesting user accepts the correct ctype
* @param req - The request being handled
* @param f - The function to be called if the user accepts the correct ctype
* @return the function f, or a NotAcceptableResponse if the user does not accept the correct ctype
*/
private def ctypeFilter(req: Req)(f: () => Box[LiftResponse]): () => Box[LiftResponse] = {
if (acceptsErnieJson(req)) f else () => {
log.debug("Response: Not Acceptable Response. Reason: Resource only serves " + ModelObject.TYPE_FULL)
Full(NotAcceptableResponse("Resource only serves " + ModelObject.TYPE_FULL))
}
}
val jsonFilter = Filter("JSON Content Type Filter", ctypeFilter(_: Req)_, None, ErnieError(ResponseWithReason(NotAcceptableResponse(), "Resource only serves " + ModelObject.TYPE_FULL), None))
val idFilter = Filter("ID is long filter", idIsLongFilter(_: Req)_, None, ErnieError(ResponseWithReason(BadResponse(), "Job ID provided is not a number"), None))
private def idIsLongFilter(req: Req)(f: () => Box[LiftResponse]): () => Box[LiftResponse] = try {
req.path(0).toLong
f
} catch {
case _ => () => {
log.debug("Response: Bad Response. Reason: Job ID provided is not a number")
Full(ResponseWithReason(BadResponse(), ("Job ID provided is not a number: " + req.path(0))))
}
}
}
/**
* Contains JSON representations of the various responses returned in Ernie operations
*/
object ErnieModels {
val parameterEntity = ("ParameterEntity" -> ("properties" ->
("paramName" -> ("type" -> "string")) ~
("dataType" -> ("type" -> "string")) ~
("allowNull" -> ("type" -> "boolean")) ~
("defaultValue" -> ("type" -> "string"))) ~ ("id" -> "ParameterEntity"))
val definitionEntity = ("DefinitionEntity" ->
(("properties" ->
("createdDate" -> ("type" -> "Date")) ~
("defId" -> ("type" -> "string")) ~
("defDescription" -> ("type" -> "string")) ~
("createdUser" -> ("type" -> "string")) ~
("paramNames" -> ("type" -> "Array") ~ ("items" -> ("type" -> "string"))) ~
("params" -> ("type" -> "Array") ~ ("items" -> ("type" -> "ParameterEntity"))) ~
("unsupportedReportTypes" -> ("type" -> "Array") ~ ("items" -> ("type" -> "string")))) ~
("id" -> "DefinitionEntity")))
val definitionResponse = ("DefinitionResponse" ->
(("properties" -> JNothing) ~
("id" -> "DefinitionResponse")))
val deleteResponse = ("DeleteResponse" ->
(("properties" -> ("deleteStatus" -> ("type" -> "string"))) ~
("id" -> "DeleteResponse")))
val deleteDefinitionResponse = ("DefinitionDeleteResponse" ->
(("properties" -> ("deleteStatus" -> ("type" -> "string"))) ~
("id" -> "DefinitionDeleteResponse")))
val jobsMapResponse = ("jobStatusMap" -> ("id" -> "jobStatusMap") ~ ("properties" -> ("jobStatusMap" ->
(("type" -> "Array") ~ ("items" -> ("type" -> "string")) ~ ("description" -> "Jobs map")))))
val jobsCatalogResponse = ("JobsCatalogResponse" -> ("id" -> "JobsCatalogResponse") ~ ("properties" -> ("jobsCatalog" ->
(("type" -> "Array") ~ ("items" -> ("$ref" -> "JobEntity"))))))
val reportDefinitionMapResponse = ("reportDefMap" -> ("id" -> "reportDefMap") ~ ("properties" -> ("reportDefMap" ->
(("type" -> "Array") ~ ("items" -> ("type" -> "string")) ~ ("description" -> "Defs map")))))
val reportEntity = ("ReportEntity" ->
(("properties" ->
("createdDate" -> ("type" -> "Date")) ~
("startDate" -> ("type" -> "Date")) ~
("finishDate" -> ("type" -> "Date")) ~
("retentionDate" -> ("type" -> "Date")) ~
("rptId" -> ("type" -> "string")) ~
("sourceDefId" -> ("type" -> "string")) ~
("createdUser" -> ("type" -> "string")) ~
("params" -> ("type" -> "Array") ~ ("description" -> "Report parameters") ~ ("items" -> ("type" -> "string"))) ~
("reportType" -> ("type" -> "string"))) ~
("id" -> "ReportEntity")))
val jobEntity = ("JobEntity" ->
("properties" ->
("jobId" -> ("type" -> "long")) ~
("jobStatus" -> ("type" -> "string")) ~
("submitDate" -> ("type" -> "Date")) ~
("rptId" -> ("type" -> "string")) ~
("rptEntity" -> ("type" -> "ReportEntity"))) ~
("id" -> "JobEntity"))
val reportResponse = ("ReportResponse" ->
(("properties" ->
("jobId" -> ("type" -> "long")) ~
("jobStatus" -> ("type" -> "string"))) ~
("id" -> "ReportResponse")))
val statusResponse = ("StatusResponse" ->
(("properties" -> ("jobStatus" -> ("type" -> "string"))) ~
("id" -> "StatusResponse")))
val models = definitionEntity ~ definitionResponse ~ deleteResponse ~ jobEntity ~ jobsCatalogResponse ~ jobsMapResponse ~
reportDefinitionMapResponse ~ reportEntity ~ reportResponse ~ statusResponse ~ deleteDefinitionResponse ~ parameterEntity ~ definitionEntity
}
/**
* Contains the [[com.ksmpartners.ernie.server.RestGenerator.RequestTemplate]]s that define the valid operations for a [[com.ksmpartners.ernie.server.RestGenerator.Resource]]
*/
object ErnieRequestTemplates {
val justJSON = List(Product(ModelObject.TYPE_FULL, ""))
val anything = Nil
val results = List(Product("application/pdf", ""), Product("application/csv", ""), Product("application/html", ""))
val jsonFile = List(Product("json", "json"))
val getJobsList = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter), ServiceRegistry.jobsResource.getJobsListAction)
val headJobsList = getToHead(getJobsList)
val postJob = RequestTemplate(PostRequest, justJSON, List(writeRunAuthFilter, jsonFilter), ServiceRegistry.jobsResource.postJobAction, Parameter("ReportRequest", "body", "ReportRequest"))
val getCatalog = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter), ServiceRegistry.jobsResource.getJobsCatalogAction)
val headCatalog = getToHead(getCatalog)
val getCompleteCatalog = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter), ServiceRegistry.jobsResource.getCompleteCatalogAction)
val headCompleteCatalog = getToHead(getCompleteCatalog)
val getFailedCatalog = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter), ServiceRegistry.jobsResource.getFailedCatalogAction)
val headFailedCatalog = getToHead(getFailedCatalog)
val getDeletedCatalog = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter), ServiceRegistry.jobsResource.getDeletedCatalogAction)
val headDeletedCatalog = getToHead(getDeletedCatalog)
val getExpiredCatalog = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter), ServiceRegistry.jobsResource.getExpiredCatalogAction)
val headExpiredCatalog = getToHead(getExpiredCatalog)
val purgeExpired = RequestTemplate(DeleteRequest, justJSON, List(writeAuthFilter, jsonFilter), ServiceRegistry.jobsResource.purgeAction)
val getJob = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter, idFilter), ServiceRegistry.jobEntityResource.getJobDetailAction)
val headJob = getToHead(getJob)
val getJobStatus = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter, idFilter), ServiceRegistry.jobStatusResource.getJobStatusAction)
val headJobStatus = getToHead(getJobStatus)
val getJobResult = RequestTemplate(GetRequest, results, List(readAuthFilter, idFilter), ServiceRegistry.jobResultsResource.getJobResultAction, Parameter("Accept", "header", "string"))
val headJobResult = getToHead(getJobResult)
val deleteJobResult = RequestTemplate(DeleteRequest, justJSON, List(writeAuthFilter, jsonFilter, idFilter), ServiceRegistry.jobResultsResource.deleteReportAction)
val getReportDetail = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter, idFilter), ServiceRegistry.jobResultsResource.getDetailAction)
val headReportDetail = getToHead(getReportDetail)
val resourcesJSON = RequestTemplate(GetRequest, jsonFile, Nil, Action("Get Swagger Resources JSON", (_: Package) => Full(JsonResponse(DispatchRestAPI.resourceListing)), "", "", "byte"))
val jobsJSON = RequestTemplate(GetRequest, jsonFile, Nil, Action("Get Swagger Jobs JSON", (_: Package) => Full(JsonResponse(DispatchRestAPI.jobsAPI)), "", "", "byte"))
val defsJSON = RequestTemplate(GetRequest, jsonFile, Nil, Action("Get Swagger Defs JSON", (_: Package) => Full(JsonResponse(DispatchRestAPI.defsAPI)), "", "", "byte"))
val getDefs = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter), ServiceRegistry.defsResource.getDefsAction)
val headDefs = getToHead(getDefs)
val postDef = RequestTemplate(PostRequest, justJSON, List(writeAuthFilter, jsonFilter), ServiceRegistry.defsResource.postDefAction, Parameter("DefinitionEntity", "body", "DefinitionEntity"))
val getDef = RequestTemplate(GetRequest, justJSON, List(readAuthFilter, jsonFilter), ServiceRegistry.defDetailResource.getDefDetailAction)
val headDef = getToHead(getDef)
val putDesign = RequestTemplate(PutRequest, justJSON, List(writeAuthFilter, jsonFilter), ServiceRegistry.defDetailResource.putDefAction, Parameter("Rptdesign", "body", "byte"))
val deleteDef = RequestTemplate(DeleteRequest, justJSON, List(writeAuthFilter, jsonFilter), ServiceRegistry.defDetailResource.deleteDefAction)
}
| ksmpartners/ernie | ernie-server/src/main/scala/com/ksmpartners/ernie/server/ErnieModel.scala | Scala | apache-2.0 | 12,805 |
import sbt._
import sbt.Keys._
import bintray.Plugin._
import bintray.Keys._
object Build extends Build {
val customBintraySettings = bintrayPublishSettings ++ Seq(
packageLabels in bintray := Seq("json"),
bintrayOrganization in bintray := Some("plasmaconduit"),
repository in bintray := "releases"
)
val root = Project("root", file("."))
.settings(customBintraySettings: _*)
.settings(
name := "json",
organization := "com.plasmaconduit",
version := "0.25.0",
crossScalaVersions := Seq("2.10.6", "2.11.2"),
licenses += ("MIT", url("http://opensource.org/licenses/MIT")),
scalacOptions += "-feature",
scalacOptions += "-deprecation",
scalacOptions += "-unchecked",
scalacOptions in Test ++= Seq("-Yrangepos"),
resolvers ++= Seq("snapshots", "releases").map(Resolver.sonatypeRepo),
resolvers += "Plasma Conduit Repository" at "http://dl.bintray.com/plasmaconduit/releases",
libraryDependencies <++= (scalaVersion) { (v) =>
if (v.startsWith("2.11.")) {
Seq("org.scala-lang.modules" %% "scala-parser-combinators" % "1.0.2")
} else {
Seq()
}
},
libraryDependencies += "com.plasmaconduit" %% "validation" % "0.7.0",
libraryDependencies += "org.specs2" %% "specs2" % "2.3.11" % "test"
)
}
| plasmaconduit/json | project/Build.scala | Scala | mit | 1,477 |
/**
* Copyright 2011-2016 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.jms.client
import javax.jms.{ Message, MessageConsumer }
import io.gatling.jms.protocol.JmsProtocol
import io.gatling.jms.request.JmsDestination
object JmsClient {
def apply(protocol: JmsProtocol, destination: JmsDestination, replyDestination: JmsDestination): JmsClient = {
new SimpleJmsClient(
protocol.connectionFactoryName,
destination,
replyDestination,
protocol.url,
protocol.credentials,
protocol.anonymousConnect,
protocol.contextFactory,
protocol.deliveryMode,
protocol.messageMatcher
)
}
}
trait JmsClient {
/**
* Gets a new consumer for the reply queue
*/
def createReplyConsumer(selector: String = null): MessageConsumer
/**
*
* @return the name of the reply destination
*/
def replyDestinationName: String
/**
   * Wrapper to send a BytesMessage; returns the sent message
*/
def sendBytesMessage(bytes: Array[Byte], props: Map[String, Any], jmsType: Option[String]): Message
/**
   * Wrapper to send a MapMessage; returns the sent message
* <p>
* Note that map must match the javax.jms.MapMessage contract ie: "This method works only
* for the objectified primitive object types (Integer, Double, Long ...), String objects,
* and byte arrays."
*/
def sendMapMessage(map: Map[String, Any], props: Map[String, Any], jmsType: Option[String]): Message
/**
   * Wrapper to send an ObjectMessage; returns the sent message
*/
def sendObjectMessage(o: java.io.Serializable, props: Map[String, Any], jmsType: Option[String]): Message
/**
   * Wrapper to send a TextMessage; returns the sent message
*/
def sendTextMessage(messageText: String, props: Map[String, Any], jmsType: Option[String]): Message
def close(): Unit
}
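// Hedged usage sketch, not part of the Gatling sources: one way the trait above might be
// exercised once a concrete client has been built via JmsClient(protocol, destination,
// replyDestination). The property map, selector and timeout below are assumptions for
// illustration only.
object JmsClientUsageSketch {
  def pingAndAwaitReply(client: JmsClient): Option[String] = {
    // Send a TextMessage carrying a custom property; the sent javax.jms.Message is returned.
    val sent = client.sendTextMessage("ping", Map("tracking" -> 1), jmsType = None)
    // Consume replies correlated to the message just sent (standard JMS selector syntax).
    val consumer = client.createReplyConsumer(s"JMSCorrelationID = '${sent.getJMSMessageID}'")
    val reply = Option(consumer.receive(1000L)) // None if nothing arrives within one second
    client.close()
    reply.map(_.getJMSMessageID)
  }
}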
| ryez/gatling | gatling-jms/src/main/scala/io/gatling/jms/client/JmsClient.scala | Scala | apache-2.0 | 2,476 |
package integrationtest
import _root_.controller._
import service._
import skinny.{ Routes, _ }
import skinny.test.SkinnyFlatSpec
class RootControllerSpec extends SkinnyFlatSpec with unit.SkinnyTesting {
class EchoServiceMock extends EchoService {
override def echo(s: String): String = s.toUpperCase
}
addFilter(Controllers.root, "/*")
addFilter(ErrorController, "/*")
addFilter(new RootController with Routes {
override val echoService = new EchoServiceMock
get("/mock/?".r)(index).as(Symbol("index"))
}, "/*")
it should "show top page" in {
get("/?echo=abcdEFG") {
status should equal(200)
body should include("abcdEFG")
header("X-Content-Type-Options") should equal("nosniff")
header("X-XSS-Protection") should equal("1; mode=block")
}
get("/mock/?echo=abcdEFG") {
status should equal(200)
body should include("ABCDEFG")
}
}
it should "renew session attributes" in {
session {
get("/session/renew", "locale" -> "ja", "returnTo" -> "/") {
status should equal(302)
}
get("/") {
status should equal(200)
body should include("プログラマ")
}
}
}
it should "show error" in {
get("/error") {
status should equal(500)
body.size should be > 0
}
get("/error/runtime") {
status should equal(500)
body.size should be > 0
}
}
it should "show nested i18n messages" in {
get("/nested-i18n", "foo" -> "will be OK") {
status should equal(200)
}
get("/nested-i18n", "foo" -> "will be NG") {
status should equal(400)
body should include("foo must include 'OK'")
}
session {
get("/session/renew", "locale" -> "ja", "returnTo" -> "/") {
status should equal(302)
}
get("/nested-i18n", "foo" -> "will be NG") {
status should equal(400)
body should include("ふー は 'OK' を含まなければならない")
}
}
}
}
| skinny-framework/skinny-framework | example/src/test/scala/integrationtest/RootControllerSpec.scala | Scala | mit | 1,990 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.mllib
import org.apache.spark.{SparkConf, SparkContext}
// $example on$
import org.apache.spark.mllib.tree.GradientBoostedTrees
import org.apache.spark.mllib.tree.configuration.BoostingStrategy
import org.apache.spark.mllib.tree.model.GradientBoostedTreesModel
import org.apache.spark.mllib.util.MLUtils
// $example off$
object GradientBoostingRegressionExample {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("GradientBoostedTreesRegressionExample")
val sc = new SparkContext(conf)
// $example on$
// Load and parse the data file.
val data = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt")
// Split the data into training and test sets (30% held out for testing)
val splits = data.randomSplit(Array(0.7, 0.3))
val (trainingData, testData) = (splits(0), splits(1))
// Train a GradientBoostedTrees model.
// The defaultParams for Regression use SquaredError by default.
val boostingStrategy = BoostingStrategy.defaultParams("Regression")
boostingStrategy.numIterations = 3 // Note: Use more iterations in practice.
boostingStrategy.treeStrategy.maxDepth = 5
// Empty categoricalFeaturesInfo indicates all features are continuous.
boostingStrategy.treeStrategy.categoricalFeaturesInfo = Map[Int, Int]()
val model = GradientBoostedTrees.train(trainingData, boostingStrategy)
// Evaluate model on test instances and compute test error
val labelsAndPredictions = testData.map { point =>
val prediction = model.predict(point.features)
(point.label, prediction)
}
val testMSE = labelsAndPredictions.map{ case(v, p) => math.pow((v - p), 2)}.mean()
println("Test Mean Squared Error = " + testMSE)
println("Learned regression GBT model:\\n" + model.toDebugString)
// Save and load model
model.save(sc, "target/tmp/myGradientBoostingRegressionModel")
val sameModel = GradientBoostedTreesModel.load(sc,
"target/tmp/myGradientBoostingRegressionModel")
// $example off$
}
}
// scalastyle:on println
| mrchristine/spark-examples-dbc | src/main/scala/org/apache/spark/examples/mllib/GradientBoostingRegressionExample.scala | Scala | apache-2.0 | 2,926 |
/*
* Copyright (C) 2012 The Regents of The University California.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shark.execution
import java.util.{HashMap => JHashMap, List => JavaList}
import java.io.File
import org.apache.hadoop.hive.metastore.api.FieldSchema
import org.apache.hadoop.hive.ql.{Context, DriverContext}
import org.apache.hadoop.hive.ql.exec.{TableScanOperator => HiveTableScanOperator, Utilities}
import org.apache.hadoop.hive.ql.metadata.{Partition, Table}
import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner
import org.apache.hadoop.hive.ql.parse._
import org.apache.hadoop.hive.ql.plan.{PlanUtils, CreateTableDesc, PartitionDesc}
import org.apache.hadoop.hive.ql.plan.api.StageType
import org.apache.hadoop.hive.ql.session.SessionState
import scala.collection.JavaConversions._
import shark.api.TableRDD
import shark.{LogHelper, SharkEnv}
import spark.RDD
class SparkWork(
val pctx: ParseContext,
val terminalOperator: TerminalOperator,
val resultSchema: JavaList[FieldSchema])
extends java.io.Serializable
/**
* SparkTask executes a query plan composed of RDD operators.
*/
class SparkTask extends org.apache.hadoop.hive.ql.exec.Task[SparkWork]
with java.io.Serializable with LogHelper {
private var _tableRdd: TableRDD = null
def tableRdd = _tableRdd
override def execute(driverContext: DriverContext): Int = {
logInfo("Executing " + this.getClass.getName)
val ctx = driverContext.getCtx()
// Adding files to the SparkContext
// Added required files
val files = Utilities.getResourceFiles(conf, SessionState.ResourceType.FILE)
files.split(",").filterNot(x => x.isEmpty || SharkEnv.addedFiles.contains(x)).foreach { x =>
logInfo("Adding file " + x )
SharkEnv.addedFiles.add(x)
SharkEnv.sc.addFile(x)
}
// Added required jars
val jars = Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR)
jars.split(",").filterNot(x => x.isEmpty || SharkEnv.addedJars.contains(x)).foreach { x =>
logInfo("Adding jar " + x )
SharkEnv.addedJars.add(x)
SharkEnv.sc.addJar(x)
}
Operator.hconf = conf
// Replace Hive physical plan with Shark plan.
val terminalOp = work.terminalOperator
val tableScanOps = terminalOp.returnTopOperators().asInstanceOf[Seq[TableScanOperator]]
//ExplainTaskHelper.outputPlan(terminalOp, Console.out, true, 2)
//ExplainTaskHelper.outputPlan(hiveTopOps.head, Console.out, true, 2)
initializeTableScanTableDesc(tableScanOps)
// Initialize the Hive query plan. This gives us all the object inspectors.
initializeAllHiveOperators(terminalOp)
terminalOp.initializeMasterOnAll()
val sinkRdd = terminalOp.execute().asInstanceOf[RDD[Any]]
_tableRdd = new TableRDD(sinkRdd, work.resultSchema, terminalOp.objectInspector)
0
}
def initializeTableScanTableDesc(topOps: Seq[TableScanOperator]) {
// topToTable maps Hive's TableScanOperator to the Table object.
val topToTable: JHashMap[HiveTableScanOperator, Table] = work.pctx.getTopToTable()
// Add table metadata to TableScanOperators
topOps.foreach { op =>
op.table = topToTable.get(op.hiveOp)
op.tableDesc = Utilities.getTableDesc(op.table)
PlanUtils.configureInputJobPropertiesForStorageHandler(op.tableDesc)
if (op.table.isPartitioned) {
val ppl = PartitionPruner.prune(
op.table,
work.pctx.getOpToPartPruner().get(op.hiveOp),
work.pctx.getConf(), "",
work.pctx.getPrunedPartitions())
op.parts = ppl.getConfirmedPartns.toArray ++ ppl.getUnknownPartns.toArray
val allParts = op.parts ++ ppl.getDeniedPartns.toArray
if (allParts.size == 0) {
op.firstConfPartDesc = new PartitionDesc(op.tableDesc, null)
} else {
op.firstConfPartDesc = Utilities.getPartitionDesc(allParts(0).asInstanceOf[Partition])
}
}
}
}
def initializeAllHiveOperators(terminalOp: TerminalOperator) {
// Need to guarantee all parents are initialized before the child.
val topOpList = new scala.collection.mutable.MutableList[HiveTopOperator]
val queue = new scala.collection.mutable.Queue[Operator[_]]
queue.enqueue(terminalOp)
while (!queue.isEmpty) {
val current = queue.dequeue()
current match {
case op: HiveTopOperator => topOpList += op
case _ => Unit
}
queue ++= current.parentOperators
}
// Run the initialization. This guarantees that upstream operators are
// initialized before downstream ones.
topOpList.reverse.foreach { topOp =>
topOp.initializeHiveTopOperator()
}
}
override def getType = StageType.MAPRED
override def getName = "MAPRED-SPARK"
override def localizeMRTmpFilesImpl(ctx: Context) = Unit
}
| vax11780/shark | src/main/scala/shark/execution/SparkTask.scala | Scala | apache-2.0 | 5,370 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import java.io.File
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
import org.apache.spark.sql.execution.DataSourceScanExec
import org.apache.spark.sql.execution.datasources.{BucketSpec, DataSourceStrategy}
import org.apache.spark.sql.execution.exchange.ShuffleExchange
import org.apache.spark.sql.execution.joins.SortMergeJoinExec
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.util.Utils
import org.apache.spark.util.collection.BitSet
class BucketedReadSuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
import testImplicits._
private val df = (0 until 50).map(i => (i % 5, i % 13, i.toString)).toDF("i", "j", "k")
private val nullDF = (for {
i <- 0 to 50
s <- Seq(null, "a", "b", "c", "d", "e", "f", null, "g")
} yield (i % 5, s, i % 13)).toDF("i", "j", "k")
test("read bucketed data") {
withTable("bucketed_table") {
df.write
.format("parquet")
.partitionBy("i")
.bucketBy(8, "j", "k")
.saveAsTable("bucketed_table")
for (i <- 0 until 5) {
val table = spark.table("bucketed_table").filter($"i" === i)
val query = table.queryExecution
val output = query.analyzed.output
val rdd = query.toRdd
assert(rdd.partitions.length == 8)
val attrs = table.select("j", "k").queryExecution.analyzed.output
val checkBucketId = rdd.mapPartitionsWithIndex((index, rows) => {
val getBucketId = UnsafeProjection.create(
HashPartitioning(attrs, 8).partitionIdExpression :: Nil,
output)
rows.map(row => getBucketId(row).getInt(0) -> index)
})
checkBucketId.collect().foreach(r => assert(r._1 == r._2))
}
}
}
// To verify if the bucket pruning works, this function checks two conditions:
// 1) Check if the pruned buckets (before filtering) are empty.
// 2) Verify the final result is the same as the expected one
private def checkPrunedAnswers(
bucketSpec: BucketSpec,
bucketValues: Seq[Integer],
filterCondition: Column,
originalDataFrame: DataFrame): Unit = {
// This test verifies parts of the plan. Disable whole stage codegen.
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
val bucketedDataFrame = spark.table("bucketed_table").select("i", "j", "k")
val BucketSpec(numBuckets, bucketColumnNames, _) = bucketSpec
// Limit: bucket pruning only works when the bucket column has one and only one column
assert(bucketColumnNames.length == 1)
val bucketColumnIndex = bucketedDataFrame.schema.fieldIndex(bucketColumnNames.head)
val bucketColumn = bucketedDataFrame.schema.toAttributes(bucketColumnIndex)
val matchedBuckets = new BitSet(numBuckets)
bucketValues.foreach { value =>
matchedBuckets.set(DataSourceStrategy.getBucketId(bucketColumn, numBuckets, value))
}
// Filter could hide the bug in bucket pruning. Thus, skipping all the filters
val plan = bucketedDataFrame.filter(filterCondition).queryExecution.executedPlan
val rdd = plan.find(_.isInstanceOf[DataSourceScanExec])
assert(rdd.isDefined, plan)
val checkedResult = rdd.get.execute().mapPartitionsWithIndex { case (index, iter) =>
if (matchedBuckets.get(index % numBuckets) && iter.nonEmpty) Iterator(index) else Iterator()
}
// TODO: These tests are not testing the right columns.
// // checking if all the pruned buckets are empty
// val invalidBuckets = checkedResult.collect().toList
// if (invalidBuckets.nonEmpty) {
// fail(s"Buckets $invalidBuckets should have been pruned from:\\n$plan")
// }
checkAnswer(
bucketedDataFrame.filter(filterCondition).orderBy("i", "j", "k"),
originalDataFrame.filter(filterCondition).orderBy("i", "j", "k"))
}
}
test("read partitioning bucketed tables with bucket pruning filters") {
withTable("bucketed_table") {
val numBuckets = 8
val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil)
// json does not support predicate push-down, and thus json is used here
df.write
.format("json")
.partitionBy("i")
.bucketBy(numBuckets, "j")
.saveAsTable("bucketed_table")
for (j <- 0 until 13) {
// Case 1: EqualTo
checkPrunedAnswers(
bucketSpec,
bucketValues = j :: Nil,
filterCondition = $"j" === j,
df)
// Case 2: EqualNullSafe
checkPrunedAnswers(
bucketSpec,
bucketValues = j :: Nil,
filterCondition = $"j" <=> j,
df)
// Case 3: In
checkPrunedAnswers(
bucketSpec,
bucketValues = Seq(j, j + 1, j + 2, j + 3),
filterCondition = $"j".isin(j, j + 1, j + 2, j + 3),
df)
}
}
}
test("read non-partitioning bucketed tables with bucket pruning filters") {
withTable("bucketed_table") {
val numBuckets = 8
val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil)
// json does not support predicate push-down, and thus json is used here
df.write
.format("json")
.bucketBy(numBuckets, "j")
.saveAsTable("bucketed_table")
for (j <- 0 until 13) {
checkPrunedAnswers(
bucketSpec,
bucketValues = j :: Nil,
filterCondition = $"j" === j,
df)
}
}
}
test("read partitioning bucketed tables having null in bucketing key") {
withTable("bucketed_table") {
val numBuckets = 8
val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil)
// json does not support predicate push-down, and thus json is used here
nullDF.write
.format("json")
.partitionBy("i")
.bucketBy(numBuckets, "j")
.saveAsTable("bucketed_table")
// Case 1: isNull
checkPrunedAnswers(
bucketSpec,
bucketValues = null :: Nil,
filterCondition = $"j".isNull,
nullDF)
// Case 2: <=> null
checkPrunedAnswers(
bucketSpec,
bucketValues = null :: Nil,
filterCondition = $"j" <=> null,
nullDF)
}
}
test("read partitioning bucketed tables having composite filters") {
withTable("bucketed_table") {
val numBuckets = 8
val bucketSpec = BucketSpec(numBuckets, Seq("j"), Nil)
// json does not support predicate push-down, and thus json is used here
df.write
.format("json")
.partitionBy("i")
.bucketBy(numBuckets, "j")
.saveAsTable("bucketed_table")
for (j <- 0 until 13) {
checkPrunedAnswers(
bucketSpec,
bucketValues = j :: Nil,
filterCondition = $"j" === j && $"k" > $"j",
df)
checkPrunedAnswers(
bucketSpec,
bucketValues = j :: Nil,
filterCondition = $"j" === j && $"i" > j % 5,
df)
}
}
}
private val df1 = (0 until 50).map(i => (i % 5, i % 13, i.toString)).toDF("i", "j", "k").as("df1")
private val df2 = (0 until 50).map(i => (i % 7, i % 11, i.toString)).toDF("i", "j", "k").as("df2")
/**
   * A helper method to test the bucket read functionality using join. It saves `df1` and `df2`
   * to Hive tables, bucketed or not, according to the given bucket specifics. Next it joins
   * these 2 tables, first making sure the answer is correct, and then checking whether the
   * shuffle exists as expected according to `shuffleLeft` and `shuffleRight`.
*/
private def testBucketing(
bucketSpecLeft: Option[BucketSpec],
bucketSpecRight: Option[BucketSpec],
joinType: String = "inner",
joinCondition: (DataFrame, DataFrame) => Column,
shuffleLeft: Boolean,
shuffleRight: Boolean): Unit = {
withTable("bucketed_table1", "bucketed_table2") {
def withBucket(
writer: DataFrameWriter[Row],
bucketSpec: Option[BucketSpec]): DataFrameWriter[Row] = {
bucketSpec.map { spec =>
writer.bucketBy(
spec.numBuckets,
spec.bucketColumnNames.head,
spec.bucketColumnNames.tail: _*)
}.getOrElse(writer)
}
withBucket(df1.write.format("parquet"), bucketSpecLeft).saveAsTable("bucketed_table1")
withBucket(df2.write.format("parquet"), bucketSpecRight).saveAsTable("bucketed_table2")
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "0",
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
val t1 = spark.table("bucketed_table1")
val t2 = spark.table("bucketed_table2")
val joined = t1.join(t2, joinCondition(t1, t2), joinType)
        // First check that the result is correct.
checkAnswer(
joined.sort("bucketed_table1.k", "bucketed_table2.k"),
df1.join(df2, joinCondition(df1, df2), joinType).sort("df1.k", "df2.k"))
assert(joined.queryExecution.executedPlan.isInstanceOf[SortMergeJoinExec])
val joinOperator = joined.queryExecution.executedPlan.asInstanceOf[SortMergeJoinExec]
assert(
joinOperator.left.find(_.isInstanceOf[ShuffleExchange]).isDefined == shuffleLeft,
s"expected shuffle in plan to be $shuffleLeft but found\\n${joinOperator.left}")
assert(
joinOperator.right.find(_.isInstanceOf[ShuffleExchange]).isDefined == shuffleRight,
s"expected shuffle in plan to be $shuffleRight but found\\n${joinOperator.right}")
}
}
}
private def joinCondition(joinCols: Seq[String]) (left: DataFrame, right: DataFrame): Column = {
joinCols.map(col => left(col) === right(col)).reduce(_ && _)
}
test("avoid shuffle when join 2 bucketed tables") {
val bucketSpec = Some(BucketSpec(8, Seq("i", "j"), Nil))
testBucketing(
bucketSpecLeft = bucketSpec,
bucketSpecRight = bucketSpec,
joinCondition = joinCondition(Seq("i", "j")),
shuffleLeft = false,
shuffleRight = false
)
}
// Enable it after fix https://issues.apache.org/jira/browse/SPARK-12704
ignore("avoid shuffle when join keys are a super-set of bucket keys") {
val bucketSpec = Some(BucketSpec(8, Seq("i"), Nil))
testBucketing(
bucketSpecLeft = bucketSpec,
bucketSpecRight = bucketSpec,
joinCondition = joinCondition(Seq("i", "j")),
shuffleLeft = false,
shuffleRight = false
)
}
test("only shuffle one side when join bucketed table and non-bucketed table") {
val bucketSpec = Some(BucketSpec(8, Seq("i", "j"), Nil))
testBucketing(
bucketSpecLeft = bucketSpec,
bucketSpecRight = None,
joinCondition = joinCondition(Seq("i", "j")),
shuffleLeft = false,
shuffleRight = true
)
}
test("only shuffle one side when 2 bucketed tables have different bucket number") {
val bucketSpec1 = Some(BucketSpec(8, Seq("i", "j"), Nil))
val bucketSpec2 = Some(BucketSpec(5, Seq("i", "j"), Nil))
testBucketing(
bucketSpecLeft = bucketSpec1,
bucketSpecRight = bucketSpec2,
joinCondition = joinCondition(Seq("i", "j")),
shuffleLeft = false,
shuffleRight = true
)
}
test("only shuffle one side when 2 bucketed tables have different bucket keys") {
val bucketSpec1 = Some(BucketSpec(8, Seq("i"), Nil))
val bucketSpec2 = Some(BucketSpec(8, Seq("j"), Nil))
testBucketing(
bucketSpecLeft = bucketSpec1,
bucketSpecRight = bucketSpec2,
joinCondition = joinCondition(Seq("i")),
shuffleLeft = false,
shuffleRight = true
)
}
test("shuffle when join keys are not equal to bucket keys") {
val bucketSpec = Some(BucketSpec(8, Seq("i"), Nil))
testBucketing(
bucketSpecLeft = bucketSpec,
bucketSpecRight = bucketSpec,
joinCondition = joinCondition(Seq("j")),
shuffleLeft = true,
shuffleRight = true
)
}
test("shuffle when join 2 bucketed tables with bucketing disabled") {
val bucketSpec = Some(BucketSpec(8, Seq("i", "j"), Nil))
withSQLConf(SQLConf.BUCKETING_ENABLED.key -> "false") {
testBucketing(
bucketSpecLeft = bucketSpec,
bucketSpecRight = bucketSpec,
joinCondition = joinCondition(Seq("i", "j")),
shuffleLeft = true,
shuffleRight = true
)
}
}
test("avoid shuffle when grouping keys are equal to bucket keys") {
withTable("bucketed_table") {
df1.write.format("parquet").bucketBy(8, "i", "j").saveAsTable("bucketed_table")
val tbl = spark.table("bucketed_table")
val agged = tbl.groupBy("i", "j").agg(max("k"))
checkAnswer(
agged.sort("i", "j"),
df1.groupBy("i", "j").agg(max("k")).sort("i", "j"))
assert(agged.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchange]).isEmpty)
}
}
test("avoid shuffle when grouping keys are a super-set of bucket keys") {
withTable("bucketed_table") {
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")
val tbl = spark.table("bucketed_table")
val agged = tbl.groupBy("i", "j").agg(max("k"))
checkAnswer(
agged.sort("i", "j"),
df1.groupBy("i", "j").agg(max("k")).sort("i", "j"))
assert(agged.queryExecution.executedPlan.find(_.isInstanceOf[ShuffleExchange]).isEmpty)
}
}
test("SPARK-17698 Join predicates should not contain filter clauses") {
val bucketSpec = Some(BucketSpec(8, Seq("i"), Seq("i")))
testBucketing(
bucketSpecLeft = bucketSpec,
bucketSpecRight = bucketSpec,
joinType = "fullouter",
joinCondition = (left: DataFrame, right: DataFrame) => {
val joinPredicates = left("i") === right("i")
val filterLeft = left("i") === Literal("1")
val filterRight = right("i") === Literal("1")
joinPredicates && filterLeft && filterRight
},
shuffleLeft = false,
shuffleRight = false
)
}
test("error if there exists any malformed bucket files") {
withTable("bucketed_table") {
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")
val tableDir = new File(hiveContext
.sparkSession.warehousePath, "bucketed_table")
Utils.deleteRecursively(tableDir)
df1.write.parquet(tableDir.getAbsolutePath)
val agged = spark.table("bucketed_table").groupBy("i").count()
val error = intercept[RuntimeException] {
agged.count()
}
assert(error.toString contains "Invalid bucket file")
}
}
test("disable bucketing when the output doesn't contain all bucketing columns") {
withTable("bucketed_table") {
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")
checkAnswer(hiveContext.table("bucketed_table").select("j"), df1.select("j"))
checkAnswer(hiveContext.table("bucketed_table").groupBy("j").agg(max("k")),
df1.groupBy("j").agg(max("k")))
}
}
}
| gioenn/xSpark | sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadSuite.scala | Scala | apache-2.0 | 16,035 |
package com.alvin.niagara.util
import java.util.Properties
import com.alvin.niagara.config.Config
import com.alvin.niagara.model.PostTags
import org.apache.kafka.clients.producer._
/**
* Created by JINC4 on 5/26/2016.
*
 * Avro message producer that connects to the Kafka cluster
 * and sends Avro-serialized messages to Kafka.
*/
class AvroProducer extends Config {
val props = new Properties()
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
"org.apache.kafka.common.serialization.ByteArraySerializer")
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
"org.apache.kafka.common.serialization.StringSerializer")
props.put(ProducerConfig.ACKS_CONFIG, "all")
val producer = new KafkaProducer[String, Array[Byte]](props)
/**
   * Sends a PostTags object to Kafka as an Avro-serialized record.
   *
   * @param post the PostTags instance to send
   * @return a java.util.concurrent.Future holding the RecordMetadata of the sent record
*/
def send(post: PostTags) = {
val message = new ProducerRecord[String, Array[Byte]](postTopic, PostTags.serialize(post))
producer.send(message)
}
def close() = producer.close()
}
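// Hedged usage sketch, not part of this project: drives the producer above for a single
// record and blocks on the broker acknowledgement, which is fine for a demo but not for
// high-throughput code. The PostTags value is supplied by the caller because its fields
// are not shown in this file.
object AvroProducerSketch {
  def sendOne(post: PostTags): Unit = {
    val producer = new AvroProducer()
    // KafkaProducer.send returns a java.util.concurrent.Future[RecordMetadata]
    val metadata = producer.send(post).get()
    println(s"wrote to ${metadata.topic()} partition ${metadata.partition()} at offset ${metadata.offset()}")
    producer.close()
  }
}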
| AlvinCJin/Niagara | src/main/scala/com/alvin/niagara/util/AvroProducer.scala | Scala | apache-2.0 | 1,162 |
/*******************************************************************
* See the NOTICE file distributed with this work for additional *
* information regarding Copyright ownership. The author/authors *
* license this file to you under the terms of the Apache License *
* Version 2.0 (the "License"); you may not use this file except *
* in compliance with the License. You may obtain a copy of the *
* License at: *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, *
* software distributed under the License is distributed on an *
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, *
* either express or implied. See the License for the specific *
* language governing permissions and limitations under the *
* License. *
******************************************************************/
/** Provides an API for convenient use of 'java.time' classes.
*
* @example
* {{{
* import java.time._
* import scalatime._ // NOTE: Package name was changed from codes.reactive.scalatime => scalatime
* // for versions 0.5.x and newer. If still using 0.4.x, use codes.reactive.scalatime
*
* // Obtain a Duration instance from a Long
* val duration = 10L minutes
*
* val otherDuration = 1L minute
*
* // Obtain a Period instance from an Int
* val period = 2 weeks
*
* // Obtains a LocalDate instance
* val localDate = LocalDate.of(2014, 6, 7)
*
* // Obtain a default TemporalQuery for precision
* val query = temporal.TemporalQueries.precision
*
* // Obtain a Duration instance from a sum of Durations
* duration + otherDuration
*
* // Add a TemporalAmount to a Temporal
* period <<+ localDate
*
* // Add a TemporalAmount to a Temporal
* localDate + period
*
* // Subtract a TemporalAmount from a Temporal
* localDate - period
*
* // Query a specified Temporal
* val result = query |> localDate
* }}}
*
*/
package object scalatime extends impl.ToAllOps with impl.ToAllStd
| reactivecodes/scala-time | src/main/scala/scalatime/package.scala | Scala | apache-2.0 | 2,345 |
package demo
import spray.json.DefaultJsonProtocol
case class Stuff(id: Int, data: String)
object Stuff extends DefaultJsonProtocol {
implicit val stuffFormat = jsonFormat2(Stuff.apply)
}
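// Hedged sketch, not part of the demo: round-trips a Stuff value through spray-json using
// the jsonFormat2 format defined in the companion object above.
object StuffJsonRoundTrip {
  import spray.json._
  def demo(): Unit = {
    val json = Stuff(1, "payload").toJson // yields {"id":1,"data":"payload"}
    val back = json.convertTo[Stuff]
    assert(back == Stuff(1, "payload"))
  }
}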
| MonsantoCo/simple-spray-with-routing | src/main/scala/demo/Stuff.scala | Scala | bsd-3-clause | 196 |
package com.pointr.tcp.rpc
abstract class P2pRpc {
import reflect.runtime.universe.TypeTag
def connect(connParam: P2pConnectionParams): Boolean
def isConnected: Boolean
def request[U: TypeTag, V: TypeTag](req: P2pReq[U]): P2pResp[V] // = _
def requestJava[U, V](req: P2pReq[U]): P2pResp[V] // = { null.asInstanceOf[P2pResp[V]] }
}
| OpenChaiSpark/OCspark | tcpclient/src/main/scala/com/pointr/tcp/rpc/P2pRpc.scala | Scala | apache-2.0 | 349 |
package com.arcusys.learn.liferay.constants
import com.liferay.portal.kernel.workflow.WorkflowConstants
object WorkflowConstantsHelper {
val STATUS_APPROVED = WorkflowConstants.STATUS_APPROVED
val ACTION_PUBLISH = WorkflowConstants.ACTION_PUBLISH
}
| arcusys/Valamis | learn-liferay700-services/src/main/scala/com/arcusys/learn/liferay/constants/WorkflowConstantsHelper.scala | Scala | gpl-3.0 | 255 |
/*
* Copyright (c) 2014-2016
* nonblocking.at gmbh [http://www.nonblocking.at]
*
* This file is part of Cliwix.
*
* Cliwix is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package at.nonblocking.cliwix.core
class CliwixLiferayNotReadyException extends CliwixException("Liferay is not yet ready!")
| nonblocking/cliwix | cliwix-core/src/main/scala/at/nonblocking/cliwix/core/CliwixLiferayNotReadyException.scala | Scala | agpl-3.0 | 910 |
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.i18n.msg
import java.util.Locale
/**
* A bundle of locale-specific messages.
*
* @see PropertiesMessageBundle
*/
trait MessageBundle {
/**
* Get a message for the given code and Locale.
*
* @param locale the locale of the message
* @param code the code of the message
* @param args the arguments to use for substituting any variables in the message
*
* @throws MessageNotFoundException if a message does not exist for the
* specified code and Locale.
*/
def apply(locale: Locale, code: String, args: Any*): String
/**
* Get a message for the given code and Locale.
*
* @param locale the locale of the message
* @param code the code of the message
* @param args the arguments to use for substituting any variables in the message
*/
def get(locale: Locale, code: String, args: Any*): Option[String]
}
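/**
 * Hedged sketch, not part of Coeus: a minimal in-memory MessageBundle keyed by
 * (Locale, code), only to illustrate the apply/get contract. Real code would use
 * PropertiesMessageBundle; the plain RuntimeException below stands in for
 * MessageNotFoundException, whose constructor is not shown in this file.
 */
class MapMessageBundle(messages: Map[(Locale, String), String]) extends MessageBundle {
  def apply(locale: Locale, code: String, args: Any*): String =
    get(locale, code, args: _*)
      .getOrElse(throw new RuntimeException(s"No message for code '$code' and locale $locale"))
  def get(locale: Locale, code: String, args: Any*): Option[String] =
    messages.get((locale, code)).map { pattern =>
      // MessageFormat substitutes {0}, {1}, ... with the given arguments.
      java.text.MessageFormat.format(pattern, args.map(_.asInstanceOf[AnyRef]): _*)
    }
}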
| sptz45/coeus | src/main/scala/com/tzavellas/coeus/i18n/msg/MessageBundle.scala | Scala | apache-2.0 | 1,046 |
class Nums {
class Num(x: Double) {
inline def power(inline n: Long) = ${ PowerMacro.powerCode('x, 'n) }
}
}
object Test {
def main(args: Array[String]): Unit = {
val nums = new Nums
val n = new nums.Num(1.5)
println(n.power(0))
println(n.power(1))
println(n.power(2))
println(n.power(5))
}
}
| som-snytt/dotty | tests/run-macros/i4803b/App_2.scala | Scala | apache-2.0 | 332 |
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.crossdata.test
import org.scalatest.concurrent.TimeLimitedTests
import org.scalatest.time.SpanSugar._
import org.scalatest.{FlatSpec, Matchers}
/**
* Base class for both unit and integration tests
*/
abstract class BaseXDTest extends FlatSpec with Matchers with TimeLimitedTests {
val timeLimit = 5 minutes
}
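// Hedged sketch, not part of Crossdata: a minimal spec built on the base class above, just to
// show the FlatSpec style and the inherited five-minute time limit; the assertion is illustrative.
class BaseXDTestUsageSketch extends BaseXDTest {
  "A Seq" should "report its size" in {
    Seq(1, 2, 3) should have size 3
  }
}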
| luismcl/crossdata | core/src/test/scala/com/stratio/crossdata/test/BaseXDTest.scala | Scala | apache-2.0 | 958 |
package scala.meta.quasiquotes
import org.jetbrains.plugins.scala.SlowTests
import org.junit.experimental.categories.Category
/**
* @author mutcianm
* @since 28.10.16.
*/
@Category(Array(classOf[SlowTests]))
class RandomQQTest extends QuasiQuoteTypeInferenceTestBase {
def testPatVarTermApply() = doTest(
s"""
|${START}p"Foo"$END
|//Pat.Var.Term
""".stripMargin
)
def testPatCaseApply() = doTest(
s"""
|${START}p"case Foo(x) =>"$END
|//Case
""".stripMargin
)
def testPatExtractApply() = doTest(
s"""
|${START}p"Some(x)"$END
|//Pat.Extract
""".stripMargin
)
def testPatTypeVarApply() = doTest(
s"""
|${START}pt"foo"$END
|//Pat.Var.Type
""".stripMargin
)
def testPatTypeWildcardApply() = doTest(
s"""
|${START}pt"_"$END
|//Pat.Type.Wildcard
""".stripMargin
)
def testTermArgApply() = doTest(
s"""
|${START}arg"a: Int"$END
|//Term.Ascribe
""".stripMargin
)
def testModAnnotApply() = doTest(
s"""
|${START}mod"@foo"$END
|//Mod.Annot
""".stripMargin
)
def testTypeArgInfixApply() = doTest(
s"""
|${START}targ"T ^ U"$END
|//Type.ApplyInfix
""".stripMargin
)
def testPatArgTypedApply() = doTest(
s"""
|${START}parg"a:Int"$END
|//Pat.Typed
""".stripMargin
)
def testCtorApplyApply() = doTest(
s"""
|${START}ctor"A(b)"$END
|//Term.Apply
""".stripMargin
)
def testCtorRefNameApply() = doTest(
s"""
|${START}ctor"A"$END
|//Ctor.Ref.Name
""".stripMargin
)
def testTermParamApply() = doTest(
s"""
|${START}param"a: A"$END
|//Term.Param
""".stripMargin
)
def testTypeParamApply() = doTest(
s"""
|${START}tparam"f <: A with B forSome { val x: Int }"$END
|//Type.Param
""".stripMargin
)
def testSourceApply() = doTest(
s"""
|${START}source"class Foo"$END
|//Source
""".stripMargin
)
def testImporterApply() = doTest(
s"""
|${START}importer"foo.bar"$END
|//Importer
""".stripMargin
)
def testImporteeApply() = doTest(
s"""
|${START}importee"foo"$END
|//Importee.Name
""".stripMargin
)
def testEnumeratorApply() = doTest(
s"""
|${START}enumerator"x <- y"$END
|//Enumerator.Generator
""".stripMargin
)
}
| triplequote/intellij-scala | scala/scala-impl/test/scala/meta/quasiquotes/RandomQQTest.scala | Scala | apache-2.0 | 2,471 |
package breeze.linalg
import java.util._
import breeze.linalg.operators._
import breeze.linalg.support._
import breeze.numerics._
/** This is an auto-generated trait providing operators for CSCMatrix */
trait CSCMatrixOps_Double { this: CSCMatrix.type =>
class canMulM_V_Double private[linalg] () extends BinaryRegistry[CSCMatrix[Double], Vector[Double], breeze.linalg.operators.OpMulMatrix, Vector[Double]] {
override def bindingMissing(a: CSCMatrix[Double], b: Vector[Double]) = {
val res = DenseVector.zeros[Double](a.rows)
var c = 0
while(c < a.cols) {
var rr = a.colPtrs(c)
val rrlast = a.colPtrs(c+1)
while (rr < rrlast) {
val r = a.rowIndices(rr)
res(r) += a.data(rr) * b(c)
rr += 1
}
c += 1
}
res
}
};
val canMulM_V_Double = new canMulM_V_Double()
implicit def canMulM_V_Double_def[A <: CSCMatrix[Double], B <: Vector[Double]]:BinaryOp[A, B, breeze.linalg.operators.OpMulMatrix, Vector[Double]] = (
canMulM_V_Double.asInstanceOf[BinaryOp[A, B, breeze.linalg.operators.OpMulMatrix, Vector[Double]]]
)
class canMulM_DM_Double private[linalg] () extends BinaryOp[CSCMatrix[Double], DenseMatrix[Double], breeze.linalg.operators.OpMulMatrix, DenseMatrix[Double]] {
def apply(a: CSCMatrix[Double], b: DenseMatrix[Double]) = {
if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
val res = new DenseMatrix[Double](a.rows, b.cols)
var i = 0
while (i < b.cols) {
var j = 0
while (j < a.cols) {
val v = b(j, i)
var k = a.colPtrs(j)
while (k < a.colPtrs(j+1)) {
res(a.rowIndices(k), i) += v * a.data(k)
k += 1
}
j += 1
}
i += 1
}
res
}
}; implicit val canMulM_DM_Double = new canMulM_DM_Double ()
class canMulDM_M_Double private[linalg] () extends BinaryOp[DenseMatrix[Double], CSCMatrix[Double], breeze.linalg.operators.OpMulMatrix, DenseMatrix[Double]] {
def apply(a: DenseMatrix[Double], b: CSCMatrix[Double]) = {
if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
val res = new DenseMatrix[Double](a.rows, b.cols)
var i = 0
while (i < b.cols) {
var j = b.colPtrs(i)
while (j < b.colPtrs(i+1)) {
val dval = b.data(j)
val ival = b.rowIndices(j)
var k = 0
while (k < a.rows) {
res(k,i) += a(k,ival)*dval
k += 1
}
j += 1
}
i += 1
}
res
}
}; implicit val canMulDM_M_Double = new canMulDM_M_Double ()
class canMulM_M_Double private[linalg] () extends BinaryOp[CSCMatrix[Double], CSCMatrix[Double], breeze.linalg.operators.OpMulMatrix, CSCMatrix[Double]] {
def apply(a: CSCMatrix[Double], b: CSCMatrix[Double]) = {
if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
var numnz = 0
var i = 0
while (i < b.cols) {
var j = b.colPtrs(i)
while (j < b.colPtrs(i+1)) {
numnz += a.colPtrs(b.rowIndices(j)+1) - a.colPtrs(b.rowIndices(j))
j += 1
}
i += 1
}
val res = new CSCMatrix.Builder[Double](a.rows, b.cols, numnz)
i = 0
while (i < b.cols) {
var j = b.colPtrs(i)
while (j < b.colPtrs(i+1)) {
val dval = b.data(j)
var k = a.colPtrs(b.rowIndices(j))
while (k < a.colPtrs(b.rowIndices(j)+1)) {
res.add(a.rowIndices(k), i, a.data(k) * dval)
k += 1
}
j += 1
}
i += 1
}
res.result()
}
}; implicit val canMulM_M_Double = new canMulM_M_Double ()
}
/** This is an auto-generated trait providing operators for CSCMatrix */
trait CSCMatrixOps_Float { this: CSCMatrix.type =>
class canMulM_V_Float private[linalg] () extends BinaryRegistry[CSCMatrix[Float], Vector[Float], breeze.linalg.operators.OpMulMatrix, Vector[Float]] {
override def bindingMissing(a: CSCMatrix[Float], b: Vector[Float]) = {
val res = DenseVector.zeros[Float](a.rows)
var c = 0
while(c < a.cols) {
var rr = a.colPtrs(c)
val rrlast = a.colPtrs(c+1)
while (rr < rrlast) {
val r = a.rowIndices(rr)
res(r) += a.data(rr) * b(c)
rr += 1
}
c += 1
}
res
}
};
val canMulM_V_Float = new canMulM_V_Float()
implicit def canMulM_V_Float_def[A <: CSCMatrix[Float], B <: Vector[Float]]:BinaryOp[A, B, breeze.linalg.operators.OpMulMatrix, Vector[Float]] = (
canMulM_V_Float.asInstanceOf[BinaryOp[A, B, breeze.linalg.operators.OpMulMatrix, Vector[Float]]]
)
class canMulM_DM_Float private[linalg] () extends BinaryOp[CSCMatrix[Float], DenseMatrix[Float], breeze.linalg.operators.OpMulMatrix, DenseMatrix[Float]] {
def apply(a: CSCMatrix[Float], b: DenseMatrix[Float]) = {
if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
val res = new DenseMatrix[Float](a.rows, b.cols)
var i = 0
while (i < b.cols) {
var j = 0
while (j < a.cols) {
val v = b(j, i)
var k = a.colPtrs(j)
while (k < a.colPtrs(j+1)) {
res(a.rowIndices(k), i) += v * a.data(k)
k += 1
}
j += 1
}
i += 1
}
res
}
}; implicit val canMulM_DM_Float = new canMulM_DM_Float ()
class canMulDM_M_Float private[linalg] () extends BinaryOp[DenseMatrix[Float], CSCMatrix[Float], breeze.linalg.operators.OpMulMatrix, DenseMatrix[Float]] {
def apply(a: DenseMatrix[Float], b: CSCMatrix[Float]) = {
if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
val res = new DenseMatrix[Float](a.rows, b.cols)
var i = 0
while (i < b.cols) {
var j = b.colPtrs(i)
while (j < b.colPtrs(i+1)) {
val dval = b.data(j)
val ival = b.rowIndices(j)
var k = 0
while (k < a.rows) {
res(k,i) += a(k,ival)*dval
k += 1
}
j += 1
}
i += 1
}
res
}
}; implicit val canMulDM_M_Float = new canMulDM_M_Float ()
class canMulM_M_Float private[linalg] () extends BinaryOp[CSCMatrix[Float], CSCMatrix[Float], breeze.linalg.operators.OpMulMatrix, CSCMatrix[Float]] {
def apply(a: CSCMatrix[Float], b: CSCMatrix[Float]) = {
if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
var numnz = 0
var i = 0
while (i < b.cols) {
var j = b.colPtrs(i)
while (j < b.colPtrs(i+1)) {
numnz += a.colPtrs(b.rowIndices(j)+1) - a.colPtrs(b.rowIndices(j))
j += 1
}
i += 1
}
val res = new CSCMatrix.Builder[Float](a.rows, b.cols, numnz)
i = 0
while (i < b.cols) {
var j = b.colPtrs(i)
while (j < b.colPtrs(i+1)) {
val dval = b.data(j)
var k = a.colPtrs(b.rowIndices(j))
while (k < a.colPtrs(b.rowIndices(j)+1)) {
res.add(a.rowIndices(k), i, a.data(k) * dval)
k += 1
}
j += 1
}
i += 1
}
res.result()
}
}; implicit val canMulM_M_Float = new canMulM_M_Float ()
}
/** This is an auto-generated trait providing operators for CSCMatrix */
trait CSCMatrixOps_Int { this: CSCMatrix.type =>
class canMulM_V_Int private[linalg] () extends BinaryRegistry[CSCMatrix[Int], Vector[Int], breeze.linalg.operators.OpMulMatrix, Vector[Int]] {
override def bindingMissing(a: CSCMatrix[Int], b: Vector[Int]) = {
val res = DenseVector.zeros[Int](a.rows)
var c = 0
while(c < a.cols) {
var rr = a.colPtrs(c)
val rrlast = a.colPtrs(c+1)
while (rr < rrlast) {
val r = a.rowIndices(rr)
res(r) += a.data(rr) * b(c)
rr += 1
}
c += 1
}
res
}
};
val canMulM_V_Int = new canMulM_V_Int()
implicit def canMulM_V_Int_def[A <: CSCMatrix[Int], B <: Vector[Int]]:BinaryOp[A, B, breeze.linalg.operators.OpMulMatrix, Vector[Int]] = (
canMulM_V_Int.asInstanceOf[BinaryOp[A, B, breeze.linalg.operators.OpMulMatrix, Vector[Int]]]
)
class canMulM_DM_Int private[linalg] () extends BinaryOp[CSCMatrix[Int], DenseMatrix[Int], breeze.linalg.operators.OpMulMatrix, DenseMatrix[Int]] {
def apply(a: CSCMatrix[Int], b: DenseMatrix[Int]) = {
if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
val res = new DenseMatrix[Int](a.rows, b.cols)
var i = 0
while (i < b.cols) {
var j = 0
while (j < a.cols) {
val v = b(j, i)
var k = a.colPtrs(j)
while (k < a.colPtrs(j+1)) {
res(a.rowIndices(k), i) += v * a.data(k)
k += 1
}
j += 1
}
i += 1
}
res
}
}; implicit val canMulM_DM_Int = new canMulM_DM_Int ()
class canMulDM_M_Int private[linalg] () extends BinaryOp[DenseMatrix[Int], CSCMatrix[Int], breeze.linalg.operators.OpMulMatrix, DenseMatrix[Int]] {
def apply(a: DenseMatrix[Int], b: CSCMatrix[Int]) = {
if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
val res = new DenseMatrix[Int](a.rows, b.cols)
var i = 0
while (i < b.cols) {
var j = b.colPtrs(i)
while (j < b.colPtrs(i+1)) {
val dval = b.data(j)
val ival = b.rowIndices(j)
var k = 0
while (k < a.rows) {
res(k,i) += a(k,ival)*dval
k += 1
}
j += 1
}
i += 1
}
res
}
}; implicit val canMulDM_M_Int = new canMulDM_M_Int ()
class canMulM_M_Int private[linalg] () extends BinaryOp[CSCMatrix[Int], CSCMatrix[Int], breeze.linalg.operators.OpMulMatrix, CSCMatrix[Int]] {
def apply(a: CSCMatrix[Int], b: CSCMatrix[Int]) = {
if(a.cols != b.rows) throw new RuntimeException("Dimension Mismatch!")
var numnz = 0
var i = 0
while (i < b.cols) {
var j = b.colPtrs(i)
while (j < b.colPtrs(i+1)) {
numnz += a.colPtrs(b.rowIndices(j)+1) - a.colPtrs(b.rowIndices(j))
j += 1
}
i += 1
}
val res = new CSCMatrix.Builder[Int](a.rows, b.cols, numnz)
i = 0
while (i < b.cols) {
var j = b.colPtrs(i)
while (j < b.colPtrs(i+1)) {
val dval = b.data(j)
var k = a.colPtrs(b.rowIndices(j))
while (k < a.colPtrs(b.rowIndices(j)+1)) {
res.add(a.rowIndices(k), i, a.data(k) * dval)
k += 1
}
j += 1
}
i += 1
}
res.result()
}
}; implicit val canMulM_M_Int = new canMulM_M_Int ()
}
| tjhunter/scalanlp-core | math/src/main/scala/breeze/linalg/CSCMatrixOps.scala | Scala | apache-2.0 | 11,499 |
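/* Editor's sketch (not part of the Breeze sources above): exercising the generated
 * sparse-matrix operators. The Builder calls mirror the ones used inside canMulM_M_*
 * above; the values and dimensions are illustrative. */
object CSCMatrixOpsSketch {
  import breeze.linalg.{CSCMatrix, DenseMatrix, DenseVector}
  def main(args: Array[String]): Unit = {
    val builder = new CSCMatrix.Builder[Double](rows = 2, cols = 2)
    builder.add(0, 0, 1.0)
    builder.add(1, 1, 2.0)
    val sparse = builder.result()    // 2x2 diagonal sparse matrix
    val dense  = DenseMatrix((1.0, 2.0), (3.0, 4.0))
    val vector = DenseVector(5.0, 6.0)
    println(sparse * vector)         // CSCMatrix * Vector      (canMulM_V_Double)
    println(sparse * dense)          // CSCMatrix * DenseMatrix (canMulM_DM_Double)
    println(sparse * sparse)         // CSCMatrix * CSCMatrix   (canMulM_M_Double)
  }
}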
package com.goibibo.sqlshift.models
/**
* Project: mysql-redshift-loader
* Author: shivamsharma
* Date: 12/29/16.
*/
case class Status(isSuccessful: Boolean, e: Exception)
| goibibo/SqlShift | src/main/scala/com/goibibo/sqlshift/models/Status.scala | Scala | mit | 181 |
package im.mange.flakeless.innards
import im.mange.flakeless.{Config, Flakeless}
import org.openqa.selenium.{By, WebElement}
//TODO: model Condition
//TODO: work out when description is an Actual and when its something else (like not found)
//... Actual sounds assertion based
//... other sounds like an Action ...
//... in which case arr we in Action & Check extends Command land?
//TODO: should this be WaitForCondition
/* deliberately not private [flakeless] */ object WaitForElement {
def apply(flakeless: Option[Flakeless], command: Command,
description: (WebElement) => String,
condition: (WebElement) => Boolean) = {
Execute(flakeless, new WaitForElement(command, description, condition))
}
}
private class WaitForElement(val command: Command,
description: (WebElement) => String,
condition: (WebElement) => Boolean) extends Executable {
override def execute(context: Context, config: Config) {
(command.in, command.by) match {
case (Some(in), Some(by)) =>
//TODO: we should ensure there is only one element - make configurable
Wait.waitUpTo(config).forCondition(command,
{
val element = in.findElement(by)
val result = condition(element)
if (result) context.succeeded()
else context.failed(description(element))
result
},
description(in.findElement(by))
)
case _ => throw new RuntimeException("cannot wait without in and by")
}
}
}
| alltonp/flakeless | src/main/scala/im/mange/flakeless/innards/WaitForElement.scala | Scala | mit | 1,575 |
/**
* The MIT License (MIT) Copyright (c) 2014 University of Applied Sciences, Berlin, Germany
* For more detailed information, please read the licence.txt in the root directory.
**/
package org.onepercent.utils.scoring
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
/**
* Representation of trained Data
 * @param categoryProb the category probability: the prior probability of a word being in a specific class
 * @param termProb the term probability: the probabilities of a term being in each category
 * @param unknownWordProb the probability of a term unknown in the training data being in each category
*/
case class TrainedData(categoryProb: Map[String, Double], termProb: Map[String, Map[String, Double]], unknownWordProb: Map[String, Double])
/**
* Produces probabilities of terms being in categories based on training data.
* The given SparkContext is used to compute these.
* @see http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
* @param sc SparkContext to compute the probabilities.
* @author pFriesch
*/
class TweetScoringLearner(sc: SparkContext) {
type Category = String
/**
   * Learns from training data to produce the category probabilities and the per-category probabilities of terms.
   * @see http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
   * @param tweets the training tweets divided into categories.
   * @return the trained data: category probabilities, per-category term probabilities and unknown-word probabilities.
*/
def learn(tweets: Map[Category, List[String]]): TrainedData = {
val distTweets = sc.parallelize(tweets.toSeq)
val categories = distTweets.map(T => T._1)
val tokenizedTweets = distTweets.map(catTweets => catTweets._1 -> Tokenizer.tokenizeList(catTweets._2))
val termCount = computeTermCount(tokenizedTweets, categories)
val termCountPerClass = tokenizedTweets.map(catTokens => (catTokens._1, catTokens._2.length))
val totalToken = termCountPerClass.map(X => X._2).reduce(_ + _)
val categoryProb = termCountPerClass.map(CatCount => (CatCount._1, CatCount._2.toDouble / totalToken.toDouble))
val termProb = computeTermProb(termCount, categories.collect().toList)
TrainedData(categoryProb.collect().toMap, termProb._1.map(X => (X._1, X._2.toMap)).collect().toMap, termProb._2)
}
private def computeTermProb(termCount: RDD[(String, Map[Category, Int])], categories: List[Category]): (RDD[(String, List[(Category, Double)])], Map[Category, Double]) = {
val smoothing = 1
//needs to be accessible on all workers, so no RDD
val categoryTermCount = categories.map(C => (C, termCount.map(
X => (X._1, X._2.getOrElse(C, 0) + smoothing)).map(X => X._2).reduce(_ + _))
).toMap
// fills empty categories in the termcount with 0
val filledTermCount = termCount.map(
termWithCount => (termWithCount._1, categories.map(
C => C -> termWithCount._2.getOrElse(C, 0))
)
)
(filledTermCount.map(termsWithCount => (termsWithCount._1, termsWithCount._2.map {
case (category: Category, count: Int) =>
(category,
// condProbFun
(count + smoothing).toDouble / categoryTermCount(category).toDouble
// \condProbFun
)
// probability for an unknown Word
})), categoryTermCount.map(X => (X._1, 1.toDouble / X._2.toDouble)))
}
private def computeTermCount(tokenizedTweets: RDD[(Category, List[String])], categories: RDD[Category]): RDD[(String, Map[Category, Int])] = {
val termCount = tokenizedTweets.map(X => X._1 -> X._2.groupBy(X => X).map(X => X._1 -> X._2.length))
termCount.flatMap(X => X._2.map(Y => (Y._1, (X._1, Y._2)))).groupByKey().map(X => (X._1, X._2.toMap))
}
}
| isn0gud/onepercent | src/main/scala/org/onepercent/utils/scoring/TweetScoringLearner.scala | Scala | mit | 3,849 |
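/* Editor's sketch (not part of the onepercent sources above): applying the TrainedData
 * produced by TweetScoringLearner.learn to classify an already tokenized tweet with a
 * naive Bayes score in log space. The object and method names are illustrative; the
 * fallback to unknownWordProb mirrors the structure of TrainedData. */
object TweetScoringSketch {
  import org.onepercent.utils.scoring.TrainedData
  /** Returns the category with the highest log-probability for the given tokens. */
  def classify(trained: TrainedData, tokens: List[String]): String =
    trained.categoryProb.map { case (category, prior) =>
      val logLikelihood = tokens.map { token =>
        val prob = trained.termProb.get(token)
          .flatMap(_.get(category))
          .getOrElse(trained.unknownWordProb(category))
        math.log(prob)
      }.sum
      category -> (math.log(prior) + logLikelihood)
    }.maxBy(_._2)._1
}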
package org.openurp.edu.eams.teach.schedule.service
import org.openurp.edu.base.Student
trait StdCourseTablePermissionChecker {
def check(std: Student, kind: String, ids: String): String
}
| openurp/edu-eams-webapp | schedule/src/main/scala/org/openurp/edu/eams/teach/schedule/service/StdCourseTablePermissionChecker.scala | Scala | gpl-3.0 | 196 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr
import org.dom4j.{QName, Document}
import org.orbeon.oxf.pipeline.api.{PipelineContext}
import org.orbeon.oxf.processor.SimpleProcessor
import org.orbeon.oxf.properties.{PropertySet, Properties}
import org.orbeon.oxf.util.XPath
import org.orbeon.oxf.xml.{XMLReceiver, TransformerUtils, Dom4j}
import org.orbeon.saxon.dom4j.DocumentWrapper
import org.orbeon.saxon.om.NodeInfo
import org.orbeon.scaxon.XML._
// Processor to replace or add resources based on properties
//
// An property looks like: oxf.fr.resource.*.*.en.detail.labels.save
//
// NOTE: We used to do this in XSLT, but when it came to implement *adding* missing resources, the level of complexity
// increased too much and readability would have suffered so we rewrote in Scala.
class ResourcesPatcher extends SimpleProcessor {
def generateData(pipelineContext: PipelineContext, xmlReceiver: XMLReceiver): Unit = {
// Read inputs
val resourcesDocument = readInputAsDOM4J(pipelineContext, "data")
    val instanceElement = new DocumentWrapper(readInputAsDOM4J(pipelineContext, "instance"), null, XPath.GlobalConfiguration) \ *
    val app = instanceElement \ "app" stringValue
    val form = instanceElement \ "form" stringValue
// Transform and write out the document
ResourcesPatcher.transform(resourcesDocument, app, form)(Properties.instance.getPropertySet)
TransformerUtils.writeDom4j(resourcesDocument, xmlReceiver)
}
}
object ResourcesPatcher {
def transform(resourcesDocument: Document, app: String, form: String)(implicit properties: PropertySet): Unit = {
    val resourcesElement = new DocumentWrapper(resourcesDocument, null, XPath.GlobalConfiguration) \ *
val propertyNames = properties.propertiesStartsWith("oxf.fr.resource" :: app :: form :: Nil mkString ".")
// In 4.6 summary/detail buttons are at the top level
def filterPathForBackwardCompatibility(path: Seq[String]) = path take 2 match {
case Seq("detail" | "summary", "buttons") ⇒ path drop 1
case _ ⇒ path
}
val langPathValue =
for {
name ← propertyNames
        tokens = name split """\."""
lang = tokens(5)
path = filterPathForBackwardCompatibility(tokens drop 6) mkString "/"
} yield
(lang, path, properties.getString(name))
// Return all languages or the language specified if it exists
// For now we don't support creating new top-level resource elements for new languages.
def findConcreteLanguages(langOrWildcard: String) = {
val allLanguages =
eval(resourcesElement, "resource/@xml:lang/string()").asInstanceOf[Seq[String]]
val filtered =
if (langOrWildcard == "*")
allLanguages
else
allLanguages filter (_ == langOrWildcard)
filtered.distinct // there *shouldn't* be duplicate languages in the source
}
def resourceElementsForLang(lang: String) =
eval(resourcesElement, s"resource[@xml:lang = '$lang']").asInstanceOf[Seq[NodeInfo]] map unwrapElement
// Update or create elements and set values
for {
(langOrWildcard, path, value) ← langPathValue
lang ← findConcreteLanguages(langOrWildcard)
rootForLang ← resourceElementsForLang(lang)
} locally {
Dom4j.ensurePath(rootForLang, path split "/" map QName.get).setText(value)
}
}
} | martinluther/orbeon-forms | src/main/scala/org/orbeon/oxf/fr/ResourcesPatcher.scala | Scala | lgpl-2.1 | 4,351 |
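/* Editor's sketch (not part of the Orbeon sources above): a property such as
 *   oxf.fr.resource.*.*.en.detail.buttons.save = Save Draft
 * makes transform() ensure resource[@xml:lang = 'en']/buttons/save exists with that
 * text (the leading "detail"/"summary" before "buttons" is dropped for 4.6 backward
 * compatibility, per filterPathForBackwardCompatibility above). The app/form names and
 * the property value here are illustrative. */
object ResourcesPatcherSketch {
  import org.dom4j.Document
  import org.orbeon.oxf.fr.ResourcesPatcher
  import org.orbeon.oxf.properties.Properties
  def patch(resourcesDocument: Document): Document = {
    ResourcesPatcher.transform(resourcesDocument, "acme", "contact")(Properties.instance.getPropertySet)
    resourcesDocument // patched in place
  }
}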
/*
* Stratio Meta
*
* Copyright (c) 2014, Stratio, All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library.
*/
package com.stratio.meta.common.ask
case class Connect (user: String)
| dhiguero/stratio-meta | meta-common/src/main/scala/com/stratio/meta/common/ask/Connect.scala | Scala | gpl-3.0 | 778 |
package hello
import org.springframework.context.annotation.Configuration
import org.springframework.boot.autoconfigure.EnableAutoConfiguration
import org.springframework.context.annotation.ComponentScan
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
/**
 * This config class will trigger Spring annotation scanning and auto-configure the Spring context.
*
* @author saung
* @since 1.0
*/
@Configuration
@EnableAutoConfiguration
@ComponentScan
@RestController
class HelloConfig {
@RequestMapping(value=Array("/"))
def controller() :String = {
return "Hello World"
}
}
| muffadaldaginawala/hello-world | src/main/scala/hello/HelloConfig.scala | Scala | mit | 745 |
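/* Editor's sketch (not part of the original repo above): a minimal launcher for the
 * auto-configured HelloConfig controller; assumes Spring Boot on the classpath, and the
 * object name is illustrative. */
object HelloApplication {
  import hello.HelloConfig
  import org.springframework.boot.SpringApplication
  def main(args: Array[String]): Unit = {
    // Starts the embedded container; GET / is then served by HelloConfig.controller()
    SpringApplication.run(classOf[HelloConfig], args: _*)
  }
}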
package com.hunorkovacs.koauth.service
import java.security.SecureRandom
import scala.util.Random
trait TokenGenerator {
def generateTokenAndSecret: (String, String)
def generateVerifier: String
def generateNonce: String
}
object DefaultTokenGenerator extends TokenGenerator {
private final val LengthToken = 32
private final val LengthSecret = 32
private final val LengthVerifier = 16
private final val LengthNonce = 8
private val random = new Random(new SecureRandom())
override def generateTokenAndSecret = (generate(LengthToken), generate(LengthSecret))
override def generateVerifier = generate(LengthVerifier)
override def generateNonce = generate(LengthNonce)
private def generate(length: Int): String = random.alphanumeric.take(length).mkString
}
| kovacshuni/koauth | src/main/scala/com/hunorkovacs/koauth/service/DefaultTokenGenerator.scala | Scala | apache-2.0 | 789 |
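/* Editor's sketch (not part of the koauth sources above): typical use of the default
 * generator when issuing temporary credentials; the printed labels are illustrative. */
object TokenGeneratorSketch {
  import com.hunorkovacs.koauth.service.DefaultTokenGenerator
  def main(args: Array[String]): Unit = {
    val (token, secret) = DefaultTokenGenerator.generateTokenAndSecret
    println(s"token=$token secret=$secret")                        // 32 alphanumeric chars each
    println(s"verifier=${DefaultTokenGenerator.generateVerifier}") // 16 chars
    println(s"nonce=${DefaultTokenGenerator.generateNonce}")       // 8 chars
  }
}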
package co.scriptgeek
import akka.actor.Actor
import spray.routing._
import spray.http._
import spray.http.StatusCodes._
import MediaTypes._
// we don't implement our route structure directly in the service actor because
// we want to be able to test it independently, without having to spin up an actor
class EventStoreActor extends Actor with EventStoreService {
// the HttpService trait defines only one abstract member, which
// connects the services environment to the enclosing actor or test
def actorRefFactory = context
// this actor only runs our route, but you could add
// other things here, like request stream processing
// or timeout handling
def receive = runRoute(esRoute)
}
// this trait defines our service behavior independently from the service actor
trait EventStoreService extends HttpService {
val esRoute = {
path("topics") {
get {
respondWithMediaType(`application/json`) {
complete("{}")
}
}
}
}
} | scriptgeeks/eventstore | src/main/scala/co/scriptgeek/EventStoreService.scala | Scala | apache-2.0 | 994 |
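/* Editor's sketch (not part of the original sources above): the usual spray-can
 * bootstrap for the service actor, as in the spray templates. The actor system name,
 * interface and port are illustrative assumptions. */
object EventStoreBoot extends App {
  import akka.actor.{ActorSystem, Props}
  import akka.io.IO
  import co.scriptgeek.EventStoreActor
  import spray.can.Http
  implicit val system = ActorSystem("eventstore-system")
  val service = system.actorOf(Props[EventStoreActor], "eventstore-service")
  // Bind the HTTP server and hand incoming connections to the service actor
  IO(Http) ! Http.Bind(service, interface = "localhost", port = 8080)
}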
import shapes.pimpmylib.{Square, Circle, Shape}
import shapes.untouchable.{Circle => UCircle, Square => USquare}
package object pimpmylib {
implicit def decorateCircle(shape: UCircle): Shape =
Circle(shape.r)
implicit def decorateSquare(shape: USquare): Shape =
Square(shape.a)
}
| tupol/scala-patterns-tc-pml | src/main/scala/shapes/pimpmylib/package.scala | Scala | apache-2.0 | 297 |
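/* Editor's sketch (not part of the original sources above): with the implicit
 * decorators in scope, an "untouchable" shape can be passed where the enriched Shape
 * is expected. How the untouchable classes are constructed (here `new UCircle(2.0)`)
 * is an assumption, since their definitions are not shown. */
object PimpMyLibSketch {
  import pimpmylib._                                  // decorateCircle / decorateSquare
  import shapes.pimpmylib.Shape
  import shapes.untouchable.{Circle => UCircle}
  def describe(shape: Shape): String = shape.toString
  val adapted: String = describe(new UCircle(2.0))    // decorateCircle applies implicitly
}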
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert
import java.io.InputStream
import com.typesafe.config.Config
import org.locationtech.geomesa.convert.Transformers.Predicate
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.annotation.tailrec
import scala.collection.JavaConversions._
import scala.collection.immutable.IndexedSeq
import scala.util.Try
class CompositeConverterFactory[I] extends SimpleFeatureConverterFactory[I] {
override def canProcess(conf: Config): Boolean =
if (conf.hasPath("type")) conf.getString("type").equals("composite-converter") else false
override def buildConverter(sft: SimpleFeatureType, conf: Config): SimpleFeatureConverter[I] = {
val converters: Seq[(Predicate, SimpleFeatureConverter[I])] =
conf.getConfigList("converters").map { c =>
val pred = Transformers.parsePred(c.getString("predicate"))
val converter = SimpleFeatureConverters.build[I](sft, conf.getConfig(c.getString("converter")))
(pred, converter)
}
new CompositeConverter[I](sft, converters)
}
}
class CompositeConverter[I](val targetSFT: SimpleFeatureType, converters: Seq[(Predicate, SimpleFeatureConverter[I])])
extends SimpleFeatureConverter[I] {
override val caches: Map[String, EnrichmentCache] = Map.empty
val predsWithIndex = converters.map(_._1).zipWithIndex.toIndexedSeq
val indexedConverters = converters.map(_._2).toIndexedSeq
override def createEvaluationContext(globalParams: Map[String, Any], counter: Counter): EvaluationContext = {
val delegates = converters.map(_._2.createEvaluationContext(globalParams, counter)).toIndexedSeq
new CompositeEvaluationContext(delegates)
}
override def processInput(is: Iterator[I], ec: EvaluationContext): Iterator[SimpleFeature] = {
val setEc: (Int) => Unit = ec match {
case c: CompositeEvaluationContext => (i) => c.setCurrent(i)
case _ => (_) => Unit
}
val toEval = Array.ofDim[Any](1)
def evalPred(pi: (Predicate, Int)): Boolean = {
setEc(pi._2)
Try(pi._1.eval(toEval)(ec)).getOrElse(false)
}
new Iterator[SimpleFeature] {
var iter: Iterator[SimpleFeature] = loadNext()
override def hasNext: Boolean = iter.hasNext
override def next(): SimpleFeature = {
val res = iter.next()
if (!iter.hasNext && is.hasNext) {
iter = loadNext()
}
res
}
@tailrec
def loadNext(): Iterator[SimpleFeature] = {
toEval(0) = is.next()
val i = predsWithIndex.find(evalPred).map(_._2).getOrElse(-1)
val res = if (i == -1) {
ec.counter.incLineCount()
ec.counter.incFailure()
Iterator.empty
} else {
indexedConverters(i).processInput(Iterator(toEval(0).asInstanceOf[I]), ec)
}
if (res.hasNext) {
res
} else if (!is.hasNext) {
Iterator.empty
} else {
loadNext()
}
}
}
}
override def processSingleInput(i: I, ec: EvaluationContext): Seq[SimpleFeature] = ???
override def process(is: InputStream, ec: EvaluationContext): Iterator[SimpleFeature] = ???
}
case class CompositeEvaluationContext(contexts: IndexedSeq[EvaluationContext]) extends EvaluationContext {
var current: EvaluationContext = contexts.headOption.orNull
def setCurrent(i: Int): Unit = current = contexts(i)
override def get(i: Int): Any = current.get(i)
override def set(i: Int, v: Any): Unit = current.set(i, v)
override def indexOf(n: String): Int = current.indexOf(n)
override def counter: Counter = current.counter
override def clear(): Unit = contexts.foreach(_.clear())
override def getCache(k: String): EnrichmentCache = current.getCache(k)
} | ronq/geomesa | geomesa-convert/geomesa-convert-common/src/main/scala/org/locationtech/geomesa/convert/CompositeConverter.scala | Scala | apache-2.0 | 4,231 |
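/* Editor's sketch (not part of the GeoMesa sources above): the shape of configuration
 * that canProcess/buildConverter above expect. The predicate expressions, converter
 * names and nested converter contents are illustrative assumptions; each "converter"
 * key is resolved against this same config via conf.getConfig(...). */
object CompositeConverterConfigSketch {
  import com.typesafe.config.ConfigFactory
  val conf = ConfigFactory.parseString(
    """
      |type       = "composite-converter"
      |converters = [
      |  { predicate = "strEq($0, 'csv')",  converter = "csv-converter"  }
      |  { predicate = "strEq($0, 'json')", converter = "json-converter" }
      |]
      |# sibling configs looked up by name in buildConverter above
      |csv-converter  = { type = "delimited-text" }
      |json-converter = { type = "json" }
    """.stripMargin)
}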
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ml.dmlc.mxnetexamples.customop
import org.kohsuke.args4j.{CmdLineParser, Option}
import org.slf4j.LoggerFactory
import scala.collection.JavaConverters._
import ml.dmlc.mxnet.Symbol
import ml.dmlc.mxnet.DType.DType
import ml.dmlc.mxnet.DataIter
import ml.dmlc.mxnet.DataBatch
import ml.dmlc.mxnet.NDArray
import ml.dmlc.mxnet.Shape
import ml.dmlc.mxnet.EvalMetric
import ml.dmlc.mxnet.Context
import ml.dmlc.mxnet.Xavier
import ml.dmlc.mxnet.optimizer.RMSProp
import ml.dmlc.mxnet.CustomOp
import ml.dmlc.mxnet.CustomOpProp
import ml.dmlc.mxnet.Operator
import ml.dmlc.mxnet.optimizer.SGD
import ml.dmlc.mxnet.Accuracy
import ml.dmlc.mxnet.Callback.Speedometer
/**
* Example of CustomOp
* @author Depeng Liang
*/
object ExampleCustomOp {
private val logger = LoggerFactory.getLogger(classOf[ExampleCustomOp])
class Softmax(_param: Map[String, String]) extends CustomOp {
override def forward(sTrain: Boolean, req: Array[String],
inData: Array[NDArray], outData: Array[NDArray], aux: Array[NDArray]): Unit = {
val xShape = inData(0).shape
val x = inData(0).toArray.grouped(xShape(1)).toArray
val yArr = x.map { it =>
val max = it.max
val tmp = it.map(e => Math.exp(e.toDouble - max).toFloat)
val sum = tmp.sum
tmp.map(_ / sum)
}.flatten
val y = NDArray.empty(xShape, outData(0).context)
y.set(yArr)
this.assign(outData(0), req(0), y)
y.dispose()
}
override def backward(req: Array[String], outGrad: Array[NDArray],
inData: Array[NDArray], outData: Array[NDArray],
inGrad: Array[NDArray], aux: Array[NDArray]): Unit = {
val l = inData(1).toArray.map(_.toInt)
val oShape = outData(0).shape
val yArr = outData(0).toArray.grouped(oShape(1)).toArray
l.indices.foreach { i =>
yArr(i)(l(i)) -= 1.0f
}
val y = NDArray.empty(oShape, inGrad(0).context)
y.set(yArr.flatten)
this.assign(inGrad(0), req(0), y)
y.dispose()
}
}
class SoftmaxProp(needTopGrad: Boolean = false)
extends CustomOpProp(needTopGrad) {
override def listArguments(): Array[String] = Array("data", "label")
override def listOutputs(): Array[String] = Array("output")
override def inferShape(inShape: Array[Shape]):
(Array[Shape], Array[Shape], Array[Shape]) = {
val dataShape = inShape(0)
val labelShape = Shape(dataShape(0))
val outputShape = dataShape
(Array(dataShape, labelShape), Array(outputShape), null)
}
override def inferType(inType: Array[DType]):
(Array[DType], Array[DType], Array[DType]) = {
(inType, inType.take(1), null)
}
override def createOperator(ctx: String, inShapes: Array[Array[Int]],
inDtypes: Array[Int]): CustomOp = new Softmax(this.kwargs)
}
Operator.register("softmax", new SoftmaxProp)
def main(args: Array[String]): Unit = {
val leop = new ExampleCustomOp
val parser: CmdLineParser = new CmdLineParser(leop)
try {
parser.parseArgument(args.toList.asJava)
assert(leop.dataPath != null)
val ctx = if (leop.gpu >= 0) Context.gpu(0) else Context.cpu()
val dataName = Array("data")
val labelName = Array("softmax_label")
val data = Symbol.Variable("data")
val label = Symbol.Variable("label")
val fc1 = Symbol.FullyConnected("fc1")()(Map("data" -> data, "num_hidden" -> 128))
val act1 = Symbol.Activation("relu1")()(Map("data" -> fc1, "act_type" -> "relu"))
val fc2 = Symbol.FullyConnected("fc2")()(Map("data" -> act1, "num_hidden" -> 64))
val act2 = Symbol.Activation("relu2")()(Map("data" -> fc2, "act_type" -> "relu"))
val fc3 = Symbol.FullyConnected("fc3")()(Map("data" -> act2, "num_hidden" -> 10))
val mlp = Symbol.Custom("softmax")()(Map("data" -> fc3,
"label" -> label, "op_type" -> "softmax"))
val (trainIter, testIter) =
Data.mnistIterator(leop.dataPath, batchSize = 100, inputShape = Shape(784))
val datasAndLabels = trainIter.provideData ++ trainIter.provideLabel
val (argShapes, outputShapes, auxShapes) = mlp.inferShape(datasAndLabels)
val initializer = new Xavier(factorType = "in", magnitude = 2.34f)
val argNames = mlp.listArguments()
val argDict = argNames.zip(argShapes.map(s => NDArray.empty(s, ctx))).toMap
val gradDict = argNames.zip(argShapes).filter { case (name, shape) =>
!datasAndLabels.contains(name)
}.map(x => x._1 -> NDArray.empty(x._2, ctx) ).toMap
argDict.foreach { case (name, ndArray) =>
if (!datasAndLabels.contains(name)) {
initializer.initWeight(name, ndArray)
}
}
val executor = mlp.bind(ctx, argDict, gradDict)
val lr = 0.001f
val opt = new RMSProp(learningRate = lr, wd = 0.00001f)
val paramsGrads = gradDict.toList.zipWithIndex.map { case ((name, grad), idx) =>
(idx, name, grad, opt.createState(idx, argDict(name)))
}
val evalMetric = new Accuracy
val batchEndCallback = new Speedometer(100, 100)
val numEpoch = 20
for (epoch <- 0 until numEpoch) {
val tic = System.currentTimeMillis
evalMetric.reset()
var nBatch = 0
var epochDone = false
trainIter.reset()
while (!epochDone) {
var doReset = true
while (doReset && trainIter.hasNext) {
val dataBatch = trainIter.next()
argDict("data").set(dataBatch.data(0))
argDict("label").set(dataBatch.label(0))
executor.forward(isTrain = true)
executor.backward()
paramsGrads.foreach { case (idx, name, grad, optimState) =>
opt.update(idx, argDict(name), grad, optimState)
}
evalMetric.update(dataBatch.label, executor.outputs)
nBatch += 1
batchEndCallback.invoke(epoch, nBatch, evalMetric)
}
if (doReset) {
trainIter.reset()
}
epochDone = true
}
val (name, value) = evalMetric.get
name.zip(value).foreach { case (n, v) =>
logger.info(s"Epoch[$epoch] Train-accuracy=$v")
}
val toc = System.currentTimeMillis
logger.info(s"Epoch[$epoch] Time cost=${toc - tic}")
evalMetric.reset()
testIter.reset()
while (testIter.hasNext) {
val evalBatch = testIter.next()
argDict("data").set(evalBatch.data(0))
argDict("label").set(evalBatch.label(0))
executor.forward(isTrain = true)
evalMetric.update(evalBatch.label, executor.outputs)
evalBatch.dispose()
}
val (names, values) = evalMetric.get
names.zip(values).foreach { case (n, v) =>
logger.info(s"Epoch[$epoch] Validation-accuracy=$v")
}
}
executor.dispose()
} catch {
case ex: Exception => {
logger.error(ex.getMessage, ex)
parser.printUsage(System.err)
sys.exit(1)
}
}
}
}
class ExampleCustomOp {
@Option(name = "--data-path", usage = "the mnist data path")
private val dataPath: String = null
@Option(name = "--gpu", usage = "which gpu card to use, default is -1, means using cpu")
private val gpu: Int = -1
}
| Mega-DatA-Lab/mxnet | scala-package/examples/src/main/scala/ml/dmlc/mxnetexamples/customop/ExampleCustomOp.scala | Scala | apache-2.0 | 8,095 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream}
import java.math.{MathContext, RoundingMode}
import scala.util.control.NonFatal
import net.jpountz.lz4.{LZ4BlockInputStream, LZ4BlockOutputStream}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.catalog.CatalogColumnStat
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.util.{ArrayData, DateTimeUtils}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* Estimates of various statistics. The default estimation logic simply lazily multiplies the
* corresponding statistic produced by the children. To override this behavior, override
* `statistics` and assign it an overridden version of `Statistics`.
*
* '''NOTE''': concrete and/or overridden versions of statistics fields should pay attention to the
* performance of the implementations. The reason is that estimations might get triggered in
* performance-critical processes, such as query plan planning.
*
* Note that we are using a BigInt here since it is easy to overflow a 64-bit integer in
* cardinality estimation (e.g. cartesian joins).
*
* @param sizeInBytes Physical size in bytes. For leaf operators this defaults to 1, otherwise it
* defaults to the product of children's `sizeInBytes`.
* @param rowCount Estimated number of rows.
* @param attributeStats Statistics for Attributes.
*/
case class Statistics(
sizeInBytes: BigInt,
rowCount: Option[BigInt] = None,
attributeStats: AttributeMap[ColumnStat] = AttributeMap(Nil)) {
override def toString: String = "Statistics(" + simpleString + ")"
/** Readable string representation for the Statistics. */
def simpleString: String = {
Seq(s"sizeInBytes=${Utils.bytesToString(sizeInBytes)}",
if (rowCount.isDefined) {
// Show row count in scientific notation.
s"rowCount=${BigDecimal(rowCount.get, new MathContext(3, RoundingMode.HALF_UP)).toString()}"
} else {
""
}
).filter(_.nonEmpty).mkString(", ")
}
}
/**
* Statistics collected for a column.
*
* 1. The JVM data type stored in min/max is the internal data type for the corresponding
 * Catalyst data type. For example, the internal type of DateType is Int, and the internal
 * type of TimestampType is Long.
* 2. There is no guarantee that the statistics collected are accurate. Approximation algorithms
* (sketches) might have been used, and the data collected can also be stale.
*
* @param distinctCount number of distinct values
* @param min minimum value
* @param max maximum value
* @param nullCount number of nulls
* @param avgLen average length of the values. For fixed-length types, this should be a constant.
* @param maxLen maximum length of the values. For fixed-length types, this should be a constant.
* @param histogram histogram of the values
*/
case class ColumnStat(
distinctCount: Option[BigInt] = None,
min: Option[Any] = None,
max: Option[Any] = None,
nullCount: Option[BigInt] = None,
avgLen: Option[Long] = None,
maxLen: Option[Long] = None,
histogram: Option[Histogram] = None) {
// Are distinctCount and nullCount statistics defined?
val hasCountStats = distinctCount.isDefined && nullCount.isDefined
// Are min and max statistics defined?
val hasMinMaxStats = min.isDefined && max.isDefined
// Are avgLen and maxLen statistics defined?
val hasLenStats = avgLen.isDefined && maxLen.isDefined
def toCatalogColumnStat(colName: String, dataType: DataType): CatalogColumnStat =
CatalogColumnStat(
distinctCount = distinctCount,
min = min.map(CatalogColumnStat.toExternalString(_, colName, dataType)),
max = max.map(CatalogColumnStat.toExternalString(_, colName, dataType)),
nullCount = nullCount,
avgLen = avgLen,
maxLen = maxLen,
histogram = histogram)
}
/**
* This class is an implementation of equi-height histogram.
* Equi-height histogram represents the distribution of a column's values by a sequence of bins.
* Each bin has a value range and contains approximately the same number of rows.
*
* @param height number of rows in each bin
* @param bins equi-height histogram bins
*/
case class Histogram(height: Double, bins: Array[HistogramBin]) {
// Only for histogram equality test.
override def equals(other: Any): Boolean = other match {
case otherHgm: Histogram =>
height == otherHgm.height && bins.sameElements(otherHgm.bins)
case _ => false
}
override def hashCode(): Int = {
val temp = java.lang.Double.doubleToLongBits(height)
var result = (temp ^ (temp >>> 32)).toInt
result = 31 * result + java.util.Arrays.hashCode(bins.asInstanceOf[Array[AnyRef]])
result
}
}
/**
* A bin in an equi-height histogram. We use double type for lower/higher bound for simplicity.
*
* @param lo lower bound of the value range in this bin
* @param hi higher bound of the value range in this bin
* @param ndv approximate number of distinct values in this bin
*/
case class HistogramBin(lo: Double, hi: Double, ndv: Long)
object HistogramSerializer {
/**
* Serializes a given histogram to a string. For advanced statistics like histograms, sketches,
 * etc., we don't provide readability for their serialized formats in the metastore
* (string-to-string table properties). This is because it's hard or unnatural for these
* statistics to be human readable. For example, a histogram usually cannot fit in a single,
* self-described property. And for count-min-sketch, it's essentially unnatural to make it
* a readable string.
*/
final def serialize(histogram: Histogram): String = {
val bos = new ByteArrayOutputStream()
val out = new DataOutputStream(new LZ4BlockOutputStream(bos))
out.writeDouble(histogram.height)
out.writeInt(histogram.bins.length)
// Write data with same type together for compression.
var i = 0
while (i < histogram.bins.length) {
out.writeDouble(histogram.bins(i).lo)
i += 1
}
i = 0
while (i < histogram.bins.length) {
out.writeDouble(histogram.bins(i).hi)
i += 1
}
i = 0
while (i < histogram.bins.length) {
out.writeLong(histogram.bins(i).ndv)
i += 1
}
out.writeInt(-1)
out.flush()
out.close()
org.apache.commons.codec.binary.Base64.encodeBase64String(bos.toByteArray)
}
/** Deserializes a given string to a histogram. */
final def deserialize(str: String): Histogram = {
val bytes = org.apache.commons.codec.binary.Base64.decodeBase64(str)
val bis = new ByteArrayInputStream(bytes)
val ins = new DataInputStream(new LZ4BlockInputStream(bis))
val height = ins.readDouble()
val numBins = ins.readInt()
val los = new Array[Double](numBins)
var i = 0
while (i < numBins) {
los(i) = ins.readDouble()
i += 1
}
val his = new Array[Double](numBins)
i = 0
while (i < numBins) {
his(i) = ins.readDouble()
i += 1
}
val ndvs = new Array[Long](numBins)
i = 0
while (i < numBins) {
ndvs(i) = ins.readLong()
i += 1
}
ins.close()
val bins = new Array[HistogramBin](numBins)
i = 0
while (i < numBins) {
bins(i) = HistogramBin(los(i), his(i), ndvs(i))
i += 1
}
Histogram(height, bins)
}
}
| hhbyyh/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/Statistics.scala | Scala | apache-2.0 | 8,491 |
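/* Editor's sketch (not part of the Spark sources above): a round trip through
 * HistogramSerializer, showing that the compact LZ4/Base64 form decodes back to an
 * equal histogram. The bin values are illustrative. */
object HistogramSerializerSketch {
  import org.apache.spark.sql.catalyst.plans.logical.{Histogram, HistogramBin, HistogramSerializer}
  def main(args: Array[String]): Unit = {
    val histogram = Histogram(
      height = 100.0,
      bins = Array(HistogramBin(lo = 0.0, hi = 5.0, ndv = 5), HistogramBin(lo = 5.0, hi = 10.0, ndv = 4)))
    val serialized   = HistogramSerializer.serialize(histogram)   // Base64 string stored in the metastore
    val roundTripped = HistogramSerializer.deserialize(serialized)
    assert(roundTripped == histogram)   // Histogram.equals compares height and bins element-wise
  }
}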
object Test extends App {
t
def t: Unit = {
val c1 = C1()(1)
println(c1.copy()(2))
{
implicit val i = 2873
println(c1.copy())
}
val c2 = C2()(1)
println(c2.copy()(37))
val c3 = C3(1,2)(3)
println(c3.copy()(27))
println(c3.copy(y = 22)(27))
println(c3.copy(y = 7, x = 11)(27))
val c4 = C4(1)
println(c4.copy())
println(c4.copy(x = 23))
val c5 = C5(1,2)(3,"a")
println(c5.copy()(33,"b"))
println(c5.copy(y = 19)(33,"b"))
{
implicit val i = 193
implicit val s = "c"
println(c5.copy())
println(c5.copy(y = 371))
println(c5.copy(x = -1)(-2, "lken"))
}
val c6 = C6(1)(2)(3)
println(c6.copy(29)(18)(-12))
{
implicit val i = 2892
println(c6.copy(x = 1)(93))
println(c6.copy(x = 1)(93)(761))
}
val c7 = C7(1)(2)(3)("h")
println(c7.copy()(22)(33)("elkj"))
{
implicit val s = "me"
println(c7.copy()(283)(29872))
println(c7.copy(37)(298)(899)("ekjr"))
}
val c8 = C8(1)(2,3)()("els")
println(c8.copy(x = 172)(989, 77)()("eliurna"))
{
implicit val s = "schtring"
println(c8.copy()(82,2111)())
println(c8.copy(x = -1)(92,29)()("lken"))
}
val c9 = C9(1)(2)()()("u")
println(c9.copy()(271)()()("ehebab"))
{
implicit val s = "enag"
println(c9.copy()(299)()())
println(c9.copy(x = -42)(99)()()("flae"))
}
class KA { override def toString = "ka" }
class KB extends KA { override def toString = "kb" }
val c10 = C10(10)(3)(19)
println(c10.copy()(298)(27))
println(c10.copy("elkn")("en")("emn"))
println(c10.copy(new KA)(new KB)(new KB))
{
implicit val k = new KA
println(c10.copy(new KA)(new KB))
}
}
}
case class C1()(implicit x: Int) {
override def toString = s"c1: $x"
}
case class C2()(y: Int) {
override def toString = s"c2: $y"
}
case class C3(x: Int, y: Int)(z: Int) {
override def toString = s"c3: $x, $y, $z"
}
case class C4(x: Int) {
override def toString = s"c4: $x"
}
case class C5(x: Int, y: Int)(implicit z: Int, s: String) {
override def toString = s"c5: $x, $y, $z, $s"
}
case class C6(x: Int)(y: Int)(implicit z: Int) {
override def toString = s"c6: $x, $y, $z"
}
case class C7(x: Int)(y: Int)(z: Int)(implicit s: String) {
override def toString = s"c7: $x, $y, $z, $s"
}
case class C8(x: Int)(y: Int, z: Int)()(implicit s: String) {
override def toString = s"c8: $x, $y, $z, $s"
}
case class C9(x: Int)(y: Int)()()(implicit s: String) {
override def toString = s"c9: $x, $y, $s"
}
case class C10[T,U <: T](x: T)(y: U)(implicit z: T) {
override def toString = s"c9: $x, $y, $z"
}
| lrytz/scala | test/files/run/t5907.scala | Scala | apache-2.0 | 2,714 |
package org.jetbrains.plugins.scala.failed.typeInference
import org.jetbrains.plugins.scala.PerfCycleTests
import org.jetbrains.plugins.scala.lang.typeConformance.TypeConformanceTestBase
import org.junit.experimental.categories.Category
/**
* @author mucianm
* @since 28.03.16.
*/
@Category(Array(classOf[PerfCycleTests]))
class HigherKindedTypesConformanceTest extends TypeConformanceTestBase {
def testSCL9713(): Unit = doTest(
"""
|import scala.language.higherKinds
|
|type Foo[_]
|type Bar[_]
|type S
|def foo(): Foo[S] with Bar[S]
|
|val x: Foo[S] = foo()
|//True
""".stripMargin
)
def testSCL7319(): Unit = doTest {
s"""trait XIndexedStateT[F[+_], -S1, +S2, +A] {
| def lift[M[+_]]: XIndexedStateT[({type λ[+α]=M[F[α]]})#λ, S1, S2, A] = ???
|}
|
|type XStateT[F[+_], S, +A] = XIndexedStateT[F, S, S, A]
|
|type XId[+X] = X
|
|def example[S, A](s: XStateT[XId, S, A]): XStateT[Option, S, A] = {
| ${caretMarker}val res: XStateT[Option, S, A] = s.lift[Option]
| res
|}
|//true""".stripMargin
}
def testSCL9088(): Unit = doTest {
s"""trait Bar {
| type FooType[T] <: Foo[T]
|
| trait Foo[T] {
| val x: T
| }
|
| def getFoo[T](x: T): FooType[T]
|}
|
|class BarImpl extends Bar {
| case class FooImpl[T](x: T) extends Foo[T]
|
| override type FooType[R] = FooImpl[R]
|
| override def getFoo[R](x: R): FooType[R] = FooImpl[R](x)
|}
|
|trait Container[B <: Bar] {
| val profile: B
|
| ${caretMarker}val test: B#FooType[Int] = profile.getFoo[Int](5)
|}
|//true""".stripMargin
}
}
| jeantil/intellij-scala | test/org/jetbrains/plugins/scala/failed/typeInference/HigherKindedTypesConformanceTest.scala | Scala | apache-2.0 | 1,802 |
package com.mesosphere.cosmos.thirdparty.marathon.circe
import com.mesosphere.cosmos.thirdparty.marathon.model._
import io.circe.Decoder
import io.circe.generic.semiauto._
object Decoders {
implicit val decodeAppId: Decoder[AppId] = Decoder.decodeString.map(AppId(_))
implicit val decodeMarathonApp: Decoder[MarathonApp] = deriveDecoder[MarathonApp]
implicit val decodeMarathonAppContainer: Decoder[MarathonAppContainer] = deriveDecoder[MarathonAppContainer]
implicit val decodeMarathonAppContainerDocker: Decoder[MarathonAppContainerDocker] = deriveDecoder[MarathonAppContainerDocker]
implicit val decodeMarathonAppResponse: Decoder[MarathonAppResponse] = deriveDecoder[MarathonAppResponse]
implicit val decodeMarathonAppsResponse: Decoder[MarathonAppsResponse] = deriveDecoder[MarathonAppsResponse]
implicit val decodeMarathonError: Decoder[MarathonError] = deriveDecoder[MarathonError]
}
| takirala/cosmos | cosmos-common/src/main/scala/com/mesosphere/cosmos/thirdparty/marathon/circe/Decoders.scala | Scala | apache-2.0 | 907 |
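/* Editor's sketch (not part of the Cosmos sources above): decoding a Marathon app id
 * with the implicit decoders defined above. The JSON literal is illustrative, and the
 * exact Either/Xor result type depends on the circe version in use. */
object MarathonDecodersSketch {
  import com.mesosphere.cosmos.thirdparty.marathon.circe.Decoders._
  import com.mesosphere.cosmos.thirdparty.marathon.model.AppId
  import io.circe.parser.decode
  // AppId is decoded from a plain JSON string, per decodeAppId above
  val appId = decode[AppId](""""/cassandra/node-0"""")
}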
package models.customer
import models.AssetSupport.IdType
import models._
import models.common._
import org.joda.time.DateTime
import play.api.libs.json.Json
case class TermsOfPayment(
term: Int,
description: String
)
object TermsOfPayment {
implicit val formats = Json.format[TermsOfPayment]
}
case class MethodOfPayment(
method: Int,
description: String
)
object MethodOfPayment {
implicit val format = Json.format[MethodOfPayment]
}
case class PaymentSchedule(
period: DateTime,
percent: String
)
object PaymentSchedule {
implicit val format = Json.format[PaymentSchedule]
}
case class Payment(
termsOfPayment: TermsOfPayment,
methodOfPayment: MethodOfPayment,
paymentSchedule: PaymentSchedule,
paymentDay: DateTime,
bankAccount: String,
bankAccountNumber: String
)
object Payment {
implicit val format = Json.format[Payment]
}
case class CreditInformation(
mandatoryCreditLimit: Boolean,
creditRating: Int,
creditLimit: Int,
currency: String
)
object CreditInformation {
implicit val format = Json.format[CreditInformation]
}
case class Discount(
multilineDiscount: String,
totalDiscount: String,
price: Price,
lineDiscount: Int
)
object Discount {
implicit val format = Json.format[Discount]
}
case class CustomerIn(
_id: IdType,
createdAt: DateTime,
lastModifiedAt: DateTime,
active: Boolean,
description: String,
name: String,
agentId: IdType,
taxExemptNumber: Option[String],
lineOfBusinessId: IdType,
siteId: IdType,
companyTypeId: IdType,
shopIds: List[IdType],
addresses: List[Address],
groupIds: List[IdType],
contactIds: List[IdType],
accountNumber: Option[String]
) extends AssetIn with AssetUpdateBuilder[CustomerUpdate] {
override def fillup(lastModifiedAt: DateTime) = CustomerUpdate(lastModifiedAt, active, description, name, agentId, taxExemptNumber, lineOfBusinessId, siteId, companyTypeId, shopIds, addresses, groupIds, contactIds, accountNumber)
}
object CustomerIn extends AssetInCompanion[CustomerIn] {
val format = Json.format[CustomerIn]
override def collectionName: String = "customers"
}
case class CustomerUpdate(lastModifiedAt: DateTime,
active: Boolean,
description: String,
name: String,
agentId: IdType,
taxExemptNumber: Option[String],
lineOfBusinessId: IdType,
siteId: IdType,
companyTypeId: IdType,
shopIds: List[IdType],
addresses: List[Address],
groupIds: List[IdType],
contactIds: List[IdType],
accountNumber: Option[String]) extends AssetUpdate
object CustomerUpdate extends AssetUpdateCompanion[CustomerUpdate] {
val format = Json.format[CustomerUpdate]
override def collectionName: String = CustomerIn.collectionName
}
case class CustomerCreate(active: Boolean,
description: String,
name: String,
agentId: IdType,
taxExemptNumber: Option[String],
lineOfBusinessId: IdType,
siteId: IdType,
companyTypeId: IdType,
shopIds: List[IdType],
addresses: List[Address],
groupIds: List[IdType],
contactIds: List[IdType],
accountNumber: Option[String]) extends AssetCreate[CustomerIn] {
override def fillup(b: AssetBase): CustomerIn = CustomerIn(b.id, b.createdAt, b.lastModifiedAt, active, description, name, agentId, taxExemptNumber, lineOfBusinessId, siteId, companyTypeId, shopIds, addresses, groupIds, contactIds, accountNumber)
}
object CustomerCreate {
implicit val reads = Json.reads[CustomerCreate]
}
| tsechov/shoehorn | app/models/customer/customer.scala | Scala | apache-2.0 | 4,982 |
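/* Editor's sketch (not part of the original sources above): the Play JSON formats
 * defined in the companion objects give round-tripping for free; the values here are
 * illustrative. */
object CustomerJsonSketch {
  import models.customer.TermsOfPayment
  import play.api.libs.json.Json
  val json   = Json.toJson(TermsOfPayment(term = 30, description = "Net 30 days"))
  val parsed = json.as[TermsOfPayment]   // resolves TermsOfPayment.formats implicitly
}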
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.pipes
import org.neo4j.cypher.internal.commands.NamedPath
import org.neo4j.cypher.internal.symbols.PathType
class NamedPathPipe(source: Pipe, path: NamedPath) extends Pipe {
def createResults(state: QueryState) = {
source.createResults(state).map(ctx => {
ctx.put(path.pathName, path.getPath(ctx))
ctx
})
}
// val symbols = source.symbols.add(Identifier(path.pathName, PathType()))
val symbols = source.symbols.add(path.pathName, PathType())
override def executionPlan(): String = source.executionPlan() + "\\r\\nExtractPath(" + path.pathName + " = " + path.pathPattern.mkString(", ") + ")"
} | dksaputra/community | cypher/src/main/scala/org/neo4j/cypher/internal/pipes/NamedPathPipe.scala | Scala | gpl-3.0 | 1,455 |
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js tools             **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2014, LAMP/EPFL        **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.core.tools.javascript
import scala.language.implicitConversions
import org.scalajs.core.ir
import org.scalajs.core.ir.Position
import Trees._
private[javascript] object TreeDSL {
implicit class TreeOps(val self: Tree) extends AnyVal {
/** Select a member */
def DOT(field: Ident)(implicit pos: Position): DotSelect =
DotSelect(self, field)
/** Select a member */
def DOT(field: String)(implicit pos: Position): DotSelect =
DotSelect(self, Ident(field))
// Some operators that we use
def ===(that: Tree)(implicit pos: Position): Tree =
BinaryOp(ir.Trees.JSBinaryOp.===, self, that)
def ===(that: String)(implicit pos: Position): Tree =
BinaryOp(ir.Trees.JSBinaryOp.===, self, StringLiteral(that))
def unary_!()(implicit pos: Position): Tree =
UnaryOp(ir.Trees.JSUnaryOp.!, self)
def &&(that: Tree)(implicit pos: Position): Tree =
BinaryOp(ir.Trees.JSBinaryOp.&&, self, that)
def ||(that: Tree)(implicit pos: Position): Tree =
BinaryOp(ir.Trees.JSBinaryOp.||, self, that)
// Other constructs
def :=(that: Tree)(implicit pos: Position): Tree =
Assign(self, that)
}
def typeof(expr: Tree)(implicit pos: Position): Tree =
UnaryOp(ir.Trees.JSUnaryOp.typeof, expr)
}
| matthughes/scala-js | tools/shared/src/main/scala/org/scalajs/core/tools/javascript/TreeDSL.scala | Scala | bsd-3-clause | 1,824 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.syncadapter;
import java.util.function.Consumer
import com.mongodb.Function
import com.mongodb.client.{ MongoCursor, MongoIterable }
import org.mongodb.scala.Observable
import scala.concurrent.Await
import scala.language.reflectiveCalls
trait SyncMongoIterable[T] extends MongoIterable[T] {
val wrapped: Observable[T]
override def iterator(): MongoCursor[T] = cursor()
override def cursor(): MongoCursor[T] = SyncMongoCursor[T](wrapped)
override def first(): T = Await.result(wrapped.head(), WAIT_DURATION)
override def map[U](mapper: Function[T, U]) = throw new UnsupportedOperationException
override def forEach(action: Consumer[_ >: T]): Unit = {
use(cursor())(localCursor => while (localCursor.hasNext) action.accept(localCursor.next()))
}
override def into[A <: java.util.Collection[_ >: T]](target: A): A = {
use(cursor())(localCursor => while (localCursor.hasNext) target.add(localCursor.next()))
target
}
def use[A <: { def close(): Unit }, B](resource: A)(code: A => B): B = {
try {
code(resource)
} finally {
resource.close()
}
}
}
| rozza/mongo-java-driver | driver-scala/src/it/scala/org/mongodb/scala/syncadapter/SyncMongoIterable.scala | Scala | apache-2.0 | 1,743 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import kafka.log.LogConfig._
import kafka.server.Constants._
import org.junit.Assert._
import org.apache.kafka.common.metrics.Quota
import org.easymock.EasyMock
import org.junit.Test
import kafka.integration.KafkaServerTestHarness
import kafka.utils._
import kafka.admin.{AdminOperationException, AdminUtils}
import org.apache.kafka.common.TopicPartition
import scala.collection.Map
class DynamicConfigChangeTest extends KafkaServerTestHarness {
def generateConfigs = List(KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, zkConnect)))
@Test
def testConfigChange() {
assertTrue("Should contain a ConfigHandler for topics",
this.servers.head.dynamicConfigHandlers.contains(ConfigType.Topic))
val oldVal: java.lang.Long = 100000L
val newVal: java.lang.Long = 200000L
val tp = new TopicPartition("test", 0)
val logProps = new Properties()
logProps.put(FlushMessagesProp, oldVal.toString)
AdminUtils.createTopic(zkUtils, tp.topic, 1, 1, logProps)
TestUtils.retry(10000) {
val logOpt = this.servers.head.logManager.getLog(tp)
assertTrue(logOpt.isDefined)
assertEquals(oldVal, logOpt.get.config.flushInterval)
}
logProps.put(FlushMessagesProp, newVal.toString)
AdminUtils.changeTopicConfig(zkUtils, tp.topic, logProps)
TestUtils.retry(10000) {
assertEquals(newVal, this.servers.head.logManager.getLog(tp).get.config.flushInterval)
}
}
private def testQuotaConfigChange(user: String, clientId: String, rootEntityType: String, configEntityName: String) {
assertTrue("Should contain a ConfigHandler for " + rootEntityType ,
this.servers.head.dynamicConfigHandlers.contains(rootEntityType))
val props = new Properties()
props.put(DynamicConfig.Client.ProducerByteRateOverrideProp, "1000")
props.put(DynamicConfig.Client.ConsumerByteRateOverrideProp, "2000")
val quotaManagers = servers.head.apis.quotas
rootEntityType match {
case ConfigType.Client => AdminUtils.changeClientIdConfig(zkUtils, configEntityName, props)
case _ => AdminUtils.changeUserOrUserClientIdConfig(zkUtils, configEntityName, props)
}
TestUtils.retry(10000) {
val overrideProducerQuota = quotaManagers.produce.quota(user, clientId)
val overrideConsumerQuota = quotaManagers.fetch.quota(user, clientId)
assertEquals(s"User $user clientId $clientId must have overridden producer quota of 1000",
Quota.upperBound(1000), overrideProducerQuota)
assertEquals(s"User $user clientId $clientId must have overridden consumer quota of 2000",
Quota.upperBound(2000), overrideConsumerQuota)
}
val defaultProducerQuota = Long.MaxValue.asInstanceOf[Double]
val defaultConsumerQuota = Long.MaxValue.asInstanceOf[Double]
val emptyProps = new Properties()
rootEntityType match {
case ConfigType.Client => AdminUtils.changeClientIdConfig(zkUtils, configEntityName, emptyProps)
case _ => AdminUtils.changeUserOrUserClientIdConfig(zkUtils, configEntityName, emptyProps)
}
TestUtils.retry(10000) {
val producerQuota = quotaManagers.produce.quota(user, clientId)
val consumerQuota = quotaManagers.fetch.quota(user, clientId)
assertEquals(s"User $user clientId $clientId must have reset producer quota to " + defaultProducerQuota,
Quota.upperBound(defaultProducerQuota), producerQuota)
assertEquals(s"User $user clientId $clientId must have reset consumer quota to " + defaultConsumerQuota,
Quota.upperBound(defaultConsumerQuota), consumerQuota)
}
}
@Test
def testClientIdQuotaConfigChange() {
testQuotaConfigChange("ANONYMOUS", "testClient", ConfigType.Client, "testClient")
}
@Test
def testUserQuotaConfigChange() {
testQuotaConfigChange("ANONYMOUS", "testClient", ConfigType.User, "ANONYMOUS")
}
@Test
def testUserClientIdQuotaChange() {
testQuotaConfigChange("ANONYMOUS", "testClient", ConfigType.User, "ANONYMOUS/clients/testClient")
}
@Test
def testDefaultClientIdQuotaConfigChange() {
testQuotaConfigChange("ANONYMOUS", "testClient", ConfigType.Client, "<default>")
}
@Test
def testDefaultUserQuotaConfigChange() {
testQuotaConfigChange("ANONYMOUS", "testClient", ConfigType.User, "<default>")
}
@Test
def testDefaultUserClientIdQuotaConfigChange() {
testQuotaConfigChange("ANONYMOUS", "testClient", ConfigType.User, "<default>/clients/<default>")
}
@Test
def testQuotaInitialization() {
val server = servers.head
val clientIdProps = new Properties()
server.shutdown()
clientIdProps.put(DynamicConfig.Client.ProducerByteRateOverrideProp, "1000")
clientIdProps.put(DynamicConfig.Client.ConsumerByteRateOverrideProp, "2000")
val userProps = new Properties()
userProps.put(DynamicConfig.Client.ProducerByteRateOverrideProp, "10000")
userProps.put(DynamicConfig.Client.ConsumerByteRateOverrideProp, "20000")
val userClientIdProps = new Properties()
userClientIdProps.put(DynamicConfig.Client.ProducerByteRateOverrideProp, "100000")
userClientIdProps.put(DynamicConfig.Client.ConsumerByteRateOverrideProp, "200000")
AdminUtils.changeClientIdConfig(zkUtils, "overriddenClientId", clientIdProps)
AdminUtils.changeUserOrUserClientIdConfig(zkUtils, "overriddenUser", userProps)
AdminUtils.changeUserOrUserClientIdConfig(zkUtils, "ANONYMOUS/clients/overriddenUserClientId", userClientIdProps)
// Remove config change znodes to force quota initialization only through loading of user/client quotas
zkUtils.getChildren(ZkUtils.ConfigChangesPath).foreach { p => zkUtils.deletePath(ZkUtils.ConfigChangesPath + "/" + p) }
server.startup()
val quotaManagers = server.apis.quotas
assertEquals(Quota.upperBound(1000), quotaManagers.produce.quota("someuser", "overriddenClientId"))
assertEquals(Quota.upperBound(2000), quotaManagers.fetch.quota("someuser", "overriddenClientId"))
assertEquals(Quota.upperBound(10000), quotaManagers.produce.quota("overriddenUser", "someclientId"))
assertEquals(Quota.upperBound(20000), quotaManagers.fetch.quota("overriddenUser", "someclientId"))
assertEquals(Quota.upperBound(100000), quotaManagers.produce.quota("ANONYMOUS", "overriddenUserClientId"))
assertEquals(Quota.upperBound(200000), quotaManagers.fetch.quota("ANONYMOUS", "overriddenUserClientId"))
}
@Test
def testConfigChangeOnNonExistingTopic() {
val topic = TestUtils.tempTopic
try {
val logProps = new Properties()
logProps.put(FlushMessagesProp, 10000: java.lang.Integer)
AdminUtils.changeTopicConfig(zkUtils, topic, logProps)
fail("Should fail with AdminOperationException for topic doesn't exist")
} catch {
case _: AdminOperationException => // expected
}
}
@Test
def testProcessNotification(): Unit = {
val props = new Properties()
props.put("a.b", "10")
// Create a mock ConfigHandler to record config changes it is asked to process
val entityArgument = EasyMock.newCapture[String]
val propertiesArgument = EasyMock.newCapture[Properties]
val handler = EasyMock.createNiceMock(classOf[ConfigHandler])
handler.processConfigChanges(
EasyMock.and(EasyMock.capture(entityArgument), EasyMock.isA(classOf[String])),
EasyMock.and(EasyMock.capture(propertiesArgument), EasyMock.isA(classOf[Properties])))
EasyMock.expectLastCall().once()
EasyMock.replay(handler)
val configManager = new DynamicConfigManager(zkUtils, Map(ConfigType.Topic -> handler))
// Notifications created using the old TopicConfigManager are ignored.
configManager.ConfigChangedNotificationHandler.processNotification("not json")
// Incorrect Map. No version
try {
val jsonMap = Map("v" -> 1, "x" -> 2)
configManager.ConfigChangedNotificationHandler.processNotification(Json.encode(jsonMap))
fail("Should have thrown an Exception while parsing incorrect notification " + jsonMap)
}
catch {
case _: Throwable =>
}
// Version is provided. EntityType is incorrect
try {
val jsonMap = Map("version" -> 1, "entity_type" -> "garbage", "entity_name" -> "x")
configManager.ConfigChangedNotificationHandler.processNotification(Json.encode(jsonMap))
fail("Should have thrown an Exception while parsing incorrect notification " + jsonMap)
}
catch {
case _: Throwable =>
}
// EntityName isn't provided
try {
val jsonMap = Map("version" -> 1, "entity_type" -> ConfigType.Topic)
configManager.ConfigChangedNotificationHandler.processNotification(Json.encode(jsonMap))
fail("Should have thrown an Exception while parsing incorrect notification " + jsonMap)
}
catch {
case _: Throwable =>
}
// Everything is provided
val jsonMap = Map("version" -> 1, "entity_type" -> ConfigType.Topic, "entity_name" -> "x")
configManager.ConfigChangedNotificationHandler.processNotification(Json.encode(jsonMap))
// Verify that processConfigChanges was only called once
EasyMock.verify(handler)
}
@Test
def shouldParseReplicationQuotaProperties(): Unit = {
val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null)
val props: Properties = new Properties()
//Given
props.put(LeaderReplicationThrottledReplicasProp, "0:101,0:102,1:101,1:102")
//When/Then
assertEquals(Seq(0,1), configHandler.parseThrottledPartitions(props, 102, LeaderReplicationThrottledReplicasProp))
assertEquals(Seq(), configHandler.parseThrottledPartitions(props, 103, LeaderReplicationThrottledReplicasProp))
}
@Test
def shouldParseWildcardReplicationQuotaProperties(): Unit = {
val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null)
val props: Properties = new Properties()
//Given
props.put(LeaderReplicationThrottledReplicasProp, "*")
//When
val result = configHandler.parseThrottledPartitions(props, 102, LeaderReplicationThrottledReplicasProp)
//Then
assertEquals(AllReplicas, result)
}
@Test
def shouldParseReplicationQuotaReset(): Unit = {
val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null)
val props: Properties = new Properties()
//Given
props.put(FollowerReplicationThrottledReplicasProp, "")
//When
val result = configHandler.parseThrottledPartitions(props, 102, FollowerReplicationThrottledReplicasProp)
//Then
assertEquals(Seq(), result)
}
@Test
def shouldParseRegardlessOfWhitespaceAroundValues() {
val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null)
assertEquals(AllReplicas, parse(configHandler, "* "))
assertEquals(Seq(), parse(configHandler, " "))
assertEquals(Seq(6), parse(configHandler, "6:102"))
assertEquals(Seq(6), parse(configHandler, "6:102 "))
assertEquals(Seq(6), parse(configHandler, " 6:102"))
}
def parse(configHandler: TopicConfigHandler, value: String): Seq[Int] = {
configHandler.parseThrottledPartitions(CoreUtils.propsWith(LeaderReplicationThrottledReplicasProp, value), 102, LeaderReplicationThrottledReplicasProp)
}
}
| ErikKringen/kafka | core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala | Scala | apache-2.0 | 12,071 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.compression
import java.io.ByteArrayInputStream
import java.util.Arrays
import java.util.zip.{Deflater, DeflaterInputStream, Inflater, InflaterInputStream}
import monix.reactive.Observable
import scala.annotation.tailrec
trait DeflateTestUtils extends CompressionTestData {
val inflateRandomExampleThatFailed: Array[Byte] =
Array(100, 96, 2, 14, 108, -122, 110, -37, 35, -11, -10, 14, 47, 30, 43, 111, -80, 44, -34, 35, 35, 37, -103).map(
_.toByte
)
def deflatedStream(bytes: Array[Byte], chunkSize: Int = 32 * 1024) =
deflatedWith(bytes, new Deflater(), chunkSize)
def noWrapDeflatedStream(bytes: Array[Byte], chunkSize: Int = 32 * 1024) =
deflatedWith(bytes, new Deflater(9, true), chunkSize)
def jdkDeflate(bytes: Array[Byte], deflater: Deflater): Array[Byte] = {
val bigBuffer = new Array[Byte](1024 * 1024)
val dif = new DeflaterInputStream(new ByteArrayInputStream(bytes), deflater)
val read = dif.read(bigBuffer, 0, bigBuffer.length)
Arrays.copyOf(bigBuffer, read)
}
def deflatedWith(bytes: Array[Byte], deflater: Deflater, chunkSize: Int = 32 * 1024) = {
val arr = jdkDeflate(bytes, deflater)
Observable
.fromIterable(arr)
.bufferTumbling(chunkSize)
.map(_.toArray)
}
def jdkInflate(bytes: Array[Byte], noWrap: Boolean): Array[Byte] = {
val bigBuffer = new Array[Byte](1024 * 1024)
val inflater = new Inflater(noWrap)
val iif = new InflaterInputStream(
new ByteArrayInputStream(bytes),
inflater
)
@tailrec
def inflate(acc: Array[Byte]): Array[Byte] = {
val read = iif.read(bigBuffer, 0, bigBuffer.length)
if (read <= 0) acc
else inflate(acc ++ bigBuffer.take(read).toList)
}
inflate(Array.emptyByteArray)
}
}
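// Illustrative round-trip sketch (assumption, not part of the original file): the helpers above
// are typically exercised as inverses, e.g. for some `bytes: Array[Byte]`
//   jdkInflate(jdkDeflate(bytes, new Deflater()), noWrap = false) sameElements bytes
// while `deflatedStream` exposes the same deflated payload as a chunked Observable for the
// streaming decompression tests.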
| alexandru/monifu | monix-reactive/jvm/src/test/scala/monix/reactive/compression/DeflateTestUtils.scala | Scala | apache-2.0 | 2,470 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.ws
import java.util.Properties
import java.util.concurrent.TimeUnit
import akka.actor.Actor
import cmwell.common.ExitWithError
import cmwell.common.OffsetsService
import cmwell.ws.Settings.kafkaURL
import com.typesafe.scalalogging.LazyLogging
import k.grid.Grid
import org.apache.kafka.clients.admin.{AdminClient, AdminClientConfig}
import org.apache.kafka.clients.consumer.KafkaConsumer
import org.apache.kafka.common.TopicPartition
import org.joda.time.DateTime
import scala.collection.mutable
import scala.jdk.CollectionConverters._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.{Failure, Success}
/**
* Created by israel on 01/12/2016.
*/
class BGMonitorActor(zkServers: String,
offsetService: OffsetsService,
implicit val ec: ExecutionContext = concurrent.ExecutionContext.Implicits.global)
extends Actor
with LazyLogging {
val kafkaAdminProperties = new Properties
kafkaAdminProperties.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaURL)
val adminClient = AdminClient.create(kafkaAdminProperties)
val topics = Seq("persist_topic", "persist_topic.priority", "index_topic", "index_topic.priority")
val describedTopics = adminClient.describeTopics(topics.asJava).all().get(30, TimeUnit.SECONDS)
val partitionsForTopics: mutable.Map[String, Seq[Int]] = describedTopics.asScala.map {
case (topic, topicPartition) => topic -> (0 until topicPartition.partitions.size)
}
val topicsPartitionsAndGroups = partitionsForTopics.flatMap {
case ("persist_topic", partitions) =>
partitions.map { partition =>
(new TopicPartition("persist_topic", partition), s"imp.$partition")
}
case ("persist_topic.priority", partitions) =>
partitions.map { partition =>
(new TopicPartition("persist_topic.priority", partition), s"imp.p.$partition")
}
case ("index_topic", partitions) =>
partitions.map { partition =>
(new TopicPartition("index_topic", partition), s"indexer.$partition")
}
case ("index_topic.priority", partitions) =>
partitions.map { partition =>
(new TopicPartition("index_topic.priority", partition), s"indexer.p.$partition")
}
case x@(topicName, partition) => logger.error(s"Unexpected topicName: $topicName . Partition: $partition"); ???
}
val topicsPartitionsAndConsumers = topicsPartitionsAndGroups.map {
case (topicPartition, groupId) =>
val kafkaConsumerProps = new Properties()
kafkaConsumerProps.put("bootstrap.servers", kafkaURL)
kafkaConsumerProps.put("group.id", groupId)
kafkaConsumerProps.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer")
kafkaConsumerProps.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
kafkaConsumerProps.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer")
kafkaConsumerProps.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
(topicPartition, new KafkaConsumer[Array[Byte], Array[Byte]](kafkaConsumerProps))
}
val topicsPartitions = topicsPartitionsAndConsumers.keys
@volatile var previousOffsetInfo: OffsetsInfo = OffsetsInfo(Map.empty[String, PartitionOffsetsInfo], DateTime.now())
@volatile var currentOffsetInfo: OffsetsInfo = OffsetsInfo(Map.empty[String, PartitionOffsetsInfo], DateTime.now())
@volatile var lastFetchDuration: Long = 30L
import java.util.concurrent.ConcurrentHashMap
val redSince: collection.concurrent.Map[String, Long] = new ConcurrentHashMap[String, Long]().asScala
self ! CalculateOffsetInfo
override def receive: Receive = {
case GetOffsetInfo =>
logger.debug(s"got GetOffsetInfo message returning $currentOffsetInfo")
sender() ! currentOffsetInfo
case CalculateOffsetInfo =>
logger.debug(s"got inner request to generate new offsets info")
generateOffsetsInfo
}
private def generateOffsetsInfo = {
logger.debug(s"generating offsets info")
def calculateOffsetInfo(): Future[(OffsetsInfo, Long)] = {
import concurrent._
Future {
blocking {
val start = System.currentTimeMillis()
val topicPartitionsWriteOffsets =
topicsPartitionsAndConsumers.head._2.endOffsets(topicsPartitions.asJavaCollection)
val partitionsOffsetsInfo: Map[String, PartitionOffsetsInfo] = topicPartitionsWriteOffsets.asScala.map {
case (topicPartition, writeOffset) =>
val streamId = topicPartition.topic() match {
case "persist_topic" => s"imp.${topicPartition.partition()}_offset"
case "persist_topic.priority" => s"imp.${topicPartition.partition()}.p_offset"
case "index_topic" => s"indexer.${topicPartition.partition()}_offset"
case "index_topic.priority" => s"indexer.${topicPartition.partition()}.p_offset"
}
val readOffset = offsetService.read(streamId).getOrElse(0L)
(topicPartition.topic() + topicPartition.partition(),
PartitionOffsetsInfo(topicPartition.topic(), topicPartition.partition(), readOffset, writeOffset))
}.toMap
val end = System.currentTimeMillis()
(OffsetsInfo(partitionsOffsetsInfo, new DateTime()), end - start)
}
}
}
calculateOffsetInfo().onComplete {
case Success((info, duration)) =>
logger.debug(s"calculate offset info successful: \\nInfo:$info\\nDuration:$duration")
previousOffsetInfo = currentOffsetInfo
try {
val partitionsOffsetInfoUpdated = info.partitionsOffsetInfo.map {
case (key, partitionInfo) =>
val readDiff = partitionInfo.readOffset - previousOffsetInfo.partitionsOffsetInfo
.get(key)
.map {
_.readOffset
}
.getOrElse(0L)
val partitionStatus = {
if (readDiff > 0) {
//Remove the bg from red map if was there
logger.debug(s"readDiff > 0. removing ${key}")
redSince.remove(key)
Green
}
else if (partitionInfo.readOffset - partitionInfo.writeOffset == 0) {
//Remove the bg from red map if was there
logger.debug(s"diff == 0. removing ${key}")
redSince.remove(key)
Green
} else if (previousOffsetInfo.partitionsOffsetInfo
.get(key)
.map {
_.partitionStatus
}
.getOrElse(Green) == Green) {
Yellow
} else {
Red
}
}
if (partitionStatus == Red) {
val currentTime = System.currentTimeMillis()
redSince.get(key) match {
case None =>
logger.warn(s"BG status for partition ${key} turned RED")
redSince.putIfAbsent(key, currentTime)
case Some(since) if ((currentTime - since) > 15 * 60 * 1000) =>
logger.error(
s"BG status for partition ${key} is RED for more than 15 minutes. (DISABLED - NOT sending it an exit message)"
)
// Grid.serviceRef(s"BGActor${partitionInfo.partition}") ! ExitWithError
redSince.replace(key, currentTime)
case Some(since) =>
logger.warn(
s"BG for partition ${key} is RED since ${(currentTime - since) / 1000} seconds ago"
)
}
}
key -> partitionInfo.copy(partitionStatus = partitionStatus)
}
currentOffsetInfo = info.copy(partitionsOffsetInfo = partitionsOffsetInfoUpdated)
} catch {
case t: Throwable => logger.error("exception ingesting offset info", t)
}
lastFetchDuration = duration
logger.debug(s"updated currentOffsetInfo: $currentOffsetInfo")
context.system.scheduler
.scheduleOnce(math.max(30000, lastFetchDuration).milliseconds, self, CalculateOffsetInfo)
case Failure(exception) =>
logger.error("failed to calculate offset info", exception)
context.system.scheduler
.scheduleOnce(math.max(30000, lastFetchDuration).milliseconds, self, CalculateOffsetInfo)
}
}
}
object BGMonitorActor {
def serviceName = classOf[BGMonitorActor].getName
}
case object GetOffsetInfo
case object CalculateOffsetInfo
case class OffsetsInfo(partitionsOffsetInfo: Map[String, PartitionOffsetsInfo], timeStamp: DateTime)
trait PartitionStatus
case object Green extends PartitionStatus
case object Yellow extends PartitionStatus
case object Red extends PartitionStatus
case class PartitionOffsetsInfo(topic: String,
partition: Int,
readOffset: Long,
writeOffset: Long,
partitionStatus: PartitionStatus = Green) {
def toShortInfoString =
s"${topic.head}${if (topic.contains(".p")) ".p" else ""}:${writeOffset - readOffset}:${partitionStatus.toString.head}"
}
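// Illustrative example with made-up numbers (not from the original sources):
// PartitionOffsetsInfo("persist_topic", 0, 90, 100, Yellow).toShortInfoString == "p:10:Y"
// i.e. first letter of the topic (plus ".p" for priority topics), the write/read lag,
// and the first letter of the partition status.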
| dudi3001/CM-Well | server/cmwell-ws/app/BGMonitorActor.scala | Scala | apache-2.0 | 10,098 |
package io.jfc.cursor
import cats.Functor
import io.jfc.{ Context, ContextElement, Cursor, Json }
private[jfc] case class CArray(
focus: Json,
p: Cursor,
u: Boolean,
ls: List[Json],
rs: List[Json]
) extends Cursor {
def context: Context = ContextElement.arrayContext(focus, ls.length) +: p.context
def up: Option[Cursor] = Some {
val j = Json.fromValues(ls.reverse_:::(focus :: rs))
p match {
case CJson(_) => CJson(j)
case CArray(_, pp, v, pls, prs) => CArray(j, pp, u || v, pls, prs)
case CObject(_, pk, pp, v, po) => CObject(j, pk, pp, u || v, if (u) po + (pk, j) else po)
}
}
def delete: Option[Cursor] = Some {
val j = Json.fromValues(ls.reverse_:::(rs))
p match {
case CJson(_) => CJson(j)
case CArray(_, pp, _, pls, prs) => CArray(j, pp, true, pls, prs)
case CObject(_, pk, pp, _, po) => CObject(j, pk, pp, true, po)
}
}
def withFocus(f: Json => Json): Cursor = copy(focus = f(focus), u = true)
def withFocusM[F[_]](f: Json => F[Json])(implicit F: Functor[F]): F[Cursor] =
F.map(f(focus))(j => copy(focus = j, u = true))
override def lefts: Option[List[Json]] = Some(ls)
override def rights: Option[List[Json]] = Some(rs)
override def left: Option[Cursor] = ls match {
case h :: t => Some(CArray(h, p, u, t, focus :: rs))
case Nil => None
}
override def right: Option[Cursor] = rs match {
case h :: t => Some(CArray(h, p, u, focus :: ls, t))
case Nil => None
}
override def first: Option[Cursor] = ls.reverse_:::(focus :: rs) match {
case h :: t => Some(CArray(h, p, u, Nil, t))
case Nil => None
}
override def last: Option[Cursor] = rs.reverse_:::(focus :: ls) match {
case h :: t => Some(CArray(h, p, u, t, Nil))
case Nil => None
}
override def deleteGoLeft: Option[Cursor] = ls match {
case h :: t => Some(CArray(h, p, true, t, rs))
case Nil => None
}
override def deleteGoRight: Option[Cursor] = rs match {
case h :: t => Some(CArray(h, p, true, ls, t))
case Nil => None
}
override def deleteGoFirst: Option[Cursor] = ls.reverse_:::(rs) match {
case h :: t => Some(CArray(h, p, true, Nil, t))
case Nil => None
}
override def deleteGoLast: Option[Cursor] = rs.reverse_:::(ls) match {
case h :: t => Some(CArray(h, p, true, t, Nil))
case Nil => None
}
override def deleteLefts: Option[Cursor] = Some(copy(u = true, ls = Nil))
override def deleteRights: Option[Cursor] = Some(copy(u = true, rs = Nil))
override def setLefts(js: List[Json]): Option[Cursor] = Some(copy(u = true, ls = js))
override def setRights(js: List[Json]): Option[Cursor] = Some(copy(u = true, rs = js))
}
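// Illustrative note (an assumption based on the fields above, not original documentation):
// CArray is a zipper over a JSON array: `ls` holds the elements to the left of the focus in
// reverse order and `rs` those to the right. Focusing on the middle element of [1, 2, 3] means
// focus = 2, ls = List(1), rs = List(3) (as Json values), and `up` rebuilds the array as
// Json.fromValues(List(1, 2, 3)) before re-wrapping the parent cursor.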
| non/circe | core/src/main/scala/io/jfc/cursor/CArray.scala | Scala | apache-2.0 | 2,698 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package utils
// Java
import java.math.{BigInteger => JBigInteger}
import java.net.URLEncoder
// Jackson
import com.fasterxml.jackson.databind.{
ObjectMapper,
JsonNode
}
// Joda-Time
import org.joda.time.{DateTime, DateTimeZone}
import org.joda.time.format.{DateTimeFormat, DateTimeFormatter}
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
// This project
import utils.{ConversionUtils => CU}
/**
* Contains general purpose extractors and other
* utilities for JSONs. Jackson-based.
*/
object JsonUtils {
type DateTimeFields = Option[Tuple2[NonEmptyList[String], DateTimeFormatter]]
private lazy val Mapper = new ObjectMapper
// Defines the maximalist JSON Schema-compatible date-time format
private val JsonSchemaDateTimeFormat = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").withZone(DateTimeZone.UTC)
/**
* Decodes a URL-encoded String then validates
* it as correct JSON.
*/
val extractUrlEncJson: (Int, String, String, String) => Validation[String, String] = (maxLength, enc, field, str) =>
CU.decodeString(enc, field, str).flatMap(json => validateAndReformatJson(maxLength, field, json))
/**
* Decodes a Base64 (URL safe)-encoded String then
* validates it as correct JSON.
*/
val extractBase64EncJson: (Int, String, String) => Validation[String, String] = (maxLength, field, str) =>
CU.decodeBase64Url(field, str).flatMap(json => validateAndReformatJson(maxLength, field, json))
/**
* Converts a Joda DateTime into
* a JSON Schema-compatible date-time string.
*
* @param dateTime The Joda DateTime
* to convert to a timestamp String
* @return the timestamp String
*/
private[utils] def toJsonSchemaDateTime(dateTime: DateTime): String = JsonSchemaDateTimeFormat.print(dateTime)
/**
* Converts a boolean-like String of value "true"
* or "false" to a JBool value of true or false
* respectively. Any other value becomes a
* JString.
*
* No erroring if the String is not boolean-like,
* leave it to eventual JSON Schema validation
* to enforce that.
*
* @param str The boolean-like String to convert
* @return true for "true", false for "false",
* and otherwise a JString wrapping the
* original String
*/
private[utils] def booleanToJValue(str: String): JValue = str match {
case "true" => JBool(true)
case "false" => JBool(false)
case _ => JString(str)
}
/**
* Converts an integer-like String to a
* JInt value. Any other value becomes a
* JString.
*
* No erroring if the String is not integer-like,
* leave it to eventual JSON Schema validation
* to enforce that.
*
* @param str The integer-like String to convert
* @return a JInt if the String was integer-like,
* or else a JString wrapping the original
* String.
*/
private[utils] def integerToJValue(str: String): JValue =
try {
JInt(new JBigInteger(str))
} catch {
case nfe: NumberFormatException =>
JString(str)
}
/**
* Reformats a non-standard date-time into a format
* compatible with JSON Schema's date-time format
* validation. If the String does not match the
* expected date format, then return the original String.
*
* @param str The date-time-like String to reformat
* to pass JSON Schema validation
* @return the reformatted date-time String if
* possible, or otherwise the original String
*/
def toJsonSchemaDateTime(str: String, fromFormat: DateTimeFormatter): String =
try {
val dt = DateTime.parse(str, fromFormat)
toJsonSchemaDateTime(dt)
} catch {
case iae: IllegalArgumentException => str
}
/**
* Converts an incoming key, value into a json4s JValue.
* Uses the lists of keys which should contain bools,
* ints and dates to apply specific processing to
* those values when found.
*
* @param key The key of the field to generate. Also used
* to determine what additional processing should
* be applied to the value
* @param value The value of the field
* @param bools A List of keys whose values should be
* processed as boolean-like Strings
* @param ints A List of keys whose values should be
* processed as integer-like Strings
* @param dateTimes If Some, a NEL of keys whose values should
* be treated as date-time-like Strings, which will
* require processing from the specified format
* @return a JField, containing the original key and the
* processed String, now as a JValue
*/
def toJField(key: String, value: String, bools: List[String], ints: List[String],
dateTimes: DateTimeFields): JField = {
val v = (value, dateTimes) match {
case ("", _) => JNull
case _ if bools.contains(key) => booleanToJValue(value)
case _ if ints.contains(key) => integerToJValue(value)
case (_, Some((nel, fmt)))
if nel.toList.contains(key) => JString(toJsonSchemaDateTime(value, fmt))
case _ => JString(value)
}
(key, v)
}
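// Illustrative example (the field names below are made up, not from the original code):
//   toJField("amount", "42", Nil, List("amount"), None) == ("amount", JInt(42))
//   toJField("is_new", "true", List("is_new"), Nil, None) == ("is_new", JBool(true))
//   toJField("comment", "", Nil, Nil, None) == ("comment", JNull)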
/**
* Validates and reformats a JSON:
* 1. Checks the JSON is valid
* 2. Reformats, including removing unnecessary whitespace
* 3. Checks if reformatted JSON is <= maxLength, because
* a truncated JSON causes chaos in Redshift et al
*
* @param field the name of the field containing the JSON
* @param str the String hopefully containing JSON
* @param maxLength the maximum allowed length for this
* JSON when reformatted
* @return a Scalaz Validation, wrapping either an error
* String or the reformatted JSON String
*/
private[utils] def validateAndReformatJson(maxLength: Int, field: String, str: String): Validation[String, String] =
extractJson(field, str)
.map(j => compact(fromJsonNode(j)))
.flatMap(j => if (j.length > maxLength) {
"Field [%s]: reformatted JSON length [%s] exceeds maximum allowed length [%s]".format(field, j.length, maxLength).fail
} else j.success)
/**
* Converts a JSON string into a Validation[String, JsonNode]
*
* @param field The name of the field containing JSON
* @param instance The JSON string to parse
* @return a Scalaz Validation, wrapping either an error
* String or the extracted JsonNode
*/
def extractJson(field: String, instance: String): Validation[String, JsonNode] =
try {
Mapper.readTree(instance).success
} catch {
case e: Throwable => s"Field [$field]: invalid JSON [$instance] with parsing error: ${stripInstanceEtc(e.getMessage)}".fail
}
/**
* Converts a JSON string into a JsonNode.
*
* UNSAFE - only use it for Strings you have
* created yourself. Use extractJson for all
* external Strings.
*
* @param instance The JSON string to parse
* @return the extracted JsonNode
*/
def unsafeExtractJson(instance: String): JsonNode =
Mapper.readTree(instance)
/**
* Strips the instance information from a Jackson
* parsing exception message:
*
* "... at [Source: java.io.StringReader@1fe7a8f8; line: 1, column: 2]""
* ^^^^^^^^
*
* Also removes any control characters and replaces
* tabs with 4 spaces.
*
* @param message The exception message which needs
* tidying up
* @return the same exception message, but with
* instance information etc removed
*/
def stripInstanceEtc(message: String): String = {
message
.replaceAll("@[0-9a-z]+;", "@xxxxxx;")
.replaceAll("\\\\t", " ")
.replaceAll("\\\\p{Cntrl}", "") // Any other control character
.trim
}
/**
* Encodes every (key -> value) in the supplied
* JSON Object. Will only attempt to encode values
* of type String.
*
* @param enc The encoding to be used
* @param json The json which we will be encoding
* @return an updated json which has been correctly
* encoded.
*/
def encodeJsonObject(enc: String, json: JValue): JValue =
json transformField {
case (key, JString(value)) => (CU.encodeString(enc, key), JString(CU.encodeString(enc, value)))
}
}
| krahman/snowplow | 3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/utils/JsonUtils.scala | Scala | apache-2.0 | 9,093 |
/*
* RobolabSim
* Copyright (C) 2014 Max Leuthaeuser
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see [http://www.gnu.org/licenses/].
*/
package tud.robolab
/**
* All classes and objects that are important for utilizing tests for the
* users solution (path, history, etc.)
*/
package object testing
{
}
| max-leuthaeuser/RobolabSim | server/src/main/scala/tud/robolab/testing/package.scala | Scala | gpl-3.0 | 880 |
package lila.learn
import org.joda.time.DateTime
case class LearnProgress(
_id: LearnProgress.Id,
stages: Map[String, StageProgress],
createdAt: DateTime,
updatedAt: DateTime
) {
def id = _id
def withScore(stage: String, level: Int, s: StageProgress.Score) =
copy(
stages = stages + (
stage -> stages.getOrElse(stage, StageProgress.empty).withScore(level, s)
),
updatedAt = DateTime.now
)
}
object LearnProgress {
case class Id(value: String) extends AnyVal
def empty(id: Id) =
LearnProgress(
_id = id,
stages = Map.empty,
createdAt = DateTime.now,
updatedAt = DateTime.now
)
}
| luanlv/lila | modules/learn/src/main/LearnProgress.scala | Scala | mit | 675 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.carbondata.restructure.rowreader
import java.math.{BigDecimal, RoundingMode}
import org.apache.spark.sql.Row
import org.apache.spark.sql.common.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.util.CarbonProperties
class DropColumnTestCases extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sqlContext.setConf("carbon.enable.vector.reader", "false")
sql("DROP TABLE IF EXISTS dropcolumntest")
sql("drop table if exists hivetable")
}
test("test drop column and insert into hive table") {
beforeAll
sql(
"CREATE TABLE dropcolumntest(intField int,stringField string,charField string," +
"timestampField timestamp,decimalField decimal(6,2)) STORED BY 'carbondata'")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE dropcolumntest" +
s" options('FILEHEADER'='intField,stringField,charField,timestampField,decimalField')")
sql("Alter table dropcolumntest drop columns(charField)")
sql(
"CREATE TABLE hivetable(intField int,stringField string,timestampField timestamp," +
"decimalField decimal(6,2)) stored as parquet")
sql("insert into table hivetable select * from dropcolumntest")
checkAnswer(sql("select * from hivetable"), sql("select * from dropcolumntest"))
afterAll
}
test("test drop column and load data") {
beforeAll
sql(
"CREATE TABLE dropcolumntest(intField int,stringField string,charField string," +
"timestampField timestamp,decimalField decimal(6,2)) STORED BY 'carbondata'")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE dropcolumntest" +
s" options('FILEHEADER'='intField,stringField,charField,timestampField,decimalField')")
sql("Alter table dropcolumntest drop columns(charField)")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data4.csv' INTO TABLE dropcolumntest" +
s" options('FILEHEADER'='intField,stringField,timestampField,decimalField')")
checkAnswer(sql("select count(*) from dropcolumntest"), Row(2))
afterAll
}
test("test drop column and compaction") {
beforeAll
sql(
"CREATE TABLE dropcolumntest(intField int,stringField string,charField string," +
"timestampField timestamp,decimalField decimal(6,2)) STORED BY 'carbondata'")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data1.csv' INTO TABLE dropcolumntest" +
s" options('FILEHEADER'='intField,stringField,charField,timestampField,decimalField')")
sql("Alter table dropcolumntest drop columns(charField)")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/restructure/data4.csv' INTO TABLE dropcolumntest" +
s" options('FILEHEADER'='intField,stringField,timestampField,decimalField')")
sql("alter table dropcolumntest compact 'major'")
checkExistence(sql("show segments for table dropcolumntest"), true, "0Compacted")
checkExistence(sql("show segments for table dropcolumntest"), true, "1Compacted")
checkExistence(sql("show segments for table dropcolumntest"), true, "0.1Success")
afterAll
}
override def afterAll {
sql("DROP TABLE IF EXISTS dropcolumntest")
sql("drop table if exists hivetable")
}
}
| Sephiroth-Lin/incubator-carbondata | integration/spark2/src/test/scala/org/apache/spark/carbondata/restructure/rowreader/DropColumnTestCases.scala | Scala | apache-2.0 | 4,080 |
package com.lucius.kafka.utils
/**
* Defines general-purpose helper methods that are unrelated to the project's business logic
* and that would not fit in DateUtils or FileUtils. These methods should be generic: useful to
* the author, likely useful to other colleagues, and reusable in future projects.
*/
object Utils {
/**
* Converts a Seq to a Tuple (returned as a Product).
* It can be used to convert a List, an Array, etc. into the corresponding TupleN.
* Note that toProduct(List(111, 222)) does not compile, because Int is not a subclass of Object,
* whereas toProduct(List[Integer](111, 222)) works.
*/
def toProduct[A <: Object](seq: Seq[A]) =
Class.forName("scala.Tuple" + seq.size).getConstructors.apply(0).newInstance(seq: _*).asInstanceOf[Product]
/**
* Applies trim() to every element of the Iterable and returns the result.
*/
def trimIterable[A <: Iterable[String]](iterable: A): A = {
iterable.map(_.trim).asInstanceOf[A]
}
def trimTuple(x: Product) = toProduct((for (e <- x.productIterator) yield {
e.toString.trim
}).toList)
/**
* Rounds a number to the given number of decimal places.
*/
def retainDecimal(number: Double, bits: Int = 2): Double = {
BigDecimal(number).setScale(bits, BigDecimal.RoundingMode.HALF_UP).doubleValue()
}
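// Illustrative examples (not part of the original file):
//   retainDecimal(3.14159) == 3.14   (default of 2 decimal places)
//   retainDecimal(2.5, 0) == 3.0     (HALF_UP rounds .5 away from zero)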
}
| zj-lingxin/kafka-fw | src/main/scala/com/lucius/kafka/utils/Utils.scala | Scala | mit | 1,267 |
package dotty.tools
package dotc
package typer
import core._
import ast.{Trees, untpd, tpd, TreeInfo}
import util.Positions._
import util.Stats.track
import Trees.Untyped
import Mode.ImplicitsEnabled
import Contexts._
import Flags._
import Denotations._
import NameOps._
import Symbols._
import Types._
import Decorators._
import ErrorReporting._
import Trees._
import Names._
import StdNames._
import ProtoTypes._
import EtaExpansion._
import collection.mutable
import config.Printers._
import TypeApplications._
import language.implicitConversions
object Applications {
import tpd._
private val isNamedArg = (arg: Any) => arg.isInstanceOf[Trees.NamedArg[_]]
def hasNamedArg(args: List[Any]) = args exists isNamedArg
def extractorMemberType(tp: Type, name: Name, errorPos: Position = NoPosition)(implicit ctx:Context) = {
val ref = tp member name
if (ref.isOverloaded)
errorType(i"Overloaded reference to $ref is not allowed in extractor", errorPos)
else if (ref.info.isInstanceOf[PolyType])
errorType(i"Reference to polymorphic $ref: ${ref.info} is not allowed in extractor", errorPos)
else
ref.info.widenExpr.dealias
}
def productSelectorTypes(tp: Type, errorPos: Position = NoPosition)(implicit ctx:Context): List[Type] = {
val sels = for (n <- Iterator.from(0)) yield extractorMemberType(tp, nme.selectorName(n), errorPos)
sels.takeWhile(_.exists).toList
}
def productSelectors(tp: Type)(implicit ctx:Context): List[Symbol] = {
val sels = for (n <- Iterator.from(0)) yield tp.member(nme.selectorName(n)).symbol
sels.takeWhile(_.exists).toList
}
def getUnapplySelectors(tp: Type, args: List[untpd.Tree], pos: Position = NoPosition)(implicit ctx: Context): List[Type] =
if (args.length > 1 && !(tp.derivesFrom(defn.SeqClass))) {
val sels = productSelectorTypes(tp, pos)
if (sels.length == args.length) sels
else tp :: Nil
} else tp :: Nil
def unapplyArgs(unapplyResult: Type, unapplyFn:Tree, args:List[untpd.Tree], pos: Position = NoPosition)(implicit ctx: Context): List[Type] = {
def seqSelector = defn.RepeatedParamType.appliedTo(unapplyResult.elemType :: Nil)
def getTp = extractorMemberType(unapplyResult, nme.get, pos)
// println(s"unapply $unapplyResult ${extractorMemberType(unapplyResult, nme.isDefined)}")
if (extractorMemberType(unapplyResult, nme.isDefined, pos) isRef defn.BooleanClass) {
if (getTp.exists)
if (unapplyFn.symbol.name == nme.unapplySeq) {
val seqArg = boundsToHi(getTp.firstBaseArgInfo(defn.SeqClass))
if (seqArg.exists) return args map Function.const(seqArg)
}
else return getUnapplySelectors(getTp, args, pos)
else if (defn.isProductSubType(unapplyResult)) return productSelectorTypes(unapplyResult, pos)
}
if (unapplyResult derivesFrom defn.SeqClass) seqSelector :: Nil
else if (unapplyResult isRef defn.BooleanClass) Nil
else {
ctx.error(i"$unapplyResult is not a valid result type of an unapply method of an extractor", pos)
Nil
}
}
def wrapDefs(defs: mutable.ListBuffer[Tree], tree: Tree)(implicit ctx: Context): Tree =
if (defs != null && defs.nonEmpty) tpd.Block(defs.toList, tree) else tree
}
import Applications._
trait Applications extends Compatibility { self: Typer =>
import Applications._
import tpd.{ cpy => _, _ }
import untpd.cpy
/** @tparam Arg the type of arguments, could be tpd.Tree, untpd.Tree, or Type
* @param methRef the reference to the method of the application
* @param funType the type of the function part of the application
* @param args the arguments of the application
* @param resultType the expected result type of the application
*/
abstract class Application[Arg](methRef: TermRef, funType: Type, args: List[Arg], resultType: Type)(implicit ctx: Context) {
/** The type of typed arguments: either tpd.Tree or Type */
type TypedArg
/** Given an original argument and the type of the corresponding formal
* parameter, produce a typed argument.
*/
protected def typedArg(arg: Arg, formal: Type): TypedArg
/** Turn a typed tree into an argument */
protected def treeToArg(arg: Tree): Arg
/** Check that argument corresponds to type `formal` and
* possibly add it to the list of adapted arguments
*/
protected def addArg(arg: TypedArg, formal: Type): Unit
/** Is this an argument of the form `expr: _*` or a RepeatedParamType
* derived from such an argument?
*/
protected def isVarArg(arg: Arg): Boolean
/** If constructing trees, turn last `n` processed arguments into a
* `SeqLiteral` tree with element type `elemFormal`.
*/
protected def makeVarArg(n: Int, elemFormal: Type): Unit
/** If all `args` have primitive numeric types, make sure it's the same one */
protected def harmonizeArgs(args: List[TypedArg]): List[TypedArg]
/** Signal failure with given message at position of given argument */
protected def fail(msg: => String, arg: Arg): Unit
/** Signal failure with given message at position of the application itself */
protected def fail(msg: => String): Unit
protected def appPos: Position
/** The current function part, which might be affected by lifting.
*/
protected def normalizedFun: Tree
/** If constructing trees, pull out all parts of the function
* which are not idempotent into separate prefix definitions
*/
protected def liftFun(): Unit = ()
/** A flag signalling that the typechecking the application was so far successful */
private[this] var _ok = true
def ok = _ok
def ok_=(x: Boolean) = {
assert(x || ctx.errorsReported || !ctx.typerState.isCommittable) // !!! DEBUG
_ok = x
}
/** The function's type after widening and instantiating polytypes
* with polyparams in constraint set
*/
val methType = funType.widen match {
case funType: MethodType => funType
case funType: PolyType => constrained(funType).resultType
case tp => tp //was: funType
}
/** The arguments re-ordered so that each named argument matches the
* same-named formal parameter.
*/
lazy val orderedArgs =
if (hasNamedArg(args))
reorder(args.asInstanceOf[List[untpd.Tree]]).asInstanceOf[List[Arg]]
else
args
protected def init() = methType match {
case methType: MethodType =>
// apply the result type constraint, unless method type is dependent
if (!methType.isDependent) {
val savedConstraint = ctx.typerState.constraint
if (!constrainResult(methType.resultType, resultType))
if (ctx.typerState.isCommittable)
// defer the problem until after the application;
// it might be healed by an implicit conversion
assert(ctx.typerState.constraint eq savedConstraint)
else
fail(err.typeMismatchStr(methType.resultType, resultType))
}
// match all arguments with corresponding formal parameters
matchArgs(orderedArgs, methType.paramTypes, 0)
case _ =>
if (methType.isError) ok = false
else fail(s"$methString does not take parameters")
}
/** The application was successful */
def success = ok
protected def methodType = methType.asInstanceOf[MethodType]
private def methString: String = i"${methRef.symbol}: ${methType.show}"
/** Re-order arguments to correctly align named arguments */
def reorder[T >: Untyped](args: List[Trees.Tree[T]]): List[Trees.Tree[T]] = {
/** @param pnames The list of parameter names that are missing arguments
* @param args The list of arguments that are not yet passed, or that are waiting to be dropped
* @param nameToArg A map from as yet unseen names to named arguments
* @param toDrop A set of names that have already be passed as named arguments
*
* For a well-typed application we have the invariants
*
* 1. `(args diff toDrop)` can be reordered to match `pnames`
* 2. For every `(name -> arg)` in `nameToArg`, `arg` is an element of `args`
*/
def recur(pnames: List[Name], args: List[Trees.Tree[T]],
nameToArg: Map[Name, Trees.NamedArg[T]], toDrop: Set[Name]): List[Trees.Tree[T]] = pnames match {
case pname :: pnames1 if nameToArg contains pname =>
// there is a named argument for this parameter; pick it
nameToArg(pname) :: recur(pnames1, args, nameToArg - pname, toDrop + pname)
case _ =>
def pnamesRest = if (pnames.isEmpty) pnames else pnames.tail
args match {
case (arg @ NamedArg(aname, _)) :: args1 =>
if (toDrop contains aname) // argument is already passed
recur(pnames, args1, nameToArg, toDrop - aname)
else if ((nameToArg contains aname) && pnames.nonEmpty) // argument is missing, pass an empty tree
genericEmptyTree :: recur(pnames.tail, args, nameToArg, toDrop)
else { // name not (or no longer) available for named arg
def msg =
if (methodType.paramNames contains aname)
s"parameter $aname of $methString is already instantiated"
else
s"$methString does not have a parameter $aname"
fail(msg, arg.asInstanceOf[Arg])
arg :: recur(pnamesRest, args1, nameToArg, toDrop)
}
case arg :: args1 =>
arg :: recur(pnamesRest, args1, nameToArg, toDrop) // unnamed argument; pick it
case Nil => // no more args, continue to pick up any preceding named args
if (pnames.isEmpty) Nil
else recur(pnamesRest, args, nameToArg, toDrop)
}
}
val nameAssocs = for (arg @ NamedArg(name, _) <- args) yield (name, arg)
recur(methodType.paramNames, args, nameAssocs.toMap, Set())
}
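// Illustrative example (hypothetical method, not from the original sources): for
// def f(a: Int, b: Int, c: Int) and the call f(1, c = 3, b = 2), `reorder` yields the
// arguments in declaration order (1, b = 2, c = 3); a `genericEmptyTree` placeholder is
// inserted only for a parameter that has no argument at all, so that a default can be
// tried later in `matchArgs`.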
/** Splice new method reference into existing application */
def spliceMeth(meth: Tree, app: Tree): Tree = app match {
case Apply(fn, args) => Apply(spliceMeth(meth, fn), args)
case TypeApply(fn, targs) => TypeApply(spliceMeth(meth, fn), targs)
case _ => meth
}
/** Find reference to default parameter getter for parameter #n in current
* parameter list, or NoType if none was found
*/
def findDefaultGetter(n: Int)(implicit ctx: Context): Tree = {
val meth = methRef.symbol.asTerm
val receiver: Tree = methPart(normalizedFun) match {
case Select(receiver, _) => receiver
case mr => mr.tpe.normalizedPrefix match {
case mr: TermRef => ref(mr)
case _ => EmptyTree
}
}
val getterPrefix =
if ((meth is Synthetic) && meth.name == nme.apply) nme.CONSTRUCTOR else meth.name
def getterName = getterPrefix.defaultGetterName(n)
if (!meth.hasDefaultParams)
EmptyTree
else if (receiver.isEmpty) {
def findGetter(cx: Context): Tree = {
if (cx eq NoContext) EmptyTree
else if (cx.scope != cx.outer.scope &&
cx.denotNamed(meth.name).hasAltWith(_.symbol == meth)) {
val denot = cx.denotNamed(getterName)
assert(denot.exists, s"non-existent getter denotation ($denot) for getter($getterName)")
ref(TermRef(cx.owner.thisType, getterName, denot))
} else findGetter(cx.outer)
}
findGetter(ctx)
}
else {
def selectGetter(qual: Tree): Tree = {
val getterDenot = qual.tpe.member(getterName)
if (getterDenot.exists) qual.select(TermRef(qual.tpe, getterName, getterDenot))
else EmptyTree
}
if (!meth.isClassConstructor)
selectGetter(receiver)
else {
// default getters for class constructors are found in the companion object
val cls = meth.owner
val companion = cls.companionModule
receiver.tpe.baseTypeRef(cls) match {
case tp: TypeRef if companion.isTerm =>
selectGetter(ref(TermRef(tp.prefix, companion.asTerm)))
case _ =>
EmptyTree
}
}
}
}
/** Match re-ordered arguments against formal parameters
* @param n The position of the first parameter in formals in `methType`.
*/
def matchArgs(args: List[Arg], formals: List[Type], n: Int): Unit = {
if (success) formals match {
case formal :: formals1 =>
def addTyped(arg: Arg, formal: Type) =
addArg(typedArg(arg, formal), formal)
def missingArg(n: Int): Unit = {
val pname = methodType.paramNames(n)
fail(
if (pname contains '$') s"not enough arguments for $methString"
else s"missing argument for parameter $pname of $methString")
}
def tryDefault(n: Int, args1: List[Arg]): Unit = {
liftFun()
val getter = findDefaultGetter(n + numArgs(normalizedFun))
if (getter.isEmpty) missingArg(n)
else {
addTyped(treeToArg(spliceMeth(getter withPos appPos, normalizedFun)), formal)
matchArgs(args1, formals1, n + 1)
}
}
if (formal.isRepeatedParam)
args match {
case arg :: Nil if isVarArg(arg) =>
addTyped(arg, formal)
case _ =>
val elemFormal = formal.widenExpr.argTypesLo.head
val origConstraint = ctx.typerState.constraint
var typedArgs = args.map(typedArg(_, elemFormal))
val harmonizedArgs = harmonizeArgs(typedArgs)
if (harmonizedArgs ne typedArgs) {
ctx.typerState.constraint = origConstraint
typedArgs = harmonizedArgs
}
typedArgs.foreach(addArg(_, elemFormal))
makeVarArg(args.length, elemFormal)
}
else args match {
case EmptyTree :: args1 =>
tryDefault(n, args1)
case arg :: args1 =>
addTyped(arg, formal)
matchArgs(args1, formals1, n + 1)
case nil =>
tryDefault(n, args)
}
case nil =>
args match {
case arg :: args1 => fail(s"too many arguments for $methString", arg)
case nil =>
}
}
}
}
/** Subclass of Application for the cases where we are interested only
* in a "can/cannot apply" answer, without needing to construct trees or
* issue error messages.
*/
abstract class TestApplication[Arg](methRef: TermRef, funType: Type, args: List[Arg], resultType: Type)(implicit ctx: Context)
extends Application[Arg](methRef, funType, args, resultType) {
type TypedArg = Arg
type Result = Unit
/** The type of the given argument */
protected def argType(arg: Arg, formal: Type): Type
def typedArg(arg: Arg, formal: Type): Arg = arg
def addArg(arg: TypedArg, formal: Type) =
ok = ok & isCompatible(argType(arg, formal), formal)
def makeVarArg(n: Int, elemFormal: Type) = {}
def fail(msg: => String, arg: Arg) =
ok = false
def fail(msg: => String) =
ok = false
def appPos = NoPosition
lazy val normalizedFun = ref(methRef)
init()
}
/** Subclass of Application for applicability tests with type arguments and value
* argument trees.
*/
class ApplicableToTrees(methRef: TermRef, targs: List[Type], args: List[Tree], resultType: Type)(implicit ctx: Context)
extends TestApplication(methRef, methRef.widen.appliedTo(targs), args, resultType) {
def argType(arg: Tree, formal: Type): Type = normalize(arg.tpe, formal)
def treeToArg(arg: Tree): Tree = arg
def isVarArg(arg: Tree): Boolean = tpd.isWildcardStarArg(arg)
def harmonizeArgs(args: List[Tree]) = harmonize(args)
}
/** Subclass of Application for applicability tests with type arguments and value
* argument trees.
*/
class ApplicableToTreesDirectly(methRef: TermRef, targs: List[Type], args: List[Tree], resultType: Type)(implicit ctx: Context) extends ApplicableToTrees(methRef, targs, args, resultType)(ctx) {
override def addArg(arg: TypedArg, formal: Type) =
ok = ok & (argType(arg, formal) <:< formal)
}
/** Subclass of Application for applicability tests with value argument types. */
class ApplicableToTypes(methRef: TermRef, args: List[Type], resultType: Type)(implicit ctx: Context)
extends TestApplication(methRef, methRef, args, resultType) {
def argType(arg: Type, formal: Type): Type = arg
def treeToArg(arg: Tree): Type = arg.tpe
def isVarArg(arg: Type): Boolean = arg.isRepeatedParam
def harmonizeArgs(args: List[Type]) = harmonizeTypes(args)
}
/** Subclass of Application for type checking an Apply node, where
* types of arguments are either known or unknown.
*/
abstract class TypedApply[T >: Untyped](
app: untpd.Apply, fun: Tree, methRef: TermRef, args: List[Trees.Tree[T]], resultType: Type)(implicit ctx: Context)
extends Application(methRef, fun.tpe, args, resultType) {
type TypedArg = Tree
def isVarArg(arg: Trees.Tree[T]): Boolean = untpd.isWildcardStarArg(arg)
private var typedArgBuf = new mutable.ListBuffer[Tree]
private var liftedDefs: mutable.ListBuffer[Tree] = null
private var myNormalizedFun: Tree = fun
init()
def addArg(arg: Tree, formal: Type): Unit =
typedArgBuf += adaptInterpolated(arg, formal.widenExpr, EmptyTree)
def makeVarArg(n: Int, elemFormal: Type): Unit = {
val args = typedArgBuf.takeRight(n).toList
typedArgBuf.trimEnd(n)
val seqLit = if (methodType.isJava) JavaSeqLiteral(args) else SeqLiteral(args)
typedArgBuf += seqToRepeated(seqLit)
}
def harmonizeArgs(args: List[TypedArg]) = harmonize(args)
override def appPos = app.pos
def fail(msg: => String, arg: Trees.Tree[T]) = {
ctx.error(msg, arg.pos)
ok = false
}
def fail(msg: => String) = {
ctx.error(msg, app.pos)
ok = false
}
def normalizedFun = myNormalizedFun
override def liftFun(): Unit =
if (liftedDefs == null) {
liftedDefs = new mutable.ListBuffer[Tree]
myNormalizedFun = liftApp(liftedDefs, myNormalizedFun)
}
/** The index of the first difference between lists of trees `xs` and `ys`,
* where `EmptyTree`s in the second list are skipped.
* -1 if there are no differences.
*/
private def firstDiff[T <: Trees.Tree[_]](xs: List[T], ys: List[T], n: Int = 0): Int = xs match {
case x :: xs1 =>
ys match {
case EmptyTree :: ys1 => firstDiff(xs1, ys1, n)
case y :: ys1 => if (x ne y) n else firstDiff(xs1, ys1, n + 1)
case nil => n
}
case nil =>
ys match {
case EmptyTree :: ys1 => firstDiff(xs, ys1, n)
case y :: ys1 => n
case nil => -1
}
}
private def sameSeq[T <: Trees.Tree[_]](xs: List[T], ys: List[T]): Boolean = firstDiff(xs, ys) < 0
val result = {
var typedArgs = typedArgBuf.toList
def app0 = cpy.Apply(app)(normalizedFun, typedArgs) // needs to be a `def` because typedArgs can change later
val app1 =
if (!success) app0.withType(ErrorType)
else {
if (!sameSeq(args, orderedArgs)) {
// need to lift arguments to maintain evaluation order in the
// presence of argument reorderings.
liftFun()
val eqSuffixLength = firstDiff(app.args.reverse, orderedArgs.reverse)
val (liftable, rest) = typedArgs splitAt (typedArgs.length - eqSuffixLength)
typedArgs = liftArgs(liftedDefs, methType, liftable) ++ rest
}
if (sameSeq(typedArgs, args)) // trick to cut down on tree copying
typedArgs = args.asInstanceOf[List[Tree]]
assignType(app0, normalizedFun, typedArgs)
}
wrapDefs(liftedDefs, app1)
}
}
/** Subclass of Application for type checking an Apply node with untyped arguments. */
class ApplyToUntyped(app: untpd.Apply, fun: Tree, methRef: TermRef, proto: FunProto, resultType: Type)(implicit ctx: Context)
extends TypedApply(app, fun, methRef, proto.args, resultType) {
def typedArg(arg: untpd.Tree, formal: Type): TypedArg = proto.typedArg(arg, formal.widenExpr)
def treeToArg(arg: Tree): untpd.Tree = untpd.TypedSplice(arg)
}
/** Subclass of Application for type checking an Apply node with typed arguments. */
class ApplyToTyped(app: untpd.Apply, fun: Tree, methRef: TermRef, args: List[Tree], resultType: Type)(implicit ctx: Context)
extends TypedApply[Type](app, fun, methRef, args, resultType) {
// Dotty deviation: Dotc infers Untyped for the supercall. This seems to be according to the rules
// (of both Scala and Dotty). Untyped is legal, and a subtype of Typed, whereas TypeApply
// is invariant in the type parameter, so the minimal type should be inferred. But then typedArg does
// not match the abstract method in Application and an abstract class error results.
def typedArg(arg: tpd.Tree, formal: Type): TypedArg = arg
def treeToArg(arg: Tree): Tree = arg
}
def typedApply(tree: untpd.Apply, pt: Type)(implicit ctx: Context): Tree = {
def realApply(implicit ctx: Context): Tree = track("realApply") {
def argCtx(implicit ctx: Context) =
if (untpd.isSelfConstrCall(tree)) ctx.thisCallArgContext else ctx
var proto = new FunProto(tree.args, IgnoredProto(pt), this)(argCtx)
val fun1 = typedExpr(tree.fun, proto)
// Warning: The following line is dirty and fragile. We record that auto-tupling was demanded as
// a side effect in adapt. If it was, we assume the tupled proto-type in the rest of the application.
      // This crucially relies on the fact that `proto` is used only in a single call of `adapt`,
// otherwise we would get possible cross-talk between different `adapt` calls using the same
// prototype. A cleaner alternative would be to return a modified prototype from `adapt` together with
// a modified tree but this would be more convoluted and less efficient.
if (proto.isTupled) proto = proto.tupled
methPart(fun1).tpe match {
case funRef: TermRef =>
tryEither { implicit ctx =>
val app =
if (proto.argsAreTyped) new ApplyToTyped(tree, fun1, funRef, proto.typedArgs, pt)
else new ApplyToUntyped(tree, fun1, funRef, proto, pt)(argCtx)
val result = app.result
convertNewArray(ConstFold(result))
} { (failedVal, failedState) =>
val fun2 = tryInsertImplicitOnQualifier(fun1, proto)
if (fun1 eq fun2) {
failedState.commit()
failedVal
} else typedApply(
cpy.Apply(tree)(untpd.TypedSplice(fun2), proto.typedArgs map untpd.TypedSplice), pt)
}
case _ =>
fun1.tpe match {
case ErrorType => tree.withType(ErrorType)
case tp => handleUnexpectedFunType(tree, fun1)
}
}
}
/** Convert expression like
*
* e += (args)
*
* where the lifted-for-assignment version of e is { val xs = es; e' } to
*
* { val xs = es; e' = e' + args }
*/
def typedOpAssign: Tree = track("typedOpAssign") {
val Apply(Select(lhs, name), rhss) = tree
val lhs1 = typedExpr(lhs)
val liftedDefs = new mutable.ListBuffer[Tree]
val lhs2 = untpd.TypedSplice(liftAssigned(liftedDefs, lhs1))
val assign = untpd.Assign(lhs2, untpd.Apply(untpd.Select(lhs2, name.init), rhss))
wrapDefs(liftedDefs, typed(assign))
}
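    // Illustrative sketch, not part of the original source: for a tree such as `xs(i) += v`,
    // the lifted-for-assignment form caches prefix and index once, roughly
    //   { val $lhs = xs; val $idx = i; $lhs($idx) = $lhs($idx) + v }
    // so `xs` and `i` are evaluated exactly once; `$lhs`/`$idx` stand in for
    // compiler-generated names.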
if (untpd.isOpAssign(tree))
tryEither {
implicit ctx => realApply
} { (failedVal, failedState) =>
tryEither {
implicit ctx => typedOpAssign
} { (_, _) =>
failedState.commit()
failedVal
}
}
else realApply
}
/** Overridden in ReTyper to handle primitive operations that can be generated after erasure */
protected def handleUnexpectedFunType(tree: untpd.Apply, fun: Tree)(implicit ctx: Context): Tree =
throw new Error(s"unexpected type.\n fun = $fun,\n methPart(fun) = ${methPart(fun)},\n methPart(fun).tpe = ${methPart(fun).tpe},\n tpe = ${fun.tpe}")
def typedTypeApply(tree: untpd.TypeApply, pt: Type)(implicit ctx: Context): Tree = track("typedTypeApply") {
var typedArgs = tree.args mapconserve (typedType(_))
val typedFn = typedExpr(tree.fun, PolyProto(typedArgs.tpes, pt))
typedFn.tpe.widen match {
case pt: PolyType =>
if (typedArgs.length <= pt.paramBounds.length)
typedArgs = typedArgs.zipWithConserve(pt.paramBounds)(adaptTypeArg)
checkBounds(typedArgs, pt)
case _ =>
}
assignType(cpy.TypeApply(tree)(typedFn, typedArgs), typedFn, typedArgs)
}
def adaptTypeArg(tree: tpd.Tree, bound: Type)(implicit ctx: Context): tpd.Tree =
tree.withType(tree.tpe.EtaExpandIfHK(bound))
/** Rewrite `new Array[T](....)` trees to calls of newXYZArray methods. */
def convertNewArray(tree: tpd.Tree)(implicit ctx: Context): tpd.Tree = tree match {
case Apply(TypeApply(tycon, targ :: Nil), args) if tycon.symbol == defn.ArrayConstructor =>
fullyDefinedType(tree.tpe, "array", tree.pos)
tpd.cpy.Apply(tree)(newArray(targ, tree.pos), args)
case _ =>
tree
}
def typedUnApply(tree: untpd.Apply, selType: Type)(implicit ctx: Context): Tree = track("typedUnApply") {
val Apply(qual, args) = tree
def notAnExtractor(tree: Tree) =
errorTree(tree, s"${qual.show} cannot be used as an extractor in a pattern because it lacks an unapply or unapplySeq method")
/** If this is a term ref tree, try to typecheck with its type name.
* If this refers to a type alias, follow the alias, and if
* one finds a class, reference the class companion module.
*/
def followTypeAlias(tree: untpd.Tree): untpd.Tree = {
tree match {
case tree: untpd.RefTree =>
val ttree = typedType(untpd.rename(tree, tree.name.toTypeName))
ttree.tpe match {
case alias: TypeRef if alias.info.isAlias =>
companionRef(alias) match {
case companion: TermRef => return untpd.ref(companion) withPos tree.pos
case _ =>
}
case _ =>
}
case _ =>
}
untpd.EmptyTree
}
/** A typed qual.unapply or qual.unapplySeq tree, if this typechecks.
* Otherwise fallBack with (maltyped) qual.unapply as argument
* Note: requires special handling for overloaded occurrences of
* unapply or unapplySeq. We first try to find a non-overloaded
* method which matches any type. If that fails, we try to find an
* overloaded variant which matches one of the argument types.
* In fact, overloaded unapply's are problematic because a non-
* overloaded unapply does *not* need to be applicable to its argument
* whereas overloaded variants need to have a conforming variant.
*/
def trySelectUnapply(qual: untpd.Tree)(fallBack: Tree => Tree): Tree = {
val genericProto = new UnapplyFunProto(WildcardType, this)
def specificProto = new UnapplyFunProto(selType, this)
      // try first for non-overloaded, then for overloaded occurrences
def tryWithName(name: TermName)(fallBack: Tree => Tree)(implicit ctx: Context): Tree =
tryEither {
implicit ctx => typedExpr(untpd.Select(qual, name), genericProto)
} {
(sel, _) =>
tryEither {
implicit ctx => typedExpr(untpd.Select(qual, name), specificProto)
} {
(_, _) => fallBack(sel)
}
}
// try first for unapply, then for unapplySeq
tryWithName(nme.unapply) {
sel => tryWithName(nme.unapplySeq)(_ => fallBack(sel)) // for backwards compatibility; will be dropped
}
}
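    // Illustrative sketch, not part of the original source: for `case ::(x, rest)` the
    // `unapply` of `::` is found on the first attempt, whereas for `case Seq(a, b)` only
    // `unapplySeq` exists, so the fallback of the tryWithName(nme.unapply) call kicks in
    // and the unapplySeq variant is tried next.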
/** Produce a typed qual.unapply or qual.unapplySeq tree, or
* else if this fails follow a type alias and try again.
*/
val unapplyFn = trySelectUnapply(qual) { sel =>
val qual1 = followTypeAlias(qual)
if (qual1.isEmpty) notAnExtractor(sel)
else trySelectUnapply(qual1)(_ => notAnExtractor(sel))
}
def fromScala2x = unapplyFn.symbol.exists && (unapplyFn.symbol.owner is Scala2x)
/** Can `subtp` be made to be a subtype of `tp`, possibly by dropping some
* refinements in `tp`?
*/
def isSubTypeOfParent(subtp: Type, tp: Type)(implicit ctx: Context): Boolean =
if (subtp <:< tp) true
else tp match {
case RefinedType(parent, _) => isSubTypeOfParent(subtp, parent)
case _ => false
}
unapplyFn.tpe.widen match {
case mt: MethodType if mt.paramTypes.length == 1 && !mt.isDependent =>
val m = mt
val unapplyArgType = mt.paramTypes.head
unapp.println(i"unapp arg tpe = $unapplyArgType, pt = $selType")
def wpt = widenForMatchSelector(selType) // needed?
val ownType =
if (selType <:< unapplyArgType) {
//fullyDefinedType(unapplyArgType, "extractor argument", tree.pos)
unapp.println(i"case 1 $unapplyArgType ${ctx.typerState.constraint}")
selType
} else if (isSubTypeOfParent(unapplyArgType, wpt)(ctx.addMode(Mode.GADTflexible))) {
maximizeType(unapplyArgType) match {
case Some(tvar) =>
def msg =
d"""There is no best instantiation of pattern type $unapplyArgType
|that makes it a subtype of selector type $selType.
|Non-variant type variable ${tvar.origin} cannot be uniquely instantiated.""".stripMargin
if (fromScala2x) {
// We can't issue an error here, because in Scala 2, ::[B] is invariant
// whereas List[+T] is covariant. According to the strict rule, a pattern
// match of a List[C] against a case x :: xs is illegal, because
// B cannot be uniquely instantiated. Of course :: should have been
// covariant in the first place, but in the Scala libraries it isn't.
// So for now we allow these kinds of patterns, even though they
// can open unsoundness holes. See SI-7952 for an example of the hole this opens.
if (ctx.settings.verbose.value) ctx.warning(msg, tree.pos)
} else {
unapp.println(s" ${unapplyFn.symbol.owner} ${unapplyFn.symbol.owner is Scala2x}")
ctx.strictWarning(msg, tree.pos)
}
case _ =>
}
unapp.println(i"case 2 $unapplyArgType ${ctx.typerState.constraint}")
unapplyArgType
} else {
unapp.println("Neither sub nor super")
unapp.println(TypeComparer.explained(implicit ctx => unapplyArgType <:< wpt))
errorType(
d"Pattern type $unapplyArgType is neither a subtype nor a supertype of selector type $wpt",
tree.pos)
}
val dummyArg = dummyTreeOfType(unapplyArgType)
val unapplyApp = typedExpr(untpd.TypedSplice(Apply(unapplyFn, dummyArg :: Nil)))
val unapplyImplicits = unapplyApp match {
case Apply(Apply(unapply, `dummyArg` :: Nil), args2) => assert(args2.nonEmpty); args2
case Apply(unapply, `dummyArg` :: Nil) => Nil
}
var argTypes = unapplyArgs(unapplyApp.tpe, unapplyFn, args, tree.pos)
for (argType <- argTypes) assert(!argType.isInstanceOf[TypeBounds], unapplyApp.tpe.show)
val bunchedArgs = argTypes match {
case argType :: Nil =>
if (argType.isRepeatedParam) untpd.SeqLiteral(args) :: Nil
else if (args.lengthCompare(1) > 0 && ctx.canAutoTuple) untpd.Tuple(args) :: Nil
else args
case _ => args
}
if (argTypes.length != bunchedArgs.length) {
ctx.error(d"wrong number of argument patterns for $qual; expected: ($argTypes%, %)", tree.pos)
argTypes = argTypes.take(args.length) ++
List.fill(argTypes.length - args.length)(WildcardType)
}
val unapplyPatterns = (bunchedArgs, argTypes).zipped map (typed(_, _))
val result = assignType(cpy.UnApply(tree)(unapplyFn, unapplyImplicits, unapplyPatterns), ownType)
unapp.println(s"unapply patterns = $unapplyPatterns")
if ((ownType eq selType) || ownType.isError) result
else Typed(result, TypeTree(ownType))
case tp =>
val unapplyErr = if (tp.isError) unapplyFn else notAnExtractor(unapplyFn)
val typedArgsErr = args mapconserve (typed(_, defn.AnyType))
cpy.UnApply(tree)(unapplyErr, Nil, typedArgsErr) withType ErrorType
}
}
  /** A typed unapply hook, can be overridden by re-typers between frontend
   * and pattern matcher.
*/
def typedUnApply(tree: untpd.UnApply, selType: Type)(implicit ctx: Context) =
throw new UnsupportedOperationException("cannot type check an UnApply node")
/** Is given method reference applicable to type arguments `targs` and argument trees `args`?
* @param resultType The expected result type of the application
*/
def isApplicable(methRef: TermRef, targs: List[Type], args: List[Tree], resultType: Type)(implicit ctx: Context): Boolean = {
val nestedContext = ctx.fresh.setExploreTyperState
new ApplicableToTrees(methRef, targs, args, resultType)(nestedContext).success
}
/** Is given method reference applicable to type arguments `targs` and argument trees `args` without inferring views?
* @param resultType The expected result type of the application
*/
def isDirectlyApplicable(methRef: TermRef, targs: List[Type], args: List[Tree], resultType: Type)(implicit ctx: Context): Boolean = {
val nestedContext = ctx.fresh.setExploreTyperState
new ApplicableToTreesDirectly(methRef, targs, args, resultType)(nestedContext).success
}
/** Is given method reference applicable to argument types `args`?
* @param resultType The expected result type of the application
*/
def isApplicable(methRef: TermRef, args: List[Type], resultType: Type)(implicit ctx: Context): Boolean = {
val nestedContext = ctx.fresh.setExploreTyperState
new ApplicableToTypes(methRef, args, resultType)(nestedContext).success
}
/** Is given type applicable to type arguments `targs` and argument trees `args`,
* possibly after inserting an `apply`?
* @param resultType The expected result type of the application
*/
def isApplicable(tp: Type, targs: List[Type], args: List[Tree], resultType: Type)(implicit ctx: Context): Boolean =
onMethod(tp, isApplicable(_, targs, args, resultType))
/** Is given type applicable to argument types `args`, possibly after inserting an `apply`?
* @param resultType The expected result type of the application
*/
def isApplicable(tp: Type, args: List[Type], resultType: Type)(implicit ctx: Context): Boolean =
onMethod(tp, isApplicable(_, args, resultType))
private def onMethod(tp: Type, p: TermRef => Boolean)(implicit ctx: Context): Boolean = tp match {
case methRef: TermRef if methRef.widenSingleton.isInstanceOf[MethodicType] =>
p(methRef)
case mt: MethodicType =>
p(mt.narrow)
case _ =>
tp.member(nme.apply).hasAltWith(d => p(TermRef(tp, nme.apply, d)))
}
/** In a set of overloaded applicable alternatives, is `alt1` at least as good as
* `alt2`? `alt1` and `alt2` are non-overloaded references.
*/
def isAsGood(alt1: TermRef, alt2: TermRef)(implicit ctx: Context): Boolean = track("isAsGood") { ctx.traceIndented(i"isAsGood($alt1, $alt2)", overload) {
assert(alt1 ne alt2)
/** Is class or module class `sym1` derived from class or module class `sym2`?
* Module classes also inherit the relationship from their companions.
*/
def isDerived(sym1: Symbol, sym2: Symbol): Boolean =
if (sym1 isSubClass sym2) true
else if (sym2 is Module) isDerived(sym1, sym2.companionClass)
else (sym1 is Module) && isDerived(sym1.companionClass, sym2)
/** Is alternative `alt1` with type `tp1` as specific as alternative
* `alt2` with type `tp2` ? This is the case if
*
* 1. `tp2` is a method or poly type but `tp1` isn't, or `tp1` is nullary.
* 2. `tp2` and `tp1` are method or poly types and `tp2` can be applied to the parameters of `tp1`.
* 3. Neither `tp1` nor `tp2` are method or poly types and `tp1` is compatible with `tp2`.
*/
def isAsSpecific(alt1: TermRef, tp1: Type, alt2: TermRef, tp2: Type): Boolean = ctx.traceIndented(i"isAsSpecific $tp1 $tp2", overload) { tp1 match {
case tp1: PolyType =>
val tparams = ctx.newTypeParams(alt1.symbol, tp1.paramNames, EmptyFlags, tp1.instantiateBounds)
isAsSpecific(alt1, tp1.instantiate(tparams map (_.typeRef)), alt2, tp2)
case tp1: MethodType =>
def repeatedToSingle(tp: Type): Type = tp match {
case tp @ ExprType(tp1) => tp.derivedExprType(repeatedToSingle(tp1))
case _ => if (tp.isRepeatedParam) tp.argTypesHi.head else tp
}
val formals1 =
if (tp1.isVarArgsMethod && tp2.isVarArgsMethod) tp1.paramTypes map repeatedToSingle
else tp1.paramTypes
isApplicable(alt2, formals1, WildcardType) ||
tp1.paramTypes.isEmpty && tp2.isInstanceOf[MethodOrPoly]
case _ =>
tp2 match {
case tp2: MethodOrPoly => true
case _ => isCompatible(tp1, tp2)
}
}}
/** Drop any implicit parameter section */
def stripImplicit(tp: Type): Type = tp match {
case mt: ImplicitMethodType if !mt.isDependent =>
mt.resultType // todo: make sure implicit method types are not dependent
case pt: PolyType =>
pt.derivedPolyType(pt.paramNames, pt.paramBounds, stripImplicit(pt.resultType))
case _ =>
tp
}
val owner1 = if (alt1.symbol.exists) alt1.symbol.owner else NoSymbol
val owner2 = if (alt2.symbol.exists) alt2.symbol.owner else NoSymbol
val tp1 = stripImplicit(alt1.widen)
val tp2 = stripImplicit(alt2.widen)
def winsOwner1 = isDerived(owner1, owner2)
def winsType1 = isAsSpecific(alt1, tp1, alt2, tp2)
def winsOwner2 = isDerived(owner2, owner1)
def winsType2 = isAsSpecific(alt2, tp2, alt1, tp1)
overload.println(i"isAsGood($alt1, $alt2)? $tp1 $tp2 $winsOwner1 $winsType1 $winsOwner2 $winsType2")
// Assume the following probabilities:
//
// P(winsOwnerX) = 2/3
// P(winsTypeX) = 1/3
//
// Then the call probabilities of the 4 basic operations are as follows:
//
// winsOwner1: 1/1
// winsOwner2: 1/1
// winsType1 : 7/9
// winsType2 : 4/9
if (winsOwner1) /* 6/9 */ !winsOwner2 || /* 4/9 */ winsType1 || /* 8/27 */ !winsType2
else if (winsOwner2) /* 2/9 */ winsType1 && /* 2/27 */ !winsType2
else /* 1/9 */ winsType1 || /* 2/27 */ !winsType2
}}
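  // Illustrative sketch, not part of the original source: given the overloaded alternatives
  //   def f(x: Any): Unit
  //   def f(x: String): Unit
  // f(Any) is applicable to an argument of type String but f(String) is not applicable to
  // an argument of type Any, so only the String alternative "wins on type" and
  // narrowMostSpecific below retains it alone for a call like f("hi").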
def narrowMostSpecific(alts: List[TermRef])(implicit ctx: Context): List[TermRef] = track("narrowMostSpecific") {
alts match {
case Nil => alts
case _ :: Nil => alts
case alt :: alts1 =>
def winner(bestSoFar: TermRef, alts: List[TermRef]): TermRef = alts match {
case alt :: alts1 =>
winner(if (isAsGood(alt, bestSoFar)) alt else bestSoFar, alts1)
case nil =>
bestSoFar
}
val best = winner(alt, alts1)
def asGood(alts: List[TermRef]): List[TermRef] = alts match {
case alt :: alts1 =>
if ((alt eq best) || !isAsGood(alt, best)) asGood(alts1)
else alt :: asGood(alts1)
case nil =>
Nil
}
best :: asGood(alts)
}
}
/** Resolve overloaded alternative `alts`, given expected type `pt` and
* possibly also type argument `targs` that need to be applied to each alternative
* to form the method type.
* todo: use techniques like for implicits to pick candidates quickly?
*/
def resolveOverloaded(alts: List[TermRef], pt: Type, targs: List[Type] = Nil)(implicit ctx: Context): List[TermRef] = track("resolveOverloaded") {
def isDetermined(alts: List[TermRef]) = alts.isEmpty || alts.tail.isEmpty
    /** The shape of the given tree as a type; cannot handle named arguments. */
def typeShape(tree: untpd.Tree): Type = tree match {
case untpd.Function(args, body) =>
defn.FunctionType(args map Function.const(defn.AnyType), typeShape(body))
case _ =>
defn.NothingType
}
    /** The shape of the given tree as a type; more expensive than
     * typeShape but can handle named arguments.
*/
def treeShape(tree: untpd.Tree): Tree = tree match {
case NamedArg(name, arg) =>
val argShape = treeShape(arg)
cpy.NamedArg(tree)(name, argShape).withType(argShape.tpe)
case _ =>
dummyTreeOfType(typeShape(tree))
}
def narrowByTypes(alts: List[TermRef], argTypes: List[Type], resultType: Type): List[TermRef] =
alts filter (isApplicable(_, argTypes, resultType))
/** Is `alt` a method or polytype whose result type after the first value parameter
* section conforms to the expected type `resultType`? If `resultType`
     * is an `IgnoredProto`, pick the underlying type instead.
*/
def resultConforms(alt: Type, resultType: Type)(implicit ctx: Context): Boolean = resultType match {
case IgnoredProto(ignored) => resultConforms(alt, ignored)
case _: ValueType =>
alt.widen match {
case tp: PolyType => resultConforms(constrained(tp).resultType, resultType)
case tp: MethodType => constrainResult(tp.resultType, resultType)
case _ => true
}
case _ => true
}
/** If the `chosen` alternative has a result type incompatible with the expected result
* type `pt`, run overloading resolution again on all alternatives that do match `pt`.
* If the latter succeeds with a single alternative, return it, otherwise
     * fall back to `chosen`.
*
     * Note this order of events is chosen for speed. One might be tempted to
     * preselect alternatives by result type, but that is slower, because it discriminates
     * less. The idea is that when searching for a best solution, as is the case in overloading
     * resolution, we should first try criteria which are cheap and which have a high
     * probability of pruning the search. Result type comparisons are neither cheap nor
     * do they prune much, on average.
*/
def adaptByResult(alts: List[TermRef], chosen: TermRef) = {
def nestedCtx = ctx.fresh.setExploreTyperState
pt match {
case pt: FunProto if !resultConforms(chosen, pt.resultType)(nestedCtx) =>
alts.filter(alt =>
(alt ne chosen) && resultConforms(alt, pt.resultType)(nestedCtx)) match {
case Nil => chosen
case alt2 :: Nil => alt2
case alts2 =>
resolveOverloaded(alts2, pt) match {
case alt2 :: Nil => alt2
case _ => chosen
}
}
case _ => chosen
}
}
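    // Illustrative sketch, not part of the original source: if the argument-based choice is
    // an Int-taking `apply` whose result does not conform to an expected Byte result, but a
    // sibling Byte-taking `apply` does conform, adaptByResult switches to that sibling;
    // with no conforming sibling the original choice is kept (see the
    // pos/array-overload.scala note further down).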
val candidates = pt match {
case pt @ FunProto(args, resultType, _) =>
val numArgs = args.length
val normArgs = args.mapConserve {
case Block(Nil, expr) => expr
case x => x
}
def sizeFits(alt: TermRef, tp: Type): Boolean = tp match {
case tp: PolyType => sizeFits(alt, tp.resultType)
case MethodType(_, ptypes) =>
val numParams = ptypes.length
def isVarArgs = ptypes.nonEmpty && ptypes.last.isRepeatedParam
def hasDefault = alt.symbol.hasDefaultParams
if (numParams == numArgs) true
else if (numParams < numArgs) isVarArgs
else if (numParams > numArgs + 1) hasDefault
else isVarArgs || hasDefault
case _ =>
numArgs == 0
}
def narrowBySize(alts: List[TermRef]): List[TermRef] =
alts filter (alt => sizeFits(alt, alt.widen))
def narrowByShapes(alts: List[TermRef]): List[TermRef] = {
if (normArgs exists (_.isInstanceOf[untpd.Function]))
if (args exists (_.isInstanceOf[Trees.NamedArg[_]]))
narrowByTrees(alts, args map treeShape, resultType)
else
narrowByTypes(alts, normArgs map typeShape, resultType)
else
alts
}
def narrowByTrees(alts: List[TermRef], args: List[Tree], resultType: Type): List[TermRef] =
alts filter ( alt =>
if (!ctx.isAfterTyper) isApplicable(alt, targs, args, resultType)
else isDirectlyApplicable(alt, targs, args, resultType)
)
val alts1 = narrowBySize(alts)
//ctx.log(i"narrowed by size: ${alts1.map(_.symbol.showDcl)}%, %")
if (isDetermined(alts1)) alts1
else {
val alts2 = narrowByShapes(alts1)
//ctx.log(i"narrowed by shape: ${alts1.map(_.symbol.showDcl)}%, %")
if (isDetermined(alts2)) alts2
else narrowByTrees(alts2, pt.typedArgs, resultType)
}
case pt @ PolyProto(targs, pt1) =>
val alts1 = alts filter pt.isMatchedBy
resolveOverloaded(alts1, pt1, targs)
case defn.FunctionType(args, resultType) =>
narrowByTypes(alts, args, resultType)
case pt =>
alts filter (normalizedCompatible(_, pt))
}
narrowMostSpecific(candidates) match {
case Nil => Nil
case alt :: Nil =>
adaptByResult(alts, alt) :: Nil
// why `alts` and not `candidates`? pos/array-overload.scala gives a test case.
// Here, only the Int-apply is a candidate, but it is not compatible with the result
// type. Picking the Byte-apply as the only result-compatible solution then forces
// the arguments (which are constants) to be adapted to Byte. If we had picked
// `candidates` instead, no solution would have been found.
case alts =>
// overload.println(i"ambiguous $alts%, %")
val deepPt = pt.deepenProto
if (deepPt ne pt) resolveOverloaded(alts, deepPt, targs)
else alts
}
}
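  // Illustrative sketch, not part of the original source: for a FunProto the candidates are
  // pruned in order of increasing cost before ranking:
  //   narrowBySize   - drop alternatives whose parameter count cannot accommodate the arguments
  //   narrowByShapes - if closures are among the arguments, compare coarse function shapes
  //   narrowByTrees  - full applicability check against the typed arguments
  // and only the survivors are ranked by narrowMostSpecific.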
private def harmonizeWith[T <: AnyRef](ts: List[T])(tpe: T => Type, adapt: (T, Type) => T)(implicit ctx: Context): List[T] = {
def numericClasses(ts: List[T], acc: Set[Symbol]): Set[Symbol] = ts match {
case t :: ts1 =>
val sym = tpe(t).widen.classSymbol
if (sym.isNumericValueClass) numericClasses(ts1, acc + sym)
else Set()
case Nil =>
acc
}
val clss = numericClasses(ts, Set())
if (clss.size > 1) {
val lub = defn.ScalaNumericValueClassList.find(lubCls =>
clss.forall(defn.isValueSubClass(_, lubCls))).get.typeRef
ts.mapConserve(adapt(_, lub))
}
else ts
}
  /** If `trees` all have numeric value types, and they do not all have the same type,
* pick a common numeric supertype and convert all trees to this type.
*/
def harmonize(trees: List[Tree])(implicit ctx: Context): List[Tree] = {
def adapt(tree: Tree, pt: Type): Tree = tree match {
case cdef: CaseDef => tpd.cpy.CaseDef(cdef)(body = adapt(cdef.body, pt))
case _ => adaptInterpolated(tree, pt, tree)
}
if (ctx.isAfterTyper) trees else harmonizeWith(trees)(_.tpe, adapt)
}
  /** If all `tpes` are numeric value types, and they are not all the same type,
* pick a common numeric supertype and return it instead of every original type.
*/
def harmonizeTypes(tpes: List[Type])(implicit ctx: Context): List[Type] =
harmonizeWith(tpes)(identity, (tp, pt) => pt)
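  // Illustrative sketch, not part of the original source: harmonizeTypes(List(defn.IntType, defn.LongType))
  // would yield List(defn.LongType, defn.LongType), Long being the smallest numeric value class
  // both convert to, while any non-numeric element (e.g. a String type) leaves the list unchanged.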
}
/*
def typedApply(app: untpd.Apply, fun: Tree, methRef: TermRef, args: List[Tree], resultType: Type)(implicit ctx: Context): Tree = track("typedApply") {
new ApplyToTyped(app, fun, methRef, args, resultType).result
}
def typedApply(fun: Tree, methRef: TermRef, args: List[Tree], resultType: Type)(implicit ctx: Context): Tree =
typedApply(untpd.Apply(untpd.TypedSplice(fun), args), fun, methRef, args, resultType)
*/
| VladimirNik/dotty | src/dotty/tools/dotc/typer/Applications.scala | Scala | bsd-3-clause | 48,811 |
import org.scalatest.{Matchers, FunSuite}
class RobotTest extends FunSuite with Matchers {
test("create") {
val robot = Robot(Bearing.North, (0, 0))
robot.bearing should equal(Bearing.North)
robot.coordinates should equal((0, 0))
}
test("advance - positive") {
Robot(Bearing.North, (0, 0)).advance should equal(Robot(Bearing.North, (0, 1)))
Robot(Bearing.East, (0, 0)).advance should equal(Robot(Bearing.East, (1, 0)))
}
test("advance - negative") {
Robot(Bearing.South, (0, 0)).advance should equal(Robot(Bearing.South, (0, -1)))
Robot(Bearing.West, (0, 0)).advance should equal(Robot(Bearing.West, (-1, 0)))
}
test("turning") {
Robot(Bearing.South, (0, 0)).turnRight should equal(Robot(Bearing.West, (0, 0)))
Robot(Bearing.West, (0, 0)).turnLeft() should equal(Robot(Bearing.South, (0, 0)))
}
test("turning - edge cases") {
Robot(Bearing.West, (0, 0)).turnRight should equal(Robot(Bearing.North, (0, 0)))
Robot(Bearing.North, (0, 0)).turnLeft should equal(Robot(Bearing.West, (0, 0)))
}
test("simulate Seurat") {
val seurat = Robot(Bearing.East, (-2, 1))
val movedSeurat = seurat.simulate("RLAALAL")
movedSeurat should equal(Robot(Bearing.West, (0, 2)))
}
test("simulate Erasmus") {
val erasmus = Robot(Bearing.North, (0, 0))
val movedErasmus = erasmus.simulate("LAAARALA")
movedErasmus should equal(Robot(Bearing.West, (-4, 1)))
}
test("simulate Chirox") {
val chirox = Robot(Bearing.East, (2, -7))
val movedChirox = chirox.simulate("RRAAAAALA")
movedChirox should equal(Robot(Bearing.South, (-3, -8)))
}
test("simulate Awesomo") {
val awesomo = Robot(Bearing.South, (8, 4))
val movedAwesomo = awesomo.simulate("LAAARRRALLLL")
movedAwesomo should equal(Robot(Bearing.North, (11, 5)))
}
}
| nlochschmidt/xscala | robot-simulator/src/test/scala/RobotTest.scala | Scala | mit | 1,829 |
package chat.tox.antox.callbacks
import android.app.{Notification, PendingIntent}
import android.content.{Context, Intent}
import android.preference.PreferenceManager
import android.support.v4.app.NotificationCompat
import android.util.Log
import chat.tox.antox.R
import chat.tox.antox.activities.MainActivity
import chat.tox.antox.data.State
import chat.tox.antox.tox.ToxSingleton
import chat.tox.antox.wrapper.ToxKey
import im.tox.tox4j.core.callbacks.FriendRequestCallback
object AntoxOnFriendRequestCallback {
private val TAG = "chat.tox.antox.TAG"
val FRIEND_KEY = "chat.tox.antox.FRIEND_KEY"
val FRIEND_MESSAGE = "chat.tox.antox.FRIEND_MESSAGE"
}
class AntoxOnFriendRequestCallback(private var ctx: Context) extends FriendRequestCallback[Unit] {
override def friendRequest(keyBytes: Array[Byte], timeDelta: Int, message: Array[Byte])(state: Unit): Unit = {
val db = State.db
val key = new ToxKey(keyBytes)
if (!db.isContactBlocked(key)){
db.addFriendRequest(key, new String(message, "UTF-8"))
}
Log.d("FriendRequestCallback", "")
val preferences = PreferenceManager.getDefaultSharedPreferences(this.ctx)
if (preferences.getBoolean("notifications_enable_notifications", true) &&
preferences.getBoolean("notifications_friend_request", true)) {
val vibratePattern = Array[Long](0, 500)
if (!preferences.getBoolean("notifications_new_message_vibrate", true)) {
vibratePattern(1) = 0
}
val mBuilder = new NotificationCompat.Builder(this.ctx)
.setSmallIcon(R.drawable.ic_actionbar)
.setContentTitle(this.ctx.getString(R.string.friend_request))
.setContentText(new String(message, "UTF-8"))
.setVibrate(vibratePattern)
.setDefaults(Notification.DEFAULT_ALL)
.setAutoCancel(true)
val targetIntent = new Intent(this.ctx, classOf[MainActivity])
val contentIntent = PendingIntent.getActivity(this.ctx, 0, targetIntent, PendingIntent.FLAG_UPDATE_CURRENT)
mBuilder.setContentIntent(contentIntent)
ToxSingleton.mNotificationManager.notify(0, mBuilder.build())
}
}
}
| gale320/Antox | app/src/main/scala/chat/tox/antox/callbacks/AntoxOnFriendRequestCallback.scala | Scala | gpl-3.0 | 2,124 |
package nz.ubermouse.rsbot.tasks
import nz.ubermouse.rsbot.actions.{ActionContainer, Action}
import nz.ubermouse.rsbot._
abstract class Task(val priority: Int) {
def shouldExecute: Boolean
def execute: ActionContainer
}
| UberMouse/RSArchitectureTest | src/nz/ubermouse/rsbot/tasks/Task.scala | Scala | mit | 226 |
package com.twitter.finagle.mux.util
import org.scalacheck.{Arbitrary, Gen}
import org.scalatest.FunSuite
import org.scalatest.prop.GeneratorDrivenPropertyChecks
class TagMapTest extends FunSuite with GeneratorDrivenPropertyChecks {
val min = 8
val max = 10000
implicit val genTagSet: Arbitrary[Range] =
Arbitrary(for {
start <- Gen.choose(0, max - min)
end <- Gen.choose(start, max - min)
} yield start to end + min)
test("map tags to elems") {
forAll { range: Range =>
val ints = TagMap[java.lang.Integer](range, 256)
for ((i, j) <- range.zipWithIndex) {
assert(ints.map(-i) == Some(i))
assert(ints.size == j + 1)
}
for (i <- range) assert(ints.unmap(i) == Some(-i))
}
}
test("Respect the limits of the provided Range") {
val range = 0 until 1 // only 0 is available
val ints = TagMap[java.lang.Integer](range, 256)
assert(ints.map(100) == Some(0))
assert(ints.size == 1)
assert(ints.map(101) == None)
assert(ints.size == 1)
}
test("ignore tags outside its range") {
forAll { range: Range =>
val ints = TagMap[java.lang.Integer](range, 256)
val right = range.last + 1
assert(ints.unmap(right) == None)
assert(ints.size == 0)
val left = range.start - 1
assert(ints.unmap(left) == None)
assert(ints.size == 0)
}
}
}
| mkhq/finagle | finagle-mux/src/test/scala/com/twitter/finagle/mux/util/TagMapTest.scala | Scala | apache-2.0 | 1,386 |
package com.wordnik.client.model
case class Category (
id: Long,
name: String
)
| jfiala/swagger-spring-demo | user-rest-service-1.0.2/generated-code/scalatra/src/main/scala/com/wordnik/client/model/Category.scala | Scala | apache-2.0 | 91 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.onlinetesting.phase2
import services.AuditService
import akka.actor.ActorSystem
import com.google.inject.name.Named
import common.{ FutureEx, Phase2TestConcern }
import config._
import connectors.ExchangeObjects._
import connectors.{ AuthProviderClient, OnlineTestEmailClient, OnlineTestsGatewayClient }
import factories.{ DateTimeFactory, UUIDFactory }
import javax.inject.{ Inject, Singleton }
import model.Exceptions._
import model.OnlineTestCommands._
import model.ProgressStatuses._
import model._
import model.command.{ Phase3ProgressResponse, ProgressResponse }
import model.exchange.{ Phase2TestGroupWithActiveTest, PsiRealTimeResults, PsiTestResultReady }
import model.persisted._
import model.stc.StcEventTypes.StcEventType
import model.stc.{ AuditEvent, AuditEvents, DataStoreEvents }
import org.joda.time.DateTime
import play.api.Logging
import play.api.mvc.RequestHeader
import repositories.application.GeneralApplicationRepository
import repositories.contactdetails.ContactDetailsRepository
import repositories.onlinetesting.Phase2TestRepository
import services.onlinetesting.Exceptions.{ TestCancellationException, TestRegistrationException }
import services.onlinetesting.phase3.Phase3TestService
import services.onlinetesting.{ TextSanitizer, OnlineTestService }
import services.sift.ApplicationSiftService
import services.stc.StcEventService
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.{ Failure, Success, Try }
// scalastyle:off number.of.methods
@Singleton
class Phase2TestService @Inject() (val appRepository: GeneralApplicationRepository,
val cdRepository: ContactDetailsRepository,
val testRepository: Phase2TestRepository,
val onlineTestsGatewayClient: OnlineTestsGatewayClient,
val tokenFactory: UUIDFactory,
val dateTimeFactory: DateTimeFactory,
@Named("Phase2OnlineTestEmailClient") val emailClient: OnlineTestEmailClient,
val auditService: AuditService,
authProvider: AuthProviderClient,
phase3TestService: Phase3TestService,
val siftService: ApplicationSiftService,
appConfig: MicroserviceAppConfig,
val eventService: StcEventService,
actor: ActorSystem
) extends OnlineTestService with Phase2TestConcern with ResetPhase2Test with Logging {
type TestRepository = Phase2TestRepository
val onlineTestsGatewayConfig = appConfig.onlineTestsGatewayConfig
def testConfig: Phase2TestsConfig = onlineTestsGatewayConfig.phase2Tests
case class Phase2TestInviteData(application: OnlineTestApplication, psiTest: PsiTest)
def getTestGroup(applicationId: String): Future[Option[Phase2TestGroupWithActiveTest]] = {
for {
phase2Opt <- testRepository.getTestGroup(applicationId)
} yield phase2Opt.map { phase2 =>
Phase2TestGroupWithActiveTest(
phase2.expirationDate,
phase2.activeTests,
resetAllowed = true
)
}
}
def getTestGroupByOrderId(orderId: String): Future[Option[Phase2TestGroupWithActiveTest]] = {
for {
phase2Opt <- testRepository.getTestGroupByOrderId(orderId)
} yield phase2Opt.map { phase2 =>
Phase2TestGroupWithActiveTest(
phase2.expirationDate,
phase2.activeTests,
resetAllowed = true
)
}
}
def verifyAccessCode(email: String, accessCode: String): Future[String] = for {
userId <- cdRepository.findUserIdByEmail(email)
testGroupOpt <- testRepository.getTestGroupByUserId(userId)
testUrl <- Future.fromTry(processInvigilatedEtrayAccessCode(testGroupOpt, accessCode))
} yield testUrl
override def nextApplicationsReadyForOnlineTesting(maxBatchSize: Int): Future[List[OnlineTestApplication]] = {
testRepository.nextApplicationsReadyForOnlineTesting(maxBatchSize)
}
override def nextTestGroupWithReportReady: Future[Option[Phase2TestGroupWithAppId]] = {
testRepository.nextTestGroupWithReportReady
}
override def emailCandidateForExpiringTestReminder(expiringTest: NotificationExpiringOnlineTest,
emailAddress: String,
reminder: ReminderNotice)(implicit hc: HeaderCarrier, rh: RequestHeader): Future[Unit] = {
emailClient.sendTestExpiringReminder(emailAddress, expiringTest.preferredName,
reminder.hoursBeforeReminder, reminder.timeUnit, expiringTest.expiryDate).map { _ =>
audit(s"ReminderPhase2ExpiringOnlineTestNotificationBefore${reminder.hoursBeforeReminder}HoursEmailed",
expiringTest.userId, Some(emailAddress))
}
}
def resetTest(application: OnlineTestApplication, orderIdToReset: String, actionTriggeredBy: String)
(implicit hc: HeaderCarrier, rh: RequestHeader): Future[Unit] = {
val (invitationDate, expirationDate) = calcOnlineTestDates(onlineTestsGatewayConfig.phase2Tests.expiryTimeInDays)
for {
// Fetch existing test group that should exist
testGroupOpt <- testRepository.getTestGroup(application.applicationId)
testGroup = testGroupOpt
.getOrElse(throw CannotFindTestGroupByApplicationIdException(s"appId - ${application.applicationId}"))
// Extract test that requires reset
testToReset = testGroup.tests.find(_.orderId == orderIdToReset)
.getOrElse(throw CannotFindTestByOrderIdException(s"OrderId - $orderIdToReset"))
_ = logger.info(s"testToReset -- $testToReset")
// Create PsiIds to use for re-invitation
psiIds = onlineTestsGatewayConfig.phase2Tests.tests.find {
case (_, ids) => ids.inventoryId == testToReset.inventoryId
}.getOrElse(throw CannotFindTestByInventoryIdException(s"InventoryId - ${testToReset.inventoryId}"))._2
_ = logger.info(s"psiIds -- $psiIds")
// Register applicant
testInvite <- registerPsiApplicant(application, psiIds, invitationDate)
newPsiTest = testInvite.psiTest
_ = logger.info(s"newPsiTest -- $newPsiTest")
// Set old test to inactive
testsWithInactiveTest = testGroup.tests
.map { t => if (t.orderId == orderIdToReset) { t.copy(usedForResults = false) } else t }
_ = logger.info(s"testsWithInactiveTest -- $testsWithInactiveTest")
// insert new test and maintain test order
idxOfResetTest = testGroup.tests.indexWhere(_.orderId == orderIdToReset)
updatedTests = insertTest(testsWithInactiveTest, idxOfResetTest, newPsiTest)
_ = logger.info(s"updatedTests -- $updatedTests")
_ <- insertOrUpdateTestGroup(application)(testGroup.copy(expirationDate = expirationDate, tests = updatedTests))
_ <- emailInviteToApplicant(application)(hc, rh, invitationDate, expirationDate)
} yield ()
}
private def insertTest(ls: List[PsiTest], i: Int, value: PsiTest): List[PsiTest] = {
val (front, back) = ls.splitAt(i)
front ++ List(value) ++ back
}
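  // Illustrative sketch, not part of the original source:
  //   insertTest(List(a, b, c), 1, x) == List(a, x, b, c)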
private def cancelPsiTest(appId: String,
userId: String,
orderId: String): Future[AssessmentCancelAcknowledgementResponse] = {
val req = CancelCandidateTestRequest(orderId)
onlineTestsGatewayClient.psiCancelTest(req).map { response =>
logger.debug(s"Response from cancellation for orderId=$orderId")
if (response.status != AssessmentCancelAcknowledgementResponse.completedStatus) {
logger.debug(s"Cancellation failed with errors: ${response.details}")
throw TestCancellationException(s"appId=$appId, orderId=$orderId")
} else {
audit("TestCancelledForCandidate", userId)
response
}
}
}
private def inPhase3TestsInvited(applicationId: String): Future[Boolean] = {
for {
progressResponse <- appRepository.findProgress(applicationId)
} yield {
progressResponse.phase3ProgressResponse match {
case response: Phase3ProgressResponse if response.phase3TestsInvited => true
case _ => false
}
}
}
override def registerAndInviteForTestGroup(application: OnlineTestApplication)
(implicit hc: HeaderCarrier, rh: RequestHeader): Future[Unit] = {
registerAndInviteForTestGroup(List(application))
}
override def processNextExpiredTest(expiryTest: TestExpirationEvent)(implicit hc: HeaderCarrier, rh: RequestHeader): Future[Unit] = {
testRepository.nextExpiringApplication(expiryTest).flatMap {
case Some(expired) =>
logger.warn(s"Expiring candidates for PHASE2 - expiring candidate ${expired.applicationId}")
processExpiredTest(expired, expiryTest)
case None =>
logger.warn(s"Expiring candidates for PHASE2 - none found")
Future.successful(())
}
}
override def registerAndInvite(applications: List[OnlineTestApplication])
(implicit hc: HeaderCarrier, rh: RequestHeader): Future[Unit] = {
registerAndInviteForTestGroup(applications)
}
override def registerAndInviteForTestGroup(applications: List[OnlineTestApplication])
(implicit hc: HeaderCarrier, rh: RequestHeader): Future[Unit] = {
val firstApplication = applications.head
val applicationsWithTheSameType = applications filter (_.isInvigilatedETray == firstApplication.isInvigilatedETray)
val standardTests = onlineTestsGatewayConfig.phase2Tests.standard // use this for the order of tests
FutureEx.traverseSerial(applicationsWithTheSameType) { application =>
registerCandidateForTests(application, standardTests).recover {
case e: Exception =>
logger.error(s"Error occurred registering candidate ${application.applicationId} with phase 2 tests - ${e.getMessage}")
}
}.map { _ => () }
}
// Register a single candidate with all the phase 2 tests.
private def registerCandidateForTests(application: OnlineTestApplication, testNames: List[String])(
implicit hc: HeaderCarrier, request: RequestHeader) = {
val tests = onlineTestsGatewayConfig.phase2Tests.tests
logger.warn(s"Processing phase2 candidate ${application.applicationId} - inviting to ${tests.size} tests")
// Register the 2 tests with a delay between each test
val candidateRegistrations = FutureEx.traverseToTry(testNames.zipWithIndex) {
case (testName, delayModifier) =>
val testIds = tests.getOrElse(testName, throw new Exception(s"Unable to find test ids when registering phase 2 candidate for $testName"))
val delay = (delayModifier * onlineTestsGatewayConfig.phase2Tests.testRegistrationDelayInSecs).second
akka.pattern.after(delay, actor.scheduler) {
logger.debug(s"Phase2TestService - about to call registerPsiApplicant for application=$application with testIds=$testIds")
registerAndInviteForTestGroup2(application, testIds).map(_ => ())
}
}
val processFailedRegistrations = candidateRegistrations.flatMap { phase2TestsRegistrations =>
phase2TestsRegistrations.collect {
case Failure(e) => throw e
}
Future.successful(())
}
for {
_ <- processFailedRegistrations
emailAddress <- candidateEmailAddress(application.userId)
(invitationDate, expirationDate) = calculateDates(application)
_ <- emailInviteToApplicant(application, emailAddress, invitationDate, expirationDate)
} yield audit("Phase2InvitationComplete", application.userId)
}
//scalastyle:off method.length
def inviteP2CandidateToMissingTest(applicationId: String): Future[Unit] = {
def allInventoryIds = {
val standardTests = onlineTestsGatewayConfig.phase2Tests.standard
standardTests.map { testName =>
onlineTestsGatewayConfig.phase2Tests.tests.getOrElse(testName, throw new Exception(s"Unable to find inventoryId for $testName"))
}.map(_.inventoryId).toSet
}
def getPsiTestsIds(inventoryId: String) = {
onlineTestsGatewayConfig.phase2Tests.tests.values.filter( _.inventoryId == inventoryId ).head
}
def getCurrentlyRegisteredInventoryIds(phase2TestGroupOpt: Option[Phase2TestGroup]) = {
phase2TestGroupOpt.map { phase2TestGroup =>
phase2TestGroup.tests.map( _.inventoryId )
}.getOrElse(Nil).toSet
}
def identifyInventoryIdsCandidateIsMissing(registeredIds: Set[String], allIds: Set[String]) = allIds.diff(registeredIds)
def registerCandidateForMissingTest(applicationId: String, psiTestIds: PsiTestIds) = {
logger.warn(s"Candidate $applicationId needs to register for inventoryId:${psiTestIds.inventoryId}")
implicit val hc = HeaderCarrier()
for {
onlineTestApplicationOpt <- testRepository.applicationReadyForOnlineTesting(applicationId)
application = onlineTestApplicationOpt.getOrElse(throw new Exception(s"No application found for $applicationId"))
invitationDate = dateTimeFactory.nowLocalTimeZone
registeredApplicant <- registerPsiApplicant(application, psiTestIds, invitationDate)
currentTestGroupOpt <- testRepository.getTestGroup(applicationId)
currentTestGroup = currentTestGroupOpt.getOrElse(throw new Exception(s"No existing p2 test group found for $applicationId"))
_ <- insertPhase2TestGroups(registeredApplicant)(invitationDate, currentTestGroup.expirationDate, hc)
} yield ()
}
logger.warn(s"Attempting to invite candidate $applicationId to missing P2 tests")
logger.warn(s"Candidate $applicationId - the full set of inventoryIds=${allInventoryIds.mkString(",")}")
for {
status <- appRepository.findStatus(applicationId)
_ = if (ApplicationStatus.PHASE2_TESTS.toString != status.status) {
throw new Exception(s"Candidate $applicationId application status is ${status.status}. Expecting ${ApplicationStatus.PHASE2_TESTS}")
}
phase2TestGroupOpt <- testRepository.getTestGroup(applicationId)
registeredInventoryIds = getCurrentlyRegisteredInventoryIds(phase2TestGroupOpt)
_ = logger.warn(s"Candidate $applicationId is currently registered with tests whose inventory ids=${registeredInventoryIds.mkString(",")}")
idsToRegisterFor = identifyInventoryIdsCandidateIsMissing(registeredInventoryIds, allInventoryIds)
_ = if (idsToRegisterFor.size != 1) {
val idsToRegisterForText = if (idsToRegisterFor.isEmpty){ "empty" } else { idsToRegisterFor.mkString(",") }
val msg = s"Candidate $applicationId has incorrect number of tests to register for (should be 1). " +
s"InventoryIds to register for = $idsToRegisterForText"
throw new Exception(msg)
}
_ <- registerCandidateForMissingTest(applicationId, getPsiTestsIds(idsToRegisterFor.head))
} yield ()
} //scalastyle:on
private def processInvigilatedEtrayAccessCode(phase: Option[Phase2TestGroup], accessCode: String): Try[String] = {
phase.fold[Try[String]](Failure(new NotFoundException(Some("No Phase2TestGroup found")))){
phase2TestGroup => {
val psiTestOpt = phase2TestGroup.activeTests.find( _.invigilatedAccessCode == Option(accessCode) )
psiTestOpt.map { psiTest =>
if(phase2TestGroup.expirationDate.isBefore(dateTimeFactory.nowLocalTimeZone)) {
Failure(ExpiredTestForTokenException("Phase 2 test expired for invigilated access code"))
} else {
Success(psiTest.testUrl)
}
}.getOrElse(Failure(InvalidTokenException("Invigilated access code not found")))
}
}
}
private def calculateDates(application: OnlineTestApplication, expiresDate: Option[DateTime] = None) = {
val isInvigilatedETray = application.isInvigilatedETray
val expiryTimeInDays = if (isInvigilatedETray) {
onlineTestsGatewayConfig.phase2Tests.expiryTimeInDaysForInvigilatedETray
} else {
onlineTestsGatewayConfig.phase2Tests.expiryTimeInDays
}
val (invitationDate, expirationDate) = expiresDate match {
case Some(expDate) => (dateTimeFactory.nowLocalTimeZone, expDate)
case _ => calcOnlineTestDates(expiryTimeInDays)
}
invitationDate -> expirationDate
}
private def registerAndInviteForTestGroup2(application: OnlineTestApplication,
testIds: PsiTestIds,
expiresDate: Option[DateTime] = None)
(implicit hc: HeaderCarrier, rh: RequestHeader): Future[OnlineTestApplication] = {
//TODO: Do we need to worry about this for PSI?
// require(applications.map(_.isInvigilatedETray).distinct.size <= 1, "the batch can have only one type of invigilated e-tray")
implicit val (invitationDate, expirationDate) = calculateDates(application, expiresDate)
for {
registeredApplicant <- registerPsiApplicant(application, testIds, invitationDate)
_ <- insertPhase2TestGroups(registeredApplicant)(invitationDate, expirationDate, hc)
} yield {
logger.warn(s"Phase2 candidate ${application.applicationId} successfully invited to P2 test - inventoryId:${testIds.inventoryId}")
application
}
}
def registerPsiApplicants(applications: List[OnlineTestApplication],
testIds: PsiTestIds, invitationDate: DateTime)
(implicit hc: HeaderCarrier): Future[List[Phase2TestInviteData]] = {
Future.sequence(
applications.map { application =>
registerPsiApplicant(application, testIds, invitationDate)
})
}
private def registerPsiApplicant(application: OnlineTestApplication,
testIds: PsiTestIds,
invitationDate: DateTime)
(implicit hc: HeaderCarrier): Future[Phase2TestInviteData] = {
registerApplicant2(application, testIds).map { aoa =>
if (aoa.status != AssessmentOrderAcknowledgement.acknowledgedStatus) {
val msg = s"Received response status of ${aoa.status} when registering candidate " +
s"${application.applicationId} to phase2 tests with Ids=$testIds"
logger.warn(msg)
throw TestRegistrationException(msg)
} else {
val psiTest = PsiTest(
inventoryId = testIds.inventoryId,
orderId = aoa.orderId,
usedForResults = true,
testUrl = aoa.testLaunchUrl,
invitationDate = invitationDate,
assessmentId = testIds.assessmentId,
reportId = testIds.reportId,
normId = testIds.normId
)
Phase2TestInviteData(application, psiTest)
}
}
}
private def registerApplicant2(application: OnlineTestApplication, testIds: PsiTestIds)
(implicit hc: HeaderCarrier): Future[AssessmentOrderAcknowledgement] = {
val orderId = tokenFactory.generateUUID()
val preferredName = TextSanitizer.sanitizeFreeText(application.preferredName)
val lastName = TextSanitizer.sanitizeFreeText(application.lastName)
val maybePercentage = application.eTrayAdjustments.flatMap(_.percentage)
val registerCandidateRequest = RegisterCandidateRequest(
inventoryId = testIds.inventoryId, // Read from config to identify the test we are registering for
orderId = orderId, // Identifier we generate to uniquely identify the test
accountId = application.testAccountId, // Candidate's account across all tests
preferredName = preferredName,
lastName = lastName,
// The url psi will redirect to when the candidate completes the test
redirectionUrl = buildRedirectionUrl(orderId, testIds.inventoryId),
adjustment = maybePercentage.map(TestAdjustment.apply),
assessmentId = testIds.assessmentId,
reportId = testIds.reportId,
normId = testIds.normId
)
onlineTestsGatewayClient.psiRegisterApplicant(registerCandidateRequest).map { response =>
audit("UserRegisteredForPhase2Test", application.userId)
response
}
}
private def buildRedirectionUrl(orderId: String, inventoryId: String) = {
val appUrl = onlineTestsGatewayConfig.candidateAppUrl
val scheduleCompletionBaseUrl = s"$appUrl/fset-fast-stream/online-tests/psi/phase2"
s"$scheduleCompletionBaseUrl/complete/$orderId"
}
// TODO: cubiks delete
/*
def buildInviteApplication(application: OnlineTestApplication, token: String,
userId: Int, schedule: Phase2Schedule): InviteApplicant = {
val scheduleCompletionBaseUrl = s"${onlineTestsGatewayConfig.candidateAppUrl}/fset-fast-stream/online-tests/phase2"
InviteApplicant(schedule.scheduleId,
userId,
s"$scheduleCompletionBaseUrl/complete/$token",
resultsURL = None,
timeAdjustments = buildTimeAdjustments(schedule.assessmentId, application)
)
}*/
private def insertPhase2TestGroups(o: List[Phase2TestInviteData])
(implicit invitationDate: DateTime,
expirationDate: DateTime, hc: HeaderCarrier): Future[Unit] = {
Future.sequence(o.map { completedInvite =>
val maybeInvigilatedAccessCodeFut = if (completedInvite.application.isInvigilatedETray) {
authProvider.generateAccessCode.map(ac => Some(ac.token))
} else {
Future.successful(None)
}
for {
maybeInvigilatedAccessCode <- maybeInvigilatedAccessCodeFut
testWithAccessCode = completedInvite.psiTest.copy(invigilatedAccessCode = maybeInvigilatedAccessCode)
newTestGroup = Phase2TestGroup(expirationDate = expirationDate, List(testWithAccessCode))
_ <- insertOrUpdateTestGroup(completedInvite.application)(newTestGroup)
} yield {}
}).map(_ => ())
}
private def insertPhase2TestGroups(completedInvite: Phase2TestInviteData)
(implicit invitationDate: DateTime,
expirationDate: DateTime, hc: HeaderCarrier): Future[Unit] = {
val maybeInvigilatedAccessCodeFut = if (completedInvite.application.isInvigilatedETray) {
authProvider.generateAccessCode.map(ac => Some(ac.token))
} else {
Future.successful(None)
}
val appId = completedInvite.application.applicationId
for {
maybeInvigilatedAccessCode <- maybeInvigilatedAccessCodeFut
currentTestGroupOpt <- testRepository.getTestGroup(appId)
existingTests = currentTestGroupOpt.map(_.tests).getOrElse(Nil)
testWithAccessCode = completedInvite.psiTest.copy(invigilatedAccessCode = maybeInvigilatedAccessCode)
newTestGroup = Phase2TestGroup(expirationDate = expirationDate, existingTests :+ testWithAccessCode)
_ <- insertOrUpdateTestGroup(completedInvite.application)(newTestGroup)
} yield {}
}
private def insertOrUpdateTestGroup(application: OnlineTestApplication)
(newOnlineTestProfile: Phase2TestGroup): Future[Unit] = for {
currentOnlineTestProfile <- testRepository.getTestGroup(application.applicationId)
updatedTestProfile <- insertOrAppendNewTests(application.applicationId, currentOnlineTestProfile, newOnlineTestProfile)
_ <- testRepository.resetTestProfileProgresses(application.applicationId, determineStatusesToRemove(updatedTestProfile))
} yield ()
private def insertOrAppendNewTests(applicationId: String,
currentProfile: Option[Phase2TestGroup],
newProfile: Phase2TestGroup): Future[Phase2TestGroup] = {
val insertFut = testRepository.insertOrUpdateTestGroup(applicationId, newProfile)
insertFut.flatMap { _ =>
testRepository.getTestGroup(applicationId).map {
case Some(testProfile) => testProfile
case None => throw ApplicationNotFound(applicationId)
}
}
}
def markAsStarted2(orderId: String, startedTime: DateTime = dateTimeFactory.nowLocalTimeZone)
(implicit hc: HeaderCarrier, rh: RequestHeader): Future[Unit] = eventSink {
updatePhase2Test2(orderId, testRepository.updateTestStartTime(_: String, startedTime)).flatMap { u =>
//TODO: remove the next line and comment in the following line at end of campaign 2019
testRepository.updateProgressStatus(u.applicationId, ProgressStatuses.PHASE2_TESTS_STARTED) map { _ =>
// maybeMarkAsStarted(u.applicationId).map { _ =>
DataStoreEvents.ETrayStarted(u.applicationId) :: Nil
}
}
}
private def maybeMarkAsStarted(appId: String): Future[Unit] = {
appRepository.getProgressStatusTimestamps(appId).map { timestamps =>
val hasStarted = timestamps.exists { case (progressStatus, _) => progressStatus == PHASE2_TESTS_STARTED.key }
if (hasStarted) {
Future.successful(())
} else {
testRepository.updateProgressStatus(appId, PHASE2_TESTS_STARTED)
}
}
}
def markAsCompleted2(orderId: String)(implicit hc: HeaderCarrier, rh: RequestHeader): Future[Unit] = eventSink {
val updateTestFunc = testRepository.updateTestCompletionTime2(_: String, dateTimeFactory.nowLocalTimeZone)
updatePhase2Test2(orderId, updateTestFunc).flatMap { u =>
val msg = s"Active tests cannot be found when marking phase2 test complete for orderId: $orderId"
require(u.testGroup.activeTests.nonEmpty, msg)
val activeTestsCompleted = u.testGroup.activeTests forall (_.completedDateTime.isDefined)
if (activeTestsCompleted) {
testRepository.updateProgressStatus(u.applicationId, ProgressStatuses.PHASE2_TESTS_COMPLETED) map { _ =>
DataStoreEvents.ETrayCompleted(u.applicationId) :: Nil
}
} else {
Future.successful(List.empty[StcEventType])
}
}
}
def markAsReportReadyToDownload2(orderId: String, reportReady: PsiTestResultReady): Future[Unit] = {
updatePhase2Test2(orderId, testRepository.updateTestReportReady2(_: String, reportReady)).flatMap { updated =>
if (updated.testGroup.activeTests forall (_.resultsReadyToDownload)) {
testRepository.updateProgressStatus(updated.applicationId, ProgressStatuses.PHASE2_TESTS_RESULTS_READY)
} else {
Future.successful(())
}
}
}
/*
private def updatePhase2Test(cubiksUserId: Int, updateCubiksTest: Int => Future[Unit]): Future[Phase2TestGroupWithAppId] = {
for {
_ <- updateCubiksTest(cubiksUserId)
updated <- testRepository.getTestProfileByCubiksId(cubiksUserId)
} yield {
updated
}
}
*/
private def updatePhase2Test2(orderId: String,
updatePsiTest: String => Future[Unit]): Future[Phase2TestGroupWithAppId] = {
updatePsiTest(orderId).flatMap { _ =>
testRepository.getTestProfileByOrderId(orderId).map(testGroup => testGroup)
}
}
//scalastyle:off method.length
override def storeRealTimeResults(orderId: String, results: PsiRealTimeResults)
(implicit hc: HeaderCarrier, rh: RequestHeader): Future[Unit] = {
def insertResults(applicationId: String, orderId: String, testProfile: Phase2TestGroupWithAppId,
results: PsiRealTimeResults): Future[Unit] =
testRepository.insertTestResult2(
applicationId,
testProfile.testGroup.tests.find(_.orderId == orderId)
.getOrElse(throw CannotFindTestByOrderIdException(s"Test not found for orderId=$orderId")),
model.persisted.PsiTestResult.fromCommandObject(results)
)
def maybeUpdateProgressStatus(appId: String) = {
testRepository.getTestGroup(appId).flatMap { testProfileOpt =>
val latestProfile = testProfileOpt.getOrElse(throw new Exception(s"No test profile returned for $appId"))
if (latestProfile.activeTests.forall(_.testResult.isDefined)) {
testRepository.updateProgressStatus(appId, ProgressStatuses.PHASE2_TESTS_RESULTS_RECEIVED).map(_ =>
audit(s"ProgressStatusSet${ProgressStatuses.PHASE2_TESTS_RESULTS_RECEIVED}", appId))
} else {
val msg = s"Did not update progress status to ${ProgressStatuses.PHASE2_TESTS_RESULTS_RECEIVED} for $appId - " +
s"not all active tests have a testResult saved"
logger.warn(msg)
Future.successful(())
}
}
}
def markTestAsCompleted(profile: Phase2TestGroupWithAppId): Future[Unit] = {
profile.testGroup.tests.find(_.orderId == orderId).map { test =>
if (!test.isCompleted) {
logger.info(s"Processing real time results - setting completed date on psi test whose orderId=$orderId")
markAsCompleted2(orderId)
}
else {
logger.info(s"Processing real time results - completed date is already set on psi test whose orderId=$orderId " +
s"so will not mark as complete")
Future.successful(())
}
}.getOrElse(throw CannotFindTestByOrderIdException(s"Test not found for orderId=$orderId"))
}
(for {
appIdOpt <- testRepository.getApplicationIdForOrderId(orderId, "PHASE2")
} yield {
val appId = appIdOpt.getOrElse(throw CannotFindTestByOrderIdException(s"Application not found for test for orderId=$orderId"))
for {
profile <- testRepository.getTestProfileByOrderId(orderId)
_ <- markTestAsCompleted(profile)
_ <- profile.testGroup.tests.find(_.orderId == orderId).map { test => insertResults(appId, test.orderId, profile, results) }
.getOrElse(throw CannotFindTestByOrderIdException(s"Test not found for orderId=$orderId"))
_ <- maybeUpdateProgressStatus(appId)
} yield ()
}).flatMap(identity)
}
//scalastyle:on
def buildTimeAdjustments(assessmentId: Int, application: OnlineTestApplication): List[TimeAdjustments] = {
application.eTrayAdjustments.flatMap(_.timeNeeded).map { _ =>
List(TimeAdjustments(assessmentId, sectionId = 1, absoluteTime = calculateAbsoluteTimeWithAdjustments(application)))
}.getOrElse(Nil)
}
def emailInviteToApplicants(candidates: List[OnlineTestApplication])
(implicit hc: HeaderCarrier, rh: RequestHeader,
invitationDate: DateTime, expirationDate: DateTime): Future[Unit] = {
Future.sequence(candidates.map { candidate =>
emailInviteToApplicant(candidate)(hc, rh, invitationDate, expirationDate)
}).map(_ => ())
}
private def emailInviteToApplicant(candidate: OnlineTestApplication)
(implicit hc: HeaderCarrier,
rh: RequestHeader,
invitationDate: DateTime,
expirationDate: DateTime): Future[Unit] = {
if (candidate.isInvigilatedETray) {
Future.successful(())
} else {
candidateEmailAddress(candidate.userId).flatMap(emailInviteToApplicant(candidate, _ , invitationDate, expirationDate))
}
}
def extendTestGroupExpiryTime(applicationId: String, extraDays: Int, actionTriggeredBy: String)
(implicit hc: HeaderCarrier, rh: RequestHeader): Future[Unit] = eventSink {
val progressFut = appRepository.findProgress(applicationId)
val phase2TestGroup = testRepository.getTestGroup(applicationId)
.map(tg => tg.getOrElse(throw new IllegalStateException("Expiration date for Phase 2 cannot be extended. Test group not found.")))
for {
progress <- progressFut
phase2 <- phase2TestGroup
isAlreadyExpired = progress.phase2ProgressResponse.phase2TestsExpired
extendDays = extendTime(isAlreadyExpired, phase2.expirationDate)
newExpiryDate = extendDays(extraDays)
_ <- testRepository.updateGroupExpiryTime(applicationId, newExpiryDate, testRepository.phaseName)
_ <- progressStatusesToRemoveWhenExtendTime(newExpiryDate, phase2, progress)
.fold(Future.successful(()))(p => appRepository.removeProgressStatuses(applicationId, p))
} yield {
audit(isAlreadyExpired, applicationId) ::
DataStoreEvents.ETrayExtended(applicationId, actionTriggeredBy) ::
Nil
}
}
private def progressStatusesToRemoveWhenExtendTime(extendedExpiryDate: DateTime,
profile: Phase2TestGroup,
progress: ProgressResponse): Option[List[ProgressStatus]] = {
val shouldRemoveExpired = progress.phase2ProgressResponse.phase2TestsExpired
val today = dateTimeFactory.nowLocalTimeZone
val shouldRemoveSecondReminder = extendedExpiryDate.minusHours(Phase2SecondReminder.hoursBeforeReminder).isAfter(today)
val shouldRemoveFirstReminder = extendedExpiryDate.minusHours(Phase2FirstReminder.hoursBeforeReminder).isAfter(today)
val progressStatusesToRemove = (Set.empty[ProgressStatus]
++ (if (shouldRemoveExpired) Set(PHASE2_TESTS_EXPIRED) else Set.empty)
++ (if (shouldRemoveSecondReminder) Set(PHASE2_TESTS_SECOND_REMINDER) else Set.empty)
++ (if (shouldRemoveFirstReminder) Set(PHASE2_TESTS_FIRST_REMINDER) else Set.empty)).toList
if (progressStatusesToRemove.isEmpty) {
None
} else {
Some(progressStatusesToRemove)
}
}
private def audit(isAlreadyExpired: Boolean, applicationId: String): AuditEvent = {
val details = Map("applicationId" -> applicationId)
if (isAlreadyExpired) {
AuditEvents.ExpiredTestsExtended(details)
} else {
AuditEvents.NonExpiredTestsExtended(details)
}
}
protected[onlinetesting] def calculateAbsoluteTimeWithAdjustments(application: OnlineTestApplication): Int = {
val baseEtrayTestDurationInMinutes = 80
(application.eTrayAdjustments.flatMap { etrayAdjustments => etrayAdjustments.timeNeeded }.getOrElse(0)
* baseEtrayTestDurationInMinutes / 100) + baseEtrayTestDurationInMinutes
}
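  // Worked example (hypothetical adjustment): timeNeeded = 25 (i.e. 25% extra time) gives
  // 25 * 80 / 100 + 80 = 100 minutes for the adjusted e-tray test duration.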
// TODO this method is exactly the same as the Phase1 version (with the exception of the progress status)
// It's a bit fiddly to extract up to the OnlineTestService/Repository traits without defining another common
  // CubiksTestService/Repository layer, as it will be different for Launchpad.
// Still feels wrong to leave it here when it's 99% the same as phase1.
def retrieveTestResult(testProfile: RichTestGroup)(implicit hc: HeaderCarrier): Future[Unit] = {
/*
def insertTests(testResults: List[(OnlineTestCommands.PsiTestResult, U)]): Future[Unit] = {
Future.sequence(testResults.map {
case (result, phaseTest) => testRepository2.insertTestResult2(
testProfile.applicationId,
phaseTest, model.persisted.PsiTestResult.fromCommandObject(result)
)
}).map(_ => ())
}
def maybeUpdateProgressStatus(appId: String) = {
testRepository2.getTestGroup(appId).flatMap { eventualProfile =>
val latestProfile = eventualProfile.getOrElse(throw new Exception(s"No profile returned for $appId"))
if (latestProfile.activeTests.forall(_.testResult.isDefined)) {
testRepository.updateProgressStatus(appId, ProgressStatuses.PHASE2_TESTS_RESULTS_RECEIVED).map(_ =>
audit(s"ProgressStatusSet${ProgressStatuses.PHASE2_TESTS_RESULTS_RECEIVED}", appId))
} else {
Future.successful(())
}
}
}
val testResults = Future.sequence(testProfile.testGroup.activeTests.flatMap { test =>
test.reportId.map { reportId =>
onlineTestsGatewayClient.downloadPsiTestResults(reportId)
}.map(_.map(_ -> test))
})
for {
eventualTestResults <- testResults
_ <- insertTests(eventualTestResults)
_ <- maybeUpdateProgressStatus(testProfile.applicationId)
} yield {
eventualTestResults.foreach { _ =>
audit(s"ResultsRetrievedForSchedule", testProfile.applicationId)
}
}
*/
Future.successful(())
}
}
trait ResetPhase2Test {
import ProgressStatuses._
def determineStatusesToRemove(testGroup: Phase2TestGroup): List[ProgressStatus] = {
(if (testGroup.hasNotStartedYet) List(PHASE2_TESTS_STARTED) else List()) ++
(if (testGroup.hasNotCompletedYet) List(PHASE2_TESTS_COMPLETED) else List()) ++
(if (testGroup.hasNotResultReadyToDownloadForAllTestsYet) List(PHASE2_TESTS_RESULTS_RECEIVED, PHASE2_TESTS_RESULTS_READY) else List()) ++
List(PHASE2_TESTS_FAILED, PHASE2_TESTS_EXPIRED, PHASE2_TESTS_PASSED, PHASE2_TESTS_FAILED_NOTIFIED, PHASE2_TESTS_FAILED_SDIP_AMBER)
}
}
//scalastyle:on number.of.methods
| hmrc/fset-faststream | app/services/onlinetesting/phase2/Phase2TestService.scala | Scala | apache-2.0 | 37,687 |
/*
* bytefrog: a tracing framework for the JVM. For more information
* see http://code-pulse.com/bytefrog
*
* Copyright (C) 2014 Applied Visions - http://securedecisions.avi.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.secdec.bytefrog.fileapi.tracefile.entry
import java.io.IOException
import java.io.InputStream
import java.io.PrintStream
import scala.io.Source
import com.secdec.bytefrog.fileapi.io.zip.ZipFileBuilderToken
import com.secdec.bytefrog.fileapi.tracefile.TraceFileBuilder
object EncounteredMethodsEntry extends TraceFileEntry[String] {
val path = "encountered-methods.txt"
def reader: TraceFileEntryReader[String] = new Reader
def writer(builder: TraceFileBuilder): TraceFileEntryWriter[String] =
new Writer(builder.vend(path))
/** This string must appear at the beginning of a file
* that can be parsed by this object.
*/
private val magicString = "// Trace Data: encountered method names"
private class Reader extends TraceFileEntryReader[String] {
def read(content: InputStream)(callback: String => Unit): Unit = {
val source = Source fromInputStream content
try {
val lines = source.getLines
if (lines.hasNext) {
val firstLine = lines.next
if (firstLine == magicString) {
lines foreach callback
} else {
throw new IOException(
"Unexpected input: content does not appear to contain encountered method signatures")
}
} else {
throw new IOException("The given content appears to be empty")
}
} finally {
source.close
content.close
}
}
}
private class Writer(token: ZipFileBuilderToken) extends TraceFileEntryWriter[String] {
private val outstr = token.resource.openOutput
private val out = new PrintStream(outstr)
private def tryWrite(line: String): Unit = {
try {
out.println(line)
} catch {
// if an exception is caught, close the streams and re-throw it
case e: IOException =>
out.close
outstr.close
throw e
}
}
//first line must be the `magicString`
tryWrite(magicString)
def write(item: String): Unit = tryWrite(item)
def finish(): Unit = {
out.close
outstr.close
token.completionCallback()
}
}
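  /** A minimal usage sketch (not part of the original API): it reads an entry whose content follows the
    * documented format — the magic header line followed by one method signature per line. The example
    * signature string and the use of an in-memory stream are assumptions for illustration only.
    */
  private object UsageSketch {
    import java.io.ByteArrayInputStream
    def demo(): Unit = {
      val content = "// Trace Data: encountered method names\ncom/example/Foo.bar()V"
      reader.read(new ByteArrayInputStream(content.getBytes("UTF-8"))) { signature =>
        println(signature) // prints "com/example/Foo.bar()V"
      }
    }
  }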
}
| secdec/bytefrog-clients | file-api/src/main/scala/com/secdec/bytefrog/fileapi/tracefile/entry/EncounteredMethodsEntry.scala | Scala | apache-2.0 | 2,732 |
package io.iohk.ethereum.db.storage
import akka.util.ByteString
import boopickle.Default.{Pickle, Unpickle}
import io.iohk.ethereum.db.dataSource.DataSource
import io.iohk.ethereum.db.storage.ReceiptStorage._
import io.iohk.ethereum.domain.{Address, SuccessOutcome, _}
import io.iohk.ethereum.utils.ByteUtils.{byteSequenceToBuffer, compactPickledBytes}
import boopickle.DefaultBasic._
/**
 * This class is used to store the Receipts, using:
 * Key: the hash of the block to which the list of receipts belongs
* Value: the list of receipts
*/
class ReceiptStorage(val dataSource: DataSource) extends TransactionalKeyValueStorage[BlockHash, Seq[Receipt]] {
import ReceiptStorage._
override val namespace: IndexedSeq[Byte] = Namespaces.ReceiptsNamespace
override def keySerializer: BlockHash => IndexedSeq[Byte] = _.toIndexedSeq
// FIXME: perhaps we should just operate on ByteString to avoid such strange conversions: ETCM-322
override def keyDeserializer: IndexedSeq[Byte] => BlockHash = k => ByteString.fromArrayUnsafe(k.toArray)
override def valueSerializer: ReceiptSeq => IndexedSeq[Byte] = receipts =>
compactPickledBytes(Pickle.intoBytes(receipts))
override def valueDeserializer: IndexedSeq[Byte] => ReceiptSeq =
byteSequenceToBuffer _ andThen Unpickle[Seq[Receipt]].fromBytes
}
object ReceiptStorage {
type BlockHash = ByteString
type ReceiptSeq = Seq[Receipt]
implicit val byteStringPickler: Pickler[ByteString] =
transformPickler[ByteString, Array[Byte]](ByteString(_))(_.toArray[Byte])
implicit val hashOutcomePickler: Pickler[HashOutcome] = transformPickler[HashOutcome, ByteString] { hash =>
HashOutcome(hash)
} { outcome => outcome.stateHash }
implicit val successOutcomePickler: Pickler[SuccessOutcome.type] = transformPickler[SuccessOutcome.type, ByteString] {
_ => SuccessOutcome
} { _ => ByteString(Array(1.toByte)) }
implicit val failureOutcomePickler: Pickler[FailureOutcome.type] = transformPickler[FailureOutcome.type, ByteString] {
_ => FailureOutcome
} { _ => ByteString(Array(0.toByte)) }
implicit val transactionOutcomePickler: Pickler[TransactionOutcome] = compositePickler[TransactionOutcome]
.addConcreteType[HashOutcome]
.addConcreteType[SuccessOutcome.type]
.addConcreteType[FailureOutcome.type]
implicit val addressPickler: Pickler[Address] =
transformPickler[Address, ByteString](bytes => Address(bytes))(address => address.bytes)
implicit val txLogEntryPickler: Pickler[TxLogEntry] =
transformPickler[TxLogEntry, (Address, Seq[ByteString], ByteString)] { case (address, topics, data) =>
TxLogEntry(address, topics, data)
} { entry => (entry.loggerAddress, entry.logTopics, entry.data) }
implicit val receiptPickler: Pickler[Receipt] =
transformPickler[Receipt, (TransactionOutcome, BigInt, ByteString, Seq[TxLogEntry])] {
case (state, gas, filter, logs) => new Receipt(state, gas, filter, logs)
} { receipt =>
(receipt.postTransactionStateHash, receipt.cumulativeGasUsed, receipt.logsBloomFilter, receipt.logs)
}
}
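/** A small round-trip sketch (not part of the original file): it only uses the serializers defined
  * above, so it should hold for any receipts that compare structurally equal after pickling.
  */
private object ReceiptSerializationSketch {
  import ReceiptStorage.ReceiptSeq
  def roundTrip(storage: ReceiptStorage, receipts: ReceiptSeq): ReceiptSeq =
    storage.valueDeserializer(storage.valueSerializer(receipts))
}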
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/db/storage/ReceiptStorage.scala | Scala | mit | 3,082 |
package exercises
import scala.util.Random
import scala.collection.mutable.ArrayBuffer
/**
* Created by aguestuser on 12/31/14.
*/
class Ch03_Arrays {
//1//
// imperative
def make_array(n: Int) : Array[Int] = {
for (item <- new Array[Int](n)) yield Random.nextInt(n)
}
// functional
def make_array_1(n: Int) : Array[Int] = {
new Array[Int](n) map { (_: Int) => Random.nextInt(n) }
}
//3//
// with loop //
def swap(arr: Array[Int]) : Array[Int] = {
(for (i <- 0 until arr.length) yield
if (i % 2 == 0)
if (i == arr.size - 1) arr(i)
else arr(i + 1)
else arr(i - 1)
).toArray
}
// with map
def swap_1(arr: Array[Int]) : Array[Int] = {
arr.zipWithIndex.map{ case(_, i) =>
if (i % 2 == 0)
if (i == arr.size - 1) arr(i)
else arr(i + 1)
else arr(i - 1)
}
}
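  // Worked example: swap(Array(1, 2, 3, 4, 5)) and swap_1(Array(1, 2, 3, 4, 5)) both yield
  // Array(2, 1, 4, 3, 5) — adjacent elements are swapped and the odd element at the end stays in place.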
// 4 //
def sort_by_sign(a: Array[Int]) : Array[Int] = {
a.filter(_ > 0) ++ a.filter(_ <= 0)
}
// 5 //
// FOLD for averaging
// terse (but confusing?)
def avg(a: Array[Double]) : Double = {
(0.0 /: a)(_ + _) / a.length
}
//verbose (but less confusing?)
def avg_1(a: Array[Double]) : Double = {
a.foldLeft(0.0)(_ + _) / a.length
}
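  // Worked example: avg(Array(1.0, 2.0, 6.0)) == 3.0 — the fold sums the elements before dividing by the length.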
// 6 //
def rev_sort_2(a: Array[Int]) : Array[Int] = {
a.sortWith(_ > _)
}
// 7 //
println(Array(1,1,1,2,3).distinct.toList)
println((ArrayBuffer() ++= Array(1,1,1,1,2,3).distinct).toString)
// 8 //
// PROBLEM: Given a sequence of integers, we want to remove all but the first negative number.
// NOTE: below solution works, but seems extremely inelegant and probably slow!
def rem_tail_negs(a: Array[Int]) : Array[Int] = {
val indices = a.zipWithIndex.filter { _._1 < 0 }.map { _._2 }.drop(1)
a.zipWithIndex.filter { e => !indices.contains(e._2) }.map{ _._1 }
}
def keepOnlyTheFirstNegative(ints: List[Int]) : List[Int] = {
ints.span(_ >= 0) match {
case (positives, List()) => positives
case (positives, head :: tail) =>
positives ++ (head :: tail.filter(_ >= 0))
}
}
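  // Worked example: rem_tail_negs(Array(1, -2, 3, -4)) == Array(1, -2, 3)
  // and keepOnlyTheFirstNegative(List(1, -2, 3, -4)) == List(1, -2, 3) —
  // only the first negative number survives.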
}
| aguestuser/hackerschool | scala_for_the_impatient/src/main/scala/exercises/Ch03_Arrays.scala | Scala | gpl-3.0 | 2,086 |
package com.portia.downloader
import com.portia.models._
import com.mongodb.WriteConcern
import com.mongodb.casbah.commons.MongoDBObject
import com.novus.salat.dao.SalatMongoCursor
import org.jsoup.Jsoup
import com.github.nscala_time.time.Imports._
/** A dummy downloader whose simple job is to download the content of crawled web nodes.
*
* @author duytd
*/
class Downloader {
def run(): Unit = {
while(true){
val freshUrls = UrlDAO.find(MongoDBObject("downloaded"->false))
this.download(freshUrls)
}
}
private def download(freshUrls:SalatMongoCursor[Url]): Unit = {
for (url<-freshUrls) {
Downloader.download(url)
}
}
}
/** Companion object for [[com.portia.downloader.Downloader]], providing download helpers. */
object Downloader {
def download(url:Url): Unit = {
try {
println("Downloading " + url.absPath + "...")
// Get the HTML body content
saveDocByURL(url)
println("Finish downloading " + url.absPath)
}
catch {
case e: Exception => println("Failed to download "+url.absPath+". Reason:" + e.getMessage)
val updateUrl = url.copy(downloaded = false)
UrlDAO.update(MongoDBObject("_id"->url._id), updateUrl, upsert = false, multi = false, new WriteConcern)
}
}
def saveDocByURL(url: Url): Unit = {
val doc = Jsoup.connect(url.absPath).get.body()
// Mark as downloaded
val updateUrl = url.copy(downloaded = true, parseTime = DateTime.now.toString)
UrlDAO.update(MongoDBObject("_id"->url._id), updateUrl, upsert = false, multi = false, new WriteConcern)
// Save content to the database
val documentObj = new Document(urlId = url._id, content = doc.toString)
DocumentDAO.insert(documentObj)
}
} | duytd/blackspider | src/main/scala/com/portia/downloader/Downloader.scala | Scala | gpl-3.0 | 1,720 |
package org.duffqiu.rest.common
import net.liftweb.json.DefaultFormats
import net.liftweb.json.Serialization.write
object RestUtility {
def asJson(body: AnyRef) = {
//Using Lift
implicit val formats = DefaultFormats
val bodyJsonString = write(body)
bodyJsonString
}
}
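/** A minimal usage sketch (not part of the original file); `Ping` is a hypothetical payload type. */
private object RestUtilityUsageSketch {
  case class Ping(status: String)
  // Expected to produce the JSON string {"status":"ok"} via lift-json's Serialization.write.
  val json: String = RestUtility.asJson(Ping("ok"))
}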
| duffqiu/rest-test-dsl | src/main/scala/org/duffqiu/rest/common/RestUtility.scala | Scala | apache-2.0 | 331 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.text
import org.parboiled.errors.{ErrorUtils, ParsingException}
import org.parboiled.scala.parserunners.{BasicParseRunner, ReportingParseRunner}
/**
* Parses a simple set of key-value pairs, in the form 'key1:value1,key2:value2'
*/
object KVPairParser {
private val Parser = new KVPairParser()
@throws(classOf[ParsingException])
def parse(s: String): Map[String, String] = parse(s, report = true)
@throws(classOf[ParsingException])
def parse(s: String, report: Boolean): Map[String, String] = {
val runner = if (report) { ReportingParseRunner(Parser.map) } else { BasicParseRunner(Parser.map) }
val parsing = runner.run(s.stripMargin('|').replaceAll("\\s*", ""))
parsing.result.getOrElse {
throw new ParsingException(s"Invalid split pattern: ${ErrorUtils.printParseErrors(parsing)}")
}
}
}
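/** A minimal usage sketch (not part of the original file); the keys and values shown are hypothetical
  * but follow the documented 'key1:value1,key2:value2' form. Whitespace is stripped before parsing.
  */
private object KVPairParserUsageSketch {
  val opts: Map[String, String] = KVPairParser.parse("table.splitter:hash,index.coverage:full")
  // opts == Map("table.splitter" -> "hash", "index.coverage" -> "full")
}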
private class KVPairParser(pairSep: String = ",", kvSep: String = ":") extends BasicParser {
import org.parboiled.scala._
private def key: Rule1[String] = rule {
oneOrMore(char | anyOf(".-")) ~> { (k) => k }
}
private def value: Rule1[String] = rule {
quotedString | singleQuotedString | oneOrMore(char | anyOf(".-[]%")) ~> { (k) => k }
}
private def keyValue: Rule1[(String, String)] = rule {
(key ~ kvSep ~ value) ~~> { (k, v) => (k, v) }
}
def map: Rule1[Map[String, String]] = rule {
oneOrMore(keyValue, pairSep) ~ EOI ~~> { (kvs) => kvs.toMap }
}
}
| jahhulbert-ccri/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/text/KVPairParser.scala | Scala | apache-2.0 | 1,931 |
package com.cds.learn.chapter5
/**
* Created by chendongsheng5 on 2017/5/27.
*/
import Stream._
trait Stream[+A] {
// The natural recursive solution
def toListRecursive: List[A] = this match {
case Cons(h,t) => h() :: t().toListRecursive
case _ => List()
}
/*
The above solution will stack overflow for large streams, since it's
not tail-recursive. Here is a tail-recursive implementation. At each
step we cons onto the front of the `acc` list, which will result in the
reverse of the stream. Then at the end we reverse the result to get the
correct order again.
*/
def toList: List[A] = {
@annotation.tailrec
def go(s: Stream[A], acc: List[A]): List[A] = s match {
case Cons(h,t) => go(t(), h() :: acc)
case _ => acc
}
go(this, List()).reverse
}
/*
In order to avoid the `reverse` at the end, we could write it using a
mutable list buffer and an explicit loop instead. Note that the mutable
list buffer never escapes our `toList` method, so this function is
still _pure_.
*/
def toListFast: List[A] = {
val buf = new collection.mutable.ListBuffer[A]
@annotation.tailrec
def go(s: Stream[A]): List[A] = s match {
case Cons(h,t) =>
buf += h()
go(t())
case _ => buf.toList
}
go(this)
}
/*
  Create a new Stream[A] by taking the first n elements of this stream. We can achieve that by recursively
calling take on the invoked tail of a cons cell. We make sure that the tail is not invoked unless
we need to, by handling the special case where n == 1 separately. If n == 0, we can avoid looking
at the stream at all.
*/
def take(n: Int): Stream[A] = this match {
case Cons(h, t) if n > 1 => cons(h(), t().take(n - 1))
case Cons(h, _) if n == 1 => cons(h(), empty)
case _ => empty
}
/*
  Create a new Stream[A] from this, but ignore the first n elements. This can be achieved by recursively calling
drop on the invoked tail of a cons cell. Note that the implementation is also tail recursive.
*/
@annotation.tailrec
final def drop(n: Int): Stream[A] = this match {
case Cons(_, t) if n > 0 => t().drop(n - 1)
case _ => this
}
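  // Worked example: Stream(1, 2, 3, 4).take(2).toList == List(1, 2) and
  // Stream(1, 2, 3, 4).drop(2).toList == List(3, 4).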
/*
It's a common Scala style to write method calls without `.` notation, as in `t() takeWhile f`.
*/
def takeWhile(f: A => Boolean): Stream[A] = this match {
case Cons(h,t) if f(h()) => cons(h(), t() takeWhile f)
case _ => empty
}
def foldRight[B](z: => B)(f: (A, => B) => B): B = // The arrow `=>` in front of the argument type `B` means that the function `f` takes its second argument by name and may choose not to evaluate it.
this match {
case Cons(h,t) => f(h(), t().foldRight(z)(f)) // If `f` doesn't evaluate its second argument, the recursion never occurs.
case _ => z
}
def exists(p: A => Boolean): Boolean =
foldRight(false)((a, b) => p(a) || b) // Here `b` is the unevaluated recursive step that folds the tail of the stream. If `p(a)` returns `true`, `b` will never be evaluated and the computation terminates early.
/*
Since `&&` is non-strict in its second argument, this terminates the traversal as soon as a nonmatching element is found.
*/
def forAll(f: A => Boolean): Boolean =
foldRight(true)((a,b) => f(a) && b)
def takeWhile_1(f: A => Boolean): Stream[A] =
foldRight(empty[A])((h,t) =>
if (f(h)) cons(h,t)
else empty)
def headOption: Option[A] =
foldRight(None: Option[A])((h,_) => Some(h))
def map[B](f: A => B): Stream[B] =
foldRight(empty[B])((h,t) => cons(f(h), t))
def filter(f: A => Boolean): Stream[A] =
foldRight(empty[A])((h,t) =>
if (f(h)) cons(h, t)
else t)
def append[B>:A](s: => Stream[B]): Stream[B] =
foldRight(s)((h,t) => cons(h,t))
def flatMap[B](f: A => Stream[B]): Stream[B] =
foldRight(empty[B])((h,t) => f(h) append t)
def mapViaUnfold[B](f: A => B): Stream[B] =
unfold(this) {
case Cons(h,t) => Some((f(h()), t()))
case _ => None
}
def takeViaUnfold(n: Int): Stream[A] =
unfold((this,n)) {
case (Cons(h,t), 1) => Some((h(), (empty, 0)))
case (Cons(h,t), n) if n > 1 => Some((h(), (t(), n-1)))
case _ => None
}
def takeWhileViaUnfold(f: A => Boolean): Stream[A] =
unfold(this) {
case Cons(h,t) if f(h()) => Some((h(), t()))
case _ => None
}
def zipWith[B,C](s2: Stream[B])(f: (A,B) => C): Stream[C] =
unfold((this, s2)) {
case (Cons(h1,t1), Cons(h2,t2)) =>
Some((f(h1(), h2()), (t1(), t2())))
case _ => None
}
// special case of `zipWith`
def zip[B](s2: Stream[B]): Stream[(A,B)] =
zipWith(s2)((_,_))
def zipAll[B](s2: Stream[B]): Stream[(Option[A],Option[B])] =
zipWithAll(s2)((_,_))
def zipWithAll[B, C](s2: Stream[B])(f: (Option[A], Option[B]) => C): Stream[C] =
Stream.unfold((this, s2)) {
case (Empty, Empty) => None
case (Cons(h, t), Empty) => Some(f(Some(h()), Option.empty[B]) -> (t(), empty[B]))
case (Empty, Cons(h, t)) => Some(f(Option.empty[A], Some(h())) -> (empty[A] -> t()))
case (Cons(h1, t1), Cons(h2, t2)) => Some(f(Some(h1()), Some(h2())) -> (t1() -> t2()))
}
/*
`s startsWith s2` when corresponding elements of `s` and `s2` are all equal, until the point that `s2` is exhausted. If `s` is exhausted first, or we find an element that doesn't match, we terminate early. Using non-strictness, we can compose these three separate logical steps--the zipping, the termination when the second stream is exhausted, and the termination if a nonmatching element is found or the first stream is exhausted.
*/
def startsWith[A](s: Stream[A]): Boolean =
zipAll(s).takeWhile(!_._2.isEmpty) forAll {
case (h,h2) => h == h2
}
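  // Worked example: Stream(1, 2, 3) startsWith Stream(1, 2) is true, while
  // Stream(1, 2, 3) startsWith Stream(2) is false; evaluation stops at the first mismatch.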
/*
The last element of `tails` is always the empty `Stream`, so we handle this as a special case, by appending it to the output.
*/
def tails: Stream[Stream[A]] =
unfold(this) {
case Empty => None
case s => Some((s, s drop 1))
} append Stream(empty)
def hasSubsequence[A](s: Stream[A]): Boolean =
tails exists (_ startsWith s)
/*
The function can't be implemented using `unfold`, since `unfold` generates elements of the `Stream` from left to right. It can be implemented using `foldRight` though.
The implementation is just a `foldRight` that keeps the accumulated value and the stream of intermediate results, which we `cons` onto during each iteration. When writing folds, it's common to have more state in the fold than is needed to compute the result. Here, we simply extract the accumulated list once finished.
*/
def scanRight[B](z: B)(f: (A, => B) => B): Stream[B] =
foldRight((z, Stream(z)))((a, p0) => {
// p0 is passed by-name and used in by-name args in f and cons. So use lazy val to ensure only one evaluation...
lazy val p1 = p0
val b2 = f(a, p1._1)
(b2, cons(b2, p1._2))
})._2
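  // Worked example: Stream(1, 2, 3).scanRight(0)(_ + _).toList == List(6, 5, 3, 0) —
  // the intermediate sums of the right fold, ending with the seed value.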
@annotation.tailrec
final def find(f: A => Boolean): Option[A] = this match {
case Empty => None
case Cons(h, t) => if (f(h())) Some(h()) else t().find(f)
}
}
case object Empty extends Stream[Nothing]
case class Cons[+A](h: () => A, t: () => Stream[A]) extends Stream[A]
object Stream {
def cons[A](hd: => A, tl: => Stream[A]): Stream[A] = {
lazy val head = hd
lazy val tail = tl
Cons(() => head, () => tail)
}
def empty[A]: Stream[A] = Empty
def apply[A](as: A*): Stream[A] =
if (as.isEmpty) empty
else cons(as.head, apply(as.tail: _*))
val ones: Stream[Int] = Stream.cons(1, ones)
// This is more efficient than `cons(a, constant(a))` since it's just
// one object referencing itself.
def constant[A](a: A): Stream[A] = {
lazy val tail: Stream[A] = Cons(() => a, () => tail)
tail
}
def from(n: Int): Stream[Int] =
cons(n, from(n+1))
val fibs = {
def go(f0: Int, f1: Int): Stream[Int] =
cons(f0, go(f1, f0+f1))
go(0, 1)
}
def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] =
f(z) match {
case Some((h,s)) => cons(h, unfold(s)(f))
case None => empty
}
/*
The below two implementations use `fold` and `map` functions in the Option class to implement unfold, thereby doing away with the need to manually pattern match as in the above solution.
*/
def unfoldViaFold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] =
f(z).fold(empty[A])((p: (A,S)) => cons(p._1,unfold(p._2)(f)))
def unfoldViaMap[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] =
f(z).map((p: (A,S)) => cons(p._1,unfold(p._2)(f))).getOrElse(empty[A])
/*
Scala provides shorter syntax when the first action of a function literal is to match on an expression. The function passed to `unfold` in `fibsViaUnfold` is equivalent to `p => p match { case (f0,f1) => ... }`, but we avoid having to choose a name for `p`, only to pattern match on it.
*/
val fibsViaUnfold =
unfold((0,1)) { case (f0,f1) => Some((f0,(f1,f0+f1))) }
def fromViaUnfold(n: Int) =
unfold(n)(n => Some((n,n+1)))
def constantViaUnfold[A](a: A) =
unfold(a)(_ => Some((a,a)))
// could also of course be implemented as constant(1)
val onesViaUnfold = unfold(1)(_ => Some((1,1)))
}
| anancds/scala-project | fpis/src/main/scala/com/cds/learn/chapter5/Stream.scala | Scala | mit | 9,246 |
package models
case class Ladder(id: Int,
name: String,
domain: String,
creator: String,
created: Int,
active: Boolean)
case class LadderInput(name: String)
| yzernik/office-ladder | client/src/main/scala/models/Ladder.scala | Scala | mit | 249 |
package com.sksamuel.elastic4s.searches
import com.sksamuel.elastic4s.get.HitField
import org.elasticsearch.search.SearchHitField
import scala.collection.JavaConverters._
case class RichSearchHitField(java: SearchHitField) extends HitField {
override def name: String = java.name()
override def value: AnyRef = java.getValue
override def values: Seq[AnyRef] = java.values().asScala.toList
override def isMetadataField: Boolean = java.isMetadataField
// java method aliases
@deprecated("use name", "5.0.0")
def getName: String = name
@deprecated("use value", "5.0.0")
def getValue: AnyRef = value
@deprecated("use values", "5.0.0")
def getValues: Seq[AnyRef] = values
}
| FabienPennequin/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/RichSearchHitField.scala | Scala | apache-2.0 | 697 |
/*
* Copyright 2015 herd contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.herd
import org.apache.spark.sql.SparkSession
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}
class HerdOptionsSuite extends FunSuite with Matchers with BeforeAndAfterAll {
private var spark: SparkSession = _
override def beforeAll(): Unit = {
this.spark = SparkSession
.builder()
.appName("catalog-test")
.master("local[*]")
.getOrCreate()
}
override def afterAll(): Unit = {
this.spark.stop()
}
test("validate required params/param-combinations") {
// attempt to initialize with no namespace
an [IllegalArgumentException] should be thrownBy HerdOptions(getDefaultHerdOptions - "namespace")(spark)
// attempt to initialize with no bDef
an [IllegalArgumentException] should be thrownBy HerdOptions(getDefaultHerdOptions - "businessObjectName")(spark)
// attempt to initialize with subPartitions but no primary partition
an [IllegalArgumentException] should be thrownBy HerdOptions(getDefaultHerdOptions - "partitionValue")(spark)
// attempt to initialize with subPartitionKeys but no subPartitionValues
an [IllegalArgumentException] should be thrownBy HerdOptions(getDefaultHerdOptions - "subPartitionValues")(spark)
// attempt to initialize with subPartitionValues but no subPartitionKeys
an [IllegalArgumentException] should be thrownBy HerdOptions(getDefaultHerdOptions - "subPartitionKeys")(spark)
}
test("validate default params") {
// initialize with no dataProvider and verify default value
val herdOptionsNoDataProvider: HerdOptions = HerdOptions(getDefaultHerdOptions - "dataProvider")(spark)
assert(!herdOptionsNoDataProvider.dataProvider.isEmpty, "dataProvider should have a default value")
herdOptionsNoDataProvider.dataProvider shouldBe "FINRA"
// initialize with no formatUsage and verify default value
val herdOptionsNoUsage: HerdOptions = HerdOptions(getDefaultHerdOptions - "businessObjectFormatUsage")(spark)
assert(!herdOptionsNoUsage.formatUsage.isEmpty, "businessObjectFormatUsage should have a default value")
herdOptionsNoUsage.formatUsage shouldBe "PRC"
// initialize with no delimiter and verify default value
val herdOptionsNoDelimiter: HerdOptions = HerdOptions(getDefaultHerdOptions - "delimiter")(spark)
assert(!herdOptionsNoDelimiter.delimiter.isEmpty, "delimiter should have a default value")
herdOptionsNoDelimiter.delimiter shouldBe ","
// initialize with no nullValue and verify default value
val herdOptionsNoNullValue: HerdOptions = HerdOptions(getDefaultHerdOptions - "nullValue")(spark)
assert(herdOptionsNoNullValue.nullValue != null, "nullValue should not be null")
herdOptionsNoNullValue.nullValue shouldBe ""
val herdOptionsNoStorage: HerdOptions = HerdOptions(getDefaultHerdOptions - "storage")(spark)
assert(!herdOptionsNoStorage.storageName.isEmpty, "storage should have a default value")
herdOptionsNoStorage.storageName shouldBe "S3_MANAGED"
val herdOptionsNoStoragePrefix: HerdOptions = HerdOptions(getDefaultHerdOptions - "storagePathPrefix")(spark)
assert(!herdOptionsNoStoragePrefix.storagePathPrefix.isEmpty, "storagePathPrefix should have a default value")
herdOptionsNoStoragePrefix.storagePathPrefix shouldBe "s3a://"
}
test("validate that subPartitionKeys/Values are valid that are read correctly") {
val herdOptions: HerdOptions = HerdOptions(getDefaultHerdOptions)(spark)
herdOptions.subPartitionKeys shouldBe Array[String]("BUSINESS_LOCATION", "BUSINESS_DIVISION")
val herdOptionsSubPartKeysTrimAndEmptyCheck = HerdOptions(getDefaultHerdOptions + ("subPartitionKeys" -> " a| b |"))(spark)
herdOptionsSubPartKeysTrimAndEmptyCheck.subPartitionKeys shouldBe Array[String]("a", "b")
val herdOptionsSubPartValuesTrimAndEmptyCheck = HerdOptions(getDefaultHerdOptions + ("subPartitionValues" -> " x| y |"))(spark)
herdOptionsSubPartValuesTrimAndEmptyCheck.subPartitions shouldBe Array[String]("x", "y")
an [IllegalArgumentException] should be thrownBy HerdOptions(getDefaultHerdOptions + ("subPartitionKeys" -> "a") + ("subPartitionValues" -> "x|y"))(spark)
}
test("validate partition value filter") {
val herdOptions: HerdOptions = HerdOptions(getDefaultHerdOptions + ("partitionFilter" -> "2019-01-02--2019-01-03"))(spark)
val herdOptionsPartitionValueList: HerdOptions = HerdOptions(getDefaultHerdOptions + ("partitionFilter" -> "2019-01-02,2019-01-03"))(spark)
herdOptions.partitionFilter shouldBe Some(PartitionRangeFilter("", ("2019-01-02", "2019-01-03")))
val herdOptionsPartitionValues: Option[PartitionValuesFilter] = herdOptionsPartitionValueList.partitionFilter.asInstanceOf[Option[PartitionValuesFilter]]
herdOptionsPartitionValues.get.values shouldBe Array[String]("2019-01-02", "2019-01-03")
}
def getDefaultHerdOptions: Map[String, String] = {
Map(
"namespace" -> "someNamespace",
"businessObjectName" -> "someBusinessObjectName",
"dataProvider" -> "someDataProvider",
"businessObjectFormatUsage" -> "someUsage",
"businessObjectFormatFileType" -> "someFileType",
"registerNewFormat" -> "false",
"delimiter" -> ",",
"nullValue" -> "\\\\N",
"escape" -> "\\\\",
"partitionKey" -> "date",
"partitionValue" -> "2020-01-01",
"partitionKeyGroup" -> "BUSINESS_CALENDAR",
"partitionFilter" -> "",
"subPartitionKeys" -> "BUSINESS_LOCATION|BUSINESS_DIVISION",
"subPartitionValues" -> "USA|HR",
"storage" -> "S3",
"storagePathPrefix" -> "s3a://"
)
}
}
| FINRAOS/herd | herd-code/herd-tools/herd-spark-data-source/src/test/scala/org/apache/spark/sql/herd/HerdOptionsSuite.scala | Scala | apache-2.0 | 6,210 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package types
import com.intellij.lang.ASTNode
import com.intellij.openapi.progress.ProgressManager
import com.intellij.psi._
import com.intellij.psi.util.PsiTreeUtil.getContextOfType
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.InferUtil.SafeCheckException
import org.jetbrains.plugins.scala.lang.psi.api.base._
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScSuperReference, ScThisReference, ScUnderScoreSectionUtil}
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{PsiTypeParameterExt, ScTypeParam}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypeParametersOwner
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTemplateDefinition}
import org.jetbrains.plugins.scala.lang.psi.api.{InferUtil, ScalaElementVisitor}
import org.jetbrains.plugins.scala.lang.psi.impl.base.types.ScSimpleTypeElementImpl._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticClass
import org.jetbrains.plugins.scala.lang.psi.types.Compatibility.Expression
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator._
import org.jetbrains.plugins.scala.lang.psi.types.api.{FunctionType, Nothing, TypeParameter, TypeParameterType}
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.{Parameter, ScMethodType, ScTypePolymorphicType}
import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, Success, TypeResult, TypingContext}
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
import org.jetbrains.plugins.scala.macroAnnotations.{CachedWithRecursionGuard, ModCount}
import scala.collection.immutable.HashMap
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
class ScSimpleTypeElementImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScSimpleTypeElement {
protected def innerType(ctx: TypingContext): TypeResult[ScType] = innerNonValueType(ctx, inferValueType = true)
override def getTypeNoConstructor(ctx: TypingContext): TypeResult[ScType] = innerNonValueType(ctx, inferValueType = true, noConstructor = true)
@CachedWithRecursionGuard[ScSimpleTypeElement](this, Failure("Recursive non value type of type element", Some(this)),
ModCount.getBlockModificationCount)
override def getNonValueType(ctx: TypingContext, withUnnecessaryImplicitsUpdate: Boolean = false): TypeResult[ScType] =
innerNonValueType(ctx, inferValueType = false, withUnnecessaryImplicitsUpdate = withUnnecessaryImplicitsUpdate)
private def innerNonValueType(ctx: TypingContext, inferValueType: Boolean, noConstructor: Boolean = false, withUnnecessaryImplicitsUpdate: Boolean = false): TypeResult[ScType] = {
ProgressManager.checkCanceled()
def parametrise(tp: ScType, clazz: PsiClass, subst: ScSubstitutor): ScType = {
if (clazz.getTypeParameters.isEmpty) {
tp
} else {
ScParameterizedType(tp, clazz.getTypeParameters.map(TypeParameterType(_, Some(subst))))
}
}
def getConstructorParams(constr: PsiMethod, subst: ScSubstitutor): (Seq[Seq[Parameter]], Boolean) = {
constr match {
case fun: ScFunction =>
(fun.effectiveParameterClauses.map(_.effectiveParameters.map { p =>
val paramType: ScType = subst.subst(p.getType(TypingContext.empty).getOrAny)
new Parameter(p.name, p.deprecatedName, paramType, paramType, p.isDefaultParam,p.isRepeatedParameter,
p.isCallByNameParameter, p.index, Some(p), p.getDefaultExpression.flatMap(_.getType().toOption))
}),
fun.parameterList.clauses.lastOption.exists(_.isImplicit))
case f: ScPrimaryConstructor =>
(f.effectiveParameterClauses.map(_.effectiveParameters.map { p =>
val paramType: ScType = subst.subst(p.getType(TypingContext.empty).getOrAny)
new Parameter(p.name, p.deprecatedName, paramType, paramType, p.isDefaultParam, p.isRepeatedParameter,
p.isCallByNameParameter, p.index, Some(p), p.getDefaultExpression.flatMap(_.getType().toOption))
}),
f.parameterList.clauses.lastOption.exists(_.isImplicit))
case m: PsiMethod =>
(Seq(m.getParameterList.getParameters.map { p =>
new Parameter("", None, p.exactParamType(), false, p.isVarArgs, false, p.index)
}), false)
}
}
def updateImplicits(tp: ScType, withExpected: Boolean, params: Seq[Seq[Parameter]], lastImplicit: Boolean): ScType = {
if (lastImplicit) {
//Let's add implicit parameters
val newTp = tp match {
case ScTypePolymorphicType(i, p) =>
ScTypePolymorphicType(ScMethodType(i, params.last, isImplicit = true)(getProject, getResolveScope), p)
case _ => ScMethodType(tp, params.last, isImplicit = true)(getProject, getResolveScope)
}
val res = InferUtil.updateTypeWithImplicitParameters(newTp, this, None, withExpected, fullInfo = false)
implicitParameters = res._2
res._1
} else tp
}
def typeForConstructor(ref: ScStableCodeReferenceElement, constr: PsiMethod,
_subst: ScSubstitutor, parentElement: PsiNamedElement): ScType = {
val clazz = constr.containingClass
val (constrTypParameters: Seq[ScTypeParam], constrSubst: ScSubstitutor) = parentElement match {
case ta: ScTypeAliasDefinition => (Seq.empty, ScSubstitutor.empty)
case s: ScTypeParametersOwner if s.typeParameters.nonEmpty =>
constr match {
case method: ScMethodLike =>
val params = method.getConstructorTypeParameters.map(_.typeParameters).getOrElse(Seq.empty)
val subst = new ScSubstitutor(s.typeParameters.zip(params).map {
case (tpClass: ScTypeParam, tpConstr: ScTypeParam) =>
(tpClass.nameAndId, TypeParameterType(tpConstr))
}.toMap, Map.empty, None)
(params, subst)
case _ => (Seq.empty, ScSubstitutor.empty)
}
case _ => (Seq.empty, ScSubstitutor.empty)
}
val subst = _subst followed constrSubst
val tp = parentElement match {
case ta: ScTypeAliasDefinition =>
ta.aliasedType.getOrElse(return Nothing)
case _ =>
parametrise(calculateReferenceType(ref).
getOrElse(return Nothing), clazz, subst)
}
val res = subst.subst(tp)
val (params: Seq[Seq[Parameter]], lastImplicit: Boolean) = getConstructorParams(constr, subst)
val typeParameters: Seq[TypeParameter] = parentElement match {
case tp: ScTypeParametersOwner if constrTypParameters.nonEmpty =>
constrTypParameters.map(TypeParameter(_))
case tp: ScTypeParametersOwner if tp.typeParameters.nonEmpty =>
tp.typeParameters.map(TypeParameter(_))
case ptp: PsiTypeParameterListOwner if ptp.getTypeParameters.nonEmpty =>
ptp.getTypeParameters.toSeq.map(TypeParameter(_))
case _ =>
updateImplicits(tp, withExpected = false, params = params, lastImplicit = lastImplicit)
return res
}
getContext match {
case p: ScParameterizedTypeElement =>
val zipped = p.typeArgList.typeArgs.zip(typeParameters)
val appSubst = new ScSubstitutor(new HashMap[(String, PsiElement), ScType] ++ zipped.map{
case (arg, typeParam) =>
(typeParam.nameAndId, arg.getType(TypingContext.empty).getOrAny)
}, Map.empty, None)
val newRes = appSubst.subst(res)
updateImplicits(newRes, withExpected = false, params = params, lastImplicit = lastImplicit)
return newRes
case _ =>
}
findConstructor match {
case Some(c) =>
var nonValueType = ScTypePolymorphicType(res, typeParameters)
var i = 0
//We need to update type info for generics in the following order:
//1. All clauses without last params clause or last arguments clause
//2. According to expected type
//3. Last argument clause
//4. Implicit clauses if applicable
//5. In case of SafeCheckException return to 3 to complete update without expected type
while (i < params.length - 1 && i < c.arguments.length - 1) {
nonValueType = InferUtil.localTypeInference(nonValueType.internalType, params(i),
c.arguments(i).exprs.map(new Expression(_)), nonValueType.typeParameters)
i += 1
}
def lastClause(withExpected: Boolean) {
c.expectedType match {
case Some(expected) if withExpected =>
def updateRes(expected: ScType) {
nonValueType = InferUtil.localTypeInference(nonValueType.internalType,
Seq(new Parameter("", None, expected, false, false, false, 0)),
Seq(new Expression(InferUtil.undefineSubstitutor(nonValueType.typeParameters).subst(res.inferValueType))),
nonValueType.typeParameters, shouldUndefineParameters = false, filterTypeParams = false) //here should work in different way:
}
val fromUnderscore = c.newTemplate match {
case Some(n) => ScUnderScoreSectionUtil.underscores(n).nonEmpty
case None => false
}
if (!fromUnderscore) {
updateRes(expected)
} else {
expected match {
case FunctionType(retType, _) => updateRes(retType)
case _ => //do not update res, we haven't expected type
}
}
case _ =>
}
//last clause after expected types
if (i < params.length && i < c.arguments.length) {
nonValueType = InferUtil.localTypeInference(nonValueType.internalType, params(i),
c.arguments(i).exprs.map(new Expression(_)), nonValueType.typeParameters, safeCheck = withExpected)
i += 1
}
if (lastImplicit && i < params.length) {
//Let's add implicit parameters
updateImplicits(nonValueType, withExpected, params, lastImplicit) match {
case t: ScTypePolymorphicType => nonValueType = t
case _ =>
}
}
}
val oldNonValueType = nonValueType
try {
lastClause(withExpected = true)
} catch {
case e: SafeCheckException =>
nonValueType = oldNonValueType
lastClause(withExpected = false)
}
if (inferValueType) {
val pts = nonValueType match {
case t: ScTypePolymorphicType => t.polymorphicTypeSubstitutor
case _ => ScSubstitutor.empty
}
pts.subst(nonValueType.internalType)
} else nonValueType
case None => res
}
}
reference match {
case Some(ref) =>
def updateForParameterized(subst: ScSubstitutor, elem: PsiNamedElement,
p: ScParameterizedTypeElement): (ScType, ScSubstitutor) = {
val tp = elem match {
case ta: ScTypeAliasDefinition =>
ta.aliasedType.getOrElse(return (Nothing, ScSubstitutor.empty))
case clazz: PsiClass =>
parametrise(calculateReferenceType(ref).
getOrElse(return (Nothing, ScSubstitutor.empty)), clazz, subst)
}
val res = subst.subst(tp)
val typeParameters: Seq[TypeParameter] = elem match {
case tp: ScTypeParametersOwner if tp.typeParameters.nonEmpty =>
tp.typeParameters.map(TypeParameter(_))
case ptp: PsiTypeParameterListOwner if ptp.getTypeParameters.nonEmpty =>
ptp.getTypeParameters.toSeq.map(TypeParameter(_))
case _ => return (res, ScSubstitutor.empty)
}
val zipped = p.typeArgList.typeArgs.zip(typeParameters)
val appSubst = new ScSubstitutor(new HashMap[(String, PsiElement), ScType] ++ zipped.map {
case (arg, typeParam) =>
(typeParam.nameAndId, arg.getType(TypingContext.empty).getOrAny)
}, Map.empty, None)
(appSubst.subst(res), appSubst)
}
val constrRef = ref.isConstructorReference && !noConstructor
def updateImplicitsWithoutLocalTypeInference(r: TypeResult[ScType], ss: ScSubstitutor): TypeResult[ScType] = {
if (withUnnecessaryImplicitsUpdate) {
r.map {
tp =>
ref.bind() match {
case Some(r@ScalaResolveResult(method: PsiMethod, subst: ScSubstitutor)) =>
val (params, lastImplicit) = getConstructorParams(method, subst.followed(ss))
updateImplicits(tp, withExpected = false, params = params, lastImplicit = lastImplicit)
tp
case _ => tp
}
}
} else r
}
ref.resolveNoConstructor match {
case Array(ScalaResolveResult(psiTypeParameter: PsiTypeParameter, _)) =>
this.success(TypeParameterType(psiTypeParameter, None))
case Array(ScalaResolveResult(tvar: ScTypeVariableTypeElement, _)) =>
this.success(tvar.getType().getOrAny)
case Array(ScalaResolveResult(synth: ScSyntheticClass, _)) =>
this.success(synth.t)
case Array(ScalaResolveResult(to: ScTypeParametersOwner, subst: ScSubstitutor))
if constrRef && to.isInstanceOf[PsiNamedElement] &&
(to.typeParameters.isEmpty || getContext.isInstanceOf[ScParameterizedTypeElement]) =>
val (tp, ss) = getContext match {
case p: ScParameterizedTypeElement if !to.isInstanceOf[ScTypeAliasDeclaration] =>
val (parameterized, ss) = updateForParameterized(subst, to.asInstanceOf[PsiNamedElement], p)
(this.success(parameterized), ss)
case _ =>
(calculateReferenceType(ref), ScSubstitutor.empty)
}
updateImplicitsWithoutLocalTypeInference(tp, ss)
case Array(ScalaResolveResult(to: PsiTypeParameterListOwner, subst: ScSubstitutor))
if constrRef && to.isInstanceOf[PsiNamedElement] &&
(to.getTypeParameters.isEmpty || getContext.isInstanceOf[ScParameterizedTypeElement]) =>
val (result, ss) = getContext match {
case p: ScParameterizedTypeElement if !to.isInstanceOf[ScTypeAliasDeclaration] =>
val (parameterized, ss) = updateForParameterized(subst, to.asInstanceOf[PsiNamedElement], p)
(this.success(parameterized), ss)
case _ =>
(calculateReferenceType(ref), ScSubstitutor.empty)
}
updateImplicitsWithoutLocalTypeInference(result, ss)
case _ => //resolve constructor with local type inference
ref.bind() match {
case Some(r@ScalaResolveResult(method: PsiMethod, subst: ScSubstitutor)) if !noConstructor =>
this.success(typeForConstructor(ref, method, subst, r.getActualElement))
case Some(r@ScalaResolveResult(ta: ScTypeAlias, subst: ScSubstitutor)) if ta.isExistentialTypeAlias =>
this.success(ScExistentialArgument(ta.name, ta.typeParameters.map(TypeParameterType(_, None)).toList,
ta.lowerBound.getOrNothing, ta.upperBound.getOrAny))
case _ => calculateReferenceType(ref, shapesOnly = false)
}
}
case None => pathElement match {
case ref: ScStableCodeReferenceElement => calculateReferenceType(ref)
case thisRef: ScThisReference => fromThisReference(thisRef, ScThisType)()
case superRef: ScSuperReference => fromSuperReference(superRef, ScThisType)()
}
}
}
override def accept(visitor: ScalaElementVisitor) {
visitor.visitSimpleTypeElement(this)
}
override def accept(visitor: PsiElementVisitor) {
visitor match {
case s: ScalaElementVisitor => s.visitSimpleTypeElement(this)
case _ => super.accept(visitor)
}
}
}
object ScSimpleTypeElementImpl {
def calculateReferenceType(ref: ScStableCodeReferenceElement, shapesOnly: Boolean = false): TypeResult[ScType] = {
val (resolvedElement, fromType) = (if (!shapesOnly) {
if (ref.isConstructorReference) {
ref.resolveNoConstructor match {
case Array(r@ScalaResolveResult(to: ScTypeParametersOwner, subst: ScSubstitutor))
if to.isInstanceOf[PsiNamedElement] &&
(to.typeParameters.isEmpty || ref.getContext.isInstanceOf[ScParameterizedTypeElement]) => Some(r)
case Array(r@ScalaResolveResult(to: PsiTypeParameterListOwner, subst: ScSubstitutor))
if to.isInstanceOf[PsiNamedElement] &&
(to.getTypeParameters.isEmpty || ref.getContext.isInstanceOf[ScParameterizedTypeElement]) => Some(r)
case _ => ref.bind()
}
} else ref.bind()
} else {
ref.shapeResolve match {
case Array(r: ScalaResolveResult) => Some(r)
case _ => None
}
}) match {
case Some(r@ScalaResolveResult(n: PsiMethod, _)) if n.isConstructor =>
(n.containingClass, r.fromType)
case Some(r@ScalaResolveResult(n: PsiNamedElement, _)) => (n, r.fromType)
case _ => return Failure("Cannot resolve reference", Some(ref))
}
def makeProjection(`type`: ScType, superReference: Boolean = false) =
ScProjectionType(`type`, resolvedElement, superReference = superReference)
ref.qualifier match {
case Some(qualifier) =>
val result = qualifier.resolve() match {
case pack: PsiPackage =>
Option(getContextOfType(resolvedElement, classOf[ScObject])) match {
case Some(obj) if obj.isPackageObject =>
makeProjection(ScDesignatorType(obj))
case _ => fromType match {
case Some(designator@ScDesignatorType(obj: ScObject)) if obj.isPackageObject =>
makeProjection(designator)
case _ => ScalaType.designator(resolvedElement)
}
}
case _ =>
calculateReferenceType(qualifier, shapesOnly) match {
case Success(tp, _) => makeProjection(tp)
case failure: Failure => return failure
}
}
Success(result, Some(ref))
case _ =>
ref.pathQualifier match {
case Some(thisRef: ScThisReference) =>
fromThisReference(thisRef, template => makeProjection(ScThisType(template)))(ref)
case Some(superRef: ScSuperReference) =>
fromSuperReference(superRef, template => makeProjection(ScThisType(template), resolvedElement.isInstanceOf[PsiClass]))(ref)
case _ =>
val result = resolvedElement match {
case self: ScSelfTypeElement =>
ScThisType(getContextOfType(self, classOf[ScTemplateDefinition]))
case _ => fromType match {
case Some(tp) => makeProjection(tp)
case _ => ScalaType.designator(resolvedElement)
}
}
Success(result, Some(ref))
}
}
}
private def fromTemplate(maybeTemplate: Option[ScTemplateDefinition],
message: String,
path: ScPathElement,
function: ScTemplateDefinition => ScType) = {
val element = Some(path)
maybeTemplate match {
case Some(template) => Success(function(template), element)
case _ => Failure(message, element)
}
}
private def fromThisReference(thisReference: ScThisReference,
function: ScTemplateDefinition => ScType)
(path: ScPathElement = thisReference) =
fromTemplate(thisReference.refTemplate,
"Cannot find template for this reference",
path,
function)
private def fromSuperReference(superReference: ScSuperReference,
function: ScTemplateDefinition => ScType)
(path: ScPathElement = superReference) =
fromTemplate(superReference.drvTemplate,
"Cannot find enclosing container",
path,
function)
}
| whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScSimpleTypeElementImpl.scala | Scala | apache-2.0 | 20,747 |
package io.byzantium
/**
* @author Scott Mansfield
*/
package object importers {
}
| ScottMansfield/byzantium | src/main/scala/io/byzantium/importers/package.scala | Scala | apache-2.0 | 89 |
package org.dsa.iot.ignition.spark
import com.ignition.frame.ReduceOp.ReduceOp
import com.ignition.frame.SparkRuntime
import org.dsa.iot.scala.Having
/**
 * Performs the reduceByKey() operation by first grouping the rows by the selected key, and then
* applying a list of reduce functions to the specified data columns.
*/
class Reduce(implicit rt: SparkRuntime) extends RxFrameTransformer {
def columns(values: (String, ReduceOp)*): Reduce = this having (columns <~ values)
def add(tuple: (String, ReduceOp)): Reduce = this having (columns.add <~ tuple)
def %(tuple: (String, ReduceOp)): Reduce = add(tuple)
def add(name: String, value: ReduceOp): Reduce = add(name -> value)
def %(name: String, value: ReduceOp): Reduce = add(name, value)
def groupBy(fields: String*): Reduce = this having (groupBy <~ fields.toList)
val columns = PortList[(String, ReduceOp)]("columns")
val groupBy = Port[List[String]]("groupBy")
protected def compute = (groupBy.in combineLatest columns.combinedIns) flatMap {
case (grp, flds) => doTransform(com.ignition.frame.Reduce(flds, grp))
}
}
/**
* Factory for [[Reduce]] instances.
*/
object Reduce {
/**
* Creates a new Reduce instance.
*/
def apply()(implicit rt: SparkRuntime): Reduce = new Reduce groupBy ()
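  // Usage sketch (not part of the original file). The ReduceOp member name below is an assumption —
  // check com.ignition.frame.ReduceOp for the actual values:
  //   val step = Reduce() % ("amount" -> ReduceOp.SUM) groupBy ("customerId", "yearMonth")
  // This groups rows by customerId/yearMonth and sums the "amount" column.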
}
| IOT-DSA/dslink-scala-ignition | src/main/scala/org/dsa/iot/ignition/spark/Reduce.scala | Scala | apache-2.0 | 1,287 |
import leon.lang._
object Overflow4 {
def foo4(x: Int): Int = {
x - (-1)
}
}
| epfl-lara/leon | src/test/resources/regression/verification/overflow/invalid/Overflow4.scala | Scala | gpl-3.0 | 88 |
package com.softwaremill.codebrag.rest
import com.softwaremill.codebrag.CodebragServletSpec
import com.softwaremill.codebrag.domain.UserWatchedBranch
import com.softwaremill.codebrag.service.user.RegisterService
import com.softwaremill.codebrag.usecases.branches.WatchedBranchForm
import com.softwaremill.codebrag.usecases.registration.{ListRepoBranchesAfterRegistration, ListRepositoriesAfterRegistration, UnwatchBranchAfterRegistration, WatchBranchAfterRegistration}
import com.softwaremill.codebrag.usecases.user.{RegisterNewUserUseCase, RegisteredUser, RegistrationForm}
import com.softwaremill.codebrag.web.CodebragSpecificJSONFormats.SimpleObjectIdSerializer
import org.bson.types.ObjectId
import org.json4s.JsonDSL._
import org.mockito.Mockito._
import org.scalatest.BeforeAndAfterEach
class RegistrationServletSpec extends CodebragServletSpec with BeforeAndAfterEach {
val registerNewUser = mock[RegisterNewUserUseCase]
val registerService = mock[RegisterService]
val listRepos = mock[ListRepositoriesAfterRegistration]
val listRepoBranches = mock[ListRepoBranchesAfterRegistration]
val watchBranch = mock[WatchBranchAfterRegistration]
val unwatchBranch = mock[UnwatchBranchAfterRegistration]
val servlet = new RegistrationServlet(registerService, registerNewUser, listRepos, listRepoBranches, watchBranch, unwatchBranch)
override def beforeEach {
reset(registerNewUser, registerService, watchBranch, unwatchBranch)
}
"GET /first-registration" should "return firstRegistration flag" in {
//given
addServlet(servlet, "/*")
when(registerService.isFirstRegistration).thenReturn(true)
//when
get("/first-registration") {
//then
status should be(200)
body should be(asJson(Map("firstRegistration" -> true)))
}
}
"POST /signup" should "call the register service and return 200 if registration is successful" in {
addServlet(servlet, "/*")
val newUser = RegistrationForm("adamw", "[email protected]", "123456", "code")
val registered = RegisteredUser(newUser.toUser)
when(registerNewUser.execute(newUser)).thenReturn(Right(registered))
post("/signup",
mapToJson(Map("login" -> "adamw", "email" -> "[email protected]", "password" -> "123456", "invitationCode" -> "code")),
defaultJsonHeaders) {
status should be(200)
}
}
"POST /signup" should "call the register service and return 403 if registration is unsuccessful" in {
addServlet(servlet, "/*")
val newUser = RegistrationForm("adamw", "[email protected]", "123456", "code")
when(registerNewUser.execute(newUser)).thenReturn(Left(Map.empty[String, Seq[String]]))
post("/signup",
mapToJson(Map("login" -> "adamw", "email" -> "[email protected]", "password" -> "123456", "invitationCode" -> "code")),
defaultJsonHeaders) {
status should be(403)
}
}
"POST /signup" should "fallback to empty registration code when one not provided in request" in {
addServlet(servlet, "/*")
val newUser = RegistrationForm("adamw", "[email protected]", "123456", "")
val registered = RegisteredUser(newUser.toUser)
when(registerNewUser.execute(newUser)).thenReturn(Right(registered))
post("/signup",
mapToJson(Map("login" -> "adamw", "email" -> "[email protected]", "password" -> "123456")), defaultJsonHeaders) {
verify(registerNewUser).execute(newUser)
}
}
"POST /repos/repo/branches/branch/watch" should "setup branch as watched" in {
addServlet(servlet, "/*")
val invCode = "123abc"
val repo = "codebrag"
val branch = "git/flow/style/branch"
val userId = new ObjectId
val form = WatchedBranchForm(repo, branch)
val watched = UserWatchedBranch(new ObjectId, userId, repo, branch)
when(watchBranch.execute(invCode, userId, form)).thenReturn(Right(watched))
post(s"/repos/codebrag/branches/git/flow/style/branch/watch?invitationCode=123abc&userId=${userId.toString}") {
status should be(200)
body should be(asJson(watched))
}
}
"DELETE /repos/repo/branches/branch/watch" should "setup branch as watched" in {
addServlet(servlet, "/*")
val invCode = "123abc"
val repo = "codebrag"
val branch = "git/flow/style/branch"
val userId = new ObjectId
val form = WatchedBranchForm(repo, branch)
val watched = UserWatchedBranch(new ObjectId, userId, repo, branch)
when(unwatchBranch.execute(invCode, userId, form)).thenReturn(Right())
delete(s"/repos/codebrag/branches/git/flow/style/branch/watch?invitationCode=123abc&userId=${userId.toString}") {
status should be(200)
}
}
private def asJson[T <: AnyRef](obj: T) = {
import org.json4s._
import org.json4s.jackson.Serialization.write
implicit val formats = DefaultFormats + SimpleObjectIdSerializer
write(obj)
}
}
| frodejohansen/codebrag | codebrag-rest/src/test/scala/com/softwaremill/codebrag/rest/RegistrationServletSpec.scala | Scala | agpl-3.0 | 4,823 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.optim
import com.intel.analytics.bigdl.dllib.nn.abstractnn.Activity
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
* Precision Recall Area Under Curve will compute the precision-recall pairs and
* get the area under the curve.
*
 * Note: It gathers all output probabilities and targets to the driver and computes the
 * precision, recall and AUC on every call to `result()`.
*
* @param ev tensor numeric environments
* @tparam T class tag for tensor numeric
*/
class PrecisionRecallAUC[T: ClassTag]()(implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {
override def apply(output: Activity, target: Activity): ValidationResult = {
require(output.isTensor && target.isTensor, s"only support tensor output and tensor target")
require(!output.toTensor.isEmpty && !target.toTensor.isEmpty,
s"the output and target should not be empty")
val array = List(output, target).map(_.toTensor[Float].storage().array())
val results = array.head.zip(array.last).toArray
new PRAUCResult(results)
}
override protected def format(): String = s"PrecisionRecallAUC"
}
class PRAUCResult(val results: Array[(Float, Float)]) extends ValidationResult {
override def result(): (Float, Int) = {
val sorted = results.sortBy(_._1).reverse
val totalPositive = sorted.count(_._2 == 1)
var truePositive = 0.0f
var falsePositive = 0.0f
var areaUnderCurve = 0.0f
var prevPrecision = 1.0f
var prevRecall = 0.0f
var i = 0
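    // Sweep the predictions from the highest score downwards, accumulating the area under
    // the precision-recall curve with the trapezoidal rule; the accumulated sum of
    // (recall step) * (precision(i) + precision(i-1)) is halved when returned below.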
while (truePositive != totalPositive) {
val target = sorted(i)._2
if (target == 1.0f) {
truePositive += 1
} else {
falsePositive += 1
}
val precision = truePositive / (truePositive + falsePositive)
val recall = truePositive / totalPositive
areaUnderCurve += (recall - prevRecall) * (precision + prevPrecision)
prevRecall = recall
prevPrecision = precision
i += 1
}
(areaUnderCurve / 2, results.length)
}
// scalastyle:off methodName
override def +(other: ValidationResult): ValidationResult = {
new PRAUCResult(results ++ other.asInstanceOf[PRAUCResult].results)
}
// scalastyle:on
override protected def format(): String = {
val getResult = result()
s"Precision Recall AUC is ${getResult._1} on ${getResult._2}"
}
}
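// Usage sketch (illustrative only, not part of the original file): the (score, target)
// pairs are assumed to already be gathered to the driver, as `apply` does for tensors.
//
//   val pairs = Array((0.9f, 1.0f), (0.8f, 0.0f), (0.6f, 1.0f), (0.3f, 0.0f))
//   val auc = new PRAUCResult(pairs).result()._1
//
// Partial results from several batches can be merged with `+`, which concatenates the
// underlying (score, target) arrays before the sweep in `result()`.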
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/optim/PrecisionRecallAUC.scala | Scala | apache-2.0 | 3,031 |
import akka.actor.{ActorSystem, Props}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}
/**
* Created by huanwuji on 2017/3/26.
*/
object FutureExample extends App {
val system = ActorSystem()
  system.actorOf(Props.empty.withDispatcher("block-dispatcher"))
def futureTest(): Unit = {
val fu = Future {
1
}
fu.onComplete {
      case Success(value) ⇒ // handle the computed value here
      case Failure(ex) ⇒ // handle the failure here, e.g. log the exception
}
}
}
| huanwuji/akka-stream-in-action | src/main/scala/FutureExample.scala | Scala | apache-2.0 | 493 |
/*
*
* o o o o o
* | o | |\\ /| | /
* | o-o o--o o-o oo | | O | oo o-o OO o-o o o
* | | | | | | | | | | | | | | | | \\ | | \\ /
* O---oo-o o--O | o-o o-o-o o o o-o-o o o o-o o
* |
* o--o
* o--o o o--o o o
* | | | | o | |
* O-Oo oo o-o o-O o-o o-O-o O-o o-o | o-O o-o
* | \\ | | | | | | | | | | | | | |-' | | | \\
* o o o-o-o o o-o o-o o o o o | o-o o o-o o-o
*
* Logical Markov Random Fields (LoMRF).
*
*
*/
package lomrf.util
import java.util
/**
* Array utilities.
*/
object ArrayUtils {
/**
* Specialized hash code calculation for arbitrary array type.
*
* @return the hash code for any array type
*/
def hashCodeOf(array: Array[_]): Int = array match {
case x: Array[Char] => util.Arrays.hashCode(x)
case x: Array[Byte] => util.Arrays.hashCode(x)
case x: Array[Short] => util.Arrays.hashCode(x)
case x: Array[Int] => util.Arrays.hashCode(x)
case x: Array[Boolean] => util.Arrays.hashCode(x)
case x: Array[Float] => util.Arrays.hashCode(x)
case x: Array[Long] => util.Arrays.hashCode(x)
case x: Array[Double] => util.Arrays.hashCode(x)
case x: Array[_] => util.Arrays.hashCode(x.asInstanceOf[Array[AnyRef]])
case _ => throw new RuntimeException("possible bug?")
}
/**
* Specialized equality between arrays for arbitrary types. It checks
* if the arrays have the same length and contain elements from the
* same class type.
*
* @param array1 one array
* @param array2 another array
*
* @return true if arrays are equal, false otherwise
*/
def equals(array1: Array[_], array2: Array[_]): Boolean = {
// length checking
if (array1.length != array2.length) return false
val classOfArray1 = array1.getClass
val classOfArray2 = array2.getClass
// class type checking
if (classOfArray1 == classOfArray2) array1 match {
case x: Array[Char] => util.Arrays.equals(array1.asInstanceOf[Array[Char]], array2.asInstanceOf[Array[Char]])
case x: Array[Byte] => util.Arrays.equals(array1.asInstanceOf[Array[Byte]], array2.asInstanceOf[Array[Byte]])
case x: Array[Short] => util.Arrays.equals(array1.asInstanceOf[Array[Short]], array2.asInstanceOf[Array[Short]])
case x: Array[Int] => util.Arrays.equals(array1.asInstanceOf[Array[Int]], array2.asInstanceOf[Array[Int]])
case x: Array[Boolean] => util.Arrays.equals(array1.asInstanceOf[Array[Boolean]], array2.asInstanceOf[Array[Boolean]])
case x: Array[Float] => util.Arrays.equals(array1.asInstanceOf[Array[Float]], array2.asInstanceOf[Array[Float]])
case x: Array[Long] => util.Arrays.equals(array1.asInstanceOf[Array[Long]], array2.asInstanceOf[Array[Long]])
case x: Array[Double] => util.Arrays.equals(array1.asInstanceOf[Array[Double]], array2.asInstanceOf[Array[Double]])
case x: Array[_] => util.Arrays.equals(array1.asInstanceOf[Array[AnyRef]], array2.asInstanceOf[Array[AnyRef]])
case _ => throw new RuntimeException("possible bug?")
}
else false
}
}
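// Usage sketch (illustrative only, not part of the original file):
//
//   ArrayUtils.equals(Array(1, 2, 3), Array(1, 2, 3))        // true: same type, length and contents
//   ArrayUtils.equals(Array(1, 2, 3), Array(1.0, 2.0, 3.0))  // false: different element classes
//   ArrayUtils.hashCodeOf(Array(1, 2, 3)) == ArrayUtils.hashCodeOf(Array(1, 2, 3)) // true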
| anskarl/LoMRF | src/main/scala/lomrf/util/ArrayUtils.scala | Scala | apache-2.0 | 3,357 |
package com.thoughtworks.datacommons.prepbuddy.normalizers
import com.thoughtworks.datacommons.prepbuddy.rdds.TransformableRDD
import org.apache.spark.rdd.RDD
/**
 * A normalizer strategy which normalizes the data by dividing it by pow(10, i - 1),
 * where i is the number of digits in the integer part of the column's maximum value.
*/
class DecimalScalingNormalizer extends NormalizationStrategy {
private var length = 0
override def prepare(transformableRDD: TransformableRDD, columnIndex: Int): Unit = {
val doubleRDD: RDD[Double] = transformableRDD.toDoubleRDD(columnIndex)
length = String.valueOf(doubleRDD.max().intValue()).length()
}
override def normalize(rawValue: String): String = {
String.valueOf(rawValue.toDouble / Math.pow(10, length - 1))
}
}
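// Worked example (illustrative only, not part of the library): if the column maximum is
// 9876.0, `prepare` sets length = "9876".length = 4, so `normalize` divides every value by
// Math.pow(10, 3) = 1000.0, e.g. "350" -> "0.35" and "9876" -> "9.876".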
| data-commons/prep-buddy | src/main/scala/com/thoughtworks/datacommons/prepbuddy/normalizers/DecimalScalingNormalizer.scala | Scala | apache-2.0 | 779 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import com.github.fommil.netlib.BLAS.{getInstance => blas}
import org.json4s.{DefaultFormats, JObject}
import org.json4s.JsonDSL._
import org.apache.spark.annotation.Since
import org.apache.spark.internal.Logging
import org.apache.spark.ml.feature.LabeledPoint
import org.apache.spark.ml.linalg.{DenseVector, SparseVector, Vector, Vectors}
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.regression.DecisionTreeRegressionModel
import org.apache.spark.ml.tree._
import org.apache.spark.ml.tree.impl.GradientBoostedTrees
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.DefaultParamsReader.Metadata
import org.apache.spark.ml.util.Instrumentation.instrumented
import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo}
import org.apache.spark.mllib.tree.model.{GradientBoostedTreesModel => OldGBTModel}
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._
/**
* Gradient-Boosted Trees (GBTs) (http://en.wikipedia.org/wiki/Gradient_boosting)
* learning algorithm for classification.
* It supports binary labels, as well as both continuous and categorical features.
*
* The implementation is based upon: J.H. Friedman. "Stochastic Gradient Boosting." 1999.
*
* Notes on Gradient Boosting vs. TreeBoost:
* - This implementation is for Stochastic Gradient Boosting, not for TreeBoost.
* - Both algorithms learn tree ensembles by minimizing loss functions.
* - TreeBoost (Friedman, 1999) additionally modifies the outputs at tree leaf nodes
* based on the loss function, whereas the original gradient boosting method does not.
* - We expect to implement TreeBoost in the future:
* [https://issues.apache.org/jira/browse/SPARK-4240]
*
* @note Multiclass labels are not currently supported.
*/
@Since("1.4.0")
class GBTClassifier @Since("1.4.0") (
@Since("1.4.0") override val uid: String)
extends ProbabilisticClassifier[Vector, GBTClassifier, GBTClassificationModel]
with GBTClassifierParams with DefaultParamsWritable with Logging {
@Since("1.4.0")
def this() = this(Identifiable.randomUID("gbtc"))
// Override parameter setters from parent trait for Java API compatibility.
// Parameters from TreeClassifierParams:
/** @group setParam */
@Since("1.4.0")
override def setMaxDepth(value: Int): this.type = set(maxDepth, value)
/** @group setParam */
@Since("1.4.0")
override def setMaxBins(value: Int): this.type = set(maxBins, value)
/** @group setParam */
@Since("1.4.0")
override def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value)
/** @group setParam */
@Since("1.4.0")
override def setMinInfoGain(value: Double): this.type = set(minInfoGain, value)
/** @group expertSetParam */
@Since("1.4.0")
override def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value)
/** @group expertSetParam */
@Since("1.4.0")
override def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value)
/**
* Specifies how often to checkpoint the cached node IDs.
* E.g. 10 means that the cache will get checkpointed every 10 iterations.
* This is only used if cacheNodeIds is true and if the checkpoint directory is set in
* [[org.apache.spark.SparkContext]].
* Must be at least 1.
* (default = 10)
* @group setParam
*/
@Since("1.4.0")
override def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value)
/**
* The impurity setting is ignored for GBT models.
* Individual trees are built using impurity "Variance."
*
* @group setParam
*/
@Since("1.4.0")
override def setImpurity(value: String): this.type = {
logWarning("GBTClassifier.setImpurity should NOT be used")
this
}
// Parameters from TreeEnsembleParams:
/** @group setParam */
@Since("1.4.0")
override def setSubsamplingRate(value: Double): this.type = set(subsamplingRate, value)
/** @group setParam */
@Since("1.4.0")
override def setSeed(value: Long): this.type = set(seed, value)
// Parameters from GBTParams:
/** @group setParam */
@Since("1.4.0")
override def setMaxIter(value: Int): this.type = set(maxIter, value)
/** @group setParam */
@Since("1.4.0")
override def setStepSize(value: Double): this.type = set(stepSize, value)
/** @group setParam */
@Since("2.3.0")
override def setFeatureSubsetStrategy(value: String): this.type =
set(featureSubsetStrategy, value)
// Parameters from GBTClassifierParams:
/** @group setParam */
@Since("1.4.0")
def setLossType(value: String): this.type = set(lossType, value)
/** @group setParam */
@Since("2.4.0")
def setValidationIndicatorCol(value: String): this.type = {
set(validationIndicatorCol, value)
}
override protected def train(
dataset: Dataset[_]): GBTClassificationModel = instrumented { instr =>
val categoricalFeatures: Map[Int, Int] =
MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol)))
val withValidation = isDefined(validationIndicatorCol) && $(validationIndicatorCol).nonEmpty
// We copy and modify this from Classifier.extractLabeledPoints since GBT only supports
// 2 classes now. This lets us provide a more precise error message.
val convert2LabeledPoint = (dataset: Dataset[_]) => {
dataset.select(col($(labelCol)), col($(featuresCol))).rdd.map {
case Row(label: Double, features: Vector) =>
require(label == 0 || label == 1, s"GBTClassifier was given" +
s" dataset with invalid label $label. Labels must be in {0,1}; note that" +
s" GBTClassifier currently only supports binary classification.")
LabeledPoint(label, features)
}
}
val (trainDataset, validationDataset) = if (withValidation) {
(
convert2LabeledPoint(dataset.filter(not(col($(validationIndicatorCol))))),
convert2LabeledPoint(dataset.filter(col($(validationIndicatorCol))))
)
} else {
(convert2LabeledPoint(dataset), null)
}
val numFeatures = trainDataset.first().features.size
val boostingStrategy = super.getOldBoostingStrategy(categoricalFeatures, OldAlgo.Classification)
val numClasses = 2
if (isDefined(thresholds)) {
require($(thresholds).length == numClasses, this.getClass.getSimpleName +
".train() called with non-matching numClasses and thresholds.length." +
s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}")
}
instr.logPipelineStage(this)
instr.logDataset(dataset)
instr.logParams(this, labelCol, featuresCol, predictionCol, impurity, lossType,
maxDepth, maxBins, maxIter, maxMemoryInMB, minInfoGain, minInstancesPerNode,
seed, stepSize, subsamplingRate, cacheNodeIds, checkpointInterval, featureSubsetStrategy,
validationIndicatorCol)
instr.logNumFeatures(numFeatures)
instr.logNumClasses(numClasses)
val (baseLearners, learnerWeights) = if (withValidation) {
GradientBoostedTrees.runWithValidation(trainDataset, validationDataset, boostingStrategy,
$(seed), $(featureSubsetStrategy))
} else {
GradientBoostedTrees.run(trainDataset, boostingStrategy, $(seed), $(featureSubsetStrategy))
}
new GBTClassificationModel(uid, baseLearners, learnerWeights, numFeatures)
}
@Since("1.4.1")
override def copy(extra: ParamMap): GBTClassifier = defaultCopy(extra)
}
@Since("1.4.0")
object GBTClassifier extends DefaultParamsReadable[GBTClassifier] {
/** Accessor for supported loss settings: logistic */
@Since("1.4.0")
final val supportedLossTypes: Array[String] = GBTClassifierParams.supportedLossTypes
@Since("2.0.0")
override def load(path: String): GBTClassifier = super.load(path)
}
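// Usage sketch (illustrative only, not part of the Spark sources). Assumes a DataFrame
// `train` with a binary "label" column and a "features" vector column, e.g. as produced
// by a VectorAssembler:
//
//   val gbt = new GBTClassifier()
//     .setMaxIter(20)
//     .setMaxDepth(5)
//     .setStepSize(0.1)
//   val model = gbt.fit(train)
//   val predictions = model.transform(test) // adds prediction/rawPrediction/probability columns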
/**
* Gradient-Boosted Trees (GBTs) (http://en.wikipedia.org/wiki/Gradient_boosting)
* model for classification.
* It supports binary labels, as well as both continuous and categorical features.
*
* @param _trees Decision trees in the ensemble.
* @param _treeWeights Weights for the decision trees in the ensemble.
*
* @note Multiclass labels are not currently supported.
*/
@Since("1.6.0")
class GBTClassificationModel private[ml](
@Since("1.6.0") override val uid: String,
private val _trees: Array[DecisionTreeRegressionModel],
private val _treeWeights: Array[Double],
@Since("1.6.0") override val numFeatures: Int,
@Since("2.2.0") override val numClasses: Int)
extends ProbabilisticClassificationModel[Vector, GBTClassificationModel]
with GBTClassifierParams with TreeEnsembleModel[DecisionTreeRegressionModel]
with MLWritable with Serializable {
require(_trees.nonEmpty, "GBTClassificationModel requires at least 1 tree.")
require(_trees.length == _treeWeights.length, "GBTClassificationModel given trees, treeWeights" +
s" of non-matching lengths (${_trees.length}, ${_treeWeights.length}, respectively).")
/**
* Construct a GBTClassificationModel
*
* @param _trees Decision trees in the ensemble.
* @param _treeWeights Weights for the decision trees in the ensemble.
* @param numFeatures The number of features.
*/
private[ml] def this(
uid: String,
_trees: Array[DecisionTreeRegressionModel],
_treeWeights: Array[Double],
numFeatures: Int) =
this(uid, _trees, _treeWeights, numFeatures, 2)
/**
* Construct a GBTClassificationModel
*
* @param _trees Decision trees in the ensemble.
* @param _treeWeights Weights for the decision trees in the ensemble.
*/
@Since("1.6.0")
def this(uid: String, _trees: Array[DecisionTreeRegressionModel], _treeWeights: Array[Double]) =
this(uid, _trees, _treeWeights, -1, 2)
@Since("1.4.0")
override def trees: Array[DecisionTreeRegressionModel] = _trees
/**
* Number of trees in ensemble
*/
@Since("2.0.0")
val getNumTrees: Int = trees.length
@Since("1.4.0")
override def treeWeights: Array[Double] = _treeWeights
override protected def transformImpl(dataset: Dataset[_]): DataFrame = {
val bcastModel = dataset.sparkSession.sparkContext.broadcast(this)
val predictUDF = udf { (features: Any) =>
bcastModel.value.predict(features.asInstanceOf[Vector])
}
dataset.withColumn($(predictionCol), predictUDF(col($(featuresCol))))
}
override def predict(features: Vector): Double = {
// If thresholds defined, use predictRaw to get probabilities, otherwise use optimization
if (isDefined(thresholds)) {
super.predict(features)
} else {
if (margin(features) > 0.0) 1.0 else 0.0
}
}
override protected def predictRaw(features: Vector): Vector = {
val prediction: Double = margin(features)
Vectors.dense(Array(-prediction, prediction))
}
override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = {
rawPrediction match {
case dv: DenseVector =>
dv.values(0) = loss.computeProbability(dv.values(0))
dv.values(1) = 1.0 - dv.values(0)
dv
case sv: SparseVector =>
throw new RuntimeException("Unexpected error in GBTClassificationModel:" +
" raw2probabilityInPlace encountered SparseVector")
}
}
/** Number of trees in ensemble */
val numTrees: Int = trees.length
@Since("1.4.0")
override def copy(extra: ParamMap): GBTClassificationModel = {
copyValues(new GBTClassificationModel(uid, _trees, _treeWeights, numFeatures, numClasses),
extra).setParent(parent)
}
@Since("1.4.0")
override def toString: String = {
s"GBTClassificationModel (uid=$uid) with $numTrees trees"
}
/**
* Estimate of the importance of each feature.
*
* Each feature's importance is the average of its importance across all trees in the ensemble
* The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
* (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
* and follows the implementation from scikit-learn.
* See `DecisionTreeClassificationModel.featureImportances`
*/
@Since("2.0.0")
lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(trees, numFeatures)
/** Raw prediction for the positive class. */
private def margin(features: Vector): Double = {
val treePredictions = _trees.map(_.rootNode.predictImpl(features).prediction)
blas.ddot(numTrees, treePredictions, 1, _treeWeights, 1)
}
/** (private[ml]) Convert to a model in the old API */
private[ml] def toOld: OldGBTModel = {
new OldGBTModel(OldAlgo.Classification, _trees.map(_.toOld), _treeWeights)
}
// hard coded loss, which is not meant to be changed in the model
private val loss = getOldLossType
/**
* Method to compute error or loss for every iteration of gradient boosting.
*
* @param dataset Dataset for validation.
*/
@Since("2.4.0")
def evaluateEachIteration(dataset: Dataset[_]): Array[Double] = {
val data = dataset.select(col($(labelCol)), col($(featuresCol))).rdd.map {
case Row(label: Double, features: Vector) => LabeledPoint(label, features)
}
GradientBoostedTrees.evaluateEachIteration(data, trees, treeWeights, loss,
OldAlgo.Classification
)
}
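  // Illustrative usage sketch (not part of the Spark sources), assuming `model` is a trained
  // GBTClassificationModel and `validation` a DataFrame with the same label/features columns:
  //
  //   val losses = model.evaluateEachIteration(validation)
  //   val bestNumTrees = losses.zipWithIndex.minBy(_._1)._2 + 1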
@Since("2.0.0")
override def write: MLWriter = new GBTClassificationModel.GBTClassificationModelWriter(this)
}
@Since("2.0.0")
object GBTClassificationModel extends MLReadable[GBTClassificationModel] {
private val numFeaturesKey: String = "numFeatures"
private val numTreesKey: String = "numTrees"
@Since("2.0.0")
override def read: MLReader[GBTClassificationModel] = new GBTClassificationModelReader
@Since("2.0.0")
override def load(path: String): GBTClassificationModel = super.load(path)
private[GBTClassificationModel]
class GBTClassificationModelWriter(instance: GBTClassificationModel) extends MLWriter {
override protected def saveImpl(path: String): Unit = {
val extraMetadata: JObject = Map(
numFeaturesKey -> instance.numFeatures,
numTreesKey -> instance.getNumTrees)
EnsembleModelReadWrite.saveImpl(instance, path, sparkSession, extraMetadata)
}
}
private class GBTClassificationModelReader extends MLReader[GBTClassificationModel] {
/** Checked against metadata when loading model */
private val className = classOf[GBTClassificationModel].getName
private val treeClassName = classOf[DecisionTreeRegressionModel].getName
override def load(path: String): GBTClassificationModel = {
implicit val format = DefaultFormats
val (metadata: Metadata, treesData: Array[(Metadata, Node)], treeWeights: Array[Double]) =
EnsembleModelReadWrite.loadImpl(path, sparkSession, className, treeClassName)
val numFeatures = (metadata.metadata \\ numFeaturesKey).extract[Int]
val numTrees = (metadata.metadata \\ numTreesKey).extract[Int]
val trees: Array[DecisionTreeRegressionModel] = treesData.map {
case (treeMetadata, root) =>
val tree =
new DecisionTreeRegressionModel(treeMetadata.uid, root, numFeatures)
treeMetadata.getAndSetParams(tree)
tree
}
require(numTrees == trees.length, s"GBTClassificationModel.load expected $numTrees" +
s" trees based on metadata but found ${trees.length} trees.")
val model = new GBTClassificationModel(metadata.uid,
trees, treeWeights, numFeatures)
metadata.getAndSetParams(model)
model
}
}
/** Convert a model from the old API */
private[ml] def fromOld(
oldModel: OldGBTModel,
parent: GBTClassifier,
categoricalFeatures: Map[Int, Int],
numFeatures: Int = -1,
numClasses: Int = 2): GBTClassificationModel = {
require(oldModel.algo == OldAlgo.Classification, "Cannot convert GradientBoostedTreesModel" +
s" with algo=${oldModel.algo} (old API) to GBTClassificationModel (new API).")
val newTrees = oldModel.trees.map { tree =>
// parent for each tree is null since there is no good way to set this.
DecisionTreeRegressionModel.fromOld(tree, null, categoricalFeatures)
}
val uid = if (parent != null) parent.uid else Identifiable.randomUID("gbtc")
new GBTClassificationModel(uid, newTrees, oldModel.treeWeights, numFeatures, numClasses)
}
}
| ahnqirage/spark | mllib/src/main/scala/org/apache/spark/ml/classification/GBTClassifier.scala | Scala | apache-2.0 | 17,176 |
package models.quiz
import com.artclod.slick.NumericBoolean
import models.support.HasOrder
import models.user.User
import models.{QuestionId, QuestionPartId, QuestionSectionId}
import play.twirl.api.Html
import models.quiz.UserConstant.EnhancedHtml
case class QuestionPartChoice(id: QuestionPartId, sectionId: QuestionSectionId, questionId: QuestionId, summaryRaw: String, summaryHtml: Html, correctChoice: Short, order: Short) extends HasOrder[QuestionPartChoice] {
def correct = NumericBoolean(correctChoice)
def fixConstants(user: User, userConstants: QuestionUserConstantsFrame) = this.copy(summaryHtml = summaryHtml.fixConstants(user, questionId, userConstants))
} | kristiankime/calc-tutor | app/models/quiz/QuestionPartChoice.scala | Scala | mit | 676 |
package org.scalex
package document
import scala._
object ScaladocUrl {
def apply(doc: Doc): Option[String] = doc match {
case x: Template ⇒ apply(x.member, x.typeParams)
case x: Def ⇒ apply(x.member, x.typeParams)
case x: Val ⇒ apply(x.member, Nil)
}
// http://www.scala-lang.org/api/2.10.3/index.html#
// scala.collection.immutable.List@
// orElse[A1<:Int,B1>:A](PartialFunction[A1,B1]):PartialFunction[A1,B1]
def apply(
member: Member,
typeParams: List[model.TypeParam]): Option[String] =
member.project.scaladocUrl map { base ⇒
val parent = member.parent.entity.qualifiedName
val name = member.entity.name
val sig = typeParams.shows
s"$base/index.html#$parent@$name$sig"
}
}
| kzys/scalex | src/main/scala/document/ScaladocUrl.scala | Scala | mit | 765 |
package com.github.mehmetakiftutuncu.errors
import com.github.mehmetakiftutuncu.errors.base.ErrorBase
/**
* A simple error implementation for really simple and trivial cases
*
* @param name Name of the error
*
* @author Mehmet Akif Tütüncü
*/
case class SimpleError(name: String) extends ErrorBase {
/**
* Represents this error as Json formatted String
*
* @param includeWhen If set to true, when value of the error will be included in the representation
*
* @return Representation of this error
*/
override def represent(includeWhen: Boolean): String = {
val nameRepresentation: String = s""""name":"${name.replaceAll("\\"", "\\\\\\\\\\"")}""""
val whenRepresentation: String = if (!includeWhen) "" else s""","when":$when"""
s"""{$nameRepresentation$whenRepresentation}"""
}
}
/** A container object for some predefined [[com.github.mehmetakiftutuncu.errors.SimpleError]]s */
object SimpleError {
def database: SimpleError = SimpleError("database")
def notFound: SimpleError = SimpleError("notFound")
def invalidData: SimpleError = SimpleError("invalidData")
def invalidRequest: SimpleError = SimpleError("invalidRequest")
def requestFailed: SimpleError = SimpleError("requestFailed")
def timeout: SimpleError = SimpleError("timeout")
def authorization: SimpleError = SimpleError("authorization")
def authentication: SimpleError = SimpleError("authentication")
}
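// Representation sketch (illustrative only, not part of the original file), assuming `when`
// from ErrorBase is an epoch-millisecond timestamp captured when the error was created:
//
//   SimpleError("database").represent(includeWhen = false)  // {"name":"database"}
//   SimpleError("database").represent(includeWhen = true)   // {"name":"database","when":<current millis>}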
| mehmetakiftutuncu/Errors | src/main/scala/com/github/mehmetakiftutuncu/errors/SimpleError.scala | Scala | mit | 1,459 |
/*
* Copyright 2015 Johannes Rudolph
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.virtualvoid.sbt.graph
package rendering
import util.AsciiTreeLayout
import util.ConsoleUtils._
object AsciiTree {
def asciiTree(graph: ModuleGraph): String = {
val deps = graph.dependencyMap
// there should only be one root node (the project itself)
val roots = graph.roots
roots.map { root ⇒
AsciiTreeLayout.toAscii[Module](root, node ⇒ deps.getOrElse(node.id, Seq.empty[Module]), displayModule)
}.mkString("\n")
}
def displayModule(module: Module): String =
red(module.id.idString +
module.extraInfo +
module.error.map(" (error: " + _ + ")").getOrElse("") +
module.evictedByVersion.map(_ formatted " (evicted by: %s)").getOrElse(""), module.hadError)
}
| jrudolph/sbt-dependency-graph | src/main/scala/net/virtualvoid/sbt/graph/rendering/AsciiTree.scala | Scala | apache-2.0 | 1,357 |
/*
* Copyright (c) 2012-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package adapters
package registry
// Joda-Time
import org.joda.time.DateTime
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.json4s.scalaz.JsonScalaz._
// Snowplow
import loaders.{
CollectorApi,
CollectorSource,
CollectorContext,
CollectorPayload
}
import utils.ConversionUtils
import SpecHelpers._
// Specs2
import org.specs2.{Specification, ScalaCheck}
import org.specs2.matcher.DataTables
import org.specs2.scalaz.ValidationMatchers
class MailchimpAdapterSpec extends Specification with DataTables with ValidationMatchers with ScalaCheck { def is =
"This is a specification to test the MailchimpAdapter functionality" ^
p^
"toKeys should return a valid List of Keys from a string containing braces (or not)" ! e1^
"toNestedJField should return a valid JField nested to contain all keys and then the supplied value" ! e2^
"toJFields should return a valid list of JFields based on the Map supplied" ! e3^
"mergeJFields should return a correctly merged JSON which matches the expectation" ! e4^
"reformatParameters should return a parameter Map with correctly formatted values" ! e5^
"toRawEvents must return a Nel Success with a correctly formatted ue_pr json" ! e6^
"toRawEvents must return a Nel Success with a correctly merged and formatted ue_pr json" ! e7^
"toRawEvents must return a Nel Success for a supported event type" ! e8^
"toRawEvents must return a Nel Failure error for an unsupported event type" ! e9^
"toRawEvents must return a Nel Success containing an unsubscribe event and query string parameters" ! e10^
"toRawEvents must return a Nel Failure if the request body is missing" ! e11^
"toRawEvents must return a Nel Failure if the content type is missing" ! e12^
"toRawEvents must return a Nel Failure if the content type is incorrect" ! e13^
"toRawEvents must return a Nel Failure if the request body does not contain a type parameter" ! e14^
end
implicit val resolver = SpecHelpers.IgluResolver
object Shared {
val api = CollectorApi("com.mailchimp", "v1")
val cljSource = CollectorSource("clj-tomcat", "UTF-8", None)
val context = CollectorContext(DateTime.parse("2013-08-29T00:18:48.000+00:00"), "37.157.33.123".some, None, None, Nil, None)
}
val ContentType = "application/x-www-form-urlencoded"
def e1 = {
val keys = "data[merges][LNAME]"
val expected = NonEmptyList("data", "merges", "LNAME")
MailchimpAdapter.toKeys(keys) mustEqual expected
}
def e2 = {
val keys = NonEmptyList("data", "merges", "LNAME")
val value = "Beemster"
val expected = JField("data", JObject(List(("merges", JObject(List(("LNAME", JString("Beemster"))))))))
MailchimpAdapter.toNestedJField(keys, value) mustEqual expected
}
def e3 = {
val map = Map(
"data[merges][LNAME]" -> "Beemster",
"data[merges][FNAME]" -> "Joshua"
)
val expected = List(
JField("data", JObject(List(("merges", JObject(List(("LNAME", JString("Beemster")))))))),
JField("data", JObject(List(("merges", JObject(List(("FNAME", JString("Joshua"))))))))
)
MailchimpAdapter.toJFields(map) mustEqual expected
}
def e4 = {
val a = JField("l1", JField("l2", JField("l3", JField("str", "hi"))))
val b = JField("l1", JField("l2", JField("l3", JField("num", 42))))
val expected = JObject(List(("l1", JObject(List(("l2", JObject(List(("l3", JObject(List(
("str", JString("hi")),
("num", JInt(42))
)))))))))))
MailchimpAdapter.mergeJFields(List(a, b)) mustEqual expected
}
def e5 =
"SPEC NAME" || "PARAMS" | "EXPECTED OUTPUT" |
"Return Updated Params" !! Map("type" -> "subscribe", "fired_at" -> "2014-10-22 13:50:00") ! Map("type" -> "subscribe", "fired_at" -> "2014-10-22T13:50:00.000Z") |
"Return Same Params" !! Map("type" -> "subscribe", "id" -> "some_id") ! Map("type" -> "subscribe", "id" -> "some_id") |> {
(_, params, expected) =>
val actual = MailchimpAdapter.reformatParameters(params)
actual mustEqual expected
}
def e6 = {
val body = "type=subscribe&data%5Bmerges%5D%5BLNAME%5D=Beemster"
val payload = CollectorPayload(Shared.api, Nil, ContentType.some, body.some, Shared.cljSource, Shared.context)
val expectedJson =
"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.mailchimp/subscribe/jsonschema/1-0-0",
|"data":{
|"type":"subscribe",
|"data":{
|"merges":{
|"LNAME":"Beemster"
|}
|}
|}
|}
|}""".stripMargin.replaceAll("[\\n\\r]","")
val actual = MailchimpAdapter.toRawEvents(payload)
actual must beSuccessful(NonEmptyList(RawEvent(Shared.api, Map("tv" -> "com.mailchimp-v1", "e" -> "ue", "p" -> "srv", "ue_pr" -> expectedJson), ContentType.some, Shared.cljSource, Shared.context)))
}
def e7 = {
val body = "type=subscribe&data%5Bmerges%5D%5BFNAME%5D=Agent&data%5Bmerges%5D%5BLNAME%5D=Smith"
val payload = CollectorPayload(Shared.api, Nil, ContentType.some, body.some, Shared.cljSource, Shared.context)
val expectedJson =
"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.mailchimp/subscribe/jsonschema/1-0-0",
|"data":{
|"type":"subscribe",
|"data":{
|"merges":{
|"FNAME":"Agent",
|"LNAME":"Smith"
|}
|}
|}
|}
|}""".stripMargin.replaceAll("[\\n\\r]","")
val actual = MailchimpAdapter.toRawEvents(payload)
actual must beSuccessful(NonEmptyList(RawEvent(Shared.api, Map("tv" -> "com.mailchimp-v1", "e" -> "ue", "p" -> "srv", "ue_pr" -> expectedJson), ContentType.some, Shared.cljSource, Shared.context)))
}
def e8 =
"SPEC NAME" || "SCHEMA TYPE" | "EXPECTED SCHEMA" |
"Valid, type subscribe" !! "subscribe" ! "iglu:com.mailchimp/subscribe/jsonschema/1-0-0" |
"Valid, type unsubscribe" !! "unsubscribe" ! "iglu:com.mailchimp/unsubscribe/jsonschema/1-0-0" |
"Valid, type profile" !! "profile" ! "iglu:com.mailchimp/profile_update/jsonschema/1-0-0" |
"Valid, type email" !! "upemail" ! "iglu:com.mailchimp/email_address_change/jsonschema/1-0-0" |
"Valid, type cleaned" !! "cleaned" ! "iglu:com.mailchimp/cleaned_email/jsonschema/1-0-0" |
"Valid, type campaign" !! "campaign" ! "iglu:com.mailchimp/campaign_sending_status/jsonschema/1-0-0" |> {
(_, schema, expected) =>
val body = "type="+schema
val payload = CollectorPayload(Shared.api, Nil, ContentType.some, body.some, Shared.cljSource, Shared.context)
val expectedJson = "{\\"schema\\":\\"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0\\",\\"data\\":{\\"schema\\":\\""+expected+"\\",\\"data\\":{\\"type\\":\\""+schema+"\\"}}}"
val actual = MailchimpAdapter.toRawEvents(payload)
actual must beSuccessful(NonEmptyList(RawEvent(Shared.api, Map("tv" -> "com.mailchimp-v1", "e" -> "ue", "p" -> "srv", "ue_pr" -> expectedJson), ContentType.some, Shared.cljSource, Shared.context)))
}
def e9 =
"SPEC NAME" || "SCHEMA TYPE" | "EXPECTED OUTPUT" |
"Invalid, bad type" !! "bad" ! "MailChimp event failed: type parameter [bad] not recognized" |
"Invalid, no type" !! "" ! "MailChimp event failed: type parameter is empty - cannot determine event type" |> {
(_, schema, expected) =>
val body = "type="+schema
val payload = CollectorPayload(Shared.api, Nil, ContentType.some, body.some, Shared.cljSource, Shared.context)
val actual = MailchimpAdapter.toRawEvents(payload)
actual must beFailing(NonEmptyList(expected))
}
def e10 = {
val body = "type=unsubscribe&fired_at=2014-10-22+13%3A10%3A40&data%5Baction%5D=unsub&data%5Breason%5D=manual&data%5Bid%5D=94826aa750&data%5Bemail%5D=josh%40snowplowanalytics.com&data%5Bemail_type%5D=html&data%5Bip_opt%5D=82.225.169.220&data%5Bweb_id%5D=203740265&data%5Bmerges%5D%5BEMAIL%5D=josh%40snowplowanalytics.com&data%5Bmerges%5D%5BFNAME%5D=Joshua&data%5Bmerges%5D%5BLNAME%5D=Beemster&data%5Blist_id%5D=f1243a3b12"
val qs = toNameValuePairs("nuid" -> "123")
val payload = CollectorPayload(Shared.api, qs, ContentType.some, body.some, Shared.cljSource, Shared.context)
val expectedJson =
"""|{
|"schema":"iglu:com.snowplowanalytics.snowplow/unstruct_event/jsonschema/1-0-0",
|"data":{
|"schema":"iglu:com.mailchimp/unsubscribe/jsonschema/1-0-0",
|"data":{
|"data":{
|"ip_opt":"82.225.169.220",
|"merges":{
|"LNAME":"Beemster",
|"FNAME":"Joshua",
|"EMAIL":"josh%40snowplowanalytics.com"
|},
|"email":"josh%40snowplowanalytics.com",
|"list_id":"f1243a3b12",
|"email_type":"html",
|"reason":"manual",
|"id":"94826aa750",
|"action":"unsub",
|"web_id":"203740265"
|},
|"fired_at":"2014-10-22T13%3A10%3A40.000Z",
|"type":"unsubscribe"
|}
|}
|}""".stripMargin.replaceAll("[\\n\\r]","")
val actual = MailchimpAdapter.toRawEvents(payload)
actual must beSuccessful(NonEmptyList(RawEvent(Shared.api, Map("tv" -> "com.mailchimp-v1", "e" -> "ue", "p" -> "srv", "ue_pr" -> expectedJson, "nuid" -> "123"), ContentType.some, Shared.cljSource, Shared.context)))
}
def e11 = {
val payload = CollectorPayload(Shared.api, Nil, ContentType.some, None, Shared.cljSource, Shared.context)
val actual = MailchimpAdapter.toRawEvents(payload)
actual must beFailing(NonEmptyList("Request body is empty: no MailChimp event to process"))
}
def e12 = {
val payload = CollectorPayload(Shared.api, Nil, None, "stub".some, Shared.cljSource, Shared.context)
val actual = MailchimpAdapter.toRawEvents(payload)
actual must beFailing(NonEmptyList("Request body provided but content type empty, expected application/x-www-form-urlencoded for MailChimp"))
}
def e13 = {
val payload = CollectorPayload(Shared.api, Nil, "application/json".some, "stub".some, Shared.cljSource, Shared.context)
val actual = MailchimpAdapter.toRawEvents(payload)
actual must beFailing(NonEmptyList("Content type of application/json provided, expected application/x-www-form-urlencoded for MailChimp"))
}
def e14 = {
val body = "fired_at=2014-10-22+13%3A10%3A40"
val payload = CollectorPayload(Shared.api, Nil, ContentType.some, body.some, Shared.cljSource, Shared.context)
val actual = MailchimpAdapter.toRawEvents(payload)
actual must beFailing(NonEmptyList("No MailChimp type parameter provided: cannot determine event type"))
}
}
| krahman/snowplow | 3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/adapters/registry/MailchimpAdapterSpec.scala | Scala | apache-2.0 | 13,129 |
package coltfred.jsongenerator
import scalaz._, Scalaz._
import argonaut.DecodeJson
/**
* A graph is a root variable which is some number of Variables that could be
* chosen as the starting point of traversal. This is separate from adjacencyList
* because the outside caller needs to know the possible starting places.
*/
trait Graph[A] {
def rootVariable: VariableChoices[A]
def adjacencyList: AdjacencyList[A]
}
object Graph {
def apply[A](r: VariableChoices[A], adj: AdjacencyList[A]): Graph[A] = new Graph[A] {
val rootVariable = r
val adjacencyList = adj
}
def fromJson[A: DecodeJson](data: String): Error \\/ Graph[A] = {
for {
parsedVariable <- ParsedVariable.parse[A](data)
graph <- fromRootParsedVariable(parsedVariable)
} yield graph
}
private[jsongenerator] def fromRootParsedVariable[A](p: ParsedVariable[A]): Error \\/ Graph[A] = {
for {
root <- VariableChoices(p)
adjList <- fromParsedVariables(expandChoices(root, p.choices))
} yield apply(root, adjList)
}
//Return the extracted Variables with their children
private def expandChoices[A](choice: VariableChoices[A], p: List[ParsedVariableChoice[A]]): List[(Variable[A], List[ParsedVariable[A]])] = {
choice.values.zip(p.map(_.dependents))
}
private def combine[A](adjs: AdjacencyList[A]*) = {
adjs.reduceOption(_ union _).getOrElse(==>>.empty)
}
/**
* Given variables and the parsed variables they lead to, create an adjacency list
*/
private def fromParsedVariables[A](l: List[(Variable[A], List[ParsedVariable[A]])]): Error \\/ AdjacencyList[A] = {
l.traverseU {
case (variable, parsedVariables) => fromParsedVariable(variable, parsedVariables)
}.map(combine)
}
/**
   * For a single variable, create a node in the graph and call fromParsedVariables on all sub paths
* from that node.
*/
private[jsongenerator] def fromParsedVariable[A](r: Variable[A], p: List[ParsedVariable[A]]): Error \\/ AdjacencyList[A] = {
for {
nextVariableChoices <- p.traverseU(VariableChoices(_, r.some))
nextLevel = nextVariableChoices.zip(p.map(_.choices)).flatMap(t => expandChoices(t._1, t._2))
recursiveAdjList <- fromParsedVariables(nextLevel)
adjacencyList = choicesToAdjacencyList(r, nextVariableChoices)
} yield combine(recursiveAdjList, adjacencyList)
}
private[jsongenerator] def choicesToAdjacencyList[A](r: Variable[A], l: List[VariableChoices[A]]): AdjacencyList[A] = {
l match {
case Nil => ==>>.empty
case ll => ==>>(r -> ll)
}
}
}
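// Usage sketch (illustrative only, not part of the original file), assuming an argonaut
// DecodeJson instance is in scope for the payload type:
//
//   val graph: Error \\/ Graph[String] = Graph.fromJson[String](jsonText)
//
// A successful result exposes `rootVariable` (the possible starting choices) and
// `adjacencyList` (the edges used for further traversal).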
| coltfred/jsongenerator | src/main/scala/coltfred/jsongenerator/Graph.scala | Scala | apache-2.0 | 2,578 |
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import scalautil._
object PredicateTest extends Properties("Predicate"){
property("apply") = forAll{ (p: Int => Boolean, i: Int) =>
Predicate(p)(i) == p(i)
}
property("map") = forAll{ (p: Int => Boolean, f: Boolean => Boolean, i: Int) =>
Predicate(p).map(f).run(i) == f(p(i))
}
property("map2") = forAll{ (p1: Int => Boolean, p2: Int => Boolean, f: (Boolean, Boolean) => Boolean, i: Int) =>
Predicate.map2(Predicate(p1), Predicate(p2))(f).run(i) == f(p1(i), p2(i))
}
property("flatMap") = forAll{ (p: Int => Boolean, f: Boolean => (Int => Boolean), i: Int) =>
Predicate(p).flatMap(b => Predicate(f(b))).run(i) == f(p(i))(i)
}
property("and") = forAll{ (p1: Int => Boolean, p2: Int => Boolean, i: Int) =>
(Predicate(p1) and Predicate(p2)).run(i) == (p1(i) && p2(i))
}
property("or") = forAll{ (p1: Int => Boolean, p2: Int => Boolean, i: Int) =>
(Predicate(p1) or Predicate(p2)).run(i) == (p1(i) || p2(i))
}
property("inverse") = forAll{ (p: Int => Boolean, i: Int) =>
Predicate(p).inverse.run(i) == !(p(i))
}
property("nand") = forAll{ (p1: Int => Boolean, p2: Int => Boolean, i: Int) =>
(Predicate(p1) nand Predicate(p2)).run(i) == !(p1(i) && p2(i))
}
property("nor") = forAll{ (p1: Int => Boolean, p2: Int => Boolean, i: Int) =>
(Predicate(p1) nor Predicate(p2)).run(i) == !(p1(i) || p2(i))
}
}
| morikuni/scalautil | src/test/scala/PredicateTest.scala | Scala | apache-2.0 | 1,409 |