| code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (1 value) | license (15 values) | size (int64, 5–1M) |
---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import org.scalatest.BeforeAndAfter
import org.apache.spark.sql.hive.test.TestHive
/* Implicit conversions */
import scala.collection.JavaConversions._
/**
* A set of test cases that validate partition and column pruning.
*/
class PruningSuite extends HiveComparisonTest with BeforeAndAfter {
TestHive.cacheTables = false
// Column/partition pruning is not implemented for `InMemoryColumnarTableScan` yet, so we need to
// reset the environment to ensure none of the tables referenced in this suite are cached in-memory.
// Refer to https://issues.apache.org/jira/browse/SPARK-2283 for details.
TestHive.reset()
// Column pruning tests
createPruningTest("Column pruning - with partitioned table",
"SELECT key FROM srcpart WHERE ds = '2008-04-08' LIMIT 3",
Seq("key"),
Seq("key"),
Seq(
Seq("2008-04-08", "11"),
Seq("2008-04-08", "12")))
createPruningTest("Column pruning - with non-partitioned table",
"SELECT key FROM src WHERE key > 10 LIMIT 3",
Seq("key"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - with multiple projects",
"SELECT c1 FROM (SELECT key AS c1 FROM src WHERE key > 10) t1 LIMIT 3",
Seq("c1"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - projects alias substituting",
"SELECT c1 AS c2 FROM (SELECT key AS c1 FROM src WHERE key > 10) t1 LIMIT 3",
Seq("c2"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - filter alias in-lining",
"SELECT c1 FROM (SELECT key AS c1 FROM src WHERE key > 10) t1 WHERE c1 < 100 LIMIT 3",
Seq("c1"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - without filters",
"SELECT c1 FROM (SELECT key AS c1 FROM src) t1 LIMIT 3",
Seq("c1"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - simple top project without aliases",
"SELECT key FROM (SELECT key FROM src WHERE key > 10) t1 WHERE key < 100 LIMIT 3",
Seq("key"),
Seq("key"),
Seq.empty)
createPruningTest("Column pruning - non-trivial top project with aliases",
"SELECT c1 * 2 AS double FROM (SELECT key AS c1 FROM src WHERE key > 10) t1 LIMIT 3",
Seq("double"),
Seq("key"),
Seq.empty)
// Partition pruning tests
createPruningTest("Partition pruning - non-partitioned, non-trivial project",
"SELECT key * 2 AS double FROM src WHERE value IS NOT NULL",
Seq("double"),
Seq("key", "value"),
Seq.empty)
createPruningTest("Partition pruning - non-partitioned table",
"SELECT value FROM src WHERE key IS NOT NULL",
Seq("value"),
Seq("value", "key"),
Seq.empty)
createPruningTest("Partition pruning - with filter on string partition key",
"SELECT value, hr FROM srcpart1 WHERE ds = '2008-04-08'",
Seq("value", "hr"),
Seq("value", "hr"),
Seq(
Seq("2008-04-08", "11"),
Seq("2008-04-08", "12")))
createPruningTest("Partition pruning - with filter on int partition key",
"SELECT value, hr FROM srcpart1 WHERE hr < 12",
Seq("value", "hr"),
Seq("value", "hr"),
Seq(
Seq("2008-04-08", "11"),
Seq("2008-04-09", "11")))
createPruningTest("Partition pruning - left only 1 partition",
"SELECT value, hr FROM srcpart1 WHERE ds = '2008-04-08' AND hr < 12",
Seq("value", "hr"),
Seq("value", "hr"),
Seq(
Seq("2008-04-08", "11")))
createPruningTest("Partition pruning - all partitions pruned",
"SELECT value, hr FROM srcpart1 WHERE ds = '2014-01-27' AND hr = 11",
Seq("value", "hr"),
Seq("value", "hr"),
Seq.empty)
createPruningTest("Partition pruning - pruning with both column key and partition key",
"SELECT value, hr FROM srcpart1 WHERE value IS NOT NULL AND hr < 12",
Seq("value", "hr"),
Seq("value", "hr"),
Seq(
Seq("2008-04-08", "11"),
Seq("2008-04-09", "11")))
def createPruningTest(
testCaseName: String,
sql: String,
expectedOutputColumns: Seq[String],
expectedScannedColumns: Seq[String],
expectedPartValues: Seq[Seq[String]]) = {
test(s"$testCaseName - pruning test") {
val plan = new TestHive.HiveQLQueryExecution(sql).executedPlan
val actualOutputColumns = plan.output.map(_.name)
val (actualScannedColumns, actualPartValues) = plan.collect {
case p @ HiveTableScan(columns, relation, _) =>
val columnNames = columns.map(_.name)
val partValues = p.prunePartitions(relation.hiveQlPartitions).map(_.getValues)
(columnNames, partValues)
}.head
assert(actualOutputColumns === expectedOutputColumns, "Output columns mismatch")
assert(actualScannedColumns === expectedScannedColumns, "Scanned columns mismatch")
assert(
actualPartValues.length === expectedPartValues.length,
"Partition value count mismatches")
for ((actual, expected) <- actualPartValues.zip(expectedPartValues)) {
assert(actual sameElements expected, "Partition values mismatch")
}
}
// Creates a query test to compare query results generated by Hive and Catalyst.
createQueryTest(s"$testCaseName - query test", sql)
}
}
| hengyicai/OnlineAggregationUCAS | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PruningSuite.scala | Scala | apache-2.0 | 6,002 |
/*§
===========================================================================
GraphsJ - SDK
===========================================================================
Copyright (C) 2009-2016 Gianluca Costa
===========================================================================
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
===========================================================================
*/
package info.gianlucacosta.graphsj
/**
* Text output console
*/
trait OutputConsole {
/**
* Prints the string representation of a value
*
* @param value
*/
def write(value: Any)
/**
* Prints the string representation of a value, followed by a newline character
*
* @param value
*/
def writeln(value: Any)
/**
* Prints a newline character
*/
def writeln()
/**
* Prints a header
*
* @param header The header text
*/
def writeHeader(header: String): Unit = {
val headerLine =
"-" * header.length
writeln(headerLine)
writeln(header)
writeln(headerLine)
}
}
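// Editor's illustrative sketch (not part of the original SDK): a minimal
// OutputConsole implementation backed by standard output, showing how the
// abstract write primitives underpin the concrete writeHeader helper.
object StdOutConsole extends OutputConsole {
  override def write(value: Any): Unit = print(value)
  override def writeln(value: Any): Unit = println(value)
  override def writeln(): Unit = println()
}
// Example: StdOutConsole.writeHeader("Shortest paths") prints the header text
// framed by two dash lines of matching length.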
| giancosta86/GraphsJ-sdk | src/main/scala/info/gianlucacosta/graphsj/OutputConsole.scala | Scala | apache-2.0 | 1,579 |
/*
* WindowIndexWhere.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.fscape
package graph
import de.sciss.fscape.Graph.{ProductReader, RefMapIn}
import de.sciss.fscape.UGenSource.unwrap
import de.sciss.fscape.stream.{StreamIn, StreamOut}
import scala.collection.immutable.{IndexedSeq => Vec}
object WindowIndexWhere extends ProductReader[WindowIndexWhere] {
override def read(in: RefMapIn, key: String, arity: Int): WindowIndexWhere = {
require (arity == 2)
val _p = in.readGE()
val _size = in.readGE()
new WindowIndexWhere(_p, _size)
}
}
/** A UGen that determines for each input window the first index where a predicate holds.
* It outputs one integer value per window; if the predicate does not hold across the entire
* window or if the window size is zero, the index will be `-1`.
*
* @param p a predicate to detect the index
* @param size the window size.
*/
final case class WindowIndexWhere(p: GE, size: GE) extends UGenSource.SingleOut {
protected def makeUGens(implicit b: UGenGraph.Builder): UGenInLike =
unwrap(this, Vector(p.expand, size.expand))
protected def makeUGen(args: Vec[UGenIn])(implicit b: UGenGraph.Builder): UGenInLike =
UGen.SingleOut(this, args)
private[fscape] def makeStream(args: Vec[StreamIn])(implicit b: stream.Builder): StreamOut = {
val Vec(p, size) = args: @unchecked
stream.WindowIndexWhere(p = p.toInt, size = size.toInt)
}
}
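// Editor's illustrative sketch (not part of the original sources): the
// documented per-window behaviour mirrored on plain Scala collections. A
// window with no matching element (or an empty window) yields -1.
private object WindowIndexWhereSemanticsSketch {
  val in      = Vector(0, 0, 1, 0, 0, 0, 0, 5)
  val winSize = 4
  // split the input into windows of `winSize` and take the first index where
  // the predicate (here: value > 0) holds within each window
  val indices: Vector[Int] = in.grouped(winSize).map(_.indexWhere(_ > 0)).toVector
  // indices == Vector(2, 3)
}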
| Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/graph/WindowIndexWhere.scala | Scala | agpl-3.0 | 1,676 |
/*
* @author Philip Stutz
* @author Mihaela Verman
*
* Copyright 2013 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.triplerush.vertices.query
import scala.concurrent.Promise
import com.signalcollect.GraphEditor
import com.signalcollect.triplerush.OperationIds
import com.signalcollect.triplerush.TriplePattern
/**
* If execution is complete returns Some(numberOfResults), else returns None.
*/
final class ResultCountingQueryVertex(
query: Seq[TriplePattern],
tickets: Long,
resultPromise: Promise[Option[Long]])
extends AbstractQueryVertex[Long](query, tickets, numberOfSelectVariables = 0) {
val id = OperationIds.embedInLong(OperationIds.nextCountQueryId)
override def afterInitialization(graphEditor: GraphEditor[Long, Any]): Unit = {
state = 0
super.afterInitialization(graphEditor)
}
def handleBindings(bindings: Array[Array[Int]]): Unit = {
throw new UnsupportedOperationException("Result counting vertex should never receive bindings.")
}
def handleResultCount(resultCount: Long): Unit = {
state += resultCount
}
override def reportResults(complete: Boolean): Unit = {
if (complete) {
resultPromise.success(Some(state))
} else {
resultPromise.success(None)
}
}
}
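// Editor's illustrative sketch (not part of the original sources): how a
// caller might consume the Promise that reportResults completes. Vertex
// construction and graph wiring are elided; only the Some(count)/None
// contract is exercised with the standard library.
private object ResultCountConsumptionSketch {
  import scala.concurrent.Future
  import scala.concurrent.ExecutionContext.Implicits.global

  val resultPromise = Promise[Option[Long]]()
  // a ResultCountingQueryVertex built with this promise would be added to the
  // Signal/Collect graph here; reportResults later completes the promise
  val summary: Future[String] = resultPromise.future.map {
    case Some(count) => s"query returned $count results"
    case None        => "query did not run to completion"
  }
}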
| hicolour/triplerush | src/main/scala/com/signalcollect/triplerush/vertices/query/ResultCountingQueryVertex.scala | Scala | apache-2.0 | 1,835 |
package finatra.quickstart
import com.google.inject.testing.fieldbinder.Bind
import com.twitter.finagle.http.Status._
import com.twitter.finatra.http.EmbeddedHttpServer
import com.twitter.inject.Mockito
import com.twitter.inject.server.FeatureTest
import com.twitter.util.Future
import finatra.quickstart.domain.TweetId
import finatra.quickstart.domain.http.{TweetLocation, TweetResponse}
import finatra.quickstart.firebase.FirebaseClient
import finatra.quickstart.services.IdService
class TwitterCloneFeatureTest extends FeatureTest with Mockito {
override val server = new EmbeddedHttpServer(new TwitterCloneServer)
@Bind val firebaseClient = smartMock[FirebaseClient]
@Bind val idService = smartMock[IdService]
/* Mock GET Request performed in TwitterCloneWarmup */
firebaseClient.get("/tweets/123.json")(manifest[TweetResponse]) returns Future(None)
"tweet creation" in {
idService.getId returns Future(TweetId("123"))
val savedStatus = TweetResponse(
id = TweetId("123"),
message = "Hello #FinagleCon",
location = Some(TweetLocation(37.7821120598956, -122.400612831116)),
nsfw = false)
firebaseClient.put("/tweets/123.json", savedStatus) returns Future.Unit
firebaseClient.get("/tweets/123.json")(manifest[TweetResponse]) returns Future(Option(savedStatus))
firebaseClient.get("/tweets/124.json")(manifest[TweetResponse]) returns Future(None)
firebaseClient.get("/tweets/125.json")(manifest[TweetResponse]) returns Future(None)
val result = server.httpPost(
path = "/tweet",
postBody = """
{
"message": "Hello #FinagleCon",
"location": {
"lat": "37.7821120598956",
"long": "-122.400612831116"
},
"nsfw": false
}""",
andExpect = Created,
withJsonBody = """
{
"id": "123",
"message": "Hello #FinagleCon",
"location": {
"lat": "37.7821120598956",
"long": "-122.400612831116"
},
"nsfw": false
}""")
server.httpGetJson[TweetResponse](
path = result.location.get,
andExpect = Ok,
withJsonBody = result.contentString)
}
"Post bad tweet" in {
server.httpPost(
path = "/tweet",
postBody = """
{
"message": "",
"location": {
"lat": "9999"
},
"nsfw": "abc"
}""",
andExpect = BadRequest,
withJsonBody = """
{
"errors" : [
"location.lat: [9999.0] is not between -85 and 85",
"location.long: field is required",
"message: size [0] is not between 1 and 140",
"nsfw: 'abc' is not a valid Boolean"
]
}
""")
}
}
| syamantm/finatra | examples/twitter-clone/src/test/scala/finatra/quickstart/TwitterCloneFeatureTest.scala | Scala | apache-2.0 | 2,785 |
package controllers
import java.net.URLEncoder
import controllers.AdministrationDashboardController._
import utils.Global._
import utils.Implicits._
import play.api.mvc.{ Action, Controller }
import utils.Security.Authentication
import utils.semantic.{ Resource, RDFNode, SPARQLTools }
import utils.semantic.Vocabulary.{ lwm, rdfs, foaf }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.control.NonFatal
object SearchController extends Controller with Authentication {
def search(param: String) = hasPermissions(Permissions.AdminRole.permissions.toList: _*) { session ⇒
Action.async { implicit request ⇒
val regex = s"${URLEncoder.encode(param, "UTF-8")}".toCharArray.toList.map(e ⇒ s"[$e]").mkString("")
val futureFirstName = {
val q = s"""
Select ?s (${rdfs.label} as ?p) ?o where {
?s ${foaf.firstName} ?name .
?s ${rdfs.label} ?o
filter regex(?name, "$regex", "i")
} order by asc(?o)
""".stripMargin
sparqlExecutionContext.executeQuery(q).map { result ⇒
SPARQLTools.statementsFromString(result).map(e ⇒ (e.s, e.o))
}
}
val futureLastName = {
val q = s"""
Select ?s (${rdfs.label} as ?p) ?o where {
?s ${foaf.lastName} ?name .
?s ${rdfs.label} ?o
filter regex(?name, "$regex", "i")
} order by asc(?o)
""".stripMargin
sparqlExecutionContext.executeQuery(q).map { result ⇒
SPARQLTools.statementsFromString(result).map(e ⇒ (e.s, e.o))
}
}
val futureLabel = {
val q = s"""
Select ?s (${rdfs.label} as ?p) ?o where {
?s ${rdfs.label} ?o
filter regex(?o, "$regex", "i")
} order by asc(?o)
""".stripMargin
sparqlExecutionContext.executeQuery(q).map { result ⇒
SPARQLTools.statementsFromString(result).map(e ⇒ (e.s, e.o))
}
}
val futureRegistrationId = {
val q = s"""
Select ?s (${rdfs.label} as ?p) ?o where {
?s ${lwm.hasRegistrationId} ?id .
?s ${rdfs.label} ?o
filter regex(?id, "$regex", "i")
} order by asc(?o)
""".stripMargin
sparqlExecutionContext.executeQuery(q).map { result ⇒
SPARQLTools.statementsFromString(result).map(e ⇒ (e.s, e.o))
}
}
val futureGmId = {
val q = s"""
Select ?s (${lwm.hasGmId} as ?p) ?o {
?s ${lwm.hasGmId} ?param .
?s ${rdfs.label} ?o
filter regex(?param, '^$param','i')
} order by asc(?o)
""".stripMargin
sparqlExecutionContext.executeQuery(q).map { result ⇒
SPARQLTools.statementsFromString(result).map(e ⇒ (e.s, e.o))
}
}
(for {
firstName ← futureFirstName
lastName ← futureLastName
registrationId ← futureRegistrationId
gmId ← futureGmId
} yield {
val complete = firstName ++ lastName ++ registrationId ++ gmId
Ok(views.html.complex_search(complete))
}).recover {
case NonFatal(e) ⇒
InternalServerError(s"Oops. There seems to be a problem ($e) with the server. We are working on it!")
}
}
}
}
| FHK-ADV/lwm | app/controllers/SearchController.scala | Scala | mit | 3,527 |
package no.uio.musit.models
import play.api.libs.json.{JsNumber, Reads, Writes, _}
case class DatabaseId(underlying: Long) extends AnyVal
object DatabaseId {
implicit val reads: Reads[DatabaseId] = __.read[Long].map(DatabaseId.apply)
implicit val writes: Writes[DatabaseId] = Writes(did => JsNumber(did.underlying))
implicit def fromLong(l: Long): DatabaseId = DatabaseId(l)
implicit def toLong(id: DatabaseId): Long = id.underlying
implicit def fromOptLong(ml: Option[Long]): Option[DatabaseId] = ml.map(fromLong)
implicit def toOptLong(mdid: Option[DatabaseId]): Option[Long] = mdid.map(toLong)
}
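// Editor's illustrative sketch (not part of the original sources): with the
// implicit Reads/Writes above, a DatabaseId round-trips through Play JSON as
// a bare number.
private object DatabaseIdJsonExample {
  val asJson: JsValue = Json.toJson(DatabaseId(42L)) // JsNumber(42)
  val back: DatabaseId = Json.parse("42").as[DatabaseId] // DatabaseId(42)
}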
| MUSIT-Norway/musit | musit-models/src/main/scala/no/uio/musit/models/DatabaseId.scala | Scala | gpl-2.0 | 621 |
package com.harrys.hyppo.worker.exec
/**
* Created by jpetty on 12/7/15.
*/
sealed trait TaskLogStrategy {
def configName: String
}
object TaskLogStrategy {
case object PipeTaskLogStrategy extends TaskLogStrategy {
override def configName: String = "PIPE"
}
case object FileTaskLogStrategy extends TaskLogStrategy {
override def configName: String = "FILE"
}
case object NullTaskLogStrategy extends TaskLogStrategy {
override def configName: String = "NULL"
}
final val strategies: Seq[TaskLogStrategy] = Seq(PipeTaskLogStrategy, FileTaskLogStrategy, NullTaskLogStrategy)
def strategyForName(value: String): TaskLogStrategy = {
strategies.find(_.configName.equalsIgnoreCase(value)).getOrElse(throw new IllegalArgumentException(s"Unknown TaskLogStrategy: $value. Must be one of: ${ strategies.map(_.configName).mkString(", ") }"))
}
}
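// Editor's illustrative sketch (not part of the original sources): the
// configName lookup above is case-insensitive, and unknown names raise an
// IllegalArgumentException listing the valid options.
private object TaskLogStrategyLookupExample {
  val fromConfig: TaskLogStrategy = TaskLogStrategy.strategyForName("file")
  // fromConfig == TaskLogStrategy.FileTaskLogStrategy
  // TaskLogStrategy.strategyForName("syslog") would throw IllegalArgumentException
}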
| harrystech/hyppo-worker | worker/src/main/scala/com/harrys/hyppo/worker/exec/TaskLogStrategy.scala | Scala | mit | 881 |
package com.codacy.client.stash.client
import java.net.URL
import com.codacy.client.stash.client.auth.Authenticator
import com.codacy.client.stash.util.HTTPStatusCodes
import play.api.libs.json._
import scalaj.http.{Http, HttpOptions, HttpRequest, HttpResponse, StringBodyConnectFunc}
import scala.util.Properties
import scala.util.control.NonFatal
class StashClient(apiUrl: String, authenticator: Option[Authenticator] = None, acceptAllCertificates: Boolean = false) {
/*
* Does an API request and parses the json output into a class
*/
def execute[T](
request: Request[T]
)(params: Map[String, String] = Map.empty)(implicit reader: Reads[T]): RequestResponse[T] = {
get[T](request.path, params) match {
case Right(json) => RequestResponse(json.asOpt[T])
case Left(error) => error
}
}
/*
* Does a paginated API request and parses the json output into a sequence of classes
*/
def executePaginated[T](
request: Request[Seq[T]]
)(params: Map[String, String] = Map.empty)(implicit reader: Reads[T]): RequestResponse[Seq[T]] = {
val cleanUrl = request.path.takeWhile(_ != '?')
get[Seq[T]](cleanUrl, params) match {
case Right(json) =>
val nextRepos = (for {
isLastPage <- (json \ "isLastPage").asOpt[Boolean] if !isLastPage
nextPageStart <- (json \ "nextPageStart").asOpt[Int]
} yield {
executePaginated(Request(cleanUrl, request.classType))(params + ("start" -> nextPageStart.toString)).value
.getOrElse(Seq.empty)
}).getOrElse(Seq.empty)
RequestResponse(Some((json \ "values").as[Seq[T]] ++ nextRepos))
case Left(resp) => resp
}
}
def executePaginatedWithPageRequest[T](request: Request[Seq[T]], pageRequest: PageRequest)(
params: Map[String, String] = Map.empty
)(implicit reader: Reads[T]): RequestResponse[Seq[T]] = {
val cleanUrl = request.path.takeWhile(_ != '?')
get[Seq[T]](
cleanUrl,
params ++ Map("start" -> pageRequest.getStart.toString, "limit" -> pageRequest.getLimit.toString)
) match {
case Right(json) =>
RequestResponse(
value = Some((json \ "values").as[Seq[T]]),
nextPageStart = (json \ "nextPageStart").asOpt[Int],
size = (json \ "size").asOpt[Int],
limit = (json \ "limit").asOpt[Int],
isLastPage = (json \ "isLastPage").asOpt[Boolean]
)
case Left(error) =>
error
}
}
def postJson[T](request: Request[T], values: JsValue)(implicit reader: Reads[T]): RequestResponse[T] = {
performRequest("POST", request, values)
}
def putJson[T](request: Request[T], values: JsValue)(implicit reader: Reads[T]): RequestResponse[T] = {
performRequest("PUT", request, values)
}
// Wrap request with authentication options
private def withAuthentication(request: HttpRequest): HttpRequest =
authenticator match {
case Some(auth) => auth.withAuthentication(request)
case None => request
}
/*
* Does an API request
*/
private def performRequest[T](method: String, request: Request[T], values: JsValue)(
implicit reader: Reads[T]
): RequestResponse[T] = {
doRequest[T](request.path, method, Map.empty, Option(values)) match {
case Right((HTTPStatusCodes.OK | HTTPStatusCodes.CREATED, body)) =>
parseJson[T](body).fold(identity, { jsValue =>
valueOrError[T](jsValue)
})
case Right((HTTPStatusCodes.NO_CONTENT, _)) =>
RequestResponse[T](None)
case Right((statusCode, body)) =>
getError[T](statusCode, statusCode.toString, body)
case Left(requestResponse) =>
requestResponse
}
}
def delete(requestUrl: String)(params: Map[String, String] = Map.empty): RequestResponse[Boolean] = {
doRequest[Boolean](requestUrl, "DELETE", params, None) match {
case Right((HTTPStatusCodes.NO_CONTENT, _)) =>
RequestResponse[Boolean](Option(true))
case Right((HTTPStatusCodes.OK, body)) =>
parseJson[JsObject](body).fold({ error =>
RequestResponse[Boolean](None, message = error.message, hasError = true)
}, { _ =>
RequestResponse[Boolean](Option(true))
})
case Right((statusCode, body)) =>
getError[Boolean](statusCode, statusCode.toString, body)
case Left(requestResponse) =>
requestResponse
}
}
private def get[T](
requestUrl: String,
params: Map[String, String] = Map.empty
): Either[RequestResponse[T], JsValue] = {
doRequest[T](requestUrl, "GET", params, None) match {
case Right((HTTPStatusCodes.OK | HTTPStatusCodes.CREATED, body)) =>
parseJson[T](body)
case Right((statusCode, body)) =>
Left(getError[T](statusCode, statusCode.toString, body))
case Left(error) =>
Left(error)
}
}
def doRequest[T](
requestPath: String,
method: String,
params: Map[String, String] = Map.empty,
payload: Option[JsValue] = None
): Either[RequestResponse[T], (Int, String)] = {
val url = generateUrl(requestPath)
try {
val baseRequestWithoutCertificatesOption = Http(url).method(method).params(params)
val baseRequest =
if (acceptAllCertificates) {
baseRequestWithoutCertificatesOption.option(HttpOptions.allowUnsafeSSL)
} else {
baseRequestWithoutCertificatesOption
}
val request = payload
.fold(baseRequest)(
p =>
// Supports PUT and POST of JSON
baseRequest
.header("content-type", "application/json")
.copy(connectFunc = StringBodyConnectFunc(Json.stringify(p)))
)
val authenticatedRequest = withAuthentication(request)
val response = authenticatedRequest.asString
//if the response code is a redirect, follow it
if (HTTPStatusCodes.Redirects.all.contains(response.code)) {
followRedirect(method, params, payload, response)
} else {
Right((response.code, response.body))
}
} catch {
case NonFatal(exception) =>
Left(RequestResponse[T](value = None, message = exception.getMessage, hasError = true))
}
}
/**
* Checks the new location on the header of the [[HttpResponse]] passed by parameter
* and follows the redirect by making a new request. It returns the response if it is unable
* to get the location from the header.
* @param method The method of the request
* @param params The parameters of the request
* @param payload The payload of the request
* @param response The response to extract the new location or to return as default.
*/
private def followRedirect[T](
method: String,
params: Map[String, String],
payload: Option[JsValue],
response: HttpResponse[String]
): Either[RequestResponse[T], (Int, String)] = {
response.headers.get("Location") match {
case Some(Vector(newLocation)) =>
val newPath = new URL(newLocation).getPath
doRequest(newPath, method, params, payload)
case _ => Right((response.code, response.body))
}
}
private def valueOrError[T](json: JsValue)(implicit reader: Reads[T]): RequestResponse[T] = {
json.validate[T] match {
case s: JsSuccess[T] =>
RequestResponse(Some(s.value))
case e: JsError =>
val msg =
s"""|Failed to validate json:
|$json
|JsError errors:
|${e.errors.mkString(System.lineSeparator)}
""".stripMargin
RequestResponse[T](None, message = msg, hasError = true)
}
}
private def getError[T](status: Int, statusText: String, body: String): RequestResponse[T] = {
val msg =
s"""|$status: $statusText
|Body:
|$body
""".stripMargin
RequestResponse[T](None, message = msg, hasError = true)
}
private def parseJson[T](input: String): Either[RequestResponse[T], JsValue] = {
val json = Json.parse(input)
val errorOpt = (json \ "errors")
.asOpt[Seq[ResponseError]]
.map(_.map { error =>
s"""|Context: ${error.context.getOrElse("None")}
|Exception: ${error.exceptionName.getOrElse("None")}
|Message: ${error.message}
""".stripMargin
}.mkString(Properties.lineSeparator))
errorOpt
.map { error =>
Left(RequestResponse[T](None, message = error, hasError = true))
}
.getOrElse(Right(json))
}
private def generateUrl(endpoint: String) = {
s"$apiUrl$endpoint"
}
}
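// Editor's illustrative sketch (not part of the original sources): building an
// unauthenticated client and issuing a DELETE with the default (empty) query
// parameters. The base URL and endpoint path are hypothetical; RequestResponse
// carries either the parsed value or an error message.
private object StashClientDeleteExample {
  val client = new StashClient(apiUrl = "https://stash.example.com")
  val response: RequestResponse[Boolean] =
    client.delete("/rest/api/1.0/projects/PROJ/repos/example")()
}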
| codacy/stash-scala-client | src/main/scala/com/codacy/client/stash/client/StashClient.scala | Scala | apache-2.0 | 8,628 |
package frameless
package ml
package feature
import frameless.ml.feature.TypedStringIndexer.HandleInvalid
import frameless.ml.internals.UnaryInputsChecker
import org.apache.spark.ml.feature.{StringIndexer, StringIndexerModel}
/**
* A label indexer that maps a string column of labels to an ML column of label indices.
* The indices are in [0, numLabels), ordered by label frequencies.
* So the most frequent label gets index 0.
*
* @see `TypedIndexToString` for the inverse transformation
*/
final class TypedStringIndexer[Inputs] private[ml](stringIndexer: StringIndexer, inputCol: String)
extends TypedEstimator[Inputs, TypedStringIndexer.Outputs, StringIndexerModel] {
val estimator: StringIndexer = stringIndexer
.setInputCol(inputCol)
.setOutputCol(AppendTransformer.tempColumnName)
def setHandleInvalid(value: HandleInvalid): TypedStringIndexer[Inputs] = copy(stringIndexer.setHandleInvalid(value.sparkValue))
private def copy(newStringIndexer: StringIndexer): TypedStringIndexer[Inputs] =
new TypedStringIndexer[Inputs](newStringIndexer, inputCol)
}
object TypedStringIndexer {
case class Outputs(indexedOutput: Double)
sealed abstract class HandleInvalid(val sparkValue: String)
object HandleInvalid {
case object Error extends HandleInvalid("error")
case object Skip extends HandleInvalid("skip")
case object Keep extends HandleInvalid("keep")
}
def apply[Inputs](implicit inputsChecker: UnaryInputsChecker[Inputs, String]): TypedStringIndexer[Inputs] = {
new TypedStringIndexer[Inputs](new StringIndexer(), inputsChecker.inputCol)
}
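// Editor's illustrative sketch (not part of the original sources): selecting
// the input column through a single-String-field case class and configuring
// how unseen labels are handled; fitting on a TypedDataset is elided.
private object UsageSketch {
  case class Features(city: String)
  val indexer = TypedStringIndexer[Features]
    .setHandleInvalid(HandleInvalid.Keep)
}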
}
| adelbertc/frameless | ml/src/main/scala/frameless/ml/feature/TypedStringIndexer.scala | Scala | apache-2.0 | 1612 |
package ch02
import org.specs2.mutable.Specification
class Ex23CurrySpec extends Specification{
"Curry function " should {
"" in {
//Given
//When
//Then
ok
}
}
}
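// Editor's illustrative sketch (not part of the original sources): the
// exercise 2.3 `curry` signature this placeholder spec presumably targets,
// with a minimal property it could assert once filled in.
private object CurrySketch {
  def curry[A, B, C](f: (A, B) => C): A => B => C =
    a => b => f(a, b)

  val add: (Int, Int) => Int = _ + _
  val holds: Boolean = curry(add)(1)(2) == add(1, 2) // true
}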
| IrfanAnsari/fpinscala | src/test/scala/ch02/Ex23CurrySpec.scala | Scala | mit | 210 |
package storage
import scaldi.Module
import scala.language.existentials
package object postgres {
val dbProfile = EnhancedPostgresDriver.profile
val dbSimple = EnhancedPostgresDriver.simple
class DbModule extends Module {
import scala.slick.jdbc.JdbcBackend.Database
bind[Database] to Database.forDataSource(ConnectionPool())
}
}
| onurzdg/spray-app | src/main/scala/storage/postgres/package.scala | Scala | apache-2.0 | 351 |
/**
* This file contains all DPF concepts that are not part of the graph itself.
* I tried to keep all definitions very close to what has been defined in
* Adrian Rutle's PhD thesis https://bora.uib.no/handle/1956/4469, with a few exceptions:
* 1. The constraint semantics (i.e. the OCL) is mapped to a signature and not to the model.
* I originally did this differently, but found it very impractical to keep a "meta-model" and
* define the semantics of predicates per model.
* 2. I allow different signatures to be used in each specification.
* This makes it possible to group and reuse specifications according to user-defined topics.
*/
package no.hib.dpf.text.scala.ct;
/**
* A validator for a constraint (as defined in a signature)
*/
case class Validator(id: RId, n: String, ps_var: List[String], arity: List[Element],tLanguage: String, template: String, errorMsg: String)
/**
* A constraint (as used in a specification)
*/
case class Constraint(ps: List[String], arity_d: List[Element], validator: Validator)
/**
* A specification ("S" like in Adrian Rutle's thesis)
*/
case class S(g: AbstractGraph, csLists: List[Set[Constraint]]) { //Specification
private def inv(_g: AbstractGraph, _cs: Set[Constraint]): Boolean = {
_cs foreach { _p =>
for (_e <- _p.arity_d) {
_e match{
case Arrow(_,sr,tg,TypeArrow.TInheritance) =>
_g match {
case ig:IGraph => if(!(ig.inh contains sr)) return false
if(!(ig.inh(sr) contains tg)) return false
case _ => return false
}
case _ => if (!(_g contains _e)) return false
}
}
}
return true;
}
if (!inv(g, cs)) {
sys.error("not valid specification")
}
def cs:Set[Constraint]=csLists.flatten.toSet
override def toString = "S(\\n" + g + "\\n\\t" + constraintsToString + "\\n)";
protected def constraintsToString = (for (c <- cs) yield format(c))
protected def elementsToString(es: List[Element]) = (for (e <- es) yield g.format(e))
def format(c: Constraint): String = c.validator.n + "@" + c.validator.id.v + "(" + c.ps.mkString + ")" + "{Elements(" + elementsToString(c.arity_d) + ")}";
}
/**
* A specification instance. mm = meta-specification / meta-model
*/
case class IS(mm: S, g: AbstractGraph) { //Instance specification
private def inv(_mm: S, _g: AbstractGraph): Boolean = _mm.g == _g.tgraph
if (!inv(mm, g)) {
sys.error("not valid instance specification")
}
}
/**
* Specification Descriptor
*/
case class SInfo(key:FKey,t:FKey, signatures:List[FKey]);
/**
* Companion object adding additional "constructors" for case class SInfo
*/
object SInfo {
def apply(key:FKey,t:FKey):SInfo = {
SInfo(key,t,Nil)
}
def apply(name: String, path: String,
version: String,
vNo: Int,
vNext: Int,
tName: String,
tVersionName: String,
signatureList: List[FKey] = Nil):SInfo = {
val id = FKey(name, path, version, vNo, vNext)
val t = FKey(tName, path,tVersionName)
val sList = if (null == signatureList) {Nil} else {signatureList}
SInfo(id, t, sList)
}
}
/**
* The predefined DPF specification containing a node and an arrow
* Additionally support for attributes have been added.
*/
object Dpf {
val Spec = S(Dpf.Graph, Nil)
case object Graph extends AbstractGraph() {
val node = Node(RId(Set(0)), TypeNode.TSelf)
val arrow = Arrow(RId(Set(1)), node, node, TypeArrow.TSelf)
val datatype = TypeNode.TAttribute
val attribute = TypeArrow.TAttribute
override val tKey = FKey("DPF", None, Some(Version("1",1,2)))
override val tgraph = this
override val nodes = Map[EId, Node](node.id -> node, datatype.id -> datatype)
override val arrows = Map[EId, Arrow](arrow.id -> arrow, attribute.id -> attribute)
override val names = Map[EId, String](node.id -> "Vertex", arrow.id -> "edge", datatype.id -> datatype.toString, attribute.id -> attribute.toString)
override val in = Map[Node, Map[TypeArrow, Set[Arrow]]](node -> Map(arrow -> Set(arrow)), datatype -> Map(attribute -> Set(attribute)))
override val out = Map[Node, Map[TypeArrow, Set[Arrow]]](node -> Map(arrow -> Set(arrow), attribute -> Set(attribute)))
override val toString = super.toString
}
}
| fmantz/DPF_Text | no.hib.dpf.text/src_scala/no/hib/dpf/text/scala/ct/4_DPF.scala | Scala | epl-1.0 | 4,263 |
package freecli
package option
package dsl
import shapeless._
import shapeless.ops.hlist.{Diff, Intersection, LeftFolder}
import core.api.CanProduce
import option.api.StringValue
trait StringValueImplicits {
type StringValueTypes = StringValue :: HNil
implicit def canProduceStringValue[T, H <: HList, Out0 <: HList, Rem <: HList](
implicit ev: Intersection.Aux[H, StringValueTypes, Out0],
ev2: LeftFolder.Aux[Out0, Option[StringValue], stringValueBuilder.type, StringValue],
ev3: Diff.Aux[H, Out0, Rem]) = {
new CanProduce[H] {
type Out = (StringValue, Rem)
def apply(list: H): Out = {
val inters = ev.apply(list)
val field = inters.foldLeft(Option.empty[StringValue])(stringValueBuilder)
val remaining = ev3.apply(list)
field -> remaining
}
}
}
object stringValueBuilder extends Poly2 {
implicit def caseStringValue:
Case.Aux[Option[StringValue], StringValue, StringValue] =
at[Option[StringValue], StringValue] {
case (_, d: StringValue) => d
}
}
}
| pavlosgi/freecli | core/src/main/scala/freecli/option/dsl/StringValueImplicits.scala | Scala | apache-2.0 | 1070 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.utils.instrumentation
import com.netflix.servo.monitor.Monitor
import java.io.PrintWriter
/**
* Helper functions for instrumentation
*/
object InstrumentationFunctions {
def renderTable(out: PrintWriter, name: String, timers: Seq[Monitor[_]], header: Seq[TableHeader]) = {
val monitorTable = new MonitorTable(header.toArray, timers.toArray)
out.println(name)
monitorTable.print(out)
}
def formatNanos(number: Any): String = {
// We need to do some dynamic type checking here, as monitors return an Object
number match {
case number: Number => DurationFormatting.formatNanosecondDuration(number)
case null => "-"
case _ => throw new IllegalArgumentException("Cannot format non-numeric value [" + number + "]")
}
}
}
| nfergu/bdg-utils | utils-metrics/src/main/scala/org/bdgenomics/utils/instrumentation/InstrumentationFunctions.scala | Scala | apache-2.0 | 1,612 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.expressions
import org.apache.flink.api.common.typeinfo.Types
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.api.DataTypes
import org.apache.flink.table.api.scala._
import org.apache.flink.table.expressions.TimeIntervalUnit
import org.apache.flink.table.planner.expressions.utils.ExpressionTestBase
import org.apache.flink.table.planner.utils.DateTimeTestUtil
import org.apache.flink.table.planner.utils.DateTimeTestUtil._
import org.apache.flink.table.typeutils.TimeIntervalTypeInfo
import org.apache.flink.types.Row
import org.junit.Test
import java.sql.Timestamp
import java.text.SimpleDateFormat
import java.time.{Instant, ZoneId}
import java.util.{Locale, TimeZone}
class TemporalTypesTest extends ExpressionTestBase {
@Test
def testTimePointLiterals(): Unit = {
testAllApis(
"1990-10-14".toDate,
"'1990-10-14'.toDate",
"DATE '1990-10-14'",
"1990-10-14")
testTableApi(
localDate2Literal(localDate("2040-09-11")),
"'2040-09-11'.toDate",
"2040-09-11")
testAllApis(
"1500-04-30".cast(DataTypes.DATE),
"'1500-04-30'.cast(SQL_DATE)",
"CAST('1500-04-30' AS DATE)",
"1500-04-30")
testAllApis(
"15:45:59".toTime,
"'15:45:59'.toTime",
"TIME '15:45:59'",
"15:45:59")
testTableApi(
localTime2Literal(DateTimeTestUtil.localTime("00:00:00")),
"'00:00:00'.toTime",
"00:00:00")
testAllApis(
"1:30:00".cast(DataTypes.TIME),
"'1:30:00'.cast(SQL_TIME)",
"CAST('1:30:00' AS TIME)",
"01:30:00")
testAllApis(
"1990-10-14 23:00:00.123".toTimestamp,
"'1990-10-14 23:00:00.123'.toTimestamp",
"TIMESTAMP '1990-10-14 23:00:00.123'",
"1990-10-14 23:00:00.123")
testTableApi(
localDateTime2Literal(localDateTime("2040-09-11 00:00:00.000")),
"'2040-09-11 00:00:00.000'.toTimestamp",
"2040-09-11 00:00:00.000")
testAllApis(
"1500-04-30 12:00:00".cast(DataTypes.TIMESTAMP(3)),
"'1500-04-30 12:00:00'.cast(SQL_TIMESTAMP)",
"CAST('1500-04-30 12:00:00' AS TIMESTAMP)",
"1500-04-30 12:00:00.000")
}
@Test
def testTimeIntervalLiterals(): Unit = {
testAllApis(
1.year,
"1.year",
"INTERVAL '1' YEAR",
"+1-00")
testAllApis(
1.month,
"1.month",
"INTERVAL '1' MONTH",
"+0-01")
testAllApis(
12.days,
"12.days",
"INTERVAL '12' DAY",
"+12 00:00:00.000")
testAllApis(
1.hour,
"1.hour",
"INTERVAL '1' HOUR",
"+0 01:00:00.000")
testAllApis(
3.minutes,
"3.minutes",
"INTERVAL '3' MINUTE",
"+0 00:03:00.000")
testAllApis(
3.seconds,
"3.seconds",
"INTERVAL '3' SECOND",
"+0 00:00:03.000")
testAllApis(
3.millis,
"3.millis",
"INTERVAL '0.003' SECOND",
"+0 00:00:00.003")
}
@Test
def testTimePointInput(): Unit = {
testAllApis(
'f0,
"f0",
"f0",
"1990-10-14")
testAllApis(
'f1,
"f1",
"f1",
"10:20:45")
testAllApis(
'f2,
"f2",
"f2",
"1990-10-14 10:20:45.123")
}
@Test
def testTimeIntervalInput(): Unit = {
testAllApis(
'f9,
"f9",
"f9",
"+2-00")
testAllApis(
'f10,
"f10",
"f10",
"+0 00:00:12.000")
}
@Test
def testTimePointCasting(): Unit = {
testAllApis(
'f0.cast(DataTypes.TIMESTAMP(3)),
"f0.cast(SQL_TIMESTAMP)",
"CAST(f0 AS TIMESTAMP)",
"1990-10-14 00:00:00.000")
testAllApis(
'f1.cast(DataTypes.TIMESTAMP(3)),
"f1.cast(SQL_TIMESTAMP)",
"CAST(f1 AS TIMESTAMP)",
"1970-01-01 10:20:45.000")
testAllApis(
'f2.cast(DataTypes.DATE),
"f2.cast(SQL_DATE)",
"CAST(f2 AS DATE)",
"1990-10-14")
testAllApis(
'f2.cast(DataTypes.TIME),
"f2.cast(SQL_TIME)",
"CAST(f2 AS TIME)",
"10:20:45")
testAllApis(
'f2.cast(DataTypes.TIME),
"f2.cast(SQL_TIME)",
"CAST(f2 AS TIME)",
"10:20:45")
testTableApi(
'f7.cast(DataTypes.DATE),
"f7.cast(SQL_DATE)",
"2002-11-09")
testTableApi(
'f7.cast(DataTypes.DATE).cast(DataTypes.INT),
"f7.cast(SQL_DATE).cast(INT)",
"12000")
testTableApi(
'f7.cast(DataTypes.TIME),
"f7.cast(SQL_TIME)",
"00:00:12")
testTableApi(
'f7.cast(DataTypes.TIME).cast(DataTypes.INT),
"f7.cast(SQL_TIME).cast(INT)",
"12000")
testTableApi(
'f15.cast(DataTypes.TIMESTAMP(3)),
"f15.cast(SQL_TIMESTAMP)",
"2016-06-27 07:23:33.000")
testTableApi(
'f15.toTimestamp,
"f15.toTimestamp",
"2016-06-27 07:23:33.000")
testTableApi(
'f8.cast(DataTypes.TIMESTAMP(3)).cast(DataTypes.BIGINT()),
"f8.cast(SQL_TIMESTAMP).cast(LONG)",
"1467012213000")
}
@Test
def testTimeIntervalCasting(): Unit = {
testTableApi(
'f7.cast(DataTypes.INTERVAL(DataTypes.MONTH)),
"f7.cast(INTERVAL_MONTHS)",
"+1000-00")
testTableApi(
'f8.cast(DataTypes.INTERVAL(DataTypes.MINUTE())),
"f8.cast(INTERVAL_MILLIS)",
"+16979 07:23:33.000")
}
@Test
def testTimePointComparison(): Unit = {
testAllApis(
'f0 < 'f3,
"f0 < f3",
"f0 < f3",
"false")
testAllApis(
'f0 < 'f4,
"f0 < f4",
"f0 < f4",
"true")
testAllApis(
'f1 < 'f5,
"f1 < f5",
"f1 < f5",
"false")
testAllApis(
'f0.cast(DataTypes.TIMESTAMP(3)) !== 'f2,
"f0.cast(SQL_TIMESTAMP) !== f2",
"CAST(f0 AS TIMESTAMP) <> f2",
"true")
testAllApis(
'f0.cast(DataTypes.TIMESTAMP(3)) === 'f6,
"f0.cast(SQL_TIMESTAMP) === f6",
"CAST(f0 AS TIMESTAMP) = f6",
"true")
}
@Test
def testTimeIntervalArithmetic(): Unit = {
// interval months comparison
testAllApis(
12.months < 24.months,
"12.months < 24.months",
"INTERVAL '12' MONTH < INTERVAL '24' MONTH",
"true")
testAllApis(
8.years === 8.years,
"8.years === 8.years",
"INTERVAL '8' YEAR = INTERVAL '8' YEAR",
"true")
// interval millis comparison
testAllApis(
8.millis > 10.millis,
"8.millis > 10.millis",
"INTERVAL '0.008' SECOND > INTERVAL '0.010' SECOND",
"false")
testAllApis(
8.millis === 8.millis,
"8.millis === 8.millis",
"INTERVAL '0.008' SECOND = INTERVAL '0.008' SECOND",
"true")
// interval months addition/subtraction
testAllApis(
8.years + 10.months,
"8.years + 10.months",
"INTERVAL '8' YEAR + INTERVAL '10' MONTH",
"+8-10")
testAllApis(
2.years - 12.months,
"2.years - 12.months",
"INTERVAL '2' YEAR - INTERVAL '12' MONTH",
"+1-00")
testAllApis(
-2.years,
"-2.years",
"-INTERVAL '2' YEAR",
"-2-00")
// interval millis addition/subtraction
testAllApis(
8.hours + 10.minutes + 12.seconds + 5.millis,
"8.hours + 10.minutes + 12.seconds + 5.millis",
"INTERVAL '8' HOUR + INTERVAL '10' MINUTE + INTERVAL '12.005' SECOND",
"+0 08:10:12.005")
testAllApis(
1.minute - 10.seconds,
"1.minute - 10.seconds",
"INTERVAL '1' MINUTE - INTERVAL '10' SECOND",
"+0 00:00:50.000")
testAllApis(
-10.seconds,
"-10.seconds",
"-INTERVAL '10' SECOND",
"-0 00:00:10.000")
// addition to date
// interval millis
testAllApis(
'f0 + 2.days,
"f0 + 2.days",
"f0 + INTERVAL '2' DAY",
"1990-10-16")
// interval millis
testAllApis(
30.days + 'f0,
"30.days + f0",
"INTERVAL '30' DAY + f0",
"1990-11-13")
// interval months
testAllApis(
'f0 + 2.months,
"f0 + 2.months",
"f0 + INTERVAL '2' MONTH",
"1990-12-14")
// interval months
testAllApis(
2.months + 'f0,
"2.months + f0",
"INTERVAL '2' MONTH + f0",
"1990-12-14")
// addition to time
// interval millis
testAllApis(
'f1 + 12.hours,
"f1 + 12.hours",
"f1 + INTERVAL '12' HOUR",
"22:20:45")
// interval millis
testAllApis(
12.hours + 'f1,
"12.hours + f1",
"INTERVAL '12' HOUR + f1",
"22:20:45")
// addition to timestamp
// interval millis
testAllApis(
'f2 + 10.days + 4.millis,
"f2 + 10.days + 4.millis",
"f2 + INTERVAL '10 00:00:00.004' DAY TO SECOND",
"1990-10-24 10:20:45.127")
// interval millis
testAllApis(
10.days + 'f2 + 4.millis,
"10.days + f2 + 4.millis",
"INTERVAL '10 00:00:00.004' DAY TO SECOND + f2",
"1990-10-24 10:20:45.127")
// interval months
testAllApis(
'f2 + 10.years,
"f2 + 10.years",
"f2 + INTERVAL '10' YEAR",
"2000-10-14 10:20:45.123")
// interval months
testAllApis(
10.years + 'f2,
"10.years + f2",
"INTERVAL '10' YEAR + f2",
"2000-10-14 10:20:45.123")
// subtraction from date
// interval millis
testAllApis(
'f0 - 2.days,
"f0 - 2.days",
"f0 - INTERVAL '2' DAY",
"1990-10-12")
// interval millis
testAllApis(
-30.days + 'f0,
"-30.days + f0",
"INTERVAL '-30' DAY + f0",
"1990-09-14")
// interval months
testAllApis(
'f0 - 2.months,
"f0 - 2.months",
"f0 - INTERVAL '2' MONTH",
"1990-08-14")
// interval months
testAllApis(
-2.months + 'f0,
"-2.months + f0",
"-INTERVAL '2' MONTH + f0",
"1990-08-14")
// subtraction from time
// interval millis
testAllApis(
'f1 - 12.hours,
"f1 - 12.hours",
"f1 - INTERVAL '12' HOUR",
"22:20:45")
// interval millis
testAllApis(
-12.hours + 'f1,
"-12.hours + f1",
"INTERVAL '-12' HOUR + f1",
"22:20:45")
// subtraction from timestamp
// interval millis
testAllApis(
'f2 - 10.days - 4.millis,
"f2 - 10.days - 4.millis",
"f2 - INTERVAL '10 00:00:00.004' DAY TO SECOND",
"1990-10-04 10:20:45.119")
// interval millis
testAllApis(
-10.days + 'f2 - 4.millis,
"-10.days + f2 - 4.millis",
"INTERVAL '-10 00:00:00.004' DAY TO SECOND + f2",
"1990-10-04 10:20:45.119")
// interval months
testAllApis(
'f2 - 10.years,
"f2 - 10.years",
"f2 - INTERVAL '10' YEAR",
"1980-10-14 10:20:45.123")
// interval months
testAllApis(
-10.years + 'f2,
"-10.years + f2",
"INTERVAL '-10' YEAR + f2",
"1980-10-14 10:20:45.123")
// casting
testAllApis(
-'f9.cast(DataTypes.INTERVAL(DataTypes.MONTH)),
"-f9.cast(INTERVAL_MONTHS)",
"-CAST(f9 AS INTERVAL YEAR)",
"-2-00")
testAllApis(
-'f10.cast(DataTypes.INTERVAL(DataTypes.MINUTE())),
"-f10.cast(INTERVAL_MILLIS)",
"-CAST(f10 AS INTERVAL SECOND)",
"-0 00:00:12.000")
// addition/subtraction of interval millis and interval months
testAllApis(
'f0 + 2.days + 1.month,
"f0 + 2.days + 1.month",
"f0 + INTERVAL '2' DAY + INTERVAL '1' MONTH",
"1990-11-16")
testAllApis(
'f0 - 2.days - 1.month,
"f0 - 2.days - 1.month",
"f0 - INTERVAL '2' DAY - INTERVAL '1' MONTH",
"1990-09-12")
testAllApis(
'f2 + 2.days + 1.month,
"f2 + 2.days + 1.month",
"f2 + INTERVAL '2' DAY + INTERVAL '1' MONTH",
"1990-11-16 10:20:45.123")
testAllApis(
'f2 - 2.days - 1.month,
"f2 - 2.days - 1.month",
"f2 - INTERVAL '2' DAY - INTERVAL '1' MONTH",
"1990-09-12 10:20:45.123")
}
@Test
def testSelectNullValues(): Unit ={
testAllApis(
'f11,
"f11",
"f11",
"null"
)
testAllApis(
'f12,
"f12",
"f12",
"null"
)
testAllApis(
'f13,
"f13",
"f13",
"null"
)
}
@Test
def testTemporalNullValues() = {
testAllApis(
'f13.extract(TimeIntervalUnit.HOUR),
"f13.extract(HOUR)",
"extract(HOUR FROM f13)",
"null"
)
testAllApis(
'f13.floor(TimeIntervalUnit.HOUR),
"f13.floor(HOUR)",
"FLOOR(f13 TO HOUR)",
"null"
)
testSqlApi(
"TO_TIMESTAMP(SUBSTRING('', 2, -1))",
"null"
)
testSqlApi(
"TO_TIMESTAMP(f14, 'yyyy-mm-dd')",
"null"
)
}
@Test
def testDateFormat(): Unit = {
testSqlApi(
"DATE_FORMAT('2018-03-14 01:02:03', 'yyyy/MM/dd HH:mm:ss')",
"2018/03/14 01:02:03")
testSqlApi(
s"DATE_FORMAT(${timestampTz("2018-03-14 01:02:03")}, 'yyyy-MM-dd HH:mm:ss')",
"2018-03-14 01:02:03")
}
@Test
def testDateAndTime(): Unit = {
testSqlApi(
"DATE '2018-03-14'",
"2018-03-14")
testSqlApi(
"TIME '19:01:02.123'",
"19:01:02")
// DATE & TIME
testSqlApi("CAST('12:44:31' AS TIME)", "12:44:31")
testSqlApi("CAST('2018-03-18' AS DATE)", "2018-03-18")
testSqlApi("TIME '12:44:31'", "12:44:31")
testSqlApi("TO_DATE('2018-03-18')", "2018-03-18")
// EXTRACT
//testSqlApi("TO_DATE(1521331200)", "2018-03-18")
testSqlApi("EXTRACT(HOUR FROM TIME '06:07:08')", "6")
testSqlApi("EXTRACT(MINUTE FROM TIME '06:07:08')", "7")
//testSqlApi("EXTRACT(HOUR FROM TO_TIME('06:07:08'))", "6") NO TO_TIME funciton
testSqlApi("EXTRACT(HOUR FROM CAST('06:07:08' AS TIME))", "6")
testSqlApi("EXTRACT(DAY FROM CAST('2018-03-18' AS DATE))", "18")
testSqlApi("EXTRACT(DAY FROM DATE '2018-03-18')", "18")
testSqlApi("EXTRACT(DAY FROM TO_DATE('2018-03-18'))", "18")
testSqlApi("EXTRACT(MONTH FROM TO_DATE('2018-01-01'))", "1")
testSqlApi("EXTRACT(YEAR FROM TO_DATE('2018-01-01'))", "2018")
testSqlApi("EXTRACT(QUARTER FROM TO_DATE('2018-01-01'))", "1")
// Floor & Ceil
// TODO: fix this legacy bug
//testSqlApi("CEIL(TO_DATE('2018-03-18') TO DAY)", "2018-04-01")
//testSqlApi("CEIL(TIMESTAMP '2018-03-20 06:10:31' TO HOUR)", "2018-03-20 07:00:00.000")
}
private def timestampTz(str: String) = {
s"CAST(TIMESTAMP '$str' AS TIMESTAMP_WITH_LOCAL_TIME_ZONE)"
}
@Test
def testTemporalShanghai(): Unit = {
config.setLocalTimeZone(ZoneId.of("Asia/Shanghai"))
testSqlApi(timestampTz("2018-03-14 19:01:02.123"), "2018-03-14 19:01:02.123")
testSqlApi(timestampTz("2018-03-14 19:00:00.010"), "2018-03-14 19:00:00.010")
// DATE_FORMAT
testSqlApi(
"DATE_FORMAT('2018-03-14 01:02:03', 'yyyy/MM/dd HH:mm:ss')",
"2018/03/14 01:02:03")
testSqlApi(
s"DATE_FORMAT(${timestampTz("2018-03-14 01:02:03")}, 'yyyy-MM-dd HH:mm:ss')",
"2018-03-14 01:02:03")
// EXTRACT
val extractT1 = timestampTz("2018-03-20 07:59:59")
testSqlApi(s"EXTRACT(DAY FROM $extractT1)", "20")
testSqlApi(s"EXTRACT(HOUR FROM $extractT1)", "7")
testSqlApi(s"EXTRACT(MONTH FROM $extractT1)", "3")
testSqlApi(s"EXTRACT(YEAR FROM $extractT1)", "2018")
testSqlApi("EXTRACT(DAY FROM INTERVAL '19 12:10:10.123' DAY TO SECOND(3))", "19")
testSqlApi("EXTRACT(HOUR FROM TIME '01:02:03')", "1")
testSqlApi("EXTRACT(DAY FROM INTERVAL '19 12:10:10.123' DAY TO SECOND(3))", "19")
// FLOOR & CEIL
testSqlApi("FLOOR(TIME '12:44:31' TO MINUTE)", "12:44:00")
testSqlApi("FLOOR(TIME '12:44:31' TO HOUR)", "12:00:00")
testSqlApi("CEIL(TIME '12:44:31' TO MINUTE)", "12:45:00")
testSqlApi("CEIL(TIME '12:44:31' TO HOUR)", "13:00:00")
testSqlApi("FLOOR(TIMESTAMP '2018-03-20 06:44:31' TO HOUR)", "2018-03-20 06:00:00.000")
testSqlApi("FLOOR(TIMESTAMP '2018-03-20 06:44:31' TO DAY)", "2018-03-20 00:00:00.000")
testSqlApi("FLOOR(TIMESTAMP '2018-03-20 00:00:00' TO DAY)", "2018-03-20 00:00:00.000")
testSqlApi("FLOOR(TIMESTAMP '2018-04-01 06:44:31' TO MONTH)", "2018-04-01 00:00:00.000")
testSqlApi("FLOOR(TIMESTAMP '2018-01-01 06:44:31' TO MONTH)", "2018-01-01 00:00:00.000")
testSqlApi("CEIL(TIMESTAMP '2018-03-20 06:44:31' TO HOUR)", "2018-03-20 07:00:00.000")
testSqlApi("CEIL(TIMESTAMP '2018-03-20 06:00:00' TO HOUR)", "2018-03-20 06:00:00.000")
testSqlApi("CEIL(TIMESTAMP '2018-03-20 06:44:31' TO DAY)", "2018-03-21 00:00:00.000")
testSqlApi("CEIL(TIMESTAMP '2018-03-01 00:00:00' TO DAY)", "2018-03-01 00:00:00.000")
testSqlApi("CEIL(TIMESTAMP '2018-03-31 00:00:01' TO DAY)", "2018-04-01 00:00:00.000")
testSqlApi("CEIL(TIMESTAMP '2018-03-01 21:00:01' TO MONTH)", "2018-03-01 00:00:00.000")
testSqlApi("CEIL(TIMESTAMP '2018-03-01 00:00:00' TO MONTH)", "2018-03-01 00:00:00.000")
testSqlApi("CEIL(TIMESTAMP '2018-12-02 00:00:00' TO MONTH)", "2019-01-01 00:00:00.000")
testSqlApi("CEIL(TIMESTAMP '2018-01-01 21:00:01' TO YEAR)", "2018-01-01 00:00:00.000")
testSqlApi("CEIL(TIMESTAMP '2018-01-02 21:00:01' TO YEAR)", "2019-01-01 00:00:00.000")
testSqlApi(s"FLOOR(${timestampTz("2018-03-20 06:44:31")} TO HOUR)", "2018-03-20 06:00:00.000")
testSqlApi(s"FLOOR(${timestampTz("2018-03-20 06:44:31")} TO DAY)", "2018-03-20 00:00:00.000")
testSqlApi(s"FLOOR(${timestampTz("2018-03-20 00:00:00")} TO DAY)", "2018-03-20 00:00:00.000")
testSqlApi(s"FLOOR(${timestampTz("2018-04-01 06:44:31")} TO MONTH)", "2018-04-01 00:00:00.000")
testSqlApi(s"FLOOR(${timestampTz("2018-01-01 06:44:31")} TO MONTH)", "2018-01-01 00:00:00.000")
testSqlApi(s"CEIL(${timestampTz("2018-03-20 06:44:31")} TO HOUR)", "2018-03-20 07:00:00.000")
testSqlApi(s"CEIL(${timestampTz("2018-03-20 06:00:00")} TO HOUR)", "2018-03-20 06:00:00.000")
testSqlApi(s"CEIL(${timestampTz("2018-03-20 06:44:31")} TO DAY)", "2018-03-21 00:00:00.000")
testSqlApi(s"CEIL(${timestampTz("2018-03-1 00:00:00")} TO DAY)", "2018-03-01 00:00:00.000")
testSqlApi(s"CEIL(${timestampTz("2018-03-31 00:00:01")} TO DAY)", "2018-04-01 00:00:00.000")
testSqlApi(s"CEIL(${timestampTz("2018-03-01 21:00:01")} TO MONTH)", "2018-03-01 00:00:00.000")
testSqlApi(s"CEIL(${timestampTz("2018-03-01 00:00:00")} TO MONTH)", "2018-03-01 00:00:00.000")
testSqlApi(s"CEIL(${timestampTz("2018-12-02 00:00:00")} TO MONTH)", "2019-01-01 00:00:00.000")
testSqlApi(s"CEIL(${timestampTz("2018-01-01 21:00:01")} TO YEAR)", "2018-01-01 00:00:00.000")
testSqlApi(s"CEIL(${timestampTz("2018-01-02 21:00:01")} TO YEAR)", "2019-01-01 00:00:00.000")
// others
testSqlApi("QUARTER(DATE '2016-04-12')", "2")
testSqlApi(
"(TIME '2:55:00', INTERVAL '1' HOUR) OVERLAPS (TIME '3:30:00', INTERVAL '2' HOUR)",
"true")
testSqlApi(
"CEIL(f17 TO HOUR)",
"1990-10-14 08:00:00.000"
)
testSqlApi(
"FLOOR(f17 TO DAY)",
"1990-10-14 00:00:00.000"
)
// TIMESTAMP_ADD
// 1520960523000 "2018-03-14T01:02:03+0800"
testSqlApi("TIMESTAMPADD(HOUR, +8, TIMESTAMP '2017-11-29 10:58:58.998')",
"2017-11-29 18:58:58.998")
val sdf = new SimpleDateFormat("yyyy-MM-dd")
sdf.setTimeZone(TimeZone.getTimeZone("UTC"))
val currMillis = System.currentTimeMillis()
val ts = new Timestamp(currMillis)
val currDateStr = sdf.format(ts)
testSqlApi("CURRENT_DATE", currDateStr)
//testSqlApi("CURRENT_TIME", "")
}
@Test
def testDaylightSavingTimeZone(): Unit = {
// test from MySQL
// https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_unix-timestamp
// due to conventions for local time zone changes such as Daylight Saving Time (DST),
// it is possible for UNIX_TIMESTAMP() to map two values that are distinct in a non-UTC
// time zone to the same Unix timestamp value
config.setLocalTimeZone(ZoneId.of("MET")) // Europe/Amsterdam
testSqlApi("UNIX_TIMESTAMP('2005-03-27 03:00:00')", "1111885200")
testSqlApi("UNIX_TIMESTAMP('2005-03-27 02:00:00')", "1111885200")
testSqlApi("FROM_UNIXTIME(1111885200)", "2005-03-27 03:00:00")
}
@Test
def testHourUnitRangoonTimeZone(): Unit = {
// Asia/Rangoon UTC Offset 6.5
config.setLocalTimeZone(ZoneId.of("Asia/Rangoon"))
val t1 = timestampTz("2018-03-20 06:10:31")
val t2 = timestampTz("2018-03-20 06:00:00")
// 1521502831000, 2018-03-19 23:40:31 UTC, 2018-03-20 06:10:31 +06:30
testSqlApi(s"EXTRACT(HOUR FROM $t1)", "6")
testSqlApi(s"FLOOR($t1 TO HOUR)", "2018-03-20 06:00:00.000")
testSqlApi(s"FLOOR($t2 TO HOUR)", "2018-03-20 06:00:00.000")
testSqlApi(s"CEIL($t2 TO HOUR)", "2018-03-20 06:00:00.000")
testSqlApi(s"CEIL($t1 TO HOUR)", "2018-03-20 07:00:00.000")
}
@Test
def testNullableCases(): Unit = {
testSqlApi("CONVERT_TZ(cast(NULL as varchar), 'UTC', 'Asia/Shanghai')",
nullable)
testSqlApi("DATE_FORMAT(cast(NULL as varchar), 'yyyy/MM/dd HH:mm:ss')", nullable)
testSqlApi("FROM_UNIXTIME(cast(NULL as bigInt))", nullable)
testSqlApi("TO_DATE(cast(NULL as varchar))", nullable)
}
@Test
def testInvalidInputCase(): Unit = {
val invalidStr = "invalid value"
testSqlApi(s"DATE_FORMAT('$invalidStr', 'yyyy/MM/dd HH:mm:ss')", nullable)
testSqlApi(s"TO_TIMESTAMP('$invalidStr', 'yyyy-mm-dd')", nullable)
testSqlApi(s"TO_DATE('$invalidStr')", nullable)
testSqlApi(
s"CONVERT_TZ('$invalidStr', 'UTC', 'Asia/Shanghai')",
nullable)
}
@Test
def testTypeInferenceWithInvalidInput(): Unit = {
val invalidStr = "invalid value"
val cases = Seq(
s"DATE_FORMAT('$invalidStr', 'yyyy/MM/dd HH:mm:ss')",
s"TO_TIMESTAMP('$invalidStr', 'yyyy-mm-dd')",
s"TO_DATE('$invalidStr')",
s"CONVERT_TZ('$invalidStr', 'UTC', 'Asia/Shanghai')")
cases.foreach {
caseExpr =>
testSqlNullable(caseExpr)
}
}
@Test
def testConvertTZ(): Unit = {
testSqlApi("CONVERT_TZ('2018-03-14 11:00:00', 'UTC', 'Asia/Shanghai')",
"2018-03-14 19:00:00")
}
@Test
def testFromUnixTime(): Unit = {
val sdf1 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US)
val fmt2 = "yyyy-MM-dd HH:mm:ss.SSS"
val sdf2 = new SimpleDateFormat(fmt2, Locale.US)
val fmt3 = "yy-MM-dd HH-mm-ss"
val sdf3 = new SimpleDateFormat(fmt3, Locale.US)
testSqlApi(
"from_unixtime(f21)",
sdf1.format(new Timestamp(44000)))
testSqlApi(
s"from_unixtime(f21, '$fmt2')",
sdf2.format(new Timestamp(44000)))
testSqlApi(
s"from_unixtime(f21, '$fmt3')",
sdf3.format(new Timestamp(44000)))
testSqlApi(
"from_unixtime(f22)",
sdf1.format(new Timestamp(3000)))
testSqlApi(
s"from_unixtime(f22, '$fmt2')",
sdf2.format(new Timestamp(3000)))
testSqlApi(
s"from_unixtime(f22, '$fmt3')",
sdf3.format(new Timestamp(3000)))
// test with null input
testSqlApi(
"from_unixtime(cast(null as int))",
"null")
}
@Test
def testFromUnixTimeInTokyo(): Unit = {
config.setLocalTimeZone(ZoneId.of("Asia/Tokyo"))
val fmt = "yy-MM-dd HH-mm-ss"
testSqlApi(
"from_unixtime(f21)",
"1970-01-01 09:00:44")
testSqlApi(
s"from_unixtime(f21, '$fmt')",
"70-01-01 09-00-44")
testSqlApi(
"from_unixtime(f22)",
"1970-01-01 09:00:03")
testSqlApi(
s"from_unixtime(f22, '$fmt')",
"70-01-01 09-00-03")
}
@Test
def testUnixTimestamp(): Unit = {
val ts1 = Timestamp.valueOf("2015-07-24 10:00:00.3")
val ts2 = Timestamp.valueOf("2015-07-25 02:02:02.2")
val s1 = "2015/07/24 10:00:00.5"
val s2 = "2015/07/25 02:02:02.6"
val ss1 = "2015-07-24 10:00:00"
val ss2 = "2015-07-25 02:02:02"
val fmt = "yyyy/MM/dd HH:mm:ss.S"
testSqlApi(s"UNIX_TIMESTAMP('$ss1')", (ts1.getTime / 1000L).toString)
testSqlApi(s"UNIX_TIMESTAMP('$ss2')", (ts2.getTime / 1000L).toString)
testSqlApi(s"UNIX_TIMESTAMP('$s1', '$fmt')", (ts1.getTime / 1000L).toString)
testSqlApi(s"UNIX_TIMESTAMP('$s2', '$fmt')", (ts2.getTime / 1000L).toString)
}
@Test
def testUnixTimestampInTokyo(): Unit = {
config.setLocalTimeZone(ZoneId.of("Asia/Tokyo"))
testSqlApi(
"UNIX_TIMESTAMP('2015-07-24 10:00:00')",
"1437699600")
testSqlApi(
"UNIX_TIMESTAMP('2015/07/24 10:00:00.5', 'yyyy/MM/dd HH:mm:ss.S')",
"1437699600")
}
// ----------------------------------------------------------------------------------------------
override def testData: Row = {
val testData = new Row(23)
testData.setField(0, localDate("1990-10-14"))
testData.setField(1, DateTimeTestUtil.localTime("10:20:45"))
testData.setField(2, localDateTime("1990-10-14 10:20:45.123"))
testData.setField(3, localDate("1990-10-13"))
testData.setField(4, localDate("1990-10-15"))
testData.setField(5, DateTimeTestUtil.localTime("00:00:00"))
testData.setField(6, localDateTime("1990-10-14 00:00:00.0"))
testData.setField(7, 12000)
testData.setField(8, 1467012213000L)
testData.setField(9, 24)
testData.setField(10, 12000L)
// null selection test.
testData.setField(11, null)
testData.setField(12, null)
testData.setField(13, null)
testData.setField(14, null)
testData.setField(15, 1467012213L)
testData.setField(16,
localDateTime("1990-10-14 10:20:45.123").atZone(ZoneId.of("UTC")).toInstant)
testData.setField(17,
localDateTime("1990-10-14 00:00:00.0").atZone(ZoneId.of("UTC")).toInstant)
testData.setField(18, Instant.ofEpochMilli(1521025200000L))
testData.setField(19, Instant.ofEpochMilli(1520960523000L))
testData.setField(20, Instant.ofEpochMilli(1520827201000L))
testData.setField(21, 44L)
testData.setField(22, 3)
testData
}
override def typeInfo: RowTypeInfo = {
new RowTypeInfo(
/* 0 */ Types.LOCAL_DATE,
/* 1 */ Types.LOCAL_TIME,
/* 2 */ Types.LOCAL_DATE_TIME,
/* 3 */ Types.LOCAL_DATE,
/* 4 */ Types.LOCAL_DATE,
/* 5 */ Types.LOCAL_TIME,
/* 6 */ Types.LOCAL_DATE_TIME,
/* 7 */ Types.INT,
/* 8 */ Types.LONG,
/* 9 */ TimeIntervalTypeInfo.INTERVAL_MONTHS,
/* 10 */ TimeIntervalTypeInfo.INTERVAL_MILLIS,
/* 11 */ Types.LOCAL_DATE,
/* 12 */ Types.LOCAL_TIME,
/* 13 */ Types.LOCAL_DATE_TIME,
/* 14 */ Types.STRING,
/* 15 */ Types.LONG,
/* 16 */ Types.INSTANT,
/* 17 */ Types.INSTANT,
/* 18 */ Types.INSTANT,
/* 19 */ Types.INSTANT,
/* 20 */ Types.INSTANT,
/* 21 */ Types.LONG,
/* 22 */ Types.INT)
}
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/expressions/TemporalTypesTest.scala | Scala | apache-2.0 | 27,358 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.avocado.calls.pileup
import scala.math.pow
object EMForAlleles {
/**
   * Main minor allele frequency (MAF) EM function.
   * Input:  Phi - an initial MAF vector, one entry per SNP
   *         GL  - per-SNP arrays of genotype likelihood triples P( D | g )
   *               (note these are NOT multiplied by P(g | phi)!)
   * Output: the maximum-likelihood estimate of the MAF at each SNP
   *
   * Note: GL is currently an array of (numSnps) arrays of length (numInds).
*/
def emForMAF(Phi: Array[Double], GL: Array[Array[(Double, Double, Double)]]): Array[Double] = {
var eps = 1.0
val tol = 0.0001
val L = Phi.length
var phi_updates = Phi
while (eps > tol) {
var Phi_next = Array.fill(L) { 0.0 }
Phi_next.indices.foreach(i => {
GL(i).foreach(l => {
Phi_next(i) += (1.0 / (2.0 * GL(i).length)) * ((1.0 * l._2 * 2.0 * phi_updates(i) * (1 - phi_updates(i)) +
2.0 * l._3 * pow(phi_updates(i), 2.0)) / (l._1 * pow(1.0 - phi_updates(i), 2.0) +
l._2 * 2.0 * phi_updates(i) * (1.0 - phi_updates(i)) +
l._3 * pow(phi_updates(i), 2.0)))
})
})
      eps = 0.0 // reset the outer eps so the convergence check sees this iteration's change
phi_updates.indices.foreach(i => eps += pow(phi_updates(i) - Phi_next(i), 2.0))
phi_updates = Phi_next
}
return phi_updates
}
/**
* Helper function to compute Y iteratively
* For each site, executes the recursion in 4.2.3. Y(i) is Ynk vector for site i
*/
def compY(GL: Array[Array[(Double, Double, Double)]]): Array[Array[Double]] = {
val L = GL.length
val GLt = GL.transpose
val n = GLt.length
val M = 2 * n
var Y = Array.ofDim[Double](L, n + 1, M + 1)
// NOTE: this ordering may be suboptimal?
for (i <- 0 until L) {
for (k <- 0 to M) {
        for (j <- 0 to n) { // j is the number of individuals included so far (0 = none); individual j's likelihoods are GL(i)(j - 1)
          if (j == 0) {
            Y(i)(j)(k) = 1.0
          } else if (k == 0) {
            Y(i)(j)(k) = (1.0 / (2.0 * j * (2.0 * j - 1.0))) * ((2.0 * j - k) * (2.0 * j - k - 1.0) * Y(i)(j - 1)(k) * GL(i)(j - 1)._1)
          } else if (k == 1) {
            Y(i)(j)(k) = (1.0 / (2.0 * j * (2.0 * j - 1.0))) * ((2.0 * j - k) * (2.0 * j - k - 1.0) * Y(i)(j - 1)(k) * GL(i)(j - 1)._1 +
              2.0 * k * (2.0 * j - k) * Y(i)(j - 1)(k - 1) * GL(i)(j - 1)._2)
          } else {
            Y(i)(j)(k) = (1.0 / (2.0 * j * (2.0 * j - 1.0))) * ((2.0 * j - k) * (2.0 * j - k - 1.0) * Y(i)(j - 1)(k) * GL(i)(j - 1)._1 +
              2.0 * k * (2.0 * j - k) * Y(i)(j - 1)(k - 1) * GL(i)(j - 1)._2 + k * (k - 1.0) *
              Y(i)(j - 1)(k - 2) * GL(i)(j - 1)._3)
          }
}
}
}
}
var Yr = Array.ofDim[Double](L, M)
for (l <- 0 until L) Yr(l) = Y(l)(n)
return Yr
}
/**
   * Main allele frequency spectrum (AFS) EM function.
   * Input:  Phik - an initial AFS vector
   *         GL   - per-SNP arrays of genotype likelihood triples P( D | g )
   *                (note these are NOT multiplied by P(g | phi)!)
   * Output: the maximum-likelihood estimate of the AFS
   * Note: GL is currently an array of (numSnps) arrays of length (numInds); it is transposed internally.
*/
def emForAFS(Phik: Array[Double], GL: Array[Array[(Double, Double, Double)]]): Array[Double] = {
val GLt = GL.transpose
val tol = 0.0001
val L = GL.length
val M = Phik.length
var eps = 1.0
var Y = compY(GL)
var phik_updates = Phik
while (eps > tol) {
var sums = Array.fill(L) { 0.0 }
sums.indices.foreach(a => phik_updates.indices.foreach(p => sums(a) += phik_updates(p) * Y(a)(p)))
val Phik_next = Array.fill(M) { 0.0 }
Phik_next.indices.foreach(i => Y.foreach(y => Phik_next(i) += (1.0 / L) * phik_updates(i) * y(i) / sums(i)))
eps = 0.0
phik_updates.indices.foreach(i => eps += pow(phik_updates(i) - Phik_next(i), 2.0))
phik_updates = Phik_next
}
phik_updates
}
}
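/*
 * A minimal usage sketch of the EM routines above, run on tiny synthetic genotype likelihoods.
 * The likelihood triples and the flat initial MAF of 0.5 are made-up illustrative values, not
 * data from the project.
 */
object EMForAllelesExample extends App {
  // Two SNPs, three individuals each; triples are P(D | g) for g = 0, 1, 2 copies of the alt allele.
  val gl: Array[Array[(Double, Double, Double)]] = Array(
    Array((0.9, 0.09, 0.01), (0.2, 0.7, 0.1), (0.05, 0.15, 0.8)),
    Array((0.8, 0.15, 0.05), (0.6, 0.3, 0.1), (0.7, 0.2, 0.1)))
  // Start the EM from a flat initial minor allele frequency of 0.5 at every SNP.
  val phi0 = Array.fill(gl.length)(0.5)
  val phiHat = EMForAlleles.emForMAF(phi0, gl)
  println(phiHat.mkString("MAF estimates: ", ", ", ""))
}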
| tdanford/avocado | avocado-core/src/main/scala/org/bdgenomics/avocado/algorithms/em/EMforAlleles.scala | Scala | apache-2.0 | 4,618 |
package lore.lsp
import org.eclipse.lsp4j.launch.LSPLauncher
object Main {
def main(args: Array[String]): Unit = {
val server = new LoreLanguageServer
val launcher = LSPLauncher.createServerLauncher(server, System.in, System.out)
val client = launcher.getRemoteProxy
server.connect(client)
launcher.startListening()
}
}
| marcopennekamp/lore | lsp/server/src/lore/lsp/Main.scala | Scala | mit | 349 |
/*******************************************************************************
Copyright (c) 2013-2014, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.Tizen
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.typing._
import kr.ac.kaist.jsaf.analysis.cfg.{CFGExpr, CFG}
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import kr.ac.kaist.jsaf.analysis.typing.models.AbsBuiltinFunc
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T, _}
object TIZENBluetoothSocket extends Tizen {
val name = "BluetoothSocket"
/* predefined locations */
val loc_proto = newSystemRecentLoc(name + "Proto")
override def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
(loc_proto, prop_proto)
)
/* prototype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("CallbackObject")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(ObjProtoLoc), F, F, F)))),
("@extensible", AbsConstValue(PropValue(T))),
("writeData", AbsBuiltinFunc("tizen.BluetoothSocket.writeData", 1)),
("readData", AbsBuiltinFunc("tizen.BluetoothSocket.readData", 0)),
("close", AbsBuiltinFunc("tizen.BluetoothSocket.close", 0))
)
override def getSemanticMap(): Map[String, SemanticFun] = {
Map(
("tizen.BluetoothSocket.writeData" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val data = getArgValue(h, ctx, args, "0")
val es_ = data._2.foldLeft(TizenHelper.TizenExceptionBot)((_es, ll) => {
val n_length = Operator.ToUInt32(Helper.Proto(h, ll, AbsString.alpha("length")))
val ess = n_length.getAbsCase match {
case AbsBot =>
TizenHelper.TizenExceptionBot
case _ => AbsNumber.getUIntSingle(n_length) match {
case Some(n) => {
val es__ = (0 until n.toInt).foldLeft(TizenHelper.TizenExceptionBot)((_e, i) => {
val vi = Helper.Proto(h, ll, AbsString.alpha(i.toString))
val esi =
if (vi._1._4 </ NumTop) Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
_e ++ esi
})
es__
}
case _ => {
val vi = Helper.Proto(h, ll, AbsString.alpha(Str_default_number))
val esi =
if (vi._1._4 </ NumTop) Set[WebAPIException](TypeMismatchError)
else TizenHelper.TizenExceptionBot
esi
}
}
}
_es ++ ess
})
val est = Set[WebAPIException](UnknownError, SecurityError, NotSupportedError)
val (h_e, ctx_e) = TizenHelper.TizenRaiseException(h, ctx, es_ ++ est)
((Helper.ReturnStore(h, Value(UInt)), ctx), (he + h_e, ctxe + ctx_e))
}
)),
("tizen.BluetoothSocket.readData" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val est = Set[WebAPIException](UnknownError, SecurityError, NotSupportedError)
val (h_e, ctx_e) = TizenHelper.TizenRaiseException(h, ctx, est)
((Helper.ReturnStore(h, Value(TIZENbluetooth.loc_shortarr)), ctx), (he + h_e, ctxe + ctx_e))
}
)),
("tizen.BluetoothSocket.close" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
val lset_this = h(SinglePureLocalLoc)("@this")._2._2
val h_1 = lset_this.foldLeft(h)((_h, l) => {
Helper.PropStore(_h, l, AbsString.alpha("state"), Value(AbsString.alpha("CLOSED")))
})
val h_2 = lset_this.foldLeft(h_1)((_h, l) => {
Helper.PropStore(_h, l, AbsString.alpha("peer"), Value(NullTop))
})
val h_3 = lset_this.foldLeft(h_2)((_h, l) => {
val v1 = Helper.Proto(_h, l, AbsString.alpha("onclose"))
v1._2.foldLeft(_h)((__h, l) => {
if (Helper.IsCallable(__h, l) <= T)
TizenHelper.addCallbackHandler(__h, AbsString.alpha("successCB"), Value(v1._2), Value(UndefTop))
else __h
})
})
val est = Set[WebAPIException](UnknownError, SecurityError)
val (h_e, ctx_e) = TizenHelper.TizenRaiseException(h, ctx, est)
((h_3, ctx), (he + h_e, ctxe + ctx_e))
}
))
)
}
override def getPreSemanticMap(): Map[String, SemanticFun] = {
Map()
}
override def getDefMap(): Map[String, AccessFun] = {
Map()
}
override def getUseMap(): Map[String, AccessFun] = {
Map()
}
} | darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/Tizen/TIZENBluetoothSocket.scala | Scala | bsd-3-clause | 5,229 |
package im.actor.server.db
import java.sql.Connection
import javax.sql.DataSource
import org.flywaydb.core.Flyway
import org.flywaydb.core.api.MigrationInfo
import org.flywaydb.core.api.callback.FlywayCallback
trait FlywayInit {
def initFlyway(ds: DataSource) = {
val flyway = new Flyway()
flyway.setDataSource(ds)
flyway.setLocations("sql.migration")
flyway.setCallbacks(new BeforeCleanCallback())
flyway.setBaselineOnMigrate(true)
flyway
}
}
class BeforeCleanCallback extends FlywayCallback {
def afterBaseline(connection: Connection): Unit = {}
def afterClean(connection: Connection): Unit = {}
def afterEachMigrate(connection: Connection): Unit = {}
def afterInfo(connection: Connection): Unit = {}
def afterEachMigrate(connection: Connection, migrationInfo: MigrationInfo): Unit = {}
def afterMigrate(connection: Connection): Unit = {}
def afterRepair(connection: Connection): Unit = {}
def afterValidate(connection: Connection): Unit = {}
def beforeBaseline(connection: Connection): Unit = {}
def beforeClean(connection: Connection): Unit = executeStmt(connection, """DROP EXTENSION IF EXISTS "ltree" CASCADE;""")
def beforeEachMigrate(connection: Connection, migrationInfo: MigrationInfo): Unit = {}
def beforeInfo(connection: Connection): Unit = {}
def beforeInit(connection: Connection): Unit = {}
def beforeMigrate(connection: Connection): Unit = {}
def beforeRepair(connection: Connection): Unit = {}
def beforeValidate(connection: Connection): Unit = {}
def afterInit(connection: Connection): Unit = executeStmt(connection, """DROP EXTENSION IF EXISTS "ltree" CASCADE;""")
def executeStmt(connection: Connection, statement: String): Unit = {
if (connection.getMetaData().getDriverName().startsWith("PostgreSQL")) {
val stmt = connection.prepareStatement(statement)
try {
stmt.execute()
} finally {
stmt.close()
}
}
}
} | EaglesoftZJ/actor-platform | actor-server/actor-persist/src/main/scala/im/actor/server/db/FlywayInit.scala | Scala | agpl-3.0 | 1,950 |
import scuff.FakeType
package object delta {
type Revision = Int // >= 0
type Tick = Long
type Channel = Channel.Type
val Channel: FakeType[String] { type Type <: AnyRef } = new FakeType[String] {
type Type = String
def apply(str: String) = str
}
}
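// A small illustrative sketch of the Channel fake type above: at runtime a Channel is just the
// underlying String, but because Type is abstract outside the package object, plain Strings cannot
// be passed where a Channel is expected. The channel name below is made up.
object ChannelExample extends App {
  val orders: delta.Channel = delta.Channel("orders") // hypothetical channel name
  println(orders)
}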
| nilskp/delta | src/main/scala/delta/package.scala | Scala | mit | 271 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import monix.catnap.CancelableF
import monix.catnap.cancelables.BooleanCancelableF
import monix.execution.cancelables.BooleanCancelable
import monix.eval.internal.TaskConnectionRef
import monix.execution.ExecutionModel.SynchronousExecution
object TaskConnectionRefSuite extends BaseTestSuite {
test("assign and cancel a Cancelable") { implicit s =>
var effect = 0
val cr = TaskConnectionRef()
val b = BooleanCancelable { () => effect += 1 }
cr := b
assert(!b.isCanceled, "!b.isCanceled")
cr.cancel.runAsyncAndForget; s.tick()
assert(b.isCanceled, "b.isCanceled")
assert(effect == 1)
cr.cancel.runAsyncAndForget; s.tick()
assert(effect == 1)
}
test("assign and cancel a CancelableF") { implicit s =>
var effect = 0
val cr = TaskConnectionRef()
val b = CancelableF.wrap(Task { effect += 1 })
cr := b
assertEquals(effect, 0)
cr.cancel.runAsyncAndForget; s.tick()
assert(effect == 1)
}
test("assign and cancel a CancelToken[Task]") { implicit s =>
var effect = 0
val cr = TaskConnectionRef()
val b = Task { effect += 1 }
cr := b
assertEquals(effect, 0)
cr.cancel.runAsyncAndForget; s.tick()
assertEquals(effect, 1)
cr.cancel.runAsyncAndForget; s.tick()
assertEquals(effect, 1)
}
test("cancel a Cancelable on single assignment") { implicit s =>
val cr = TaskConnectionRef()
cr.cancel.runAsyncAndForget; s.tick()
var effect = 0
val b = BooleanCancelable { () => effect += 1 }
cr := b
assert(b.isCanceled)
assertEquals(effect, 1)
cr.cancel.runAsyncAndForget; s.tick()
assertEquals(effect, 1)
val b2 = BooleanCancelable { () => effect += 1 }
intercept[IllegalStateException] { cr := b2 }
assertEquals(effect, 2)
}
test("cancel a CancelableF on single assignment") { scheduler =>
implicit val s = scheduler.withExecutionModel(SynchronousExecution)
val cr = TaskConnectionRef()
cr.cancel.runAsyncAndForget; s.tick()
var effect = 0
val b = BooleanCancelableF(Task { effect += 1 }).runToFuture.value.get.get
cr := b
assert(b.isCanceled.runToFuture.value.get.get)
assertEquals(effect, 1)
cr.cancel.runAsyncAndForget; s.tick()
assertEquals(effect, 1)
val b2 = BooleanCancelableF(Task { effect += 1 }).runToFuture.value.get.get
intercept[IllegalStateException] { cr := b2 }
assertEquals(effect, 2)
}
test("cancel a Task on single assignment") { implicit s =>
val cr = TaskConnectionRef()
cr.cancel.runAsyncAndForget; s.tick()
var effect = 0
val b = Task { effect += 1 }
cr := b; s.tick()
assertEquals(effect, 1)
cr.cancel.runAsyncAndForget; s.tick()
assertEquals(effect, 1)
intercept[IllegalStateException] {
cr := b
}
assertEquals(effect, 2)
}
}
| Wogan/monix | monix-eval/shared/src/test/scala/monix/eval/TaskConnectionRefSuite.scala | Scala | apache-2.0 | 3,532 |
package rx.lang.scala
import org.junit.Test
import org.scalatestplus.junit.JUnitSuite
import rx.lang.scala.observers.TestSubscriber
import scala.concurrent.Future
import scala.util.{Failure, Success}
class ScalaTypesConversionsTests extends JUnitSuite {
@Test
def testIterableConversion() = {
val it = Seq("1", "2", "3")
val observer = TestSubscriber[String]()
it.toObservable.subscribe(observer)
observer.assertValues("1", "2", "3")
observer.assertNoErrors()
observer.assertCompleted()
}
@Test
def testIterableEmptyConversion() = {
val it = List[String]()
val observer = TestSubscriber[String]()
it.toObservable.subscribe(observer)
observer.assertNoValues()
observer.assertNoErrors()
observer.assertCompleted()
}
@Test
def testTrySuccessConversion() = {
val success = Success("abc")
val observer = TestSubscriber[String]()
success.toObservable.subscribe(observer)
observer.assertValue("abc")
observer.assertNoErrors()
observer.assertCompleted()
}
@Test
def testTryFailureConversion() = {
val error = new IllegalArgumentException("test error")
val failure = Failure[String](error)
val observer = TestSubscriber[String]()
failure.toObservable.subscribe(observer)
observer.assertNoValues()
observer.assertError(error)
observer.assertNotCompleted()
}
@Test
def testOptionSomeConversion() = {
val some = Option("abc")
val observer = TestSubscriber[String]()
some.toObservable.subscribe(observer)
observer.assertValue("abc")
observer.assertNoErrors()
observer.assertCompleted()
}
@Test
def testOptionNoneConversion() = {
val some = Option.empty[String]
val observer = TestSubscriber[String]()
some.toObservable.subscribe(observer)
observer.assertNoValues()
observer.assertNoErrors()
observer.assertCompleted()
}
@Test
def testFutureSuccessfulConversion() = {
import scala.concurrent.ExecutionContext.Implicits.global
val fut = Future.successful("abc")
val observer = TestSubscriber[String]()
fut.toObservable.subscribe(observer)
observer.awaitTerminalEvent()
observer.assertValue("abc")
observer.assertNoErrors()
observer.assertCompleted()
}
@Test
def testFutureSuccessfulConversion2() = {
import scala.concurrent.ExecutionContext.Implicits.global
val fut = Future { "abc" }
val observer = TestSubscriber[String]()
fut.toObservable.subscribe(observer)
observer.awaitTerminalEvent()
observer.assertValue("abc")
observer.assertNoErrors()
observer.assertCompleted()
}
@Test
def testFutureFailedConversion() = {
import scala.concurrent.ExecutionContext.Implicits.global
val error = new IllegalArgumentException("test error")
val fut = Future.failed[Unit](error)
val observer = TestSubscriber[Unit]()
fut.toObservable.subscribe(observer)
observer.awaitTerminalEvent()
observer.assertNoValues()
observer.assertError(error)
observer.assertNotCompleted()
}
}
| ReactiveX/RxScala | src/test/scala/rx/lang/scala/ScalaTypesConversionsTests.scala | Scala | apache-2.0 | 3,069 |
package unitspec
import org.scalatest._
abstract class UnitSpec extends FlatSpec with Matchers with
OptionValues with Inside with Inspectors | mvogiatzis/freq-count | src/test/scala/unitspec/UnitSpec.scala | Scala | mit | 142 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.scalding.batch
import com.twitter.summingbird.batch.{ BatchID, Batcher }
import com.twitter.algebird.{ Universe, Empty, Interval, Intersection, InclusiveLower, ExclusiveUpper, InclusiveUpper }
import com.twitter.bijection.{ Injection, Bijection, Conversion }
import com.twitter.summingbird.batch.Timestamp
import com.twitter.summingbird.scalding._
import com.twitter.scalding.Mode
import Conversion.asMethod
/**
 * Services and Stores are very similar, but not identical.
 * This class shares the common logic between them.
*/
private class BatchedOperations(batcher: Batcher) {
def coverIt[T](timeSpan: Interval[Timestamp]): Iterable[BatchID] = {
val batchInterval = batcher.cover(timeSpan.as[Interval[Timestamp]])
BatchID.toIterable(batchInterval)
}
def batchToTimestamp(bint: Interval[BatchID]): Interval[Timestamp] =
bint.mapNonDecreasing { batcher.earliestTimeOf(_) }
def intersect(batches: Interval[BatchID], ts: Interval[Timestamp]): Interval[Timestamp] =
batchToTimestamp(batches) && ts
def intersect(batches: Iterable[BatchID], ts: Interval[Timestamp]): Option[Interval[Timestamp]] =
BatchID.toInterval(batches).map { intersect(_, ts) }
def readBatched[T](inBatches: Interval[BatchID], mode: Mode, in: PipeFactory[T]): Try[(Interval[BatchID], FlowToPipe[T])] = {
val inTimes = batchToTimestamp(inBatches)
// Read the delta stream for the needed times
in((inTimes, mode))
.right
.map {
case ((availableInput, innerm), f2p) =>
val batchesWeCanBuild = batcher.batchesCoveredBy(availableInput.as[Interval[Timestamp]])
(batchesWeCanBuild, f2p)
}
}
}
| zirpins/summingbird | summingbird-scalding/src/main/scala/com/twitter/summingbird/scalding/batch/BatchedOperations.scala | Scala | apache-2.0 | 2,234 |
/* Title: Pure/Admin/jenkins.scala
Author: Makarius
Support for Jenkins continuous integration service.
*/
package isabelle
import java.net.URL
import scala.util.matching.Regex
object Jenkins
{
/* server API */
def root(): String =
Isabelle_System.getenv_strict("ISABELLE_JENKINS_ROOT")
def invoke(url: String, args: String*): Any =
{
val req = url + "/api/json?" + args.mkString("&")
val result = Url.read(req)
try { JSON.parse(result) }
catch { case ERROR(_) => error("Malformed JSON from " + quote(req)) }
}
/* build jobs */
def build_job_names(): List[String] =
for {
job <- JSON.array(invoke(root()), "jobs").getOrElse(Nil)
_class <- JSON.string(job, "_class")
if _class == "hudson.model.FreeStyleProject"
name <- JSON.string(job, "name")
} yield name
def download_logs(
options: Options, job_names: List[String], dir: Path, progress: Progress = No_Progress)
{
val store = Sessions.store(options)
val infos = job_names.flatMap(build_job_infos(_))
Par_List.map((info: Job_Info) => info.download_log(store, dir, progress), infos)
}
/* build log status */
val build_log_jobs = List("isabelle-nightly-benchmark", "isabelle-nightly-slow")
val build_status_profiles: List[Build_Status.Profile] =
build_log_jobs.map(job_name =>
Build_Status.Profile("jenkins " + job_name,
sql =
Build_Log.Prop.build_engine + " = " + SQL.string(Build_Log.Jenkins.engine) + " AND " +
Build_Log.Data.session_name + " <> " + SQL.string("Pure") + " AND " +
Build_Log.Data.status + " = " + SQL.string(Build_Log.Session_Status.finished.toString) +
" AND " + Build_Log.Data.log_name + " LIKE " + SQL.string("%" + job_name)))
/* job info */
sealed case class Job_Info(
job_name: String,
timestamp: Long,
main_log: URL,
session_logs: List[(String, String, URL)])
{
val date: Date = Date(Time.ms(timestamp), Date.timezone_berlin)
def log_filename: Path =
Build_Log.log_filename(Build_Log.Jenkins.engine, date, List(job_name))
def read_ml_statistics(store: Sessions.Store, session_name: String): List[Properties.T] =
{
def get_log(ext: String): Option[URL] =
session_logs.collectFirst({ case (a, b, url) if a == session_name && b == ext => url })
get_log("db") match {
case Some(url) =>
Isabelle_System.with_tmp_file(session_name, "db") { database =>
Bytes.write(database, Bytes.read(url))
using(SQLite.open_database(database))(db => store.read_ml_statistics(db, session_name))
}
case None =>
get_log("gz") match {
case Some(url) =>
val log_file = Build_Log.Log_File(session_name, Url.read_gzip(url))
log_file.parse_session_info(ml_statistics = true).ml_statistics
case None => Nil
}
}
}
def download_log(store: Sessions.Store, dir: Path, progress: Progress = No_Progress)
{
val log_dir = dir + Build_Log.log_subdir(date)
val log_path = log_dir + log_filename.ext("xz")
if (!log_path.is_file) {
progress.echo(log_path.expand.implode)
Isabelle_System.mkdirs(log_dir)
val ml_statistics =
session_logs.map(_._1).toSet.toList.sorted.flatMap(session_name =>
read_ml_statistics(store, session_name).
map(props => (Build_Log.SESSION_NAME -> session_name) :: props))
File.write_xz(log_path,
terminate_lines(Url.read(main_log) ::
ml_statistics.map(Build_Log.Log_File.print_props(Build_Log.ML_STATISTICS_MARKER, _))),
XZ.options(6))
}
}
}
def build_job_infos(job_name: String): List[Job_Info] =
{
val Session_Log = new Regex("""^.*/log/([^/]+)\.(db|gz)$""")
val infos =
for {
build <-
JSON.array(
invoke(root() + "/job/" + job_name, "tree=allBuilds[number,timestamp,artifacts[*]]"),
"allBuilds").getOrElse(Nil)
number <- JSON.int(build, "number")
timestamp <- JSON.long(build, "timestamp")
} yield {
val job_prefix = root() + "/job/" + job_name + "/" + number
val main_log = Url(job_prefix + "/consoleText")
val session_logs =
for {
artifact <- JSON.array(build, "artifacts").getOrElse(Nil)
log_path <- JSON.string(artifact, "relativePath")
(name, ext) <- (log_path match { case Session_Log(a, b) => Some((a, b)) case _ => None })
} yield (name, ext, Url(job_prefix + "/artifact/" + log_path))
Job_Info(job_name, timestamp, main_log, session_logs)
}
infos.sortBy(info => - info.timestamp)
}
}
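/* A minimal, hypothetical usage sketch of the API above: list the configured Jenkins build jobs
   and download their logs. It assumes ISABELLE_JENKINS_ROOT is set and that default Isabelle
   options are acceptable; the target directory name is illustrative only. */
object Jenkins_Usage_Example
{
  def main(args: Array[String]): Unit =
  {
    val job_names = Jenkins.build_job_names()
    job_names.foreach(println)
    Jenkins.download_logs(Options.init(), job_names, Path.explode("jenkins_logs"))
  }
}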
| larsrh/libisabelle | modules/pide/2019-RC4/src/main/scala/Admin/jenkins.scala | Scala | apache-2.0 | 4,775 |
package org.concurrency.ch5
import scala.collection.parallel.Combiner
import scala.collection.mutable.ArrayBuffer
import scala.collection.parallel.mutable.ParArray
class ParStringCombiner extends Combiner[Char, ParString] {
private val chunks = new ArrayBuffer += new StringBuilder
private var sz = 0
private var lastc = chunks.last
override def size:Int = sz
override def += (elem: Char) = {
lastc += elem
sz += 1
this
}
override def clear(): Unit = {
chunks.clear()
chunks += new StringBuilder
lastc = chunks.last
sz = 0
}
  override def combine[U <: Char, NewTo >: ParString](that: Combiner[U, NewTo]): Combiner[U, NewTo] = {
    if (this eq that) this
    else that match {
      case that: ParStringCombiner =>
        sz += that.size
        chunks ++= that.chunks
        lastc = chunks.last
        this
    }
  }
override def result(): ParString = {
// //try #1 'manual' pallelizing
// //worst performant of the three
//
// val resArr = new Array[Char](this.size)
// val accSz = new ArrayBuffer[Int]
//
// //calculate accumulate lengths so that I can concat chars in parallel afterwards
// var tmpSz = 0
// accSz += 0
// for(sb <- chunks) {
// accSz += sb.length + tmpSz
// tmpSz += sb.length
// }
//
// for (i <- chunks.indices.par) {
// val sb = chunks(i)
// val shift = accSz(i)
// for(j <- sb.indices) resArr(shift + j) = sb(j)
// }
//
// new ParString(resArr.mkString)
// // try #2 String aggregate
// new ParString(chunks.par.aggregate("") (
// (acc, line) => acc + line,
// (acc1, acc2) => acc1 + acc2
// ))
// // try #3 StringBuilder aggregate
// new ParString(chunks.par.aggregate(new StringBuilder) (
// (acc,line) => acc.append(line),
// (acc1, acc2) => acc1.append(acc2)
// ).toString())
// try #4 sequential result
val rsb = new StringBuilder
for(sb <- chunks) rsb.append(sb)
new ParString(rsb.toString())
}
}
object ParStringCombinerApp extends App {
val valpartxt = new ParString("Get rid of those superfluous whitespaces, please" * 3)
println(valpartxt.filter(_ != ' '));
val txt = "A custom txt" * 25000
val partxt = new ParString(txt)
val seqtime = Timer.warmedTimed(250,2) {txt.filter(_ != ' ')}
val partime = Timer.warmedTimed(250,2) {partxt.filter(_ != ' ')}
println(s"seq: $seqtime ms")
println(s"par: $partime ms")
}
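// A small usage sketch of the combiner above, exercised directly rather than through the
// parallel-collections framework: characters are appended to two combiners, the combiners are
// merged, and result() assembles the final ParString. The sample strings are illustrative only.
object ParStringCombinerDemo extends App {
  val left = new ParStringCombiner
  "Get rid of those ".foreach(left += _)
  val right = new ParStringCombiner
  "superfluous whitespaces".foreach(right += _)
  val combined = left.combine(right)
  println(s"combined size = ${combined.size}")
  val result = combined.result()
  println(result.filter(_ != ' '))
}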
| marcos-sb/concurrent-programming-scala | src/main/scala-2.11/org/concurrency/ch5/ParStringCombiner.scala | Scala | apache-2.0 | 2,471 |
package org.brzy.validator.constraints
import org.scalatest.WordSpec
import org.scalatest.matchers.ShouldMatchers
class PatternSpec extends WordSpec with ShouldMatchers {
"Pattern validator" should {
"validate" in {
val pattern = Pattern("hello")
assert(pattern.isValid("hello you"))
}
"not validate" in {
val pattern = Pattern("hello")
assert(!pattern.isValid(" world"))
}
}
} | m410/brzy | src/test/scala/org/brzy/validator/constraints/PatternSpec.scala | Scala | apache-2.0 | 426 |
package fr.acinq.eclair.blockchain
import akka.actor.ActorRef
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.bitcoin.{ByteVector32, Script, ScriptWitness, Transaction}
import fr.acinq.eclair.channel.BitcoinEvent
import scodec.bits.ByteVector
import scala.util.{Success, Try}
sealed trait Watch {
def replyTo: ActorRef
def event: BitcoinEvent
}
final case class WatchConfirmed(replyTo: ActorRef, txId: ByteVector32, publicKeyScript: ByteVector, minDepth: Long, event: BitcoinEvent) extends Watch
object WatchConfirmed {
// if we have the entire transaction, we can get the publicKeyScript from any of the outputs
def apply(replyTo: ActorRef, tx: Transaction, event: BitcoinEvent, minDepth: Long): WatchConfirmed =
WatchConfirmed(replyTo, tx.txid, tx.txOut.map(_.publicKeyScript).headOption.getOrElse(ByteVector.empty), minDepth, event)
def extractPublicKeyScript(witness: ScriptWitness): ByteVector = Try(PublicKey fromBin witness.stack.last) match {
case Success(pubKey) => Script.write(Script pay2wpkh pubKey) // if last element of the witness is a public key, then this is a p2wpkh
case _ => Script.write(Script pay2wsh witness.stack.last) // otherwise this is a p2wsh
}
}
final case class WatchSpent(replyTo: ActorRef, txId: ByteVector32, outputIndex: Int, publicKeyScript: ByteVector, event: BitcoinEvent, hints: Set[ByteVector32] = Set.empty) extends Watch
object WatchSpent {
// if we have the entire transaction, we can get the publicKeyScript from the relevant output
def apply(replyTo: ActorRef, tx: Transaction, outputIndex: Int, event: BitcoinEvent): WatchSpent =
WatchSpent(replyTo, tx.txid, outputIndex, tx.txOut(outputIndex).publicKeyScript, event)
}
trait WatchEvent {
def event: BitcoinEvent
}
final case class TxConfirmedAt(blockHeight: Int, tx: Transaction)
final case class WatchEventConfirmed(event: BitcoinEvent, txConfirmedAt: TxConfirmedAt, txIndex: Int) extends WatchEvent
final case class WatchEventSpent(event: BitcoinEvent, tx: Transaction) extends WatchEvent
final case class PublishAsap(tx: Transaction)
final case class GetTxWithMeta(txid: ByteVector32)
final case class GetTxWithMetaResponse(txid: ByteVector32, tx_opt: Option[Transaction], lastBlockTimestamp: Long) | btcontract/wallet | app/src/main/java/fr/acinq/eclair/blockchain/WatcherTypes.scala | Scala | apache-2.0 | 2,257 |
package com.karasiq.shadowcloud.serialization.kryo
import com.esotericsoftware.kryo.Kryo
import com.esotericsoftware.kryo.io.{Input, Output}
import com.twitter.chill
import com.karasiq.shadowcloud.model.Timestamp
private[kryo] final class TimestampSerializer extends chill.KSerializer[Timestamp](false, true) {
def read(kryo: Kryo, input: Input, `type`: Class[Timestamp]): Timestamp = {
val created = input.readLong(true)
val modifiedOffset = input.readLong(true)
Timestamp(created, created + modifiedOffset)
}
def write(kryo: Kryo, output: Output, timestamp: Timestamp): Unit = {
val modifiedOffset = timestamp.lastModified - timestamp.created // math.max(0L, timestamp.lastModified - timestamp.created)
output.writeLong(timestamp.created, true)
output.writeLong(modifiedOffset, true)
}
} | Karasiq/shadowcloud | serialization/.jvm/src/main/scala/com/karasiq/shadowcloud/serialization/kryo/TimestampSerializer.scala | Scala | apache-2.0 | 825 |
package com.programmaticallyspeaking.ncd.nashorn
import com.programmaticallyspeaking.ncd.host.ValueNode
import com.sun.jdi._
/**
* Represents an object that doesn't exist in the remote VM.
*/
class LocalObject(val values: Map[String, ValueNode]) extends Value {
override def `type`(): Type = notSupported
override def virtualMachine(): VirtualMachine = notSupported
private def notSupported[R] = throw new UnsupportedOperationException("LocalObject")
} | provegard/ncdbg | src/main/scala/com/programmaticallyspeaking/ncd/nashorn/LocalObject.scala | Scala | bsd-3-clause | 465 |
package net.liftweb.markdown
/*
* Copyright 2013 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Based on https://github.com/chenkelmann/actuarius originally developed by
* Christoph Henkelmann http://henkelmann.eu/
*/
import org.scalatest.junit.JUnitRunner
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import collection.SortedMap
import org.junit.runner.RunWith
/**
* Tests basic parsers that are used by the more complex parsing steps.
*/
@RunWith(classOf[JUnitRunner])
class BaseParsersTest extends FlatSpec with ShouldMatchers with BaseParsers{
"The BaseParsers" should "parse a newline" in {
val p = nl
apply(p, "\n") should equal ("\n")
evaluating(apply(p, "\r\n")) should produce[IllegalArgumentException]
evaluating(apply(p, " \n")) should produce[IllegalArgumentException]
}
it should "parse whitespace" in {
val p = ws
apply(p, " ") should equal (" ")
apply(p, "\t") should equal ("\t")
apply(p, " ") should equal (" ")
apply(p, "\t\t") should equal ("\t\t")
apply(p, " \t \t ") should equal (" \t \t ")
        //we want newlines to be treated differently from other ws
evaluating (apply(p, "\n")) should produce[IllegalArgumentException]
}
it should "be able to look behind" in {
apply (((elem('a') ~ lookbehind(Set('a')) ~ elem('b'))^^{case a~lb~b=>a+""+b}), "ab") should equal ("ab")
evaluating {apply (((elem('a') ~ lookbehind(Set('b')) ~ elem('b'))^^{case a~b=>a+""+b}), "ab")} should produce[IllegalArgumentException]
apply( (elem('a') ~ not(lookbehind(Set(' ', '\t', '\n'))) ~ '*' ), "a*" )
}
it should "parse chars in ranges" in {
val p = ranges(SortedMap('A' -> 'Z', '0' -> '9'))
apply(p, "B") should equal ('B')
apply(p, "A") should equal ('A')
apply(p, "Z") should equal ('Z')
apply(p, "5") should equal ('5')
apply(p, "0") should equal ('0')
apply(p, "9") should equal ('9')
evaluating (apply(p, "a")) should produce[IllegalArgumentException]
evaluating (apply(p, "z")) should produce[IllegalArgumentException]
evaluating (apply(p, "<")) should produce[IllegalArgumentException]
}
} | lzpfmh/framework-2 | core/markdown/src/test/scala/net/liftweb/markdown/BaseParsersTest.scala | Scala | apache-2.0 | 2,833 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.recommendation
import java.io.File
import java.util.Random
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.language.existentials
import com.github.fommil.netlib.BLAS.{getInstance => blas}
import org.apache.spark.{Logging, SparkException, SparkFunSuite}
import org.apache.spark.ml.recommendation.ALS._
import org.apache.spark.ml.util.MLTestingUtils
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Row, SQLContext}
import org.apache.spark.util.Utils
/**
 * Reference: http://blog.csdn.net/liulingyuan6/article/details/53489390
 * Collaborative filtering is commonly used in recommender systems.
 * Spark.ml currently supports model-based collaborative filtering, in which users and items are
 * described by a small set of latent factors that are used to predict the missing entries.
 * Note that the DataFrame-based ALS interface currently only supports integer user and item ids.
 * The regularization parameter regParam addresses the least-squares problems that arise when new
 * ratings come in while the user factors or item factors are being updated.
 * Matrix factorization: the user-item rating matrix is factored into two matrices - one holding
 * the users' preferences over the latent features, the other holding the latent features of the items.
 * In this factorization the missing ratings are filled in, which is what allows us to recommend items to users.
 *
*/
class ALSSuite extends SparkFunSuite with MLlibTestSparkContext with Logging {
private var tempDir: File = _
override def beforeAll(): Unit = {
super.beforeAll()
tempDir = Utils.createTempDir()
sc.setCheckpointDir(tempDir.getAbsolutePath)
}
override def afterAll(): Unit = {
Utils.deleteRecursively(tempDir)
super.afterAll()
}
test("LocalIndexEncoder") {//本地索引编码
val random = new Random
for (numBlocks <- Seq(1, 2, 5, 10, 20, 50, 100)) {
val encoder = new LocalIndexEncoder(numBlocks)
val maxLocalIndex = Int.MaxValue / numBlocks
val tests = Seq.fill(5)((random.nextInt(numBlocks), random.nextInt(maxLocalIndex))) ++
Seq((0, 0), (numBlocks - 1, maxLocalIndex))
tests.foreach { case (blockId, localIndex) =>
val err = s"Failed with numBlocks=$numBlocks, blockId=$blockId, and localIndex=$localIndex."
val encoded = encoder.encode(blockId, localIndex)
assert(encoder.blockId(encoded) === blockId, err)
assert(encoder.localIndex(encoded) === localIndex, err)
}
}
}
test("normal equation construction") {//标准相等的构建
//线性回归
val k = 2
//NormalEquation 标准方程
val ne0 = new NormalEquation(k).add(Array(1.0f, 2.0f), 3.0).add(Array(4.0f, 5.0f), 6.0, 2.0) // weighted
assert(ne0.k === k)
assert(ne0.triK === k * (k + 1) / 2)
// NumPy code that computes the expected values:
// A = np.matrix("1 2; 4 5")
// b = np.matrix("3; 6")
// C = np.matrix(np.diag([1, 2]))
// ata = A.transpose() * C * A
// atb = A.transpose() * C * b
assert(Vectors.dense(ne0.ata) ~== Vectors.dense(33.0, 42.0, 54.0) relTol 1e-8)
assert(Vectors.dense(ne0.atb) ~== Vectors.dense(51.0, 66.0) relTol 1e-8)
val ne1 = new NormalEquation(2)
.add(Array(7.0f, 8.0f), 9.0)
ne0.merge(ne1)
// NumPy code that computes the expected values:
// A = np.matrix("1 2; 4 5; 7 8")
// b = np.matrix("3; 6; 9")
// C = np.matrix(np.diag([1, 2, 1]))
// ata = A.transpose() * C * A
// atb = A.transpose() * C * b
assert(Vectors.dense(ne0.ata) ~== Vectors.dense(82.0, 98.0, 118.0) relTol 1e-8)
assert(Vectors.dense(ne0.atb) ~== Vectors.dense(114.0, 138.0) relTol 1e-8)
intercept[IllegalArgumentException] {
ne0.add(Array(1.0f), 2.0)
}
intercept[IllegalArgumentException] {
ne0.add(Array(1.0f, 2.0f, 3.0f), 4.0)
}
intercept[IllegalArgumentException] {
ne0.add(Array(1.0f, 2.0f), 0.0, -1.0)
}
intercept[IllegalArgumentException] {
val ne2 = new NormalEquation(3)
ne0.merge(ne2)
}
ne0.reset()
assert(ne0.ata.forall(_ == 0.0))
assert(ne0.atb.forall(_ == 0.0))
}
  // The Cholesky decomposition factors a symmetric positive-definite matrix into a lower-triangular matrix L times its transpose
test("CholeskySolver") {
val k = 2
val ne0 = new NormalEquation(k)
.add(Array(1.0f, 2.0f), 4.0)
.add(Array(1.0f, 3.0f), 9.0)
.add(Array(1.0f, 4.0f), 16.0)
val ne1 = new NormalEquation(k)
.merge(ne0)
val chol = new CholeskySolver
val x0 = chol.solve(ne0, 0.0).map(_.toDouble)
// NumPy code that computes the expected solution:
// A = np.matrix("1 2; 1 3; 1 4")
// b = b = np.matrix("3; 6")
// x0 = np.linalg.lstsq(A, b)[0]
assert(Vectors.dense(x0) ~== Vectors.dense(-8.333333, 6.0) relTol 1e-6)
assert(ne0.ata.forall(_ == 0.0))
assert(ne0.atb.forall(_ == 0.0))
val x1 = chol.solve(ne1, 1.5).map(_.toDouble)
// NumPy code that computes the expected solution, where lambda is scaled by n:
// x0 = np.linalg.solve(A.transpose() * A + 1.5 * np.eye(2), A.transpose() * b)
assert(Vectors.dense(x1) ~== Vectors.dense(-0.1155556, 3.28) relTol 1e-6)
}
test("RatingBlockBuilder") {//评级模块生成器
val emptyBuilder = new RatingBlockBuilder[Int]()
assert(emptyBuilder.size === 0)
val emptyBlock = emptyBuilder.build()
assert(emptyBlock.srcIds.isEmpty)
assert(emptyBlock.dstIds.isEmpty)
assert(emptyBlock.ratings.isEmpty)
val builder0 = new RatingBlockBuilder()
.add(Rating(0, 1, 2.0f))
.add(Rating(3, 4, 5.0f))
assert(builder0.size === 2)
val builder1 = new RatingBlockBuilder()
      .add(Rating(6, 7, 8.0f))
      .merge(builder0.build())
    assert(builder1.size === 3) // after merging
val block = builder1.build()
val ratings = Seq.tabulate(block.size) { i =>
(block.srcIds(i), block.dstIds(i), block.ratings(i))
}.toSet
assert(ratings === Set((0, 1, 2.0f), (3, 4, 5.0f), (6, 7, 8.0f)))
}
test("UncompressedInBlock") {//未压缩的块
val encoder = new LocalIndexEncoder(10)
val uncompressed = new UncompressedInBlockBuilder[Int](encoder)
.add(0, Array(1, 0, 2), Array(0, 1, 4), Array(1.0f, 2.0f, 3.0f))
.add(1, Array(3, 0), Array(2, 5), Array(4.0f, 5.0f))
.build()
assert(uncompressed.length === 5)
val records = Seq.tabulate(uncompressed.length) { i =>
val dstEncodedIndex = uncompressed.dstEncodedIndices(i)
val dstBlockId = encoder.blockId(dstEncodedIndex)
val dstLocalIndex = encoder.localIndex(dstEncodedIndex)
(uncompressed.srcIds(i), dstBlockId, dstLocalIndex, uncompressed.ratings(i))
}.toSet
val expected =
Set((1, 0, 0, 1.0f), (0, 0, 1, 2.0f), (2, 0, 4, 3.0f), (3, 1, 2, 4.0f), (0, 1, 5, 5.0f))
assert(records === expected)
val compressed = uncompressed.compress()
assert(compressed.size === 5)
assert(compressed.srcIds.toSeq === Seq(0, 1, 2, 3))
assert(compressed.dstPtrs.toSeq === Seq(0, 2, 3, 4, 5))
var decompressed = ArrayBuffer.empty[(Int, Int, Int, Float)]
var i = 0
while (i < compressed.srcIds.size) {
var j = compressed.dstPtrs(i)
while (j < compressed.dstPtrs(i + 1)) {
val dstEncodedIndex = compressed.dstEncodedIndices(j)
val dstBlockId = encoder.blockId(dstEncodedIndex)
val dstLocalIndex = encoder.localIndex(dstEncodedIndex)
decompressed += ((compressed.srcIds(i), dstBlockId, dstLocalIndex, compressed.ratings(j)))
j += 1
}
i += 1
}
assert(decompressed.toSet === expected)
}
/**
* Generates an explicit feedback dataset for testing ALS.
   * @param numUsers number of users
   * @param numItems number of items
   * @param rank rank (number of latent features in the model)
   * @param noiseStd the standard deviation of additive Gaussian noise on training data
   * @param seed random seed
* @return (training, test)
*/
def genExplicitTestData(
      numUsers: Int, // number of users
      numItems: Int, // number of items
      rank: Int, // rank of the factorization
noiseStd: Double = 0.0,
seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = {
val trainingFraction = 0.6
val testFraction = 0.3
val totalFraction = trainingFraction + testFraction
val random = new Random(seed)
    // rank: number of latent features in the model
val userFactors = genFactors(numUsers, rank, random)
val itemFactors = genFactors(numItems, rank, random)
val training = ArrayBuffer.empty[Rating[Int]]
val test = ArrayBuffer.empty[Rating[Int]]
for ((userId, userFactor) <- userFactors; (itemId, itemFactor) <- itemFactors) {
val x = random.nextDouble()
if (x < totalFraction) {
val rating = blas.sdot(rank, userFactor, 1, itemFactor, 1)
if (x < trainingFraction) {
val noise = noiseStd * random.nextGaussian()
training += Rating(userId, itemId, rating + noise.toFloat)
} else {
test += Rating(userId, itemId, rating)
}
}
}
logInfo(s"Generated an explicit feedback dataset with ${training.size} ratings for training " +
s"and ${test.size} for test.")
(sc.parallelize(training, 2), sc.parallelize(test, 2))
}
/**
* Generates an implicit feedback dataset for testing ALS.
   * @param numUsers number of users
   * @param numItems number of items
   * @param rank rank (number of latent features in the model)
* @param noiseStd the standard deviation of additive Gaussian noise on training data
* @param seed random seed
* @return (training, test)
*/
def genImplicitTestData(
numUsers: Int,
numItems: Int,
rank: Int,
noiseStd: Double = 0.0,
seed: Long = 11L): (RDD[Rating[Int]], RDD[Rating[Int]]) = {
// The assumption of the implicit feedback model is that unobserved ratings are more likely to
// be negatives.
    val positiveFraction = 0.8 // fraction of positive observations
    val negativeFraction = 1.0 - positiveFraction // fraction of negative observations
    val trainingFraction = 0.6 // training fraction
    val testFraction = 0.3 // test fraction
val totalFraction = trainingFraction + testFraction
val random = new Random(seed)
    // rank: number of latent features in the model
val userFactors = genFactors(numUsers, rank, random)
val itemFactors = genFactors(numItems, rank, random)
val training = ArrayBuffer.empty[Rating[Int]]
val test = ArrayBuffer.empty[Rating[Int]]
for ((userId, userFactor) <- userFactors; (itemId, itemFactor) <- itemFactors) {
val rating = blas.sdot(rank, userFactor, 1, itemFactor, 1)
      // observe positive ratings with probability positiveFraction and negative ones with probability negativeFraction
val threshold = if (rating > 0) positiveFraction else negativeFraction
val observed = random.nextDouble() < threshold
if (observed) {
val x = random.nextDouble()
if (x < totalFraction) {
if (x < trainingFraction) {
val noise = noiseStd * random.nextGaussian()
training += Rating(userId, itemId, rating + noise.toFloat)
} else {
test += Rating(userId, itemId, rating)
}
}
}
}
logInfo(s"Generated an implicit feedback dataset with ${training.size} ratings for training " +
s"and ${test.size} for test.")
(sc.parallelize(training, 2), sc.parallelize(test, 2))
}
/**
* Generates random user/item factors, with i.i.d. values drawn from U(a, b).
   * @param size number of users/items
   * @param rank number of features
   * @param random random number generator
* @param a min value of the support (default: -1)
* @param b max value of the support (default: 1)
* @return a sequence of (ID, factors) pairs
*/
private def genFactors(
size: Int,
rank: Int,
random: Random,
a: Float = -1.0f,
b: Float = 1.0f): Seq[(Int, Array[Float])] = {
require(size > 0 && size < Int.MaxValue / 3)
require(b > a)
val ids = mutable.Set.empty[Int]
while (ids.size < size) {
ids += random.nextInt()
}
val width = b - a
ids.toSeq.sorted.map(id => (id, Array.fill(rank)(a + random.nextFloat() * width)))
}
/**
* Test ALS using the given training/test splits and parameters.
* @param training training dataset
* @param test test dataset
   * @param rank rank of the matrix factorization
* @param maxIter max number of iterations
* @param regParam regularization constant
* @param implicitPrefs whether to use implicit preference
* @param numUserBlocks number of user blocks
* @param numItemBlocks number of item blocks
* @param targetRMSE target test RMSE
*/
def testALS(
training: RDD[Rating[Int]],
test: RDD[Rating[Int]],
      rank: Int, // number of latent factors in the model
      maxIter: Int, // number of iterations
      regParam: Double, // regularization parameter (>= 0)
      implicitPrefs: Boolean = false, // whether to use the implicit-feedback variant of ALS
      numUserBlocks: Int = 2, // number of user blocks (parallelism)
      numItemBlocks: Int = 3, // number of item blocks (parallelism)
targetRMSE: Double = 0.05): Unit = {
val sqlContext = this.sqlContext
import sqlContext.implicits._
val als = new ALS()
      .setRank(rank) // number of latent factors in the model
      .setRegParam(regParam) // regularization parameter
      .setImplicitPrefs(implicitPrefs) // explicit- vs. implicit-feedback variant of ALS
      .setNumUserBlocks(numUserBlocks) // number of user blocks
      .setNumItemBlocks(numItemBlocks) // number of item blocks
      .setSeed(0) // random seed
    // Tuning these parameters (e.g. more iterations, a smaller regParam) generally lowers the RMSE
    // and yields better recommendations.
    val alpha = als.getAlpha // implicit-feedback-only parameter: baseline confidence in preference observations
    // fit() turns the training DataFrame into a Transformer (the fitted model)
    val model = als.fit(training.toDF())
    // transform() maps a DataFrame to another DataFrame, here adding a "prediction" column
/**
*+-----------+----------+------------+------------+
| user| item| rating| prediction|
+-----------+----------+------------+------------+
| 919154311|1940909433| -0.201669| -0.20149253|
| -79074260|1940909433| 0.44795045| 0.4476502|
| 1525869989|1940909433| -0.11397148| -0.11388531|
| 113375480|-739181961|-0.049516156|-0.049473286|
|-1591288030|-739181961|-0.031804726|-0.031776465|
|-1158177819|-739181961| 0.507647| 0.5070719|
| 1022899383|-739181961| -0.7780471| -0.7773425|
| 1228230215|-739181961| -0.4406039| -0.44020513|
+-----------+----------+------------+------------+
*/
model.transform(test.toDF()).show()
val predictions = model.transform(test.toDF())
.select("rating", "prediction")
      // (actual rating, predicted rating)
.map { case Row(rating: Float, prediction: Float) =>
(rating.toDouble, prediction.toDouble)
}
      // RMSE (root mean squared error), commonly written sqrt(sum(d_i^2) / n),
      // where n is the number of observations and d_i the deviation of a value from the true value.
    val rmse =
      if (implicitPrefs) { // implicit feedback
// TODO: Use a better (rank-based?) evaluation metric for implicit feedback.
// We limit the ratings and the predictions to interval [0, 1] and compute the weighted RMSE
// with the confidence scores as weights.
val (totalWeight, weightedSumSq) = predictions.map { case (rating, prediction) =>
          // the confidence weight grows with the magnitude of the rating
          val confidence = 1.0 + alpha * math.abs(rating)
val rating01 = math.max(math.min(rating, 1.0), 0.0)
val prediction01 = math.max(math.min(prediction, 1.0), 0.0)
val err = prediction01 - rating01
(confidence, confidence * err * err)
}.reduce { case ((c0, e0), (c1, e1)) =>
(c0 + c1, e0 + e1)
}
        math.sqrt(weightedSumSq / totalWeight)
      } else { // explicit feedback
        // MSE: the mean squared deviation of the predictions from the actual ratings
        val mse = predictions.map { case (rating, prediction) =>
          val err = rating - prediction
          err * err // squared error
        }.mean() // mean over all test ratings
        math.sqrt(mse) // the RMSE is the square root of the MSE
}
logInfo(s"Test RMSE is $rmse.")
    // targetRMSE defaults to 0.05; the smaller the RMSE, the better the recommendations
assert(rmse < targetRMSE)
    // copied model must have the same parent.
MLTestingUtils.checkCopy(model)
}
/* test("exact rank-1 matrix") {//矩阵分解中的特征数=1
val (training, test) = genExplicitTestData(numUsers = 20, numItems = 40, rank = 1)
//rank正则化参数,maxIter迭代次数,numUserBlocks设置用户数据块的个数和并行度,numProductBlocks设置物品数据块个数和并行度
testALS(training, test, maxIter = 1, rank = 1, regParam = 1e-5, targetRMSE = 0.001)
testALS(training, test, maxIter = 1, rank = 2, regParam = 1e-5, targetRMSE = 0.001)
}*/
/*
test("approximate rank-1 matrix") {//矩阵分解特征数近似秩1
val (training, test) =
genExplicitTestData(numUsers = 20, numItems = 40, rank = 1, noiseStd = 0.01)
testALS(training, test, maxIter = 2, rank = 1, regParam = 0.01, targetRMSE = 0.02)
testALS(training, test, maxIter = 2, rank = 2, regParam = 0.01, targetRMSE = 0.02)
}
test("approximate rank-2 matrix") {//矩阵分解特征数近似2
val (training, test) =
genExplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
    // rank: latent factors; maxIter: iterations; numUserBlocks/numItemBlocks: number of blocks (parallelism)
testALS(training, test, maxIter = 4, rank = 2, regParam = 0.01, targetRMSE = 0.03)
testALS(training, test, maxIter = 4, rank = 3, regParam = 0.01, targetRMSE = 0.03)
}*/
/*test("different block settings") {//不同块设置
val (training, test) =
genExplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
for ((numUserBlocks, numItemBlocks) <- Seq((1, 1), (1, 2), (2, 1), (2, 2))) {
      // rank: latent factors; maxIter: iterations; numUserBlocks/numItemBlocks: number of blocks (parallelism)
      testALS(training, test, maxIter = 4, rank = 3, regParam = 0.01, targetRMSE = 0.03,
        numUserBlocks = numUserBlocks, numItemBlocks = numItemBlocks)
}
}
test("more blocks than ratings") {//多块比评级
val (training, test) =
genExplicitTestData(numUsers = 4, numItems = 4, rank = 1)
    // rank: latent factors; maxIter: iterations; numUserBlocks/numItemBlocks: number of blocks (parallelism)
    testALS(training, test, maxIter = 2, rank = 1, regParam = 1e-4, targetRMSE = 0.002,
      numItemBlocks = 5, numUserBlocks = 5)
}*/
/* test("implicit feedback") {//隐式反馈
val (training, test) =
genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
testALS(training, test, maxIter = 4, rank = 2, regParam = 0.01, implicitPrefs = true,
targetRMSE = 0.3)
}*/
/*test("using generic ID types") {//使用通用的身份标识类型
val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
val longRatings = ratings.map(r => Rating(r.user.toLong, r.item.toLong, r.rating))
val (longUserFactors, _) = ALS.train(longRatings, rank = 2, maxIter = 4, seed = 0)
assert(longUserFactors.first()._1.getClass === classOf[Long])
val strRatings = ratings.map(r => Rating(r.user.toString, r.item.toString, r.rating))
val (strUserFactors, _) = ALS.train(strRatings, rank = 2, maxIter = 4, seed = 0)
assert(strUserFactors.first()._1.getClass === classOf[String])
}*/
/* test("nonnegative constraint") {//非负约束
val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
val (userFactors, itemFactors) =
      // nonnegative = true requires the learned factors to be nonnegative
ALS.train(ratings, rank = 2, maxIter = 4, nonnegative = true, seed = 0)
def isNonnegative(factors: RDD[(Int, Array[Float])]): Boolean = {
factors.values.map { _.forall(_ >= 0.0) }.reduce(_ && _)
}
    assert(isNonnegative(userFactors)) // user factors must be nonnegative
    assert(isNonnegative(itemFactors)) // item factors must be nonnegative
// TODO: Validate the solution.
}*/
test("als partitioner is a projection") {//als分区是一个投影
for (p <- Seq(1, 10, 100, 1000)) {
val part = new ALSPartitioner(p)
var k = 0
while (k < p) {
assert(k === part.getPartition(k))
assert(k === part.getPartition(k.toLong))
k += 1
}
}
}
/* test("partitioner in returned factors") {//返回分区因子
val (ratings, _) = genImplicitTestData(numUsers = 20, numItems = 40, rank = 2, noiseStd = 0.01)
val (userFactors, itemFactors) = ALS.train(
      // numUserBlocks/numItemBlocks: number of blocks (parallelism)
ratings, rank = 2, maxIter = 4, numUserBlocks = 3, numItemBlocks = 4, seed = 0)
for ((tpe, factors) <- Seq(("User", userFactors), ("Item", itemFactors))) {
assert(userFactors.partitioner.isDefined, s"$tpe factors should have partitioner.")
val part = userFactors.partitioner.get
userFactors.mapPartitionsWithIndex { (idx, items) =>
items.foreach { case (id, _) =>
if (part.getPartition(id) != idx) {
throw new SparkException(s"$tpe with ID $id should not be in partition $idx.")
}
}
Iterator.empty
}.count()
}
}*/
/* test("als with large number of iterations") {//als具有大量的迭代
val (ratings, _) = genExplicitTestData(numUsers = 4, numItems = 4, rank = 1)
    // rank: latent factors; maxIter: iterations; numUserBlocks/numItemBlocks: number of blocks (parallelism)
ALS.train(ratings, rank = 1, maxIter = 50, numUserBlocks = 2, numItemBlocks = 2, seed = 0)
    // numUserBlocks/numItemBlocks: number of blocks (parallelism)
ALS.train(ratings, rank = 1, maxIter = 50, numUserBlocks = 2, numItemBlocks = 2,
implicitPrefs = true, seed = 0)
}*/
}
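// A minimal, hypothetical sketch of how the ALS estimator exercised by this suite is configured.
// Only the estimator is constructed here; fitting it additionally requires a SQLContext and a
// DataFrame of (user, item, rating) rows, as testALS above does. Parameter values are illustrative.
object ALSConfigurationSketch {
  def buildEstimator(): ALS = {
    new ALS()
      .setRank(10) // number of latent factors
      .setMaxIter(5) // ALS iterations
      .setRegParam(0.01) // regularization strength
      .setImplicitPrefs(false) // explicit-feedback variant
      .setNumUserBlocks(2)
      .setNumItemBlocks(2)
  }
}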
| tophua/spark1.52 | mllib/src/test/scala/org/apache/spark/ml/recommendation/ALSSuite.scala | Scala | apache-2.0 | 24,285 |
/* Ayasdi Inc. Copyright 2014 - all rights reserved. */
/**
* @author abhishek, ajith, mohit
* big dataframe on spark: wrappers for python access via py4j
*/
package org.apache.spark
import java.util.{ArrayList => JArrayList}
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import net.razorvine.pickle.Unpickler
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.api.python.SerDeUtil
import org.apache.spark.rdd.RDD
object BigDFPyRDD {
var initialized = false
def initialize(): Unit = {
SerDeUtil.initialize()
synchronized {
if (!initialized) {
initialized = true
}
}
}
initialize()
def pythonRDD(rdd: RDD[_]): JavaRDD[Array[Byte]] = {
rdd.mapPartitions { iter =>
initialize() // lets it be called in executor
new SerDeUtil.AutoBatchedPickler(iter)
}
}
def javaRDD[T: ClassTag](pyrdd: JavaRDD[Array[Byte]]): JavaRDD[T] = {
pyrdd.rdd.mapPartitions { iter =>
initialize()
val unpickle = new Unpickler
iter.flatMap { row =>
val v = unpickle.loads(row)
v.asInstanceOf[JArrayList[T]].asScala
}
}.toJavaRDD()
}
}
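// A hypothetical round-trip sketch for the wrappers above: pickle a JVM RDD into byte batches for
// the Python side, then unpickle it back on the JVM side. The local SparkContext and the sample
// values are illustrative assumptions, not part of the original API.
object BigDFPyRDDExample {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext("local[2]", "BigDFPyRDDExample")
    try {
      val doubles = sc.parallelize(Seq(1.0, 2.0, 3.0))
      val pickled = BigDFPyRDD.pythonRDD(doubles) // RDD[Double] -> RDD of pickled batches
      val restored = BigDFPyRDD.javaRDD[Double](pickled) // pickled batches -> JavaRDD[Double]
      println(restored.collect())
    } finally {
      sc.stop()
    }
  }
}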
| AyasdiOpenSource/bigdf | src/main/scala/com/ayasdi/bigdf/BigDFPyRDD.scala | Scala | apache-2.0 | 1,181 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.client
import org.orbeon.oxf.client.fb.{Permissions, LabelHintEditor}
import org.junit.runners.MethodSorters
import org.junit.FixMethodOrder
import org.orbeon.oxf.client.fr.Currency
// List all client tests which we want to run with a single run of the driver
@FixMethodOrder(MethodSorters.NAME_ASCENDING)
class CombinedClientTest
extends OrbeonClientBase
with XForms
with FormRunnerSummary
with OrbeonFormsDemoPath
with LabelHintEditor
with Permissions
with Currency
| evlist/orbeon-forms | src/test/scala/org/orbeon/oxf/client/CombinedClientTest.scala | Scala | lgpl-2.1 | 1,176 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.cluster
import java.io.File
import java.nio.ByteBuffer
import java.util.Properties
import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicBoolean
import kafka.api.Request
import kafka.common.UnexpectedAppendOffsetException
import kafka.log.{LogConfig, LogManager, CleanerConfig}
import kafka.server._
import kafka.utils.{MockTime, TestUtils, MockScheduler}
import kafka.zk.KafkaZkClient
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.ReplicaNotAvailableException
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.utils.Utils
import org.apache.kafka.common.record._
import org.apache.kafka.common.requests.LeaderAndIsrRequest
import org.junit.{After, Before, Test}
import org.junit.Assert._
import org.scalatest.Assertions.assertThrows
import org.easymock.EasyMock
import scala.collection.JavaConverters._
class PartitionTest {
val brokerId = 101
val topicPartition = new TopicPartition("test-topic", 0)
val time = new MockTime()
val brokerTopicStats = new BrokerTopicStats
val metrics = new Metrics
var tmpDir: File = _
var logDir1: File = _
var logDir2: File = _
var replicaManager: ReplicaManager = _
var logManager: LogManager = _
var logConfig: LogConfig = _
@Before
def setup(): Unit = {
val logProps = new Properties()
logProps.put(LogConfig.SegmentBytesProp, 512: java.lang.Integer)
logProps.put(LogConfig.SegmentIndexBytesProp, 1000: java.lang.Integer)
logProps.put(LogConfig.RetentionMsProp, 999: java.lang.Integer)
logConfig = LogConfig(logProps)
tmpDir = TestUtils.tempDir()
logDir1 = TestUtils.randomPartitionLogDir(tmpDir)
logDir2 = TestUtils.randomPartitionLogDir(tmpDir)
logManager = TestUtils.createLogManager(
logDirs = Seq(logDir1, logDir2), defaultConfig = logConfig, CleanerConfig(enableCleaner = false), time)
logManager.startup()
val brokerProps = TestUtils.createBrokerConfig(brokerId, TestUtils.MockZkConnect)
brokerProps.put(KafkaConfig.LogDirsProp, Seq(logDir1, logDir2).map(_.getAbsolutePath).mkString(","))
val brokerConfig = KafkaConfig.fromProps(brokerProps)
val kafkaZkClient = EasyMock.createMock(classOf[KafkaZkClient])
replicaManager = new ReplicaManager(
config = brokerConfig, metrics, time, zkClient = kafkaZkClient, new MockScheduler(time),
logManager, new AtomicBoolean(false), QuotaFactory.instantiate(brokerConfig, metrics, time, ""),
brokerTopicStats, new MetadataCache(brokerId), new LogDirFailureChannel(brokerConfig.logDirs.size))
EasyMock.expect(kafkaZkClient.getEntityConfigs(EasyMock.anyString(), EasyMock.anyString())).andReturn(logProps).anyTimes()
EasyMock.expect(kafkaZkClient.conditionalUpdatePath(EasyMock.anyObject(), EasyMock.anyObject(), EasyMock.anyObject(), EasyMock.anyObject()))
.andReturn((true, 0)).anyTimes()
EasyMock.replay(kafkaZkClient)
}
@After
def tearDown(): Unit = {
brokerTopicStats.close()
metrics.close()
logManager.shutdown()
Utils.delete(tmpDir)
logManager.liveLogDirs.foreach(Utils.delete)
replicaManager.shutdown(checkpointHW = false)
}
@Test
// Verify that partition.removeFutureLocalReplica() and partition.maybeReplaceCurrentWithFutureReplica() can run concurrently
def testMaybeReplaceCurrentWithFutureReplica(): Unit = {
val latch = new CountDownLatch(1)
logManager.maybeUpdatePreferredLogDir(topicPartition, logDir1.getAbsolutePath)
val log1 = logManager.getOrCreateLog(topicPartition, logConfig)
logManager.maybeUpdatePreferredLogDir(topicPartition, logDir2.getAbsolutePath)
val log2 = logManager.getOrCreateLog(topicPartition, logConfig, isFuture = true)
val currentReplica = new Replica(brokerId, topicPartition, time, log = Some(log1))
val futureReplica = new Replica(Request.FutureLocalReplicaId, topicPartition, time, log = Some(log2))
val partition = new Partition(topicPartition.topic, topicPartition.partition, time, replicaManager)
partition.addReplicaIfNotExists(futureReplica)
partition.addReplicaIfNotExists(currentReplica)
assertEquals(Some(currentReplica), partition.getReplica(brokerId))
assertEquals(Some(futureReplica), partition.getReplica(Request.FutureLocalReplicaId))
val thread1 = new Thread {
override def run(): Unit = {
latch.await()
partition.removeFutureLocalReplica()
}
}
val thread2 = new Thread {
override def run(): Unit = {
latch.await()
partition.maybeReplaceCurrentWithFutureReplica()
}
}
thread1.start()
thread2.start()
latch.countDown()
thread1.join()
thread2.join()
assertEquals(None, partition.getReplica(Request.FutureLocalReplicaId))
}
@Test
def testAppendRecordsAsFollowerBelowLogStartOffset(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, logConfig)
val replica = new Replica(brokerId, topicPartition, time, log = Some(log))
val partition = new Partition(topicPartition.topic, topicPartition.partition, time, replicaManager)
partition.addReplicaIfNotExists(replica)
assertEquals(Some(replica), partition.getReplica(replica.brokerId))
val initialLogStartOffset = 5L
partition.truncateFullyAndStartAt(initialLogStartOffset, isFuture = false)
assertEquals(s"Log end offset after truncate fully and start at $initialLogStartOffset:",
initialLogStartOffset, replica.logEndOffset.messageOffset)
assertEquals(s"Log start offset after truncate fully and start at $initialLogStartOffset:",
initialLogStartOffset, replica.logStartOffset)
// verify that we cannot append records that do not contain log start offset even if the log is empty
assertThrows[UnexpectedAppendOffsetException] {
// append one record with offset = 3
partition.appendRecordsToFollowerOrFutureReplica(createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 3L), isFuture = false)
}
assertEquals(s"Log end offset should not change after failure to append", initialLogStartOffset, replica.logEndOffset.messageOffset)
// verify that we can append records that contain log start offset, even when first
// offset < log start offset if the log is empty
val newLogStartOffset = 4L
val records = createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k2".getBytes, "v2".getBytes),
new SimpleRecord("k3".getBytes, "v3".getBytes)),
baseOffset = newLogStartOffset)
partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false)
assertEquals(s"Log end offset after append of 3 records with base offset $newLogStartOffset:", 7L, replica.logEndOffset.messageOffset)
assertEquals(s"Log start offset after append of 3 records with base offset $newLogStartOffset:", newLogStartOffset, replica.logStartOffset)
// and we can append more records after that
partition.appendRecordsToFollowerOrFutureReplica(createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 7L), isFuture = false)
assertEquals(s"Log end offset after append of 1 record at offset 7:", 8L, replica.logEndOffset.messageOffset)
assertEquals(s"Log start offset not expected to change:", newLogStartOffset, replica.logStartOffset)
// but we cannot append to offset < log start if the log is not empty
assertThrows[UnexpectedAppendOffsetException] {
val records2 = createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k2".getBytes, "v2".getBytes)),
baseOffset = 3L)
partition.appendRecordsToFollowerOrFutureReplica(records2, isFuture = false)
}
assertEquals(s"Log end offset should not change after failure to append", 8L, replica.logEndOffset.messageOffset)
// we still can append to next offset
partition.appendRecordsToFollowerOrFutureReplica(createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 8L), isFuture = false)
assertEquals(s"Log end offset after append of 1 record at offset 8:", 9L, replica.logEndOffset.messageOffset)
assertEquals(s"Log start offset not expected to change:", newLogStartOffset, replica.logStartOffset)
}
@Test
def testGetReplica(): Unit = {
val log = logManager.getOrCreateLog(topicPartition, logConfig)
val replica = new Replica(brokerId, topicPartition, time, log = Some(log))
    val partition = new Partition(topicPartition.topic, topicPartition.partition, time, replicaManager)
assertEquals(None, partition.getReplica(brokerId))
assertThrows[ReplicaNotAvailableException] {
partition.getReplicaOrException(brokerId)
}
partition.addReplicaIfNotExists(replica)
assertEquals(replica, partition.getReplicaOrException(brokerId))
}
@Test
def testAppendRecordsToFollowerWithNoReplicaThrowsException(): Unit = {
val partition = new Partition(topicPartition.topic, topicPartition.partition, time, replicaManager)
assertThrows[ReplicaNotAvailableException] {
partition.appendRecordsToFollowerOrFutureReplica(
createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 0L), isFuture = false)
}
}
@Test
def testMakeFollowerWithNoLeaderIdChange(): Unit = {
val partition = new Partition(topicPartition.topic, topicPartition.partition, time, replicaManager)
// Start off as follower
var partitionStateInfo = new LeaderAndIsrRequest.PartitionState(0, 1, 1, List[Integer](0, 1, 2).asJava, 1, List[Integer](0, 1, 2).asJava, false)
partition.makeFollower(0, partitionStateInfo, 0)
    // Request with the same leader and an epoch increase of more than 1: perform become-follower steps
partitionStateInfo = new LeaderAndIsrRequest.PartitionState(0, 1, 3, List[Integer](0, 1, 2).asJava, 1, List[Integer](0, 1, 2).asJava, false)
assertTrue(partition.makeFollower(0, partitionStateInfo, 1))
    // Request with the same leader and an epoch increase of only 1: skip become-follower steps
partitionStateInfo = new LeaderAndIsrRequest.PartitionState(0, 1, 4, List[Integer](0, 1, 2).asJava, 1, List[Integer](0, 1, 2).asJava, false)
assertFalse(partition.makeFollower(0, partitionStateInfo, 2))
// Request with same leader and same epoch, skip become-follower steps
partitionStateInfo = new LeaderAndIsrRequest.PartitionState(0, 1, 4, List[Integer](0, 1, 2).asJava, 1, List[Integer](0, 1, 2).asJava, false)
assertFalse(partition.makeFollower(0, partitionStateInfo, 2))
}
@Test
def testFollowerDoesNotJoinISRUntilCaughtUpToOffsetWithinCurrentLeaderEpoch(): Unit = {
val controllerEpoch = 3
val leader = brokerId
val follower1 = brokerId + 1
val follower2 = brokerId + 2
val controllerId = brokerId + 3
val replicas = List[Integer](leader, follower1, follower2).asJava
val isr = List[Integer](leader, follower2).asJava
val leaderEpoch = 8
val batch1 = TestUtils.records(records = List(new SimpleRecord("k1".getBytes, "v1".getBytes),
new SimpleRecord("k2".getBytes, "v2".getBytes)))
val batch2 = TestUtils.records(records = List(new SimpleRecord("k3".getBytes, "v1".getBytes),
new SimpleRecord("k4".getBytes, "v2".getBytes),
new SimpleRecord("k5".getBytes, "v3".getBytes)))
val batch3 = TestUtils.records(records = List(new SimpleRecord("k6".getBytes, "v1".getBytes),
new SimpleRecord("k7".getBytes, "v2".getBytes)))
val partition = new Partition(topicPartition.topic, topicPartition.partition, time, replicaManager)
assertTrue("Expected first makeLeader() to return 'leader changed'",
partition.makeLeader(controllerId, new LeaderAndIsrRequest.PartitionState(controllerEpoch, leader, leaderEpoch, isr, 1, replicas, true), 0))
assertEquals("Current leader epoch", leaderEpoch, partition.getLeaderEpoch)
assertEquals("ISR", Set[Integer](leader, follower2), partition.inSyncReplicas.map(_.brokerId))
    // after the makeLeader() call, the partition should know about all the replicas
val leaderReplica = partition.getReplica(leader).get
val follower1Replica = partition.getReplica(follower1).get
val follower2Replica = partition.getReplica(follower2).get
// append records with initial leader epoch
val lastOffsetOfFirstBatch = partition.appendRecordsToLeader(batch1, isFromClient = true).lastOffset
partition.appendRecordsToLeader(batch2, isFromClient = true)
assertEquals("Expected leader's HW not move", leaderReplica.logStartOffset, leaderReplica.highWatermark.messageOffset)
// let the follower in ISR move leader's HW to move further but below LEO
def readResult(fetchInfo: FetchDataInfo, leaderReplica: Replica): LogReadResult = {
LogReadResult(info = fetchInfo,
highWatermark = leaderReplica.highWatermark.messageOffset,
leaderLogStartOffset = leaderReplica.logStartOffset,
leaderLogEndOffset = leaderReplica.logEndOffset.messageOffset,
followerLogStartOffset = 0,
fetchTimeMs = time.milliseconds,
readSize = 10240,
lastStableOffset = None)
}
partition.updateReplicaLogReadResult(
follower2Replica, readResult(FetchDataInfo(LogOffsetMetadata(0), batch1), leaderReplica))
partition.updateReplicaLogReadResult(
follower2Replica, readResult(FetchDataInfo(LogOffsetMetadata(lastOffsetOfFirstBatch), batch2), leaderReplica))
assertEquals("Expected leader's HW", lastOffsetOfFirstBatch, leaderReplica.highWatermark.messageOffset)
// current leader becomes follower and then leader again (without any new records appended)
partition.makeFollower(
controllerId, new LeaderAndIsrRequest.PartitionState(controllerEpoch, follower2, leaderEpoch + 1, isr, 1, replicas, false), 1)
assertTrue("Expected makeLeader() to return 'leader changed' after makeFollower()",
partition.makeLeader(controllerEpoch, new LeaderAndIsrRequest.PartitionState(
controllerEpoch, leader, leaderEpoch + 2, isr, 1, replicas, false), 2))
val currentLeaderEpochStartOffset = leaderReplica.logEndOffset.messageOffset
// append records with the latest leader epoch
partition.appendRecordsToLeader(batch3, isFromClient = true)
// fetch from follower not in ISR from log start offset should not add this follower to ISR
partition.updateReplicaLogReadResult(follower1Replica,
readResult(FetchDataInfo(LogOffsetMetadata(0), batch1), leaderReplica))
partition.updateReplicaLogReadResult(follower1Replica,
readResult(FetchDataInfo(LogOffsetMetadata(lastOffsetOfFirstBatch), batch2), leaderReplica))
assertEquals("ISR", Set[Integer](leader, follower2), partition.inSyncReplicas.map(_.brokerId))
// fetch from the follower not in ISR from start offset of the current leader epoch should
// add this follower to ISR
partition.updateReplicaLogReadResult(follower1Replica,
readResult(FetchDataInfo(LogOffsetMetadata(currentLeaderEpochStartOffset), batch3), leaderReplica))
assertEquals("ISR", Set[Integer](leader, follower1, follower2), partition.inSyncReplicas.map(_.brokerId))
}
def createRecords(records: Iterable[SimpleRecord], baseOffset: Long, partitionLeaderEpoch: Int = 0): MemoryRecords = {
val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava))
val builder = MemoryRecords.builder(
buf, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.LOG_APPEND_TIME,
baseOffset, time.milliseconds, partitionLeaderEpoch)
records.foreach(builder.append)
builder.build()
}
}
| mihbor/kafka | core/src/test/scala/unit/kafka/cluster/PartitionTest.scala | Scala | apache-2.0 | 16,995 |
package org.bitcoins.core.protocol.ln
import org.bitcoins.crypto._
import scodec.bits.ByteVector
/** Payment preimage for generating LN invoices.
*/
final case class PaymentPreimage(bytes: ByteVector) extends NetworkElement {
require(bytes.size == 32, s"Payment preimage size must be 32 bytes")
lazy val hash: Sha256Digest = CryptoUtil.sha256(bytes)
}
object PaymentPreimage extends Factory[PaymentPreimage] {
override def fromBytes(bytes: ByteVector): PaymentPreimage = {
new PaymentPreimage(bytes)
}
def random: PaymentPreimage = fromBytes(ECPrivateKey.freshPrivateKey.bytes)
}
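/** Usage sketch (object name is illustrative): create a fresh preimage and derive the
  * payment hash that would be embedded in a generated invoice. */
object PaymentPreimageExample {
  def freshPaymentHash: Sha256Digest = PaymentPreimage.random.hash
}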
| bitcoin-s/bitcoin-s | core/src/main/scala/org/bitcoins/core/protocol/ln/PaymentPreimage.scala | Scala | mit | 603 |
package io.buoyant.router.http
import com.twitter.finagle.http.Fields._
import com.twitter.finagle.http.{Message, Request, Response}
import com.twitter.finagle.{Service, ServiceFactory, SimpleFilter, Stack}
object StripHopByHopHeadersFilter {
object HopByHopHeaders {
def scrub(msg: Message): Unit = {
val headers = msg.headerMap
headers.remove(ProxyAuthenticate)
headers.remove(ProxyAuthorization)
headers.remove(Te)
headers.remove(Trailer)
headers.remove(TransferEncoding)
headers.remove(Upgrade)
val headersListedInConnection: Seq[String] = headers.remove(Connection) match {
case Some(s) => s.split(",").map(_.trim).filter(_.nonEmpty)
case None => Nil
}
headersListedInConnection.foreach(headers.remove(_))
}
}
/**
* Removes all Hop-by-Hop headers and any header listed in `Connection` header from requests.
*/
object filter extends SimpleFilter[Request, Response] {
def apply(req: Request, svc: Service[Request, Response]) = {
HopByHopHeaders.scrub(req)
svc(req).map(scrubResponse)
}
private[this] val scrubResponse: Response => Response = { resp =>
HopByHopHeaders.scrub(resp)
resp
}
}
object module extends Stack.Module0[ServiceFactory[Request, Response]] {
val role = Stack.Role("StripHopByHopHeadersFilter")
val description = "Removes all Hop-by-Hop headers and any header listed in `Connection` header from requests"
def make(next: ServiceFactory[Request, Response]) =
filter andThen next
}
}
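/** Usage sketch (the downstream service is assumed to exist elsewhere): compose the filter in
  * front of an HTTP service so hop-by-hop headers are scrubbed before they reach it. */
object StripHopByHopHeadersFilterExample {
  def protect(service: Service[Request, Response]): Service[Request, Response] =
    StripHopByHopHeadersFilter.filter.andThen(service)
}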
| denverwilliams/linkerd | router/http/src/main/scala/io/buoyant/router/http/StripHopByHopHeadersFilter.scala | Scala | apache-2.0 | 1,577 |
package pl.newicom.dddd.eventhandling
import akka.actor.ActorRef
import pl.newicom.dddd.messaging.event.OfficeEventMessage
trait EventHandler {
def handle(senderRef: ActorRef, event: OfficeEventMessage)
}
| odd/akka-ddd | akka-ddd-core/src/main/scala/pl/newicom/dddd/eventhandling/EventHandler.scala | Scala | mit | 209 |
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package generated.scala.application
import generated.scala._
class BinarizedGradientTemplate (
// In the reduced image. The side of the template square is then 2*r+1.
val radius: Int,
// Holds a tighter bounding box of the object in the original image scale
val rect: Rect,
val mask_list: IntDenseVector,
// Pyramid level of the template (reduction_factor = 2^level)
val level: Int,
// The list of gradients in the template
val binary_gradients: DoubleDenseVector,
// indices to use for matching (skips zeros inside binary_gradients)
val match_list: IndexVectorDenseC,
// This is a match list of list of sub-parts. Currently unused.
val occlusions: DenseVector[IntDenseVector],
val templates: DenseVector[BinarizedGradientTemplate],
val hist: FloatDenseVector
)
| TiarkRompf/lancet | src/main/scala/generated/scala/BinarizedGradientTemplate.scala | Scala | agpl-3.0 | 1,792 |
package com.scalableminds.webknossos.datastore.models
import com.scalableminds.webknossos.datastore.models.datasource.DataLayer
import com.scalableminds.util.geometry.{BoundingBox, Vec3Int}
import org.apache.commons.lang3.builder.HashCodeBuilder
trait GenericPosition {
def x: Int
def y: Int
def z: Int
}
class VoxelPosition(
protected val globalX: Int,
protected val globalY: Int,
protected val globalZ: Int,
val resolution: Vec3Int
) extends GenericPosition {
val x: Int = globalX / resolution.x
val y: Int = globalY / resolution.y
val z: Int = globalZ / resolution.z
def toBucket: BucketPosition =
BucketPosition(globalX, globalY, globalZ, resolution)
def move(dx: Int, dy: Int, dz: Int) =
new VoxelPosition(globalX + dx, globalY + dy, globalZ + dz, resolution)
override def toString = s"($globalX, $globalY, $globalZ) / $resolution"
override def equals(obj: scala.Any): Boolean =
obj match {
case other: VoxelPosition =>
other.globalX == globalX &&
other.globalY == globalY &&
other.globalZ == globalZ &&
other.resolution == resolution
case _ =>
false
}
override def hashCode(): Int =
new HashCodeBuilder(17, 31).append(globalX).append(globalY).append(globalZ).append(resolution).toHashCode
}
case class BucketPosition(
globalX: Int,
globalY: Int,
globalZ: Int,
resolution: Vec3Int
) extends GenericPosition {
val bucketLength: Int = DataLayer.bucketLength
val x: Int = globalX / bucketLength / resolution.x
val y: Int = globalY / bucketLength / resolution.y
val z: Int = globalZ / bucketLength / resolution.z
def volume: Int = bucketLength * bucketLength * bucketLength
def toCube(cubeLength: Int): CubePosition =
new CubePosition(globalX, globalY, globalZ, resolution, cubeLength)
def topLeft: VoxelPosition = {
val tlx: Int = globalX - globalX % (bucketLength * resolution.x)
val tly: Int = globalY - globalY % (bucketLength * resolution.y)
val tlz: Int = globalZ - globalZ % (bucketLength * resolution.z)
new VoxelPosition(tlx, tly, tlz, resolution)
}
def nextBucketInX: BucketPosition =
BucketPosition(globalX + (bucketLength * resolution.x), globalY, globalZ, resolution)
def nextBucketInY: BucketPosition =
BucketPosition(globalX, globalY + (bucketLength * resolution.y), globalZ, resolution)
def nextBucketInZ: BucketPosition =
BucketPosition(globalX, globalY, globalZ + (bucketLength * resolution.z), resolution)
def toHighestResBoundingBox: BoundingBox =
new BoundingBox(
Vec3Int(topLeft.x * resolution.x, topLeft.y * resolution.y, topLeft.z * resolution.z),
bucketLength * resolution.x,
bucketLength * resolution.y,
bucketLength * resolution.z
)
override def toString: String =
s"BucketPosition($globalX, $globalY, $globalZ, mag$resolution)"
}
class CubePosition(
protected val globalX: Int,
protected val globalY: Int,
protected val globalZ: Int,
val resolution: Vec3Int,
val cubeLength: Int
) extends GenericPosition {
val x: Int = globalX / cubeLength / resolution.x
val y: Int = globalY / cubeLength / resolution.y
val z: Int = globalZ / cubeLength / resolution.z
def topLeft: VoxelPosition = {
val tlx: Int = globalX - globalX % (cubeLength * resolution.x)
val tly: Int = globalY - globalY % (cubeLength * resolution.y)
val tlz: Int = globalZ - globalZ % (cubeLength * resolution.z)
new VoxelPosition(tlx, tly, tlz, resolution)
}
def toHighestResBoundingBox: BoundingBox =
new BoundingBox(Vec3Int(globalX, globalY, globalZ),
cubeLength * resolution.x,
cubeLength * resolution.y,
cubeLength * resolution.z)
override def toString: String =
s"CPos($x,$y,$z,res=$resolution)"
}
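/** Usage sketch (coordinates and magnification are arbitrary): map a voxel in mag 2-2-1 to the
  * bucket containing it, and recover that bucket's top-left voxel. */
object PositionExample {
  def bucketOf(globalX: Int, globalY: Int, globalZ: Int): BucketPosition =
    new VoxelPosition(globalX, globalY, globalZ, Vec3Int(2, 2, 1)).toBucket
  def topLeftOf(bucket: BucketPosition): VoxelPosition = bucket.topLeft
}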
| scalableminds/webknossos | webknossos-datastore/app/com/scalableminds/webknossos/datastore/models/Positions.scala | Scala | agpl-3.0 | 3,871 |
package se.chimps.cameltow.modules
import se.chimps.cameltow.{CameltowApp}
import com.google.inject.{Guice => Juice, Injector, Module}
import se.chimps.cameltow.lifecycle.Lifecycle
import scala.collection.mutable
import scala.collection.JavaConversions._
import scala.Some
import se.chimps.cameltow.exceptions.NotStartedException
import org.springframework.context.annotation.AnnotationConfigApplicationContext
/**
* Created by meduzz on 16/05/14.
*/
trait DependencyInjection { self:CameltowApp =>
def instance[T>:K, K](clazz:Class[K]):T
}
object DependencyInjection {
trait Guice extends DependencyInjection { self:CameltowApp =>
private val moduleLifecycle = new GuiceLifecycle
self.registerLifecycle(moduleLifecycle)
override def instance[T >: K, K](clazz: Class[K]): T = moduleLifecycle.getInstance(clazz)
def addModule(module: Module): Unit = {
moduleLifecycle.injectable(module)
}
}
trait Spring extends DependencyInjection { self:CameltowApp =>
private val springLifecycle = new SpringLifecycle
self.registerLifecycle(springLifecycle)
def scan(packages:String):Unit = {
springLifecycle.scan(packages)
}
def registerSpringAnnotatedConfig(configuration:Class[_]):Unit = {
springLifecycle.registerConfig(configuration)
}
override def instance[T >: K, K](clazz: Class[K]): T = springLifecycle.getInstance(clazz)
}
private class GuiceLifecycle extends Lifecycle {
private val modules:mutable.MutableList[Module] = mutable.MutableList[Module]()
private var injector:Option[Injector] = None
def injectable(module: Module): Unit = {
modules += module
}
override def start():Unit = injector = Some(Juice.createInjector(asJavaIterable(modules.toIterable)))
override def stop():Unit = injector = None
def getInstance[T >: K, K](clazz: Class[K]):T = injector match {
case Some(injector:Injector) => injector.getInstance(clazz)
case None => throw new NotStartedException("App has not started yet.")
}
}
private class SpringLifecycle extends Lifecycle {
val spring = new AnnotationConfigApplicationContext()
def scan(packages:String) = {
spring.scan(packages)
}
def registerConfig(config:Class[_]) = {
spring.register(config)
}
def getInstance[T >: K, K](clazz: Class[K]): T = spring.getBean(clazz)
override def start():Unit = {
spring.refresh()
if (spring.getBeanDefinitionCount > 0) {
spring.start()
}
}
override def stop():Unit = spring.stop()
}
}
| Meduzz/Cameltow | src/main/scala/se/chimps/cameltow/modules/DependencyInjection.scala | Scala | gpl-2.0 | 2,577 |
/* ____ ___ *\\
** / __ | ___ ____ /__/___ A library of building blocks **
** / __ / __ |/ ___|/ / ___| **
** / /_/ / /_/ /\\__ \\/ /\\__ \\ (c) 2012-2013 Reify It **
** |_____/\\_____\\____/__/\\____/ http://basis.reify.it **
\\* */
package basis.dispatch
package process
trait AsyncState { async: Async =>
type RouteState >: Null <: RouteStateApi
trait RouteStateApi { this: RouteState =>
def runningCount: Int
def workingCount: Int
def waitingCount: Int
def blockedCount: Int
def ceasingCount: Int
def apply(index: Int): Track
def apply(track: Track): TrackState
def rouse(track: Track): RouteState
def rouse: RouteState
def accelerate(parallelism: Int): RouteState
def decelerate: RouteState
def setWorking(track: Track): RouteState
def setWaiting(track: Track): RouteState
def setBlocked(track: Track): RouteState
def setCeasing(track: Track): RouteState
def cease(track: Track): RouteState
def signal(): Unit
}
def RouteState(route: Route): RouteState
type TrackState >: Null <: TrackStateApi
trait TrackStateApi { this: TrackState =>
def isWorking: Boolean
def isWaiting: Boolean
def isBlocked: Boolean
def isCeasing: Boolean
def working: TrackState
def waiting: TrackState
def blocked: TrackState
def ceasing: TrackState
}
}
| ReifyIt/ortho-basis | basis-dispatch/src/main/scala/basis/dispatch/process/AsyncState.scala | Scala | mit | 1,692 |
package io.iohk.ethereum.metrics
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
/**
* A gauge that starts at `0` and can be triggered to go to `1`.
* Next time it is sampled, it goes back to `0`.
* This is normally used for either one-off signals (e.g. when an application starts)
* or slowly re-appearing signals. Specifically, the sampling rate must be greater
 * than the rate at which the signal is triggered.
*/
class DeltaSpikeGauge(name: String, metrics: Metrics) {
private[this] final val isTriggeredRef = new AtomicBoolean(false)
private[this] final val valueRef = new AtomicInteger(0)
private[this] def getValue(): Double = {
if (isTriggeredRef.compareAndSet(true, false)) {
valueRef.getAndSet(0)
} else {
valueRef.get()
}
}
private[this] final val gauge = metrics.gauge(name, () => getValue)
def trigger(): Unit = {
if (isTriggeredRef.compareAndSet(false, true)) {
valueRef.set(1)
// Let one of the exporting metric registries pick up the `1`.
// As soon as that happens, `getValue` will make sure that we go back to `0`.
}
}
}
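/** Usage sketch (metric name is illustrative; the Metrics instance comes from the application):
  * register the gauge once and trigger it when the one-off event happens. */
object DeltaSpikeGaugeExample {
  def signalStart(metrics: Metrics): DeltaSpikeGauge = {
    val gauge = new DeltaSpikeGauge("app.start.spike", metrics)
    gauge.trigger() // the next sample reports 1, every later sample reports 0 again
    gauge
  }
}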
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/metrics/DeltaSpikeGauge.scala | Scala | mit | 1,129 |
/*
* Copyright (c) 2013 Daniel Krzywicki <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package pl.edu.agh.scalamas.stats
import pl.edu.agh.scalamas.app.ConcurrentAgentRuntimeComponent
/**
* Mixin component for a application statistics factory method.
*/
trait StatsFactoryComponent {
/**
* The factory method.
* @return
*/
def statsFactory: StatsFactory
trait StatsFactory {
/**
* Creates some statistics with the given initial value and update function.
*/
def apply[T](initialValue: T)(updateFunction: (T, T) => T): Stats[T]
}
}
/**
* Factory for simple stats.
*/
trait SimpleStatsFactory extends StatsFactoryComponent {
def statsFactory = SimpleStatsFactoryImpl
object SimpleStatsFactoryImpl extends StatsFactory {
def apply[T](initialValue: T)(updateFunction: (T, T) => T) = Stats.simple(initialValue)(updateFunction)
}
}
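/** Usage sketch (trait name is illustrative): a component that mixes in a StatsFactoryComponent
  * can build a running-sum statistic without caring which factory implementation is provided. */
trait RunningSumStatsExample { this: StatsFactoryComponent =>
  // Accumulates Long values by addition, starting from 0.
  lazy val runningSum = statsFactory(0L)(_ + _)
}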
/**
* Factory for concurrent stats.
*/
trait ConcurrentStatsFactory extends StatsFactoryComponent {
this: ConcurrentAgentRuntimeComponent =>
def statsFactory = ConcurrentStatsFactoryImpl
object ConcurrentStatsFactoryImpl extends StatsFactory {
def apply[T](initialValue: T)(updateFunction: (T, T) => T) = Stats.concurrent(initialValue)(updateFunction)(agentRuntime.system.dispatcher)
}
}
| eleaar/scala-mas | core/src/main/scala/pl/edu/agh/scalamas/stats/StatsFactory.scala | Scala | mit | 2,348 |
package org.tesserae
import org.slf4j.LoggerFactory
import java.util.concurrent.locks.ReentrantReadWriteLock
import org.iq80.leveldb.{CompressionType, Options, DB}
import java.io.File
import org.fusesource.leveldbjni.JniDBFactory._
import scala.Some
/**
* Manage any LevelDB database connections. Once established, a connection remains open
* for the lifetime of the JVM. All loading is lazy.
*/
object LevelDBManager {
private lazy val logger = LoggerFactory.getLogger(getClass)
private val lock = new ReentrantReadWriteLock(true)
private var databases: Map[String, (DB, ReentrantReadWriteLock)] = Map.empty
/**
* Load a LevelDB database
*
* @param dbLocation The location of the database
* @param createIfMissing if true and the database doesn't exist, it will be created
* @return A LevelDB DB instance
*/
private def loadDB(dbLocation: File, createIfMissing: Boolean) = {
val opts = new Options
opts.createIfMissing(createIfMissing)
opts.compressionType(CompressionType.NONE)
val _db = factory.open(dbLocation, opts)
if (_db == null) {
throw new IllegalStateException("db is null (couldn't load database?)")
}
logger.info("Loaded database: " + dbLocation.getPath)
_db
}
/**
* Get a database connection for a given location
*
* @param dbLocation The location of the database
* @param createIfMissing if true and the database doesn't exist, it will be created
* @return A LevelDB DB instance
*/
def dbFor(dbLocation: File, createIfMissing: Boolean = false) = {
val key = dbLocation.getAbsoluteFile.getCanonicalPath
lock.readLock().lock()
try {
databases.get(key) match {
case Some(tuple) => tuple
case None => {
lock.readLock().unlock()
lock.writeLock().lock()
try {
databases.get(key) match {
case Some(tuple) => tuple
case None => {
val db = loadDB(dbLocation, createIfMissing)
val dbLock = new ReentrantReadWriteLock(true)
val tuple = (db, dbLock)
databases += key -> tuple
tuple
}
}
} finally {
lock.writeLock().unlock()
lock.readLock().lock()
}
}
}
} finally {
lock.readLock().unlock()
}
}
}
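/** Usage sketch (the path is illustrative): obtain the shared connection and its lock for a
  * database directory, creating the database on first use. */
object LevelDBManagerExample {
  def corpusDb(): (DB, ReentrantReadWriteLock) =
    LevelDBManager.dbFor(new File("data/corpus-db"), createIfMissing = true)
}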
| eberle1080/tesserae-ng | text-analysis/src/main/scala/db/LevelDBManager.scala | Scala | bsd-2-clause | 2,380 |
class a {
ne/*caret*/
}
/*
new
*/
| ilinum/intellij-scala | testdata/keywordCompletion/expressions/new.scala | Scala | apache-2.0 | 35 |
package org.pfcoperez.dailyalgorithm.numericmethods.random
import org.pfcoperez.dailyalgorithm.numericmethods.random.DistributionRandomGenerator.DensityFunction
import org.pfcoperez.dailyalgorithm.numericmethods.random.impl.DoubleRandomGen
class DistributionRandomGenerator private (
private val randomDoubleGen: RandomGenerator[Double],
val densityFunction: DensityFunction) extends RandomGenerator[Double] {
final override def next: (DistributionRandomGenerator, Double) = {
def normalize(x: Double): Double =
if (x < 0.0) normalize(-x)
else if (x > 1.0) normalize(x - x.floor)
else x
def withinRange(x: Double, range: (Double, Double)): Double = {
val (a, b) = range
a + normalize(x) * (b - a)
}
val (nextDoubleGenA, rawX) = randomDoubleGen.next
val (nextDoubleGenB, rawY) = nextDoubleGenA.next
val updatedGenerator = new DistributionRandomGenerator(nextDoubleGenB, densityFunction)
val Seq(x, y) = Seq(rawX, rawY) map (withinRange(_, densityFunction.domain))
val candidate = densityFunction.lift(x) collect {
case maxY if y >= 0.0 && y <= maxY => updatedGenerator -> x
}
if (candidate.isDefined) candidate.get
else updatedGenerator.next
}
}
object DistributionRandomGenerator {
import scala.runtime.AbstractPartialFunction
case class DensityFunction(domain: (Double, Double))(
f: Double => Double) extends AbstractPartialFunction[Double, Double] {
override def applyOrElse[A1 <: Double, B1 >: Double](x: A1, default: (A1) => B1): B1 = {
if (isDefinedAt(x)) f(x) else default(x)
}
override def isDefinedAt(x: Double): Boolean = {
val (from, to) = domain
from <= x && x <= to
}
def validateDensity(step: Double, tolerance: Option[Double] = None): Boolean = {
val (a, b) = domain
val acceptedError = tolerance.getOrElse(2 * step)
val p = (a to b by step).map(f).sum * step
math.abs(p - 1.0) <= acceptedError
}
}
object DensityFunctions {
def uniform(range: (Double, Double)): DensityFunction = {
val (from, to) = range
DensityFunction(range)(_ => 1.0 / (from - to))
}
def normal(mean: Double, variance: Double)(range: (Double, Double)): DensityFunction = {
import math.{ sqrt, exp, Pi }
val (from, to) = range
val mainFactor = 1.0 / sqrt(2.0 * Pi * variance)
val expDem = -2.0 * variance
DensityFunction(from, to) { x =>
val desv = x - mean
mainFactor * exp(desv * desv / expDem)
}
}
}
def apply(df: DensityFunction, seed: Long): DistributionRandomGenerator = {
new DistributionRandomGenerator(new DoubleRandomGen(seed), df)
}
}
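/** Usage sketch (seed and domain are arbitrary): draw a single sample from a standard normal
  * density truncated to [-4, 4] using the rejection sampling implemented above. */
object DistributionRandomGeneratorExample {
  def sampleOnce(seed: Long): Double = {
    val density = DistributionRandomGenerator.DensityFunctions.normal(mean = 0.0, variance = 1.0)((-4.0, 4.0))
    val (_, sample) = DistributionRandomGenerator(density, seed).next
    sample
  }
}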
| pfcoperez/algorithmaday | src/main/scala/org/pfcoperez/dailyalgorithm/numericmethods/random/DistributionRandomGenerator.scala | Scala | gpl-3.0 | 2,713 |
package codemodels.incrementalparsers.javaip
import name.lakhin.eliah.projects.papacarlo.lexis.{Matcher, Tokenizer,
Contextualizer, Token}
import name.lakhin.eliah.projects.papacarlo.{Syntax, Lexer}
import name.lakhin.eliah.projects.papacarlo.syntax.Rule
import name.lakhin.eliah.projects.papacarlo.syntax.Expressions._
import name.lakhin.eliah.projects.papacarlo.syntax.NodeAccessor
import name.lakhin.eliah.projects.papacarlo.syntax.rules.{ExpressionRule, NamedRule}
import name.lakhin.eliah.projects.papacarlo.syntax.Rule._
object JavaIP {
def tokenizer = {
val tokenizer = new Tokenizer()
import tokenizer._
import Matcher._
// lexis specification here
tokenCategory(
"whitespace",
oneOrMore(anyOf(" \\t\\f\\n")) // in terms of regexp: [:space:]+
).skip
// For some reason I managed to parse it only as integerLiteral...
tokenCategory(
"floatL",
sequence(
choice( // in terms of regexp: 0|([1-9][0-9]*)
chunk("0"),
sequence(rangeOf('1', '9'), zeroOrMore(rangeOf('0', '9')))
),
optional(sequence(
chunk("."),
oneOrMore(rangeOf('0', '9'))
)),
choice(chunk("f"),chunk("F"))
)
)
tokenCategory(
"double",
sequence(
choice( // in terms of regexp: 0|([1-9][0-9]*)
chunk("0"),
sequence(rangeOf('1', '9'), zeroOrMore(rangeOf('0', '9')))
),
chunk("."),
oneOrMore(rangeOf('0', '9')),
optional(choice(chunk("f"),chunk("F")))
)
)
tokenCategory(
"longL",
sequence(
choice( // in terms of regexp: 0|([1-9][0-9]*)
chunk("0"),
sequence(rangeOf('1', '9'), zeroOrMore(rangeOf('0', '9')))
),
choice(chunk("L"),chunk("l")))
)
tokenCategory(
"integer",
sequence(
choice( // in terms of regexp: 0|([1-9][0-9]*)
chunk("0"),
sequence(rangeOf('1', '9'), zeroOrMore(rangeOf('0', '9')))
),
optional(choice(chunk("f"),chunk("F"))))
)
tokenCategory(
"string",
sequence(
chunk("\\""),
zeroOrMore(choice(
anyExceptOf("\\n\\r\\\\\\""),
sequence(chunk("\\\\"), anyExceptOf("\\n\\r"))
)),
chunk("\\"")
)
).mutable
tokenCategory(
"char",
sequence(
chunk("'"),
choice(
sequence(chunk("\\\\"),any()),
anyExceptOf("'")
),
chunk("'")
)
).mutable
tokenCategory(
"annotationName",
sequence(
chunk("@"),
choice(chunk("_"),rangeOf('a', 'z'),rangeOf('A','Z')),
zeroOrMore(
choice(
chunk("_"),
rangeOf('a', 'z'),
rangeOf('A','Z'),
rangeOf('0', '9'))),
zeroOrMore(
sequence(
chunk("."),choice(chunk("_"),rangeOf('a', 'z'),rangeOf('A','Z')),
zeroOrMore(
choice(
chunk("_"),
rangeOf('a', 'z'),
rangeOf('A','Z'),
rangeOf('0', '9')))))
)
).mutable
tokenCategory(
"identifier",
sequence(
choice(chunk("_"),rangeOf('a', 'z'),rangeOf('A','Z')),
zeroOrMore(
choice(
chunk("_"),
rangeOf('a', 'z'),
rangeOf('A','Z'),
rangeOf('0', '9'))))
).mutable
terminals("(", ")", "%", "++","--","+", "-", "*", "/", "{", "}",";",":",".",",",
"&&","||",
"+=","-=","*=","/=",
"==","!=","<=",">=","<",">",
"!",
"=","&","|","[","]",
"?",
"//","/*","*/")
keywords(
"true", "false",
"null",
"byte","int", "char", "short", "long", "float", "double", "void",
"do", "while", "for", "switch", "case", "break", "return", "throw","continue","default",
"if","else",
"import",
"@interface",
"class","interface","enum",
"private","protected","public",
"static","native","final", "synchronized","abstract","volatile",
"extends","implements","throws",
"try", "catch","finally",
"this",
"new",
"package",
"instanceof"
)
tokenizer
}
private def contextualizer = {
val contextualizer = new Contextualizer
import contextualizer._
// fragment specification here
trackContext("/*", "*/").forceSkip.topContext
trackContext("//", Token.LineBreakKind).forceSkip.topContext
trackContext("[", "]").allowCaching
trackContext("{", "}").allowCaching
trackContext("\\"", "\\"").topContext
contextualizer
}
def lexer = new Lexer(tokenizer, contextualizer)
def syntax(lexer: Lexer) = new {
val syntax = new Syntax(lexer)
import syntax._
import Rule._
val qualifiedIdentifier = rule("qualifiedIdentifier") {
oneOrMore(
capture("part",token("identifier")),
separator = token(".")
)
}
// Expressions start
val doubleLiteral = rule("doubleLiteral") {
capture("value",token("double"))
}
val floatLiteral = rule("floatLiteral") {
capture("value",token("floatL"))
}
val integerLiteral = rule("integerLiteral") {
capture("value",token("integer"))
}
val longLiteral = rule("longLiteral") {
capture("value",token("longL"))
}
val stringLiteral = rule("stringLiteral") {
capture("value",token("string"))
}
val charLiteral = rule("charLiteral") {
capture("value",token("char"))
}
val booleanLiteral = rule("booleanLiteral") {
capture("value",choice(token("true"),token("false")))
}
val thisReference = rule("thisReference") {
token("this")
}
val classReference = rule("classReference") {
token("class")
}
val superReference = rule("superReference") {
sequence(
token("super"),
optional(sequence(
token("("),
zeroOrMore(branch("actualParams",exp),separator = token(",")),
token(")")
))
)
}
val nullReference = rule("nullReference") {
token("null")
}
val variableReference = rule("variableReference") {
capture("name",token("identifier"))
}
val instantiation = rule("instantiation").transform{ orig =>
if (orig.getBranches contains "classInst"){
orig.getBranches("classInst").head
} else {
orig.getBranches("arrayInst").head
}
} {
choice(
branch("classInst",classInstantiation),
branch("arrayInst",arrayInstantiation))
}
val classInstantiation = rule("classInstantiation"){
sequence(
token("new"),
choice(branch("className",qualifiedIdentifier),
branch("typeName",primitiveType)),
optional(branch("genericParams",genericParams)),
token("("),
zeroOrMore(branch("actualParams",exp),separator = token(",")),
token(")"),
optional(sequence(
token("{"),
branch("members",zeroOrMore(memberDecl)),
token("}")
))
)
}
val arrayInstantiation = rule("arrayInstantiation"){
sequence(
token("new"),
choice(branch("className",qualifiedIdentifier),
branch("typeName",primitiveType)),
choice(sequence(
token("["),
capture("size",exp),
token("]")),
sequence(
token("["),
token("]"),
token("{"),
zeroOrMore(exp,separator = token(",")),
token("}")
))
)
}
val paren = rule("paren").transform { orig =>
if (orig.getBranches contains "castedExp"){
orig.accessor.setKind("castExp").node
} else {
orig
}
}{
choice(
sequence(
token("("),
capture("value",choice(typeUsage)),
token(")"),
branch("castedExp",exp)),
sequence(
token("("),
capture("value",choice(exp)),
token(")")
)
)
}
val assignment = rule("assignment"){
sequence(
branch("assigned",choice(qualifiedIdentifier,fieldAccess)),
optional(sequence(
token("["),
exp,
token("]")
)),
token("="),
branch("value",exp)
)
}
val arrayInit = rule("arrayInit"){
sequence(token("{"),
zeroOrMore(branch("value",exp),separator=token(",")),
optional(token(",")),
token("}"))
}
val namedOrSimpleParam = rule("namedOrSimpleParam"){
sequence(
optional(sequence(
token("identifier"),
token("=")
)),
exp
)
}
val annotationInstantiation = rule("annotationInstantiation"){
sequence(
token("annotationName"),
token("("),
zeroOrMore(namedOrSimpleParam,separator=token(",")),
token(")"))
}
val expAtom = subrule("expAtom") {
choice(
annotationInstantiation,
floatLiteral,
arrayInit,
assignment,
thisReference,
classReference,
doubleLiteral,
longLiteral,
integerLiteral,
stringLiteral,
charLiteral,
booleanLiteral,
instantiation,
superReference,
nullReference,
paren,
variableReference
)
}
val expAccess = rule("expAccess").transform { orig =>
if (orig.getValues contains "fieldName"){
orig
} else {
orig.getBranches("value").head
}
} {
sequence(
branch("value",expAtom),
zeroOrMore(sequence(
token("."),
choice(
capture("fieldName",token("identifier")))
))
)
}
val invocation = rule("invocation"){
sequence(
token("("),
zeroOrMore(branch("actualParams",exp),separator = token(",")),
token(")"))
}
val expMethodCall = rule("expMethodCall").transform{ orig =>
if (orig.getBranches contains "invocation"){
orig
} else {
orig.getBranches("base").head
}
} {
sequence(
branch("base",sequence(expAtom)),
optional(branch("invocation",invocation)))
}
val expOpElement = subrule("expOpElement"){
choice(expMethodCall,expAccess)
}
val expOp : NamedRule = rule("expression") {
val rule =
expression(branch("operand", expOpElement))
postfix(rule,"++",1)
postfix(rule,"--",1)
prefix(rule,"++",1)
prefix(rule,"--",1)
prefix(rule,"!",1)
infix(rule, "%", 1)
prefix(rule, "+", 2)
prefix(rule, "-", 2)
infix(rule, "*", 3)
infix(rule, "/", 3, rightAssociativity = true)
infix(rule, "+", 4)
infix(rule, "-", 4)
infix(rule, "==", 5)
//infix(rule,"=",8)
infix(rule, "!=", 5)
infix(rule, ">=", 5)
infix(rule, "<=", 5)
infix(rule, "+=", 5)
infix(rule, "-=", 5)
infix(rule, "/=", 5)
infix(rule, "*=", 5)
infix(rule, "<", 5)
infix(rule, ">", 5)
infix(rule,"&&",7)
infix(rule,"||",7)
infix(rule,"&",7)
infix(rule,"|",7)
rule
}
val arrayAccessPart = rule("arrayAccessPart"){
sequence(token("["),
optional(branch("index",exp)),
token("]"))
}
val expArrayAccess = rule("expArrayAccess").transform { orig =>
if (orig.getBranches contains "arrayAccess"){
orig
} else {
orig.getBranches("value").head
}
} {
sequence(
branch("value",expOp),
optional(branch("arrayAccess",arrayAccessPart)))
}
val chainExp = rule("chainExp").transform { orig =>
if (orig.getBranches contains "chained"){
orig
} else {
orig.getBranches("base").head
}
} {
sequence(
branch("base",expArrayAccess),
zeroOrMore(sequence(
token("."),
optional(branch("genericParams",genericParams)),
branch("chained",expArrayAccess)
))
)
}
val instanceOfExp = rule("instanceOfExpSuffix").transform { orig =>
if (orig.getBranches contains "typeUsage"){
orig
} else {
orig.getBranches("base").head
}
}{
sequence(
branch("base",chainExp),
optional(sequence(
token("instanceof"),
branch("type",typeUsage)
)))
}
val ifElseExp = rule("ifElseExp").transform { orig =>
if (orig.getBranches contains "thenValue"){
orig
} else {
orig.getBranches("base").head
}
}{
sequence(
branch("base",instanceOfExp),
optional(sequence(
token("?"),
branch("thenValue",exp),
token(":"),
branch("elseValue",exp)
))
)
}
val exp : Rule = subrule("expUsage") {
ifElseExp
}
// Expressions end
val accessQualifier = rule("accessQualifier") {
capture("name",choice(
token("public"),
token("protected"),
token("private")
))
}
val qualifier = rule("qualifier").transform{ orig =>
if (orig.getValues contains "static"){
orig.accessor.setKind("staticQualifier")
orig.accessor.setConstant("static",null)
orig
} else {
orig
}
} {
choice(
branch("access", accessQualifier),
capture("static", token("static")),
capture("final", token("final")),
capture("abstract",token("abstract")),
capture("volatile",token("volatile"))
)
}
val classType : NamedRule = rule("classType") {
sequence(
capture("name",token("identifier")),
optional(sequence(token("."),branch("member",classType))),
optional(branch("genericParams",genericParams))
)
}
val primitiveType = rule("primitiveType") {
capture("name",choice(
token("int"),
token("byte"),
token("char"),
token("long"),
token("float"),
token("double"),
token("boolean")
))
}
val arrayTypeMod = rule("arrayType") {
sequence(
token("["),
token("]")
)
}
val genericCapture = rule("genericCapture"){
sequence(
token("?"),
optional(sequence(
token("extends"),
branch("baseType",typeUsage)
))
)
}
val genericParams : NamedRule = rule("genericParams"){
sequence(
token("<"),
zeroOrMore(branch("params",choice(genericCapture,typeUsage)),separator = token(",")),
token(">")
)
}
val typeUsage = rule("typeUsage") {
sequence(
branch("baseType", choice(
primitiveType,
classType
)),
zeroOrMore(branch("array",arrayTypeMod))
)
}
val voidType = rule("voidType") {token("void")}
val fieldDecl = rule("fieldDecl") {
sequence(
branch("qualifiers",zeroOrMore(qualifier)),
branch("annotations",zeroOrMore(annotationUsage)),
branch("qualifiers",zeroOrMore(qualifier)),
branch("type",typeUsage),
oneOrMore(capture("name", token("identifier")),separator=token(",")),
optional(
sequence(
token("="),
branch("initializationValue",exp)
)
),
token(";").permissive
)
}
val paramDecl = rule("paramDecl") {
sequence(
zeroOrMore(annotationUsage),
optional(capture("final",token("final"))),
branch("type",typeUsage),
capture("name",token("identifier"))
)
}
val constructorDecl = rule("constructorDecl") {
sequence(
branch("annotations",zeroOrMore(annotationUsage)),
branch("qualifiers",zeroOrMore(qualifier)),
capture("name", token("identifier")),
token("("),
zeroOrMore(
branch("params", paramDecl),
separator = token(",")
),
recover(token(")"),"Missing closing parenthesis"),
optional(sequence(
token("throws"),
oneOrMore(branch("exceptionsThrown",qualifiedIdentifier),separator = token(","))
)),
choice(
token(";"),
sequence(
token("{"),
zeroOrMore(branch("stmts",statement)),
token("}"),
optional(token(";"))
)
)
)
}
val annotationUsage = rule("annotationUsage"){
sequence(
capture("name",token("annotationName")),
optional(sequence(
token("("),
oneOrMore(capture("val",choice(sequence(token("identifier"),token("="),exp),exp)),separator=token(",")),
token(")")
))
)
}
val methodDecl = rule("methodDecl") {
sequence(
branch("annotations",zeroOrMore(annotationUsage)),
branch("qualifiers",zeroOrMore(qualifier)),
optional(branch("genericParams",genericParams)),
branch("returnType",
choice(
typeUsage,
voidType
)
),
capture("name", token("identifier")),
token("("),
zeroOrMore(
branch("params", paramDecl),
separator = token(",")
),
recover(token(")"),"Missing closing parenthesis"),
optional(sequence(
token("throws"),
oneOrMore(branch("exceptionsThrown",qualifiedIdentifier),separator = token(","))
)),
choice(
capture("abstractBody",token(";")),
sequence(
token("{"),
zeroOrMore(branch("stmts",statement)),
token("}"),
optional(token(";"))
)
)
)
}
val initializer : NamedRule = rule("initializer") {
sequence(
optional(token("static")),
token("{"),
zeroOrMore(branch("statements",statement)),
token("}")
)
}
val memberDecl : NamedRule = rule("memberDecl") {
choice(
branch("constructor",constructorDecl),
branch("method",methodDecl),
branch("field",fieldDecl),
branch("class",classDeclaration),
branch("interface",javaInterface),
branch("initializer",initializer),
branch("enum",enumDecl)
)
}
val classDeclaration = rule("classDeclaration") {
      // A class declaration: annotations and qualifiers, the class name, optional
      // extends/implements clauses, and a braced list of member declarations.
sequence(
branch("annotations",zeroOrMore(annotationUsage)),
branch("qualifiers",zeroOrMore(qualifier)),
token("class"),
capture("name", token("identifier")),
optional(sequence(
token("extends"),
branch("baseClass",typeUsage)
)),
optional(
sequence(
token("implements"),
oneOrMore(
branch("interfaces",typeUsage),
separator = token(",")
)
)
),
token("{"),
branch("members",zeroOrMore(memberDecl)),
recover(token("}"), "class must end with '}'")
)
}
val javaInterface = rule("javaInterface") {
sequence(
branch("annotations",zeroOrMore(annotationUsage)),
branch("qualifiers",zeroOrMore(qualifier)),
token("interface"),
capture("name", token("identifier")),
optional(sequence(
token("extends"),
oneOrMore(branch("baseClass",typeUsage),separator=token(","))
)),
token("{"),
branch("members",zeroOrMore(memberDecl)),
recover(token("}"), "interface must end with '}'")
)
}
val annotationDeclField = rule("annotationDeclField") {
sequence(
typeUsage,
token("identifier"),
token("("),
token(")"),
optional(sequence(
token("default"),
exp
)),
recover(token(";"), "annotation field must end with '}'")
)
}
val annotationDecl = rule("annotationDecl") {
sequence(
branch("annotations",zeroOrMore(annotationUsage)),
branch("qualifiers",zeroOrMore(qualifier)),
token("@interface"),
capture("name", token("identifier")),
token("{"),
zeroOrMore(branch("fields",choice(annotationDeclField,enumDecl))),
recover(token("}"), "annotation declaration must end with '}'")
)
}
val importDir = rule("importDir") {
sequence(
token("import"),
optional(capture("static",token("static"))),
capture("part",token("identifier")),
zeroOrMore(
sequence(
token("."),
choice(
capture("part",token("identifier")),
capture("part",token("*"))
)
)
),
token(";").permissive
)
}
val packageDecl = rule("packageDecl") {
sequence(
branch("annotations",zeroOrMore(annotationUsage)),
token("package"),
branch("packageName",qualifiedIdentifier),
token(";")
)
}
val fieldName = rule("fieldName"){
capture("name",token("identifier"))
}
val enumDecl = rule("enumDecl") {
sequence(
branch("annotations",zeroOrMore(annotationUsage)),
branch("qualifiers",zeroOrMore(qualifier)),
token("enum"),
capture("name", token("identifier")),
optional(sequence(
token("extends"),
branch("baseClass",typeUsage)
)),
optional(
sequence(
token("implements"),
oneOrMore(
branch("interfaces",typeUsage),
separator = token(",")
)
)
),
token("{"),
oneOrMore(branch("fields",fieldName),separator=token(",")),
optional(sequence(token(";"),branch("members",zeroOrMore(memberDecl)))),
recover(token("}"), "class must end with '}'"))
}
val compilationUnit = rule("compilationUnit").main {
sequence(
optional(branch("packageDecl",packageDecl)),
branch("imports",zeroOrMore(importDir)),
zeroOrMore(branch("classDeclaration",choice(javaInterface,classDeclaration,annotationDecl,enumDecl)))
)
}
val fieldAccess = rule("fieldAccess"){
sequence(
token("this"),
token("."),
branch("field",qualifiedIdentifier)
)
}
// Statements
val expressionStatement = rule("expressionStatement"){
sequence(
branch("expression",exp),
recover(token(";"),"semicolon missing")
)
}
val emptyStatement = rule("emptyStatement"){
token(";")
}
val returnStmt = rule("returnStmt"){
sequence(
token("return"),
optional(branch("value",exp)),
recover(token(";"),"semicolon missing")
)
}
val localVarDecl = rule("localVarDecl") {
sequence(
optional(capture("final",token("final"))),
branch("annotations",zeroOrMore(annotationUsage)),
branch("type",typeUsage),
oneOrMore(
sequence(
capture("name", token("identifier")),
zeroOrMore(sequence(token("["),token("]"))),
optional(
sequence(
token("="),
branch("initializationValue",exp)
)
)),separator = token(",")),
recover(token(";"),"semicolon missing")
)
}
val simpleLocalVarDecl = rule("simpleLocalVarDecl") {
sequence(
optional(capture("final",token("final"))),
branch("type",typeUsage),
oneOrMore(capture("name", token("identifier")),separator = token(","))
)
}
val blockStmt : NamedRule = rule("blockStmt") {
sequence(
token("{"),
zeroOrMore(branch("stmts",statement)),
recover(token("}"),"Missing }"),
optional(token(";"))
)
}
val ifStmt : NamedRule = rule("ifStmt") {
sequence(
token("if"),
token("("),
branch("condition",exp),
recover(token(")"),"closing parenthesis expected"),
branch("then",statement),
optional(sequence(
token("else"),
branch("else",statement)
))
)
}
val forStmt = rule("forStmt") {
sequence(
token("for"),
token("("),
branch("init",statement),
optional(exp),
recover(token(";"),"semicolon missing"),
optional(exp),
recover(token(")"),"closing parenthesis expected"),
branch("body",statement)
)
}
val forEachStmt = rule("forEachStmt") {
sequence(
token("for"),
token("("),
branch("iterator",simpleLocalVarDecl),
token(":"),
branch("collection",exp),
recover(token(")"),"closing parenthesis expected"),
branch("body",statement)
)
}
val whileStmt = rule("whileStmt") {
sequence(
token("while"),
token("("),
optional(exp),
recover(token(")"),"closing parenthesis expected"),
branch("body",statement)
)
}
val catchClause = rule("catchClause"){
sequence(
token("catch"),
token("("),
oneOrMore(capture("exceptions",qualifiedIdentifier),separator=token("|")),
token("identifier"),
recover(token(")"),"closing parenthesis expected"),
token("{"),
zeroOrMore(statement),
recover(token("}"),"Missing }"))
}
val tryWithResourceStmt = rule("tryWithResourceStmt") {
sequence(
token("try"),
token("("),
oneOrMore(sequence(optional(token("final")),typeUsage,token("identifier"),token("="),exp),separator=token(";")),
optional(token(";")),
token(")"),
token("{"),
zeroOrMore(statement),
token("}"),
zeroOrMore(branch("catch",catchClause)))
}
val tryStmt = rule("tryStmt") {
sequence(
token("try"),
token("{"),
zeroOrMore(statement),
recover(token("}"),"closing bracket expected"),
choice(
sequence(
oneOrMore(branch("catch",catchClause)),
optional(sequence(
token("finally"),
token("{"),
zeroOrMore(statement),
token("}")
))),
sequence(
token("finally"),
token("{"),
zeroOrMore(statement),
token("}"))
))
}
val throwStmt = rule("throwStmt") {
sequence(
token("throw"),
branch("value",exp)
)
}
val synchronizedStmt = rule("synchronizedStmt") {
sequence(
token("synchronized"),
token("("),
exp,
token(")"),
statement
)
}
val breakStmt = rule("breakStmt") {
sequence(token("break"),token(";"))
}
val continueStmt = rule("continueStmt") {
sequence(token("continue"),token(";"))
}
val caseClause = rule("caseClause") {
sequence(token("case"),branch("label",exp),token(":"),zeroOrMore(branch("statements",statement)))
}
val defaultClause = rule("defaultClause") {
sequence(token("default"),token(":"),zeroOrMore(branch("statements",statement)))
}
val switchStmt = rule("switchStmt") {
sequence(
token("switch"),
token("("),
branch("value",exp),
token(")"),
token("{"),
zeroOrMore(branch("cases",caseClause)),
optional(branch("default",defaultClause)),
token("}")
)
}
val statement : NamedRule = subrule("statement") {
choice(
switchStmt,
localVarDecl,
breakStmt,
continueStmt,
returnStmt,
blockStmt,
expressionStatement,
ifStmt,
tryWithResourceStmt,
tryStmt,
whileStmt,
forStmt,
synchronizedStmt,
forEachStmt,
emptyStatement,
throwStmt
)
}
}.syntax
}
| ftomassetti/JavaIncrementalParser | src/main/scala/javaip.scala | Scala | apache-2.0 | 28,254 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.stream.table
import java.io.File
import java.lang.{Boolean => JBool}
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.datastream.DataStream
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.streaming.api.functions.sink.SinkFunction
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{TableEnvironment, TableException, Types}
import org.apache.flink.table.runtime.utils.{StreamITCase, StreamTestData}
import org.apache.flink.table.sinks._
import org.apache.flink.table.utils.MemoryTableSourceSinkUtil
import org.apache.flink.test.util.{AbstractTestBase, TestBaseUtils}
import org.apache.flink.types.Row
import org.apache.flink.util.Collector
import org.junit.Assert._
import org.junit.Test
import scala.collection.JavaConverters._
import scala.collection.mutable
class TableSinkITCase extends AbstractTestBase {
@Test
def testInsertIntoRegisteredTableSink(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
MemoryTableSourceSinkUtil.clear()
val input = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(r => r._2)
val fieldNames = Array("d", "e", "t")
val fieldTypes: Array[TypeInformation[_]] = Array(Types.STRING, Types.SQL_TIMESTAMP, Types.LONG)
val sink = new MemoryTableSourceSinkUtil.UnsafeMemoryAppendTableSink
tEnv.registerTableSink("targetTable", fieldNames, fieldTypes, sink)
input.toTable(tEnv, 'a, 'b, 'c, 't.rowtime)
.where('a < 3 || 'a > 19)
.select('c, 't, 'b)
.insertInto("targetTable")
env.execute()
val expected = Seq(
"Hi,1970-01-01 00:00:00.001,1",
"Hello,1970-01-01 00:00:00.002,2",
"Comment#14,1970-01-01 00:00:00.006,6",
"Comment#15,1970-01-01 00:00:00.006,6").mkString("\\n")
TestBaseUtils.compareResultAsText(MemoryTableSourceSinkUtil.tableData.asJava, expected)
}
@Test
def testStreamTableSink(): Unit = {
val tmpFile = File.createTempFile("flink-table-sink-test", ".tmp")
tmpFile.deleteOnExit()
val path = tmpFile.toURI.toString
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
env.setParallelism(4)
tEnv.registerTableSink(
"csvSink",
new CsvTableSink(path).configure(
Array[String]("c", "b"),
Array[TypeInformation[_]](Types.STRING, Types.SQL_TIMESTAMP)))
val input = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._2)
.map(x => x).setParallelism(4) // increase DOP to 4
input.toTable(tEnv, 'a, 'b.rowtime, 'c)
.where('a < 5 || 'a > 17)
.select('c, 'b)
.insertInto("csvSink")
env.execute()
val expected = Seq(
"Hi,1970-01-01 00:00:00.001",
"Hello,1970-01-01 00:00:00.002",
"Hello world,1970-01-01 00:00:00.002",
"Hello world, how are you?,1970-01-01 00:00:00.003",
"Comment#12,1970-01-01 00:00:00.006",
"Comment#13,1970-01-01 00:00:00.006",
"Comment#14,1970-01-01 00:00:00.006",
"Comment#15,1970-01-01 00:00:00.006").mkString("\\n")
TestBaseUtils.compareResultsByLinesInMemory(expected, path)
}
@Test
def testAppendSinkOnAppendTable(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text, 'rowtime.rowtime)
tEnv.registerTableSink(
"appendSink",
new TestAppendSink().configure(
Array[String]("t", "icnt", "nsum"),
Array[TypeInformation[_]](Types.SQL_TIMESTAMP, Types.LONG, Types.LONG)))
t.window(Tumble over 5.millis on 'rowtime as 'w)
.groupBy('w)
.select('w.end as 't, 'id.count as 'icnt, 'num.sum as 'nsum)
.insertInto("appendSink")
env.execute()
val result = RowCollector.getAndClearValues.map(_.f1.toString).sorted
val expected = List(
"1970-01-01 00:00:00.005,4,8",
"1970-01-01 00:00:00.01,5,18",
"1970-01-01 00:00:00.015,5,24",
"1970-01-01 00:00:00.02,5,29",
"1970-01-01 00:00:00.025,2,12")
.sorted
assertEquals(expected, result)
}
@Test
def testAppendSinkOnAppendTableForInnerJoin(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val ds1 = StreamTestData.getSmall3TupleDataStream(env).toTable(tEnv, 'a, 'b, 'c)
val ds2 = StreamTestData.get5TupleDataStream(env).toTable(tEnv, 'd, 'e, 'f, 'g, 'h)
tEnv.registerTableSink(
"appendSink",
new TestAppendSink().configure(
Array[String]("c", "g"),
Array[TypeInformation[_]](Types.STRING, Types.STRING)))
ds1.join(ds2).where('b === 'e)
.select('c, 'g)
.insertInto("appendSink")
env.execute()
val result = RowCollector.getAndClearValues.map(_.f1.toString).sorted
val expected = List("Hi,Hallo", "Hello,Hallo Welt", "Hello world,Hallo Welt").sorted
assertEquals(expected, result)
}
@Test
def testRetractSinkOnUpdatingTable(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text)
tEnv.registerTableSink(
"retractSink",
new TestRetractSink().configure(
Array[String]("len", "icnt", "nsum"),
Array[TypeInformation[_]](Types.INT, Types.LONG, Types.LONG)))
t.select('id, 'num, 'text.charLength() as 'len)
.groupBy('len)
.select('len, 'id.count as 'icnt, 'num.sum as 'nsum)
.insertInto("retractSink")
env.execute()
val results = RowCollector.getAndClearValues
val retracted = RowCollector.retractResults(results).sorted
val expected = List(
"2,1,1",
"5,1,2",
"11,1,2",
"25,1,3",
"10,7,39",
"14,1,3",
"9,9,41").sorted
assertEquals(expected, retracted)
}
@Test
def testRetractSinkOnAppendTable(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text, 'rowtime.rowtime)
tEnv.registerTableSink(
"retractSink",
new TestRetractSink().configure(
Array[String]("t", "icnt", "nsum"),
Array[TypeInformation[_]](Types.SQL_TIMESTAMP, Types.LONG, Types.LONG)))
t.window(Tumble over 5.millis on 'rowtime as 'w)
.groupBy('w)
.select('w.end as 't, 'id.count as 'icnt, 'num.sum as 'nsum)
.insertInto("retractSink")
env.execute()
val results = RowCollector.getAndClearValues
assertFalse(
"Received retraction messages for append only table",
results.exists(!_.f0))
val retracted = RowCollector.retractResults(results).sorted
val expected = List(
"1970-01-01 00:00:00.005,4,8",
"1970-01-01 00:00:00.01,5,18",
"1970-01-01 00:00:00.015,5,24",
"1970-01-01 00:00:00.02,5,29",
"1970-01-01 00:00:00.025,2,12")
.sorted
assertEquals(expected, retracted)
}
@Test
def testUpsertSinkOnUpdatingTableWithFullKey(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text)
tEnv.registerTableSink(
"upsertSink",
new TestUpsertSink(Array("cnt", "cTrue"), false).configure(
Array[String]("cnt", "lencnt", "cTrue"),
Array[TypeInformation[_]](Types.LONG, Types.LONG, Types.BOOLEAN)))
t.select('id, 'num, 'text.charLength() as 'len, ('id > 0) as 'cTrue)
.groupBy('len, 'cTrue)
.select('len, 'id.count as 'cnt, 'cTrue)
.groupBy('cnt, 'cTrue)
.select('cnt, 'len.count as 'lencnt, 'cTrue)
.insertInto("upsertSink")
env.execute()
val results = RowCollector.getAndClearValues
assertTrue(
"Results must include delete messages",
results.exists(_.f0 == false)
)
val retracted = RowCollector.upsertResults(results, Array(0, 2)).sorted
val expected = List(
"1,5,true",
"7,1,true",
"9,1,true").sorted
assertEquals(expected, retracted)
}
@Test
def testUpsertSinkOnAppendingTableWithFullKey1(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text, 'rowtime.rowtime)
tEnv.registerTableSink(
"upsertSink",
new TestUpsertSink(Array("wend", "num"), true).configure(
Array[String]("num", "wend", "icnt"),
Array[TypeInformation[_]](Types.LONG, Types.SQL_TIMESTAMP, Types.LONG)))
t.window(Tumble over 5.millis on 'rowtime as 'w)
.groupBy('w, 'num)
.select('num, 'w.end as 'wend, 'id.count as 'icnt)
.insertInto("upsertSink")
env.execute()
val results = RowCollector.getAndClearValues
assertFalse(
"Received retraction messages for append only table",
results.exists(!_.f0))
val retracted = RowCollector.upsertResults(results, Array(0, 1, 2)).sorted
val expected = List(
"1,1970-01-01 00:00:00.005,1",
"2,1970-01-01 00:00:00.005,2",
"3,1970-01-01 00:00:00.005,1",
"3,1970-01-01 00:00:00.01,2",
"4,1970-01-01 00:00:00.01,3",
"4,1970-01-01 00:00:00.015,1",
"5,1970-01-01 00:00:00.015,4",
"5,1970-01-01 00:00:00.02,1",
"6,1970-01-01 00:00:00.02,4",
"6,1970-01-01 00:00:00.025,2").sorted
assertEquals(expected, retracted)
}
@Test
def testUpsertSinkOnAppendingTableWithFullKey2(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text, 'rowtime.rowtime)
tEnv.registerTableSink(
"upsertSink",
new TestUpsertSink(Array("wstart", "wend", "num"), true).configure(
Array[String]("wstart", "wend", "num", "icnt"),
Array[TypeInformation[_]]
(Types.SQL_TIMESTAMP, Types.SQL_TIMESTAMP, Types.LONG, Types.LONG)))
t.window(Tumble over 5.millis on 'rowtime as 'w)
.groupBy('w, 'num)
.select('w.start as 'wstart, 'w.end as 'wend, 'num, 'id.count as 'icnt)
.insertInto("upsertSink")
env.execute()
val results = RowCollector.getAndClearValues
assertFalse(
"Received retraction messages for append only table",
results.exists(!_.f0))
val retracted = RowCollector.upsertResults(results, Array(0, 1, 2)).sorted
val expected = List(
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.005,1,1",
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.005,2,2",
"1970-01-01 00:00:00.0,1970-01-01 00:00:00.005,3,1",
"1970-01-01 00:00:00.005,1970-01-01 00:00:00.01,3,2",
"1970-01-01 00:00:00.005,1970-01-01 00:00:00.01,4,3",
"1970-01-01 00:00:00.01,1970-01-01 00:00:00.015,4,1",
"1970-01-01 00:00:00.01,1970-01-01 00:00:00.015,5,4",
"1970-01-01 00:00:00.015,1970-01-01 00:00:00.02,5,1",
"1970-01-01 00:00:00.015,1970-01-01 00:00:00.02,6,4",
"1970-01-01 00:00:00.02,1970-01-01 00:00:00.025,6,2").sorted
assertEquals(expected, retracted)
}
@Test
def testUpsertSinkOnAppendingTableWithoutFullKey1(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text, 'rowtime.rowtime)
tEnv.registerTableSink(
"upsertSink",
new TestUpsertSink(null, true).configure(
Array[String]("wend", "cnt"),
Array[TypeInformation[_]](Types.SQL_TIMESTAMP, Types.LONG)))
t.window(Tumble over 5.millis on 'rowtime as 'w)
.groupBy('w, 'num)
.select('w.end as 'wend, 'id.count as 'cnt)
.insertInto("upsertSink")
env.execute()
val results = RowCollector.getAndClearValues
assertFalse(
"Received retraction messages for append only table",
results.exists(!_.f0))
val retracted = results.map(_.f1.toString).sorted
val expected = List(
"1970-01-01 00:00:00.005,1",
"1970-01-01 00:00:00.005,2",
"1970-01-01 00:00:00.005,1",
"1970-01-01 00:00:00.01,2",
"1970-01-01 00:00:00.01,3",
"1970-01-01 00:00:00.015,1",
"1970-01-01 00:00:00.015,4",
"1970-01-01 00:00:00.02,1",
"1970-01-01 00:00:00.02,4",
"1970-01-01 00:00:00.025,2").sorted
assertEquals(expected, retracted)
}
@Test
def testUpsertSinkOnAppendingTableWithoutFullKey2(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text, 'rowtime.rowtime)
tEnv.registerTableSink(
"upsertSink",
new TestUpsertSink(null, true).configure(
Array[String]("num", "cnt"),
Array[TypeInformation[_]](Types.LONG, Types.LONG)))
t.window(Tumble over 5.millis on 'rowtime as 'w)
.groupBy('w, 'num)
.select('num, 'id.count as 'cnt)
.insertInto("upsertSink")
env.execute()
val results = RowCollector.getAndClearValues
assertFalse(
"Received retraction messages for append only table",
results.exists(!_.f0))
val retracted = results.map(_.f1.toString).sorted
val expected = List(
"1,1",
"2,2",
"3,1",
"3,2",
"4,3",
"4,1",
"5,4",
"5,1",
"6,4",
"6,2").sorted
assertEquals(expected, retracted)
}
@Test
def testToAppendStreamRowtime(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.clear
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text, 'rowtime.rowtime)
val r = t
.window(Tumble over 5.milli on 'rowtime as 'w)
.groupBy('num, 'w)
.select('num, 'w.rowtime, 'w.rowtime.cast(Types.LONG))
r.toAppendStream[Row]
.process(new ProcessFunction[Row, Row] {
override def processElement(
row: Row,
ctx: ProcessFunction[Row, Row]#Context,
out: Collector[Row]): Unit = {
val rowTS: Long = row.getField(2).asInstanceOf[Long]
if (ctx.timestamp() == rowTS) {
out.collect(row)
}
}
}).addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = List(
"1,1970-01-01 00:00:00.004,4",
"2,1970-01-01 00:00:00.004,4",
"3,1970-01-01 00:00:00.004,4",
"3,1970-01-01 00:00:00.009,9",
"4,1970-01-01 00:00:00.009,9",
"4,1970-01-01 00:00:00.014,14",
"5,1970-01-01 00:00:00.014,14",
"5,1970-01-01 00:00:00.019,19",
"6,1970-01-01 00:00:00.019,19",
"6,1970-01-01 00:00:00.024,24")
assertEquals(expected, StreamITCase.testResults.sorted)
}
@Test
def testToRetractStreamRowtime(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
StreamITCase.clear
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text, 'rowtime.rowtime)
val r = t
.window(Tumble over 5.milli on 'rowtime as 'w)
.groupBy('num, 'w)
.select('num, 'w.rowtime, 'w.rowtime.cast(Types.LONG))
r.toRetractStream[Row]
.process(new ProcessFunction[(Boolean, Row), Row] {
override def processElement(
row: (Boolean, Row),
ctx: ProcessFunction[(Boolean, Row), Row]#Context,
out: Collector[Row]): Unit = {
val rowTs = row._2.getField(2).asInstanceOf[Long]
if (ctx.timestamp() == rowTs) {
out.collect(row._2)
}
}
}).addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = List(
"1,1970-01-01 00:00:00.004,4",
"2,1970-01-01 00:00:00.004,4",
"3,1970-01-01 00:00:00.004,4",
"3,1970-01-01 00:00:00.009,9",
"4,1970-01-01 00:00:00.009,9",
"4,1970-01-01 00:00:00.014,14",
"5,1970-01-01 00:00:00.014,14",
"5,1970-01-01 00:00:00.019,19",
"6,1970-01-01 00:00:00.019,19",
"6,1970-01-01 00:00:00.024,24")
assertEquals(expected, StreamITCase.testResults.sorted)
}
@Test(expected = classOf[TableException])
def testToAppendStreamMultiRowtime(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text, 'rowtime.rowtime)
val r = t
.window(Tumble over 5.milli on 'rowtime as 'w)
.groupBy('num, 'w)
.select('num, 'w.rowtime, 'w.rowtime as 'rowtime2)
r.toAppendStream[Row]
}
@Test(expected = classOf[TableException])
def testToRetractStreamMultiRowtime(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.getConfig.enableObjectReuse()
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = TableEnvironment.getTableEnvironment(env)
val t = StreamTestData.get3TupleDataStream(env)
.assignAscendingTimestamps(_._1.toLong)
.toTable(tEnv, 'id, 'num, 'text, 'rowtime.rowtime)
val r = t
.window(Tumble over 5.milli on 'rowtime as 'w)
.groupBy('num, 'w)
.select('num, 'w.rowtime, 'w.rowtime as 'rowtime2)
r.toRetractStream[Row]
}
}
private[flink] class TestAppendSink extends AppendStreamTableSink[Row] {
var fNames: Array[String] = _
var fTypes: Array[TypeInformation[_]] = _
override def emitDataStream(s: DataStream[Row]): Unit = {
s.map(
new MapFunction[Row, JTuple2[JBool, Row]] {
override def map(value: Row): JTuple2[JBool, Row] = new JTuple2(true, value)
})
.addSink(new RowSink)
}
override def getOutputType: TypeInformation[Row] = new RowTypeInfo(fTypes, fNames)
override def getFieldNames: Array[String] = fNames
override def getFieldTypes: Array[TypeInformation[_]] = fTypes
override def configure(
fieldNames: Array[String],
fieldTypes: Array[TypeInformation[_]]): TableSink[Row] = {
val copy = new TestAppendSink
copy.fNames = fieldNames
copy.fTypes = fieldTypes
copy
}
}
private[flink] class TestRetractSink extends RetractStreamTableSink[Row] {
var fNames: Array[String] = _
var fTypes: Array[TypeInformation[_]] = _
override def emitDataStream(s: DataStream[JTuple2[JBool, Row]]): Unit = {
s.addSink(new RowSink)
}
override def getRecordType: TypeInformation[Row] = new RowTypeInfo(fTypes, fNames)
override def getFieldNames: Array[String] = fNames
override def getFieldTypes: Array[TypeInformation[_]] = fTypes
override def configure(
fieldNames: Array[String],
fieldTypes: Array[TypeInformation[_]]): TableSink[JTuple2[JBool, Row]] = {
val copy = new TestRetractSink
copy.fNames = fieldNames
copy.fTypes = fieldTypes
copy
}
}
private[flink] class TestUpsertSink(
expectedKeys: Array[String],
expectedIsAppendOnly: Boolean)
extends UpsertStreamTableSink[Row] {
var fNames: Array[String] = _
var fTypes: Array[TypeInformation[_]] = _
override def setKeyFields(keys: Array[String]): Unit =
if (keys != null) {
assertEquals("Provided key fields do not match expected keys",
expectedKeys.sorted.mkString(","),
keys.sorted.mkString(","))
} else {
assertNull("Provided key fields should not be null.", expectedKeys)
}
override def setIsAppendOnly(isAppendOnly: JBool): Unit =
assertEquals(
"Provided isAppendOnly does not match expected isAppendOnly",
expectedIsAppendOnly,
isAppendOnly)
override def getRecordType: TypeInformation[Row] = new RowTypeInfo(fTypes, fNames)
override def emitDataStream(s: DataStream[JTuple2[JBool, Row]]): Unit = {
s.addSink(new RowSink)
}
override def getFieldNames: Array[String] = fNames
override def getFieldTypes: Array[TypeInformation[_]] = fTypes
override def configure(
fieldNames: Array[String],
fieldTypes: Array[TypeInformation[_]]): TableSink[JTuple2[JBool, Row]] = {
val copy = new TestUpsertSink(expectedKeys, expectedIsAppendOnly)
copy.fNames = fieldNames
copy.fTypes = fieldTypes
copy
}
}
class RowSink extends SinkFunction[JTuple2[JBool, Row]] {
override def invoke(value: JTuple2[JBool, Row]): Unit = RowCollector.addValue(value)
}
object RowCollector {
private val sink: mutable.ArrayBuffer[JTuple2[JBool, Row]] =
new mutable.ArrayBuffer[JTuple2[JBool, Row]]()
def addValue(value: JTuple2[JBool, Row]): Unit = {
// make a copy
val copy = new JTuple2[JBool, Row](value.f0, Row.copy(value.f1))
sink.synchronized {
sink += copy
}
}
def getAndClearValues: List[JTuple2[JBool, Row]] = {
val out = sink.toList
sink.clear()
out
}
/** Converts a list of retraction messages into a list of final results. */
def retractResults(results: List[JTuple2[JBool, Row]]): List[String] = {
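    // Tally +1 for each accumulate message (f0 == true) and -1 for each retract
    // message per row string; rows whose final count is non-zero form the result.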
val retracted = results
.foldLeft(Map[String, Int]()){ (m: Map[String, Int], v: JTuple2[JBool, Row]) =>
val cnt = m.getOrElse(v.f1.toString, 0)
if (v.f0) {
m + (v.f1.toString -> (cnt + 1))
} else {
m + (v.f1.toString -> (cnt - 1))
}
}.filter{ case (_, c: Int) => c != 0 }
assertFalse(
"Received retracted rows which have not been accumulated.",
retracted.exists{ case (_, c: Int) => c < 0})
retracted.flatMap { case (r: String, c: Int) => (0 until c).map(_ => r) }.toList
}
/** Converts a list of upsert messages into a list of final results. */
def upsertResults(results: List[JTuple2[JBool, Row]], keys: Array[Int]): List[String] = {
def getKeys(r: Row): Row = Row.project(r, keys)
val upserted = results.foldLeft(Map[Row, String]()){ (o: Map[Row, String], r) =>
val key = getKeys(r.f1)
if (r.f0) {
o + (key -> r.f1.toString)
} else {
o - key
}
}
upserted.values.toList
}
}
| mylog00/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/runtime/stream/table/TableSinkITCase.scala | Scala | apache-2.0 | 25,742 |
import org.apache.spark._
import org.apache.spark.sql.SQLContext
import org.apache.spark.mllib.feature.HashingTF
import org.apache.spark.mllib.clustering.KMeans
object Main {
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("test").setMaster("local[*]")
val sc = new SparkContext(conf)
val sqlCtx = new SQLContext(sc)
val input = sqlCtx.jsonFile("../data/tweets/*/*")
input.registerTempTable("tweets")
val texts = sqlCtx.sql("SELECT text FROM tweets").map(_.getString(0))
val tf = new HashingTF(1000)
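    // Descriptive note (added comment): featurize hashes each tweet's character
    // bigrams (s.sliding(2)) into a 1000-dimensional term-frequency vector via HashingTF.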
def featurize(s: String) =
tf.transform(s.sliding(2).toSeq)
val vectors = texts.map(featurize).cache
val model =
KMeans.train(vectors, 10, 100)
val groups =
texts.groupBy(t => model.predict(featurize(t)))
groups.foreach(println)
model.predict(featurize("test"))
}
}
| OmniaGM/spark-training | examples/twitter/scala/Main.scala | Scala | mit | 893 |
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2003-2010, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |                                         **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala.util.logging
/**
 * The trait <code>ConsoleLogger</code> is mixed into a concrete class that
 * has class <code>Logged</code> among its base classes.
*
* @author Burak Emir
* @version 1.0
*/
trait ConsoleLogger extends Logged {
/** logs argument to Console using <code>Console.println</code>
*/
override def log(msg: String): Unit = Console.println(msg)
}
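// Minimal usage sketch (illustrative addition, not part of the original file;
// `MyParser` is a hypothetical class that extends Logged):
//   val p = new MyParser with ConsoleLogger
//   p.log("parsing started")   // printed via Console.println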
| cran/rkafkajars | java/scala/util/logging/ConsoleLogger.scala | Scala | apache-2.0 | 915 |
import org.apache.hadoop.conf.Configuration
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.rdd.RDD
import com.mongodb.MongoClient
import org.bson.BSONObject
import com.mongodb.hadoop.{MongoInputFormat, MongoOutputFormat, BSONFileInputFormat, BSONFileOutputFormat}
import com.mongodb.hadoop.io.MongoUpdateWritable
import java.io._
import com.cloudera.datascience.lsa._
import com.cloudera.datascience.lsa.ParseWikipedia._
import com.cloudera.datascience.lsa.RunLSA._
import org.apache.spark.rdd.EmptyRDD
import scala.collection.mutable.ListBuffer
import org.apache.spark.mllib.linalg._
import org.apache.spark.mllib.linalg.distributed.RowMatrix
import breeze.linalg.{DenseMatrix => BDenseMatrix, DenseVector => BDenseVector, SparseVector => BSparseVector}
import org.apache.spark.mllib.regression._
import org.apache.spark.rdd._
//Access mongodb
var client = new MongoClient("localhost", 27017)
var db = client.getDatabase("cordis")
// Load the data from MongoDB
@transient val mongoConfig = new Configuration()
mongoConfig.set("mongo.input.uri",
"mongodb://localhost:27017/cordis.project")
val documents = sc.newAPIHadoopRDD(
mongoConfig, // Configuration
classOf[MongoInputFormat], // InputFormat
classOf[Object], // Key type
classOf[BSONObject]) // Value type
// load the stop words
val stopWords = sc.broadcast(ParseWikipedia.loadStopWords("deps/lsa/src/main/resources/stopwords.txt")).value
//Lemmatization
var lemmatized = documents.map(s => (s._2.get("_id").toString, ParseWikipedia.plainTextToLemmas(s._2.get("objective").toString, stopWords, ParseWikipedia.createNLPPipeline())))
// set of all words
var words = lemmatized.map(a => Set(a._2: _*)).reduce((a, b) => (a ++ b))
val numTerms = 1000 // number of terms
val k = 100 // number of singular values to keep
val nbConcept = 30 // number of concepts
// keep only documents with more than one lemma
val filtered = lemmatized.filter(_._2.size > 1)
val documentSize = documents.collect().length
println("Documents Size : " + documentSize)
println("Number of Terms : " + numTerms)
val (termDocMatrix, termIds, docIds, idfs) = ParseWikipedia.termDocumentMatrix(filtered, stopWords, numTerms, sc)
// drop any existing output collections
db.getCollection("projetIdfs").drop()
db.getCollection("projetTermDocMatrix").drop()
db.getCollection("projetTermIds").drop()
db.getCollection("projetDocIds").drop()
// save the tf-idf weights
val outputConfig = new Configuration()
outputConfig.set("mongo.output.uri", "mongodb://localhost:27017/cordis.projetIdfs")
sc.parallelize(idfs.toSeq).saveAsNewAPIHadoopFile("file:///this-is-completely-unused", classOf[Object], classOf[BSONObject], classOf[MongoOutputFormat[Object, BSONObject]], outputConfig)
outputConfig.set("mongo.output.uri", "mongodb://localhost:27017/cordis.projetTermDocMatrix")
termDocMatrix.zipWithIndex().map(a => (a._2, a._1.toArray)).saveAsNewAPIHadoopFile("file:///this-is-completely-unused", classOf[Object], classOf[BSONObject], classOf[MongoOutputFormat[Object, BSONObject]], outputConfig)
outputConfig.set("mongo.output.uri", "mongodb://localhost:27017/cordis.projetTermIds")
sc.parallelize(termIds.toSeq).saveAsNewAPIHadoopFile("file:///this-is-completely-unused", classOf[Object], classOf[BSONObject], classOf[MongoOutputFormat[Object, BSONObject]], outputConfig)
outputConfig.set("mongo.output.uri", "mongodb://localhost:27017/cordis.projetDocIds")
sc.parallelize(docIds.toSeq).saveAsNewAPIHadoopFile("file:///this-is-completely-unused", classOf[Object], classOf[BSONObject], classOf[MongoOutputFormat[Object, BSONObject]], outputConfig)
val mat = new RowMatrix(termDocMatrix)
/* Compute the singular value decomposition */
val svd = mat.computeSVD(k, computeU = true)
/* results per term */
val topConceptTerms = RunLSA.topTermsInTopConcepts(svd, nbConcept, numTerms, termIds)
/* results per document */
val topConceptDocs = RunLSA.topDocsInTopConcepts(svd, nbConcept, documentSize, docIds)
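// Illustrative inspection step (added sketch; assumes the two calls above return
// Seq[Seq[(id, score)]] as in the Cloudera LSA example code):
// topConceptTerms.zipWithIndex.foreach { case (terms, i) =>
//   println(s"Concept $i top terms: " + terms.map(_._1).mkString(", "))
// }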
var all = sc.emptyRDD[(String, Double)]
import collection.mutable.HashMap
val docConcept = new HashMap[String, ListBuffer[Double]]()
var count = 0
for (a <-topConceptDocs) {
count += 1
for ((b, c) <-a) {
if (!docConcept.contains(b)) {
docConcept.put(b, new ListBuffer[Double]())
}
docConcept(b) += c
}
for ((k, v) <-docConcept) {
while (v.size < count) {
v += 0.0
}
}
}
var docConceptRDD = sc.parallelize(docConcept.toSeq)
var toWrite = docConceptRDD.map(a => (a._1, a._2.toArray))
/* Save to MongoDB */
db.getCollection("projetDocConcept").drop()
val outputConfig = new Configuration()
outputConfig.set("mongo.output.uri",
"mongodb://localhost:27017/cordis.projetDocConcept")
toWrite.saveAsNewAPIHadoopFile(
"file:///this-is-completely-unused",
classOf[Object],
classOf[BSONObject],
classOf[MongoOutputFormat[Object, BSONObject]],
outputConfig)
// build the term-concept matrix
val termConcept = new HashMap[String, ListBuffer[Double]]()
count = 0
for (a <-topConceptTerms) {
count += 1
for ((b, c) <-a) {
if (!termConcept.contains(b)) {
termConcept.put(b, new ListBuffer[Double]())
}
termConcept(b) += c
}
for ((k, v) <-termConcept) {
while (v.size < count) {
v += 0.0
}
}
}
db.getCollection("projetTermConcept").drop()
var parr = sc.parallelize(termConcept.toSeq)
val outputConfig = new Configuration()
outputConfig.set("mongo.output.uri", "mongodb://localhost:27017/cordis.projetTermConcept")
parr.map(a => (a._1, a._2.toArray)).coalesce(1, true).saveAsNewAPIHadoopFile("file:///this-is-completely-unused", classOf[Object], classOf[BSONObject], classOf[MongoOutputFormat[Object, BSONObject]], outputConfig)
exit | StatisticalProject/CORDIR | makeLSA.scala | Scala | apache-2.0 | 5,675 |
import java.io.File
import testgen.TestSuiteBuilder.{toString, _}
import testgen._
object FlattenArrayTestGenerator {
def main(args: Array[String]): Unit = {
val file = new File("src/main/resources/flatten-array.json")
def sutArgsFromInput(parseResult: CanonicalDataParser.ParseResult, argNames: String*): String =
argNames map (name => toArgString(parseResult("input").asInstanceOf[Map[String, Any]](name))) mkString ", "
def toArgString(any: Any): String = {
any match {
case xs: List[Any] => s"List(${xs.map(x => toArgString(x)).mkString(", ")})"
case null => "null"
case _ => any.toString
}
}
def toExpectedString(expected: CanonicalDataParser.Expected): String = {
expected match {
case Left(_) => throw new IllegalStateException()
case Right(xs: List[Any]) => s"List(${xs.mkString(", ")})"
}
}
def fromLabeledTestFromInput(argNames: String*): ToTestCaseData =
withLabeledTest { sut =>
labeledTest =>
val args = sutArgsFromInput(labeledTest.result, argNames: _*)
val property = labeledTest.property
val sutCall =
s"""$sut.$property($args)"""
val expected = toExpectedString(labeledTest.expected)
TestCaseData(labeledTest.description, sutCall, expected)
}
val code = TestSuiteBuilder.build(file, fromLabeledTestFromInput("array"))
println(s"-------------")
println(code)
println(s"-------------")
}
}
| ricemery/xscala | testgen/src/main/scala/FlattenArrayTestGenerator.scala | Scala | mit | 1,512 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java
import java.{lang => jl}
import java.lang.{Iterable => JIterable}
import java.util.{Comparator, Iterator => JIterator, List => JList}
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import scala.reflect.ClassTag
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.mapred.{JobConf, OutputFormat}
import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat}
import org.apache.spark.{HashPartitioner, Partitioner}
import org.apache.spark.Partitioner._
import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
import org.apache.spark.api.java.JavaUtils.mapAsSerializableJavaMap
import org.apache.spark.api.java.function.{FlatMapFunction, Function => JFunction,
Function2 => JFunction2, PairFunction}
import org.apache.spark.partial.{BoundedDouble, PartialResult}
import org.apache.spark.rdd.{OrderedRDDFunctions, RDD}
import org.apache.spark.rdd.RDD.rddToPairRDDFunctions
import org.apache.spark.serializer.Serializer
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
class JavaPairRDD[K, V](val rdd: RDD[(K, V)])
(implicit val kClassTag: ClassTag[K], implicit val vClassTag: ClassTag[V])
extends AbstractJavaRDDLike[(K, V), JavaPairRDD[K, V]] {
override def wrapRDD(rdd: RDD[(K, V)]): JavaPairRDD[K, V] = JavaPairRDD.fromRDD(rdd)
override val classTag: ClassTag[(K, V)] = rdd.elementClassTag
import JavaPairRDD._
// Common RDD functions
/**
* Persist this RDD with the default storage level (`MEMORY_ONLY`).
*/
def cache(): JavaPairRDD[K, V] = new JavaPairRDD[K, V](rdd.cache())
/**
* Set this RDD's storage level to persist its values across operations after the first time
* it is computed. Can only be called once on each RDD.
*/
def persist(newLevel: StorageLevel): JavaPairRDD[K, V] =
new JavaPairRDD[K, V](rdd.persist(newLevel))
/**
* Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
* This method blocks until all blocks are deleted.
*/
def unpersist(): JavaPairRDD[K, V] = wrapRDD(rdd.unpersist())
/**
* Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
*
* @param blocking Whether to block until all blocks are deleted.
*/
def unpersist(blocking: Boolean): JavaPairRDD[K, V] = wrapRDD(rdd.unpersist(blocking))
// Transformations (return a new RDD)
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(): JavaPairRDD[K, V] = new JavaPairRDD[K, V](rdd.distinct())
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(numPartitions: Int): JavaPairRDD[K, V] =
new JavaPairRDD[K, V](rdd.distinct(numPartitions))
/**
* Return a new RDD containing only the elements that satisfy a predicate.
*/
def filter(f: JFunction[(K, V), java.lang.Boolean]): JavaPairRDD[K, V] =
new JavaPairRDD[K, V](rdd.filter(x => f.call(x).booleanValue()))
/**
* Return a new RDD that is reduced into `numPartitions` partitions.
*/
def coalesce(numPartitions: Int): JavaPairRDD[K, V] = fromRDD(rdd.coalesce(numPartitions))
/**
* Return a new RDD that is reduced into `numPartitions` partitions.
*/
def coalesce(numPartitions: Int, shuffle: Boolean): JavaPairRDD[K, V] =
fromRDD(rdd.coalesce(numPartitions, shuffle))
/**
* Return a new RDD that has exactly numPartitions partitions.
*
* Can increase or decrease the level of parallelism in this RDD. Internally, this uses
* a shuffle to redistribute data.
*
* If you are decreasing the number of partitions in this RDD, consider using `coalesce`,
* which can avoid performing a shuffle.
*/
def repartition(numPartitions: Int): JavaPairRDD[K, V] = fromRDD(rdd.repartition(numPartitions))
/**
* Return a sampled subset of this RDD.
*/
def sample(withReplacement: Boolean, fraction: Double): JavaPairRDD[K, V] =
sample(withReplacement, fraction, Utils.random.nextLong)
/**
* Return a sampled subset of this RDD.
*/
def sample(withReplacement: Boolean, fraction: Double, seed: Long): JavaPairRDD[K, V] =
new JavaPairRDD[K, V](rdd.sample(withReplacement, fraction, seed))
/**
* Return a subset of this RDD sampled by key (via stratified sampling).
*
* Create a sample of this RDD using variable sampling rates for different keys as specified by
* `fractions`, a key to sampling rate map, via simple random sampling with one pass over the
* RDD, to produce a sample of size that's approximately equal to the sum of
* math.ceil(numItems * samplingRate) over all key values.
*/
def sampleByKey(withReplacement: Boolean,
fractions: java.util.Map[K, jl.Double],
seed: Long): JavaPairRDD[K, V] =
new JavaPairRDD[K, V](rdd.sampleByKey(
withReplacement,
fractions.asScala.mapValues(_.toDouble).toMap, // map to Scala Double; toMap to serialize
seed))
/**
* Return a subset of this RDD sampled by key (via stratified sampling).
*
* Create a sample of this RDD using variable sampling rates for different keys as specified by
* `fractions`, a key to sampling rate map, via simple random sampling with one pass over the
* RDD, to produce a sample of size that's approximately equal to the sum of
* math.ceil(numItems * samplingRate) over all key values.
*
* Use Utils.random.nextLong as the default seed for the random number generator.
*/
def sampleByKey(withReplacement: Boolean,
fractions: java.util.Map[K, jl.Double]): JavaPairRDD[K, V] =
sampleByKey(withReplacement, fractions, Utils.random.nextLong)
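  // Illustrative sketch (added comment, not part of the original file): stratified
  // sampling on the underlying Scala RDD API, keeping ~10% of key "a" and ~50% of
  // key "b" (fractions is a Map[K, Double] on the Scala side).
  //   val sampled = pairs.sampleByKey(
  //     withReplacement = false, fractions = Map("a" -> 0.1, "b" -> 0.5))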
/**
* Return a subset of this RDD sampled by key (via stratified sampling) containing exactly
* math.ceil(numItems * samplingRate) for each stratum (group of pairs with the same key).
*
* This method differs from `sampleByKey` in that we make additional passes over the RDD to
* create a sample size that's exactly equal to the sum of math.ceil(numItems * samplingRate)
* over all key values with a 99.99% confidence. When sampling without replacement, we need one
* additional pass over the RDD to guarantee sample size; when sampling with replacement, we need
* two additional passes.
*/
def sampleByKeyExact(withReplacement: Boolean,
fractions: java.util.Map[K, jl.Double],
seed: Long): JavaPairRDD[K, V] =
new JavaPairRDD[K, V](rdd.sampleByKeyExact(
withReplacement,
fractions.asScala.mapValues(_.toDouble).toMap, // map to Scala Double; toMap to serialize
seed))
/**
* Return a subset of this RDD sampled by key (via stratified sampling) containing exactly
* math.ceil(numItems * samplingRate) for each stratum (group of pairs with the same key).
*
* This method differs from `sampleByKey` in that we make additional passes over the RDD to
* create a sample size that's exactly equal to the sum of math.ceil(numItems * samplingRate)
* over all key values with a 99.99% confidence. When sampling without replacement, we need one
* additional pass over the RDD to guarantee sample size; when sampling with replacement, we need
* two additional passes.
*
* Use Utils.random.nextLong as the default seed for the random number generator.
*/
def sampleByKeyExact(
withReplacement: Boolean,
fractions: java.util.Map[K, jl.Double]): JavaPairRDD[K, V] =
sampleByKeyExact(withReplacement, fractions, Utils.random.nextLong)
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def union(other: JavaPairRDD[K, V]): JavaPairRDD[K, V] =
new JavaPairRDD[K, V](rdd.union(other.rdd))
/**
* Return the intersection of this RDD and another one. The output will not contain any duplicate
* elements, even if the input RDDs did.
*
* @note This method performs a shuffle internally.
*/
def intersection(other: JavaPairRDD[K, V]): JavaPairRDD[K, V] =
new JavaPairRDD[K, V](rdd.intersection(other.rdd))
// first() has to be overridden here so that the generated method has the signature
// 'public scala.Tuple2 first()'; if the trait's definition is used,
// then the method has the signature 'public java.lang.Object first()',
// causing NoSuchMethodErrors at runtime.
override def first(): (K, V) = rdd.first()
// Pair RDD functions
/**
* Generic function to combine the elements for each key using a custom set of aggregation
* functions. Turns a JavaPairRDD[(K, V)] into a result of type JavaPairRDD[(K, C)], for a
* "combined type" C.
*
* Users provide three functions:
*
* - `createCombiner`, which turns a V into a C (e.g., creates a one-element list)
* - `mergeValue`, to merge a V into a C (e.g., adds it to the end of a list)
* - `mergeCombiners`, to combine two C's into a single one.
*
* In addition, users can control the partitioning of the output RDD, the serializer that is use
* for the shuffle, and whether to perform map-side aggregation (if a mapper can produce multiple
* items with the same key).
*
* @note V and C can be different -- for example, one might group an RDD of type (Int, Int) into
* an RDD of type (Int, List[Int]).
*/
def combineByKey[C](createCombiner: JFunction[V, C],
mergeValue: JFunction2[C, V, C],
mergeCombiners: JFunction2[C, C, C],
partitioner: Partitioner,
mapSideCombine: Boolean,
serializer: Serializer): JavaPairRDD[K, C] = {
implicit val ctag: ClassTag[C] = fakeClassTag
fromRDD(rdd.combineByKeyWithClassTag(
createCombiner,
mergeValue,
mergeCombiners,
partitioner,
mapSideCombine,
serializer
))
}
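  // Illustrative sketch (added comment, not part of the original file): a per-key mean
  // computed with the three combineByKey functions, written against the Scala
  // RDD.combineByKey that this wrapper delegates to.
  //   val sums = pairs.combineByKey(
  //     (v: Double) => (v, 1L),                                          // createCombiner
  //     (c: (Double, Long), v: Double) => (c._1 + v, c._2 + 1L),         // mergeValue
  //     (c1: (Double, Long), c2: (Double, Long)) => (c1._1 + c2._1, c1._2 + c2._2)) // mergeCombiners
  //   val means = sums.mapValues { case (sum, n) => sum / n }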
/**
* Generic function to combine the elements for each key using a custom set of aggregation
* functions. Turns a JavaPairRDD[(K, V)] into a result of type JavaPairRDD[(K, C)], for a
* "combined type" C.
*
* Users provide three functions:
*
* - `createCombiner`, which turns a V into a C (e.g., creates a one-element list)
* - `mergeValue`, to merge a V into a C (e.g., adds it to the end of a list)
* - `mergeCombiners`, to combine two C's into a single one.
*
* In addition, users can control the partitioning of the output RDD. This method automatically
* uses map-side aggregation in shuffling the RDD.
*
* @note V and C can be different -- for example, one might group an RDD of type (Int, Int) into
* an RDD of type (Int, List[Int]).
*/
def combineByKey[C](createCombiner: JFunction[V, C],
mergeValue: JFunction2[C, V, C],
mergeCombiners: JFunction2[C, C, C],
partitioner: Partitioner): JavaPairRDD[K, C] = {
combineByKey(createCombiner, mergeValue, mergeCombiners, partitioner, true, null)
}
/**
* Simplified version of combineByKey that hash-partitions the output RDD and uses map-side
* aggregation.
*/
def combineByKey[C](createCombiner: JFunction[V, C],
mergeValue: JFunction2[C, V, C],
mergeCombiners: JFunction2[C, C, C],
numPartitions: Int): JavaPairRDD[K, C] =
combineByKey(createCombiner, mergeValue, mergeCombiners, new HashPartitioner(numPartitions))
/**
* Merge the values for each key using an associative and commutative reduce function. This will
* also perform the merging locally on each mapper before sending results to a reducer, similarly
* to a "combiner" in MapReduce.
*/
def reduceByKey(partitioner: Partitioner, func: JFunction2[V, V, V]): JavaPairRDD[K, V] =
fromRDD(rdd.reduceByKey(partitioner, func))
/**
* Merge the values for each key using an associative and commutative reduce function, but return
* the result immediately to the master as a Map. This will also perform the merging locally on
* each mapper before sending results to a reducer, similarly to a "combiner" in MapReduce.
*/
def reduceByKeyLocally(func: JFunction2[V, V, V]): java.util.Map[K, V] =
mapAsSerializableJavaMap(rdd.reduceByKeyLocally(func))
/** Count the number of elements for each key, and return the result to the master as a Map. */
def countByKey(): java.util.Map[K, jl.Long] =
mapAsSerializableJavaMap(rdd.countByKey()).asInstanceOf[java.util.Map[K, jl.Long]]
/**
* Approximate version of countByKey that can return a partial result if it does
* not finish within a timeout.
*/
def countByKeyApprox(timeout: Long): PartialResult[java.util.Map[K, BoundedDouble]] =
rdd.countByKeyApprox(timeout).map(mapAsSerializableJavaMap)
/**
* Approximate version of countByKey that can return a partial result if it does
* not finish within a timeout.
*/
def countByKeyApprox(timeout: Long, confidence: Double = 0.95)
: PartialResult[java.util.Map[K, BoundedDouble]] =
rdd.countByKeyApprox(timeout, confidence).map(mapAsSerializableJavaMap)
/**
* Aggregate the values of each key, using given combine functions and a neutral "zero value".
* This function can return a different result type, U, than the type of the values in this RDD,
* V. Thus, we need one operation for merging a V into a U and one operation for merging two U's,
* as in scala.TraversableOnce. The former operation is used for merging values within a
* partition, and the latter is used for merging values between partitions. To avoid memory
* allocation, both of these functions are allowed to modify and return their first argument
* instead of creating a new U.
*/
def aggregateByKey[U](zeroValue: U, partitioner: Partitioner, seqFunc: JFunction2[U, V, U],
combFunc: JFunction2[U, U, U]): JavaPairRDD[K, U] = {
implicit val ctag: ClassTag[U] = fakeClassTag
fromRDD(rdd.aggregateByKey(zeroValue, partitioner)(seqFunc, combFunc))
}
/**
* Aggregate the values of each key, using given combine functions and a neutral "zero value".
* This function can return a different result type, U, than the type of the values in this RDD,
* V. Thus, we need one operation for merging a V into a U and one operation for merging two U's,
* as in scala.TraversableOnce. The former operation is used for merging values within a
* partition, and the latter is used for merging values between partitions. To avoid memory
* allocation, both of these functions are allowed to modify and return their first argument
* instead of creating a new U.
*/
def aggregateByKey[U](zeroValue: U, numPartitions: Int, seqFunc: JFunction2[U, V, U],
combFunc: JFunction2[U, U, U]): JavaPairRDD[K, U] = {
implicit val ctag: ClassTag[U] = fakeClassTag
fromRDD(rdd.aggregateByKey(zeroValue, numPartitions)(seqFunc, combFunc))
}
/**
* Aggregate the values of each key, using given combine functions and a neutral "zero value".
* This function can return a different result type, U, than the type of the values in this RDD,
* V. Thus, we need one operation for merging a V into a U and one operation for merging two U's.
* The former operation is used for merging values within a partition, and the latter is used for
* merging values between partitions. To avoid memory allocation, both of these functions are
* allowed to modify and return their first argument instead of creating a new U.
*/
def aggregateByKey[U](zeroValue: U, seqFunc: JFunction2[U, V, U], combFunc: JFunction2[U, U, U]):
JavaPairRDD[K, U] = {
implicit val ctag: ClassTag[U] = fakeClassTag
fromRDD(rdd.aggregateByKey(zeroValue)(seqFunc, combFunc))
}
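  // Illustrative sketch (added comment, not part of the original file): per-key
  // (sum, count) via aggregateByKey on the Scala RDD API, with (0.0, 0L) as the zero value.
  //   val sumCounts = pairs.aggregateByKey((0.0, 0L))(
  //     (acc, v) => (acc._1 + v, acc._2 + 1L),   // merge one value into the accumulator
  //     (a, b) => (a._1 + b._1, a._2 + b._2))    // merge two accumulators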
/**
* Merge the values for each key using an associative function and a neutral "zero value" which
* may be added to the result an arbitrary number of times, and must not change the result
   * (e.g., Nil for list concatenation, 0 for addition, or 1 for multiplication).
*/
def foldByKey(zeroValue: V, partitioner: Partitioner, func: JFunction2[V, V, V])
: JavaPairRDD[K, V] = fromRDD(rdd.foldByKey(zeroValue, partitioner)(func))
/**
* Merge the values for each key using an associative function and a neutral "zero value" which
* may be added to the result an arbitrary number of times, and must not change the result
   * (e.g., Nil for list concatenation, 0 for addition, or 1 for multiplication).
*/
def foldByKey(zeroValue: V, numPartitions: Int, func: JFunction2[V, V, V]): JavaPairRDD[K, V] =
fromRDD(rdd.foldByKey(zeroValue, numPartitions)(func))
/**
* Merge the values for each key using an associative function and a neutral "zero value"
* which may be added to the result an arbitrary number of times, and must not change the result
   * (e.g., Nil for list concatenation, 0 for addition, or 1 for multiplication).
*/
def foldByKey(zeroValue: V, func: JFunction2[V, V, V]): JavaPairRDD[K, V] =
fromRDD(rdd.foldByKey(zeroValue)(func))
/**
* Merge the values for each key using an associative and commutative reduce function. This will
* also perform the merging locally on each mapper before sending results to a reducer, similarly
* to a "combiner" in MapReduce. Output will be hash-partitioned with numPartitions partitions.
*/
def reduceByKey(func: JFunction2[V, V, V], numPartitions: Int): JavaPairRDD[K, V] =
fromRDD(rdd.reduceByKey(func, numPartitions))
/**
* Group the values for each key in the RDD into a single sequence. Allows controlling the
* partitioning of the resulting key-value pair RDD by passing a Partitioner.
*
* @note If you are grouping in order to perform an aggregation (such as a sum or average) over
* each key, using `JavaPairRDD.reduceByKey` or `JavaPairRDD.combineByKey`
* will provide much better performance.
*/
def groupByKey(partitioner: Partitioner): JavaPairRDD[K, JIterable[V]] =
fromRDD(groupByResultToJava(rdd.groupByKey(partitioner)))
/**
* Group the values for each key in the RDD into a single sequence. Hash-partitions the
* resulting RDD with into `numPartitions` partitions.
*
* @note If you are grouping in order to perform an aggregation (such as a sum or average) over
* each key, using `JavaPairRDD.reduceByKey` or `JavaPairRDD.combineByKey`
* will provide much better performance.
*/
def groupByKey(numPartitions: Int): JavaPairRDD[K, JIterable[V]] =
fromRDD(groupByResultToJava(rdd.groupByKey(numPartitions)))
/**
* Return an RDD with the elements from `this` that are not in `other`.
*
* Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
* RDD will be <= us.
*/
def subtract(other: JavaPairRDD[K, V]): JavaPairRDD[K, V] =
fromRDD(rdd.subtract(other))
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(other: JavaPairRDD[K, V], numPartitions: Int): JavaPairRDD[K, V] =
fromRDD(rdd.subtract(other, numPartitions))
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(other: JavaPairRDD[K, V], p: Partitioner): JavaPairRDD[K, V] =
fromRDD(rdd.subtract(other, p))
/**
* Return an RDD with the pairs from `this` whose keys are not in `other`.
*
* Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
* RDD will be <= us.
*/
def subtractByKey[W](other: JavaPairRDD[K, W]): JavaPairRDD[K, V] = {
implicit val ctag: ClassTag[W] = fakeClassTag
fromRDD(rdd.subtractByKey(other))
}
/**
* Return an RDD with the pairs from `this` whose keys are not in `other`.
*/
def subtractByKey[W](other: JavaPairRDD[K, W], numPartitions: Int): JavaPairRDD[K, V] = {
implicit val ctag: ClassTag[W] = fakeClassTag
fromRDD(rdd.subtractByKey(other, numPartitions))
}
/**
* Return an RDD with the pairs from `this` whose keys are not in `other`.
*/
def subtractByKey[W](other: JavaPairRDD[K, W], p: Partitioner): JavaPairRDD[K, V] = {
implicit val ctag: ClassTag[W] = fakeClassTag
fromRDD(rdd.subtractByKey(other, p))
}
/**
* Return a copy of the RDD partitioned using the specified partitioner.
*/
def partitionBy(partitioner: Partitioner): JavaPairRDD[K, V] =
fromRDD(rdd.partitionBy(partitioner))
/**
* Return an RDD containing all pairs of elements with matching keys in `this` and `other`. Each
* pair of elements will be returned as a (k, (v1, v2)) tuple, where (k, v1) is in `this` and
* (k, v2) is in `other`. Uses the given Partitioner to partition the output RDD.
*/
def join[W](other: JavaPairRDD[K, W], partitioner: Partitioner): JavaPairRDD[K, (V, W)] =
fromRDD(rdd.join(other, partitioner))
/**
* Perform a left outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (v, Some(w))) for w in `other`, or the
* pair (k, (v, None)) if no elements in `other` have key k. Uses the given Partitioner to
* partition the output RDD.
*/
def leftOuterJoin[W](other: JavaPairRDD[K, W], partitioner: Partitioner)
: JavaPairRDD[K, (V, Optional[W])] = {
val joinResult = rdd.leftOuterJoin(other, partitioner)
fromRDD(joinResult.mapValues{case (v, w) => (v, JavaUtils.optionToOptional(w))})
}
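  // Illustrative sketch (added comment, not part of the original file): on the Scala
  // RDD API, keys missing from `right` surface as None (result order not guaranteed).
  //   val left  = sc.parallelize(Seq(("a", 1), ("b", 2)))
  //   val right = sc.parallelize(Seq(("a", "x")))
  //   left.leftOuterJoin(right).collect()
  //   // e.g. Array(("a", (1, Some("x"))), ("b", (2, None)))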
/**
* Perform a right outer join of `this` and `other`. For each element (k, w) in `other`, the
* resulting RDD will either contain all pairs (k, (Some(v), w)) for v in `this`, or the
* pair (k, (None, w)) if no elements in `this` have key k. Uses the given Partitioner to
* partition the output RDD.
*/
def rightOuterJoin[W](other: JavaPairRDD[K, W], partitioner: Partitioner)
: JavaPairRDD[K, (Optional[V], W)] = {
val joinResult = rdd.rightOuterJoin(other, partitioner)
fromRDD(joinResult.mapValues{case (v, w) => (JavaUtils.optionToOptional(v), w)})
}
/**
* Perform a full outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (Some(v), Some(w))) for w in `other`, or
* the pair (k, (Some(v), None)) if no elements in `other` have key k. Similarly, for each
* element (k, w) in `other`, the resulting RDD will either contain all pairs
* (k, (Some(v), Some(w))) for v in `this`, or the pair (k, (None, Some(w))) if no elements
* in `this` have key k. Uses the given Partitioner to partition the output RDD.
*/
def fullOuterJoin[W](other: JavaPairRDD[K, W], partitioner: Partitioner)
: JavaPairRDD[K, (Optional[V], Optional[W])] = {
val joinResult = rdd.fullOuterJoin(other, partitioner)
fromRDD(joinResult.mapValues{ case (v, w) =>
(JavaUtils.optionToOptional(v), JavaUtils.optionToOptional(w))
})
}
/**
* Simplified version of combineByKey that hash-partitions the resulting RDD using the existing
* partitioner/parallelism level and using map-side aggregation.
*/
def combineByKey[C](createCombiner: JFunction[V, C],
mergeValue: JFunction2[C, V, C],
mergeCombiners: JFunction2[C, C, C]): JavaPairRDD[K, C] = {
implicit val ctag: ClassTag[C] = fakeClassTag
fromRDD(combineByKey(createCombiner, mergeValue, mergeCombiners, defaultPartitioner(rdd)))
}
/**
* Merge the values for each key using an associative and commutative reduce function. This will
* also perform the merging locally on each mapper before sending results to a reducer, similarly
* to a "combiner" in MapReduce. Output will be hash-partitioned with the existing partitioner/
* parallelism level.
*/
def reduceByKey(func: JFunction2[V, V, V]): JavaPairRDD[K, V] = {
fromRDD(reduceByKey(defaultPartitioner(rdd), func))
}
/**
* Group the values for each key in the RDD into a single sequence. Hash-partitions the
* resulting RDD with the existing partitioner/parallelism level.
*
* @note If you are grouping in order to perform an aggregation (such as a sum or average) over
* each key, using `JavaPairRDD.reduceByKey` or `JavaPairRDD.combineByKey`
* will provide much better performance.
*/
def groupByKey(): JavaPairRDD[K, JIterable[V]] =
fromRDD(groupByResultToJava(rdd.groupByKey()))
/**
* Return an RDD containing all pairs of elements with matching keys in `this` and `other`. Each
* pair of elements will be returned as a (k, (v1, v2)) tuple, where (k, v1) is in `this` and
* (k, v2) is in `other`. Performs a hash join across the cluster.
*/
def join[W](other: JavaPairRDD[K, W]): JavaPairRDD[K, (V, W)] =
fromRDD(rdd.join(other))
/**
* Return an RDD containing all pairs of elements with matching keys in `this` and `other`. Each
* pair of elements will be returned as a (k, (v1, v2)) tuple, where (k, v1) is in `this` and
* (k, v2) is in `other`. Performs a hash join across the cluster.
*/
def join[W](other: JavaPairRDD[K, W], numPartitions: Int): JavaPairRDD[K, (V, W)] =
fromRDD(rdd.join(other, numPartitions))
/**
* Perform a left outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (v, Some(w))) for w in `other`, or the
* pair (k, (v, None)) if no elements in `other` have key k. Hash-partitions the output
* using the existing partitioner/parallelism level.
*/
def leftOuterJoin[W](other: JavaPairRDD[K, W]): JavaPairRDD[K, (V, Optional[W])] = {
val joinResult = rdd.leftOuterJoin(other)
fromRDD(joinResult.mapValues{case (v, w) => (v, JavaUtils.optionToOptional(w))})
}
/**
* Perform a left outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (v, Some(w))) for w in `other`, or the
* pair (k, (v, None)) if no elements in `other` have key k. Hash-partitions the output
* into `numPartitions` partitions.
*/
def leftOuterJoin[W](other: JavaPairRDD[K, W], numPartitions: Int)
: JavaPairRDD[K, (V, Optional[W])] = {
val joinResult = rdd.leftOuterJoin(other, numPartitions)
fromRDD(joinResult.mapValues{case (v, w) => (v, JavaUtils.optionToOptional(w))})
}
/**
* Perform a right outer join of `this` and `other`. For each element (k, w) in `other`, the
* resulting RDD will either contain all pairs (k, (Some(v), w)) for v in `this`, or the
* pair (k, (None, w)) if no elements in `this` have key k. Hash-partitions the resulting
* RDD using the existing partitioner/parallelism level.
*/
def rightOuterJoin[W](other: JavaPairRDD[K, W]): JavaPairRDD[K, (Optional[V], W)] = {
val joinResult = rdd.rightOuterJoin(other)
fromRDD(joinResult.mapValues{case (v, w) => (JavaUtils.optionToOptional(v), w)})
}
/**
* Perform a right outer join of `this` and `other`. For each element (k, w) in `other`, the
* resulting RDD will either contain all pairs (k, (Some(v), w)) for v in `this`, or the
* pair (k, (None, w)) if no elements in `this` have key k. Hash-partitions the resulting
* RDD into the given number of partitions.
*/
def rightOuterJoin[W](other: JavaPairRDD[K, W], numPartitions: Int)
: JavaPairRDD[K, (Optional[V], W)] = {
val joinResult = rdd.rightOuterJoin(other, numPartitions)
fromRDD(joinResult.mapValues{case (v, w) => (JavaUtils.optionToOptional(v), w)})
}
/**
* Perform a full outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (Some(v), Some(w))) for w in `other`, or
* the pair (k, (Some(v), None)) if no elements in `other` have key k. Similarly, for each
* element (k, w) in `other`, the resulting RDD will either contain all pairs
* (k, (Some(v), Some(w))) for v in `this`, or the pair (k, (None, Some(w))) if no elements
* in `this` have key k. Hash-partitions the resulting RDD using the existing partitioner/
* parallelism level.
*/
def fullOuterJoin[W](other: JavaPairRDD[K, W]): JavaPairRDD[K, (Optional[V], Optional[W])] = {
val joinResult = rdd.fullOuterJoin(other)
fromRDD(joinResult.mapValues{ case (v, w) =>
(JavaUtils.optionToOptional(v), JavaUtils.optionToOptional(w))
})
}
/**
* Perform a full outer join of `this` and `other`. For each element (k, v) in `this`, the
* resulting RDD will either contain all pairs (k, (Some(v), Some(w))) for w in `other`, or
* the pair (k, (Some(v), None)) if no elements in `other` have key k. Similarly, for each
* element (k, w) in `other`, the resulting RDD will either contain all pairs
* (k, (Some(v), Some(w))) for v in `this`, or the pair (k, (None, Some(w))) if no elements
* in `this` have key k. Hash-partitions the resulting RDD into the given number of partitions.
*/
def fullOuterJoin[W](other: JavaPairRDD[K, W], numPartitions: Int)
: JavaPairRDD[K, (Optional[V], Optional[W])] = {
val joinResult = rdd.fullOuterJoin(other, numPartitions)
fromRDD(joinResult.mapValues{ case (v, w) =>
(JavaUtils.optionToOptional(v), JavaUtils.optionToOptional(w))
})
}
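  // Illustrative sketch for the join family above (assumes an existing Scala SparkContext `sc`;
  // `fromRDD` is the helper defined in the companion object below). Keys present on only one
  // side come back with an absent Optional on the other side:
  //
  //   val left  = JavaPairRDD.fromRDD(sc.parallelize(Seq(("a", 1), ("b", 2))))
  //   val right = JavaPairRDD.fromRDD(sc.parallelize(Seq(("b", "x"), ("c", "y"))))
  //   left.fullOuterJoin(right).collect()
  //   // schematically: ("a", (Optional(1), <absent>)), ("b", (Optional(2), Optional("x"))),
  //   //                ("c", (<absent>, Optional("y")))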
/**
* Return the key-value pairs in this RDD to the master as a Map.
*
* @note this method should only be used if the resulting data is expected to be small, as
* all the data is loaded into the driver's memory.
*/
def collectAsMap(): java.util.Map[K, V] = mapAsSerializableJavaMap(rdd.collectAsMap())
/**
* Pass each value in the key-value pair RDD through a map function without changing the keys;
* this also retains the original RDD's partitioning.
*/
def mapValues[U](f: JFunction[V, U]): JavaPairRDD[K, U] = {
implicit val ctag: ClassTag[U] = fakeClassTag
fromRDD(rdd.mapValues(f))
}
/**
* Pass each value in the key-value pair RDD through a flatMap function without changing the
* keys; this also retains the original RDD's partitioning.
*/
def flatMapValues[U](f: FlatMapFunction[V, U]): JavaPairRDD[K, U] = {
def fn: (V) => Iterator[U] = (x: V) => f.call(x).asScala
implicit val ctag: ClassTag[U] = fakeClassTag
fromRDD(rdd.flatMapValues(fn))
}
/**
* For each key k in `this` or `other`, return a resulting RDD that contains a tuple with the
* list of values for that key in `this` as well as `other`.
*/
def cogroup[W](other: JavaPairRDD[K, W], partitioner: Partitioner)
: JavaPairRDD[K, (JIterable[V], JIterable[W])] =
fromRDD(cogroupResultToJava(rdd.cogroup(other, partitioner)))
/**
* For each key k in `this` or `other1` or `other2`, return a resulting RDD that contains a
* tuple with the list of values for that key in `this`, `other1` and `other2`.
*/
def cogroup[W1, W2](other1: JavaPairRDD[K, W1], other2: JavaPairRDD[K, W2],
partitioner: Partitioner): JavaPairRDD[K, (JIterable[V], JIterable[W1], JIterable[W2])] =
fromRDD(cogroupResult2ToJava(rdd.cogroup(other1, other2, partitioner)))
/**
* For each key k in `this` or `other1` or `other2` or `other3`,
* return a resulting RDD that contains a tuple with the list of values
* for that key in `this`, `other1`, `other2` and `other3`.
*/
def cogroup[W1, W2, W3](other1: JavaPairRDD[K, W1],
other2: JavaPairRDD[K, W2],
other3: JavaPairRDD[K, W3],
partitioner: Partitioner)
: JavaPairRDD[K, (JIterable[V], JIterable[W1], JIterable[W2], JIterable[W3])] =
fromRDD(cogroupResult3ToJava(rdd.cogroup(other1, other2, other3, partitioner)))
/**
* For each key k in `this` or `other`, return a resulting RDD that contains a tuple with the
* list of values for that key in `this` as well as `other`.
*/
def cogroup[W](other: JavaPairRDD[K, W]): JavaPairRDD[K, (JIterable[V], JIterable[W])] =
fromRDD(cogroupResultToJava(rdd.cogroup(other)))
/**
* For each key k in `this` or `other1` or `other2`, return a resulting RDD that contains a
* tuple with the list of values for that key in `this`, `other1` and `other2`.
*/
def cogroup[W1, W2](other1: JavaPairRDD[K, W1], other2: JavaPairRDD[K, W2])
: JavaPairRDD[K, (JIterable[V], JIterable[W1], JIterable[W2])] =
fromRDD(cogroupResult2ToJava(rdd.cogroup(other1, other2)))
/**
* For each key k in `this` or `other1` or `other2` or `other3`,
* return a resulting RDD that contains a tuple with the list of values
* for that key in `this`, `other1`, `other2` and `other3`.
*/
def cogroup[W1, W2, W3](other1: JavaPairRDD[K, W1],
other2: JavaPairRDD[K, W2],
other3: JavaPairRDD[K, W3])
: JavaPairRDD[K, (JIterable[V], JIterable[W1], JIterable[W2], JIterable[W3])] =
fromRDD(cogroupResult3ToJava(rdd.cogroup(other1, other2, other3)))
/**
* For each key k in `this` or `other`, return a resulting RDD that contains a tuple with the
* list of values for that key in `this` as well as `other`.
*/
def cogroup[W](other: JavaPairRDD[K, W], numPartitions: Int)
: JavaPairRDD[K, (JIterable[V], JIterable[W])] =
fromRDD(cogroupResultToJava(rdd.cogroup(other, numPartitions)))
/**
* For each key k in `this` or `other1` or `other2`, return a resulting RDD that contains a
* tuple with the list of values for that key in `this`, `other1` and `other2`.
*/
def cogroup[W1, W2](other1: JavaPairRDD[K, W1], other2: JavaPairRDD[K, W2], numPartitions: Int)
: JavaPairRDD[K, (JIterable[V], JIterable[W1], JIterable[W2])] =
fromRDD(cogroupResult2ToJava(rdd.cogroup(other1, other2, numPartitions)))
/**
* For each key k in `this` or `other1` or `other2` or `other3`,
* return a resulting RDD that contains a tuple with the list of values
* for that key in `this`, `other1`, `other2` and `other3`.
*/
def cogroup[W1, W2, W3](other1: JavaPairRDD[K, W1],
other2: JavaPairRDD[K, W2],
other3: JavaPairRDD[K, W3],
numPartitions: Int)
: JavaPairRDD[K, (JIterable[V], JIterable[W1], JIterable[W2], JIterable[W3])] =
fromRDD(cogroupResult3ToJava(rdd.cogroup(other1, other2, other3, numPartitions)))
/** Alias for cogroup. */
def groupWith[W](other: JavaPairRDD[K, W]): JavaPairRDD[K, (JIterable[V], JIterable[W])] =
fromRDD(cogroupResultToJava(rdd.groupWith(other)))
/** Alias for cogroup. */
def groupWith[W1, W2](other1: JavaPairRDD[K, W1], other2: JavaPairRDD[K, W2])
: JavaPairRDD[K, (JIterable[V], JIterable[W1], JIterable[W2])] =
fromRDD(cogroupResult2ToJava(rdd.groupWith(other1, other2)))
/** Alias for cogroup. */
def groupWith[W1, W2, W3](other1: JavaPairRDD[K, W1],
other2: JavaPairRDD[K, W2],
other3: JavaPairRDD[K, W3])
: JavaPairRDD[K, (JIterable[V], JIterable[W1], JIterable[W2], JIterable[W3])] =
fromRDD(cogroupResult3ToJava(rdd.groupWith(other1, other2, other3)))
/**
* Return the list of values in the RDD for key `key`. This operation is done efficiently if the
* RDD has a known partitioner by only searching the partition that the key maps to.
*/
def lookup(key: K): JList[V] = rdd.lookup(key).asJava
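  // Illustrative sketch for `lookup` above (assumes `pairs` is a JavaPairRDD[String, Int] that
  // was previously partitioned, e.g. with partitionBy(new HashPartitioner(8)), so only the one
  // partition that "b" hashes to is scanned):
  //
  //   val valuesForB: java.util.List[Int] = pairs.lookup("b")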
/** Output the RDD to any Hadoop-supported file system. */
def saveAsHadoopFile[F <: OutputFormat[_, _]](
path: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[F],
conf: JobConf) {
rdd.saveAsHadoopFile(path, keyClass, valueClass, outputFormatClass, conf)
}
/** Output the RDD to any Hadoop-supported file system. */
def saveAsHadoopFile[F <: OutputFormat[_, _]](
path: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[F]) {
rdd.saveAsHadoopFile(path, keyClass, valueClass, outputFormatClass)
}
/** Output the RDD to any Hadoop-supported file system, compressing with the supplied codec. */
def saveAsHadoopFile[F <: OutputFormat[_, _]](
path: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[F],
codec: Class[_ <: CompressionCodec]) {
rdd.saveAsHadoopFile(path, keyClass, valueClass, outputFormatClass, codec)
}
/** Output the RDD to any Hadoop-supported file system. */
def saveAsNewAPIHadoopFile[F <: NewOutputFormat[_, _]](
path: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[F],
conf: Configuration) {
rdd.saveAsNewAPIHadoopFile(path, keyClass, valueClass, outputFormatClass, conf)
}
/**
* Output the RDD to any Hadoop-supported storage system, using
* a Configuration object for that storage system.
*/
def saveAsNewAPIHadoopDataset(conf: Configuration) {
rdd.saveAsNewAPIHadoopDataset(conf)
}
/** Output the RDD to any Hadoop-supported file system. */
def saveAsNewAPIHadoopFile[F <: NewOutputFormat[_, _]](
path: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[F]) {
rdd.saveAsNewAPIHadoopFile(path, keyClass, valueClass, outputFormatClass)
}
/**
* Output the RDD to any Hadoop-supported storage system, using a Hadoop JobConf object for
* that storage system. The JobConf should set an OutputFormat and any output paths required
* (e.g. a table name to write to) in the same way as it would be configured for a Hadoop
* MapReduce job.
*/
def saveAsHadoopDataset(conf: JobConf) {
rdd.saveAsHadoopDataset(conf)
}
/**
* Repartition the RDD according to the given partitioner and, within each resulting partition,
* sort records by their keys.
*
* This is more efficient than calling `repartition` and then sorting within each partition
* because it can push the sorting down into the shuffle machinery.
*/
def repartitionAndSortWithinPartitions(partitioner: Partitioner): JavaPairRDD[K, V] = {
val comp = com.google.common.collect.Ordering.natural().asInstanceOf[Comparator[K]]
repartitionAndSortWithinPartitions(partitioner, comp)
}
/**
* Repartition the RDD according to the given partitioner and, within each resulting partition,
* sort records by their keys.
*
* This is more efficient than calling `repartition` and then sorting within each partition
* because it can push the sorting down into the shuffle machinery.
*/
def repartitionAndSortWithinPartitions(partitioner: Partitioner, comp: Comparator[K])
: JavaPairRDD[K, V] = {
implicit val ordering = comp // Allow implicit conversion of Comparator to Ordering.
fromRDD(
new OrderedRDDFunctions[K, V, (K, V)](rdd).repartitionAndSortWithinPartitions(partitioner))
}
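  // Illustrative sketch for repartitionAndSortWithinPartitions above: hash-partition into 4
  // partitions and sort each partition by key in a single shuffle (assumes an existing
  // JavaPairRDD[String, Int] named `pairs`):
  //
  //   import org.apache.spark.HashPartitioner
  //   val sortedWithinPartitions = pairs.repartitionAndSortWithinPartitions(new HashPartitioner(4))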
/**
* Sort the RDD by key, so that each partition contains a sorted range of the elements in
* ascending order. Calling `collect` or `save` on the resulting RDD will return or output an
* ordered list of records (in the `save` case, they will be written to multiple `part-X` files
* in the filesystem, in order of the keys).
*/
def sortByKey(): JavaPairRDD[K, V] = sortByKey(true)
/**
* Sort the RDD by key, so that each partition contains a sorted range of the elements. Calling
* `collect` or `save` on the resulting RDD will return or output an ordered list of records
* (in the `save` case, they will be written to multiple `part-X` files in the filesystem, in
* order of the keys).
*/
def sortByKey(ascending: Boolean): JavaPairRDD[K, V] = {
val comp = com.google.common.collect.Ordering.natural().asInstanceOf[Comparator[K]]
sortByKey(comp, ascending)
}
/**
* Sort the RDD by key, so that each partition contains a sorted range of the elements. Calling
* `collect` or `save` on the resulting RDD will return or output an ordered list of records
* (in the `save` case, they will be written to multiple `part-X` files in the filesystem, in
* order of the keys).
*/
def sortByKey(ascending: Boolean, numPartitions: Int): JavaPairRDD[K, V] = {
val comp = com.google.common.collect.Ordering.natural().asInstanceOf[Comparator[K]]
sortByKey(comp, ascending, numPartitions)
}
/**
* Sort the RDD by key, so that each partition contains a sorted range of the elements. Calling
* `collect` or `save` on the resulting RDD will return or output an ordered list of records
* (in the `save` case, they will be written to multiple `part-X` files in the filesystem, in
* order of the keys).
*/
def sortByKey(comp: Comparator[K]): JavaPairRDD[K, V] = sortByKey(comp, true)
/**
* Sort the RDD by key, so that each partition contains a sorted range of the elements. Calling
* `collect` or `save` on the resulting RDD will return or output an ordered list of records
* (in the `save` case, they will be written to multiple `part-X` files in the filesystem, in
* order of the keys).
*/
def sortByKey(comp: Comparator[K], ascending: Boolean): JavaPairRDD[K, V] = {
implicit val ordering = comp // Allow implicit conversion of Comparator to Ordering.
fromRDD(new OrderedRDDFunctions[K, V, (K, V)](rdd).sortByKey(ascending))
}
/**
* Sort the RDD by key, so that each partition contains a sorted range of the elements. Calling
* `collect` or `save` on the resulting RDD will return or output an ordered list of records
* (in the `save` case, they will be written to multiple `part-X` files in the filesystem, in
* order of the keys).
*/
def sortByKey(comp: Comparator[K], ascending: Boolean, numPartitions: Int): JavaPairRDD[K, V] = {
implicit val ordering = comp // Allow implicit conversion of Comparator to Ordering.
fromRDD(new OrderedRDDFunctions[K, V, (K, V)](rdd).sortByKey(ascending, numPartitions))
}
/**
* Return an RDD with the keys of each tuple.
*/
def keys(): JavaRDD[K] = JavaRDD.fromRDD[K](rdd.map(_._1))
/**
* Return an RDD with the values of each tuple.
*/
def values(): JavaRDD[V] = JavaRDD.fromRDD[V](rdd.map(_._2))
/**
* Return approximate number of distinct values for each key in this RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
* @param relativeSD Relative accuracy. Smaller values create counters that require more space.
* It must be greater than 0.000017.
* @param partitioner partitioner of the resulting RDD.
*/
def countApproxDistinctByKey(relativeSD: Double, partitioner: Partitioner)
: JavaPairRDD[K, jl.Long] = {
fromRDD(rdd.countApproxDistinctByKey(relativeSD, partitioner)).
asInstanceOf[JavaPairRDD[K, jl.Long]]
}
/**
* Return approximate number of distinct values for each key in this RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
* @param relativeSD Relative accuracy. Smaller values create counters that require more space.
* It must be greater than 0.000017.
* @param numPartitions number of partitions of the resulting RDD.
*/
def countApproxDistinctByKey(relativeSD: Double, numPartitions: Int): JavaPairRDD[K, jl.Long] = {
fromRDD(rdd.countApproxDistinctByKey(relativeSD, numPartitions)).
asInstanceOf[JavaPairRDD[K, jl.Long]]
}
/**
* Return approximate number of distinct values for each key in this RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
* @param relativeSD Relative accuracy. Smaller values create counters that require more space.
* It must be greater than 0.000017.
*/
def countApproxDistinctByKey(relativeSD: Double): JavaPairRDD[K, jl.Long] = {
fromRDD(rdd.countApproxDistinctByKey(relativeSD)).asInstanceOf[JavaPairRDD[K, jl.Long]]
}
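  // Illustrative sketch for countApproxDistinctByKey above (assumes an existing
  // JavaPairRDD[String, String] named `events`, e.g. (userId, pageId) pairs): estimate the
  // number of distinct values per key with roughly 5% relative error; smaller relativeSD
  // values cost more memory per counter.
  //
  //   val approxDistinctPagesPerUser = events.countApproxDistinctByKey(0.05)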
/** Assign a name to this RDD */
def setName(name: String): JavaPairRDD[K, V] = {
rdd.setName(name)
this
}
}
object JavaPairRDD {
private[spark]
def groupByResultToJava[K: ClassTag, T](rdd: RDD[(K, Iterable[T])]): RDD[(K, JIterable[T])] = {
rddToPairRDDFunctions(rdd).mapValues(_.asJava)
}
private[spark]
def cogroupResultToJava[K: ClassTag, V, W](
rdd: RDD[(K, (Iterable[V], Iterable[W]))]): RDD[(K, (JIterable[V], JIterable[W]))] = {
rddToPairRDDFunctions(rdd).mapValues(x => (x._1.asJava, x._2.asJava))
}
private[spark]
def cogroupResult2ToJava[K: ClassTag, V, W1, W2](
rdd: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2]))])
: RDD[(K, (JIterable[V], JIterable[W1], JIterable[W2]))] = {
rddToPairRDDFunctions(rdd).mapValues(x => (x._1.asJava, x._2.asJava, x._3.asJava))
}
private[spark]
def cogroupResult3ToJava[K: ClassTag, V, W1, W2, W3](
rdd: RDD[(K, (Iterable[V], Iterable[W1], Iterable[W2], Iterable[W3]))])
: RDD[(K, (JIterable[V], JIterable[W1], JIterable[W2], JIterable[W3]))] = {
rddToPairRDDFunctions(rdd).mapValues(x => (x._1.asJava, x._2.asJava, x._3.asJava, x._4.asJava))
}
def fromRDD[K: ClassTag, V: ClassTag](rdd: RDD[(K, V)]): JavaPairRDD[K, V] = {
new JavaPairRDD[K, V](rdd)
}
implicit def toRDD[K, V](rdd: JavaPairRDD[K, V]): RDD[(K, V)] = rdd.rdd
private[spark]
implicit def toScalaFunction2[T1, T2, R](fun: JFunction2[T1, T2, R]): Function2[T1, T2, R] = {
(x: T1, x1: T2) => fun.call(x, x1)
}
private[spark] implicit def toScalaFunction[T, R](fun: JFunction[T, R]): T => R = x => fun.call(x)
private[spark]
implicit def pairFunToScalaFun[A, B, C](x: PairFunction[A, B, C]): A => (B, C) = y => x.call(y)
/** Convert a JavaRDD of key-value pairs to JavaPairRDD. */
def fromJavaRDD[K, V](rdd: JavaRDD[(K, V)]): JavaPairRDD[K, V] = {
implicit val ctagK: ClassTag[K] = fakeClassTag
implicit val ctagV: ClassTag[V] = fakeClassTag
new JavaPairRDD[K, V](rdd.rdd)
}
}
| ahnqirage/spark | core/src/main/scala/org/apache/spark/api/java/JavaPairRDD.scala | Scala | apache-2.0 | 46,844 |
package com.datastax.spark.connector.rdd.partitioner
import com.datastax.spark.connector.rdd.partitioner.dht.LongToken
/** Fast token range splitter assuming that data are spread out evenly in the whole range. */
private[partitioner] class Murmur3PartitionerTokenRangeSplitter
extends TokenRangeSplitter[Long, LongToken] {
private type TokenRange = com.datastax.spark.connector.rdd.partitioner.dht.TokenRange[Long, LongToken]
override def split(tokenRange: TokenRange, splitSize: Int): Seq[TokenRange] = {
val rangeSize = tokenRange.rangeSize
val splitPointsCount = if (rangeSize < splitSize) rangeSize.toInt else splitSize
val splitPoints = (0 until splitPointsCount).map({ i =>
new LongToken(tokenRange.start.value + (rangeSize * i / splitPointsCount).toLong)
}) :+ tokenRange.end
for (Seq(left, right) <- splitPoints.sliding(2).toSeq) yield
new TokenRange(left, right, tokenRange.replicas, tokenRange.tokenFactory)
}
}
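// Worked example of the split arithmetic above (illustrative, not part of the original file):
// for a token range starting at 0 with rangeSize 100 and splitSize 4, splitPointsCount is 4 and
// the split points are 0, 25, 50 and 75 plus the original end token; `sliding(2)` then yields
// four sub-ranges of roughly equal size: [0, 25), [25, 50), [50, 75) and [75, end).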
| ponkin/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/rdd/partitioner/Murmur3PartitionerTokenRangeSplitter.scala | Scala | apache-2.0 | 967 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package kafka.tools
import kafka.consumer._
import joptsimple._
import kafka.api.{OffsetRequest, PartitionOffsetRequestInfo}
import kafka.common.TopicAndPartition
import kafka.client.ClientUtils
import kafka.utils.{CommandLineUtils, Exit, ToolsUtils}
object GetOffsetShell {
def main(args: Array[String]): Unit = {
val parser = new OptionParser(false)
val brokerListOpt = parser.accepts("broker-list", "REQUIRED: The list of hostname and port of the server to connect to.")
.withRequiredArg
.describedAs("hostname:port,...,hostname:port")
.ofType(classOf[String])
val topicOpt = parser.accepts("topic", "REQUIRED: The topic to get offset from.")
.withRequiredArg
.describedAs("topic")
.ofType(classOf[String])
val partitionOpt = parser.accepts("partitions", "comma separated list of partition ids. If not specified, it will find offsets for all partitions")
.withRequiredArg
.describedAs("partition ids")
.ofType(classOf[String])
.defaultsTo("")
val timeOpt = parser.accepts("time", "timestamp of the offsets before that")
.withRequiredArg
.describedAs("timestamp/-1(latest)/-2(earliest)")
.ofType(classOf[java.lang.Long])
.defaultsTo(-1)
val nOffsetsOpt = parser.accepts("offsets", "number of offsets returned")
.withRequiredArg
.describedAs("count")
.ofType(classOf[java.lang.Integer])
.defaultsTo(1)
val maxWaitMsOpt = parser.accepts("max-wait-ms", "The max amount of time each fetch request waits.")
.withRequiredArg
.describedAs("ms")
.ofType(classOf[java.lang.Integer])
.defaultsTo(1000)
if(args.length == 0)
CommandLineUtils.printUsageAndDie(parser, "An interactive shell for getting consumer offsets.")
val options = parser.parse(args : _*)
CommandLineUtils.checkRequiredArgs(parser, options, brokerListOpt, topicOpt)
val clientId = "GetOffsetShell"
val brokerList = options.valueOf(brokerListOpt)
ToolsUtils.validatePortOrDie(parser, brokerList)
val metadataTargetBrokers = ClientUtils.parseBrokerList(brokerList)
val topic = options.valueOf(topicOpt)
val partitionList = options.valueOf(partitionOpt)
val time = options.valueOf(timeOpt).longValue
val nOffsets = options.valueOf(nOffsetsOpt).intValue
val maxWaitMs = options.valueOf(maxWaitMsOpt).intValue()
val topicsMetadata = ClientUtils.fetchTopicMetadata(Set(topic), metadataTargetBrokers, clientId, maxWaitMs).topicsMetadata
if(topicsMetadata.size != 1 || !topicsMetadata.head.topic.equals(topic)) {
System.err.println(("Error: no valid topic metadata for topic: %s, " + " probably the topic does not exist, run ").format(topic) +
"kafka-list-topic.sh to verify")
Exit.exit(1)
}
val partitions =
if(partitionList == "") {
topicsMetadata.head.partitionsMetadata.map(_.partitionId)
} else {
partitionList.split(",").map(_.toInt).toSeq
}
partitions.foreach { partitionId =>
val partitionMetadataOpt = topicsMetadata.head.partitionsMetadata.find(_.partitionId == partitionId)
partitionMetadataOpt match {
case Some(metadata) =>
metadata.leader match {
case Some(leader) =>
val consumer = new SimpleConsumer(leader.host, leader.port, 10000, 100000, clientId)
val topicAndPartition = TopicAndPartition(topic, partitionId)
val request = OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(time, nOffsets)))
val offsets = consumer.getOffsetsBefore(request).partitionErrorAndOffsets(topicAndPartition).offsets
println("%s:%d:%s".format(topic, partitionId, offsets.mkString(",")))
case None => System.err.println("Error: partition %d does not have a leader. Skip getting offsets".format(partitionId))
}
case None => System.err.println("Error: partition %d does not exist".format(partitionId))
}
}
}
}
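// Illustrative invocation (the wrapper script name may differ between distributions):
//
//   bin/kafka-run-class.sh kafka.tools.GetOffsetShell \
//     --broker-list localhost:9092 --topic my-topic --time -1
//
// This prints one "topic:partition:offsets" line per partition, e.g. "my-topic:0:42",
// matching the format string used in the partition loop above.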
| themarkypantz/kafka | core/src/main/scala/kafka/tools/GetOffsetShell.scala | Scala | apache-2.0 | 5,320 |
package modules
import com.mohiva.play.silhouette.api.util.PasswordInfo
import com.mohiva.play.silhouette.impl.daos.DelegableAuthInfoDAO
import com.mohiva.play.silhouette.impl.repositories.DelegableAuthInfoRepository
import models.daos.passwordinfo.PasswordInfoDao
import play.api.libs.concurrent.Execution.Implicits._
/**
 * Provides a way of saving authentication info about users, such as their email and their hashed password.
* Created by Matthias Braun on 1/3/2016.
*/
trait AuthInfoServiceModule {
def passwordInfoDao: PasswordInfoDao
lazy val authInfoRepo = new DelegableAuthInfoRepository(passwordInfoDao)
}
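// Minimal usage sketch (illustrative; `MyPasswordInfoDao` is a hypothetical implementation that
// would normally be provided by the persistence layer or dependency injection):
//
//   object MyAuthModule extends AuthInfoServiceModule {
//     lazy val passwordInfoDao: PasswordInfoDao = new MyPasswordInfoDao()
//   }
//   // MyAuthModule.authInfoRepo can then be handed to Silhouette's auth info service.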
| mb720/cvs | app/modules/AuthInfoServiceModule.scala | Scala | bsd-2-clause | 647 |
package org.jetbrains.plugins.scala.lang.psi.light
import com.intellij.psi._
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScAnnotationsHolder
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScMember, ScObject}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScModifierListOwner, ScTypedDefinition}
import org.jetbrains.plugins.scala.lang.psi.light.PsiTypedDefinitionWrapper.DefinitionRole
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.api.{AnyRef, Unit}
/**
* User: Alefas
* Date: 18.02.12
*/
class PsiTypedDefinitionWrapper(val delegate: ScTypedDefinition, isStatic: Boolean, isInterface: Boolean,
role: DefinitionRole.DefinitionRole,
cClass: Option[PsiClass] = None) extends {
val containingClass: PsiClass = {
val result = cClass.getOrElse {
delegate.nameContext match {
case s: ScMember =>
val res = Option(s.containingClass).orElse(s.syntheticContainingClass).orNull
if (isStatic) {
res match {
case o: ScObject => o.fakeCompanionClassOrCompanionClass
case _ => res
}
} else res
case _ => null
}
}
if (result == null) {
val message = "Containing class is null: " + delegate.getContainingFile.getText + "\n" +
"typed Definition: " + delegate.getTextRange.getStartOffset
throw new RuntimeException(message)
}
result
}
val method: PsiMethod = {
val methodText = PsiTypedDefinitionWrapper.methodText(delegate, isStatic, isInterface, role)
LightUtil.createJavaMethod(methodText, containingClass, delegate.getProject)
}
} with PsiMethodWrapper(delegate.getManager, method, containingClass)
with NavigablePsiElementWrapper[ScTypedDefinition] {
override def hasModifierProperty(name: String): Boolean = {
name match {
case "abstract" if isInterface => true
case _ => super.hasModifierProperty(name)
}
}
override def getNameIdentifier: PsiIdentifier = delegate.getNameIdentifier
override def isWritable: Boolean = getContainingFile.isWritable
override def setName(name: String): PsiElement = {
if (role == DefinitionRole.SIMPLE_ROLE) delegate.setName(name)
else this
}
override protected def returnType: ScType = PsiTypedDefinitionWrapper.typeFor(delegate, role)
override protected def parameterListText: String = PsiTypedDefinitionWrapper.parameterListText(delegate, role, None)
}
object PsiTypedDefinitionWrapper {
object DefinitionRole extends Enumeration {
type DefinitionRole = Value
val SIMPLE_ROLE, GETTER, IS_GETTER, SETTER, EQ = Value
def isSetter(role: DefinitionRole): Boolean = role == SETTER || role == EQ
}
import org.jetbrains.plugins.scala.lang.psi.light.PsiTypedDefinitionWrapper.DefinitionRole._
def methodText(b: ScTypedDefinition, isStatic: Boolean, isInterface: Boolean, role: DefinitionRole): String = {
val builder = new StringBuilder
ScalaPsiUtil.nameContext(b) match {
case m: ScModifierListOwner =>
builder.append(JavaConversionUtil.annotationsAndModifiers(m, isStatic))
case _ =>
}
builder.append("java.lang.Object")
builder.append(" ")
val name = role match {
case SIMPLE_ROLE => b.getName
case GETTER => "get" + b.getName.capitalize
case IS_GETTER => "is" + b.getName.capitalize
case SETTER => "set" + b.getName.capitalize
case EQ => b.getName + "_$eq"
}
builder.append(name)
builder.append("()")
val holder = PsiTreeUtil.getContextOfType(b, classOf[ScAnnotationsHolder])
if (holder != null) {
builder.append(LightUtil.getThrowsSection(holder))
}
if (!isInterface)
builder.append(" {}")
else
builder.append(";")
builder.toString()
}
def processWrappersFor(t: ScTypedDefinition, cClass: Option[PsiClass], nodeName: String, isStatic: Boolean, isInterface: Boolean,
processMethod: PsiMethod => Unit, processName: String => Unit = _ => ()): Unit = {
if (nodeName == t.name) {
processMethod(t.getTypedDefinitionWrapper(isStatic, isInterface, role = SIMPLE_ROLE, cClass))
processName(t.name)
if (t.isVar) {
processMethod(t.getTypedDefinitionWrapper(isStatic, isInterface, role = EQ, cClass))
processName(t.name + "_eq")
}
}
t.nameContext match {
case s: ScAnnotationsHolder =>
val beanProperty = ScalaPsiUtil.isBeanProperty(s)
val booleanBeanProperty = ScalaPsiUtil.isBooleanBeanProperty(s)
if (beanProperty) {
if (nodeName == "get" + t.name.capitalize) {
processMethod(t.getTypedDefinitionWrapper(isStatic, isInterface, role = GETTER, cClass))
processName("get" + t.getName.capitalize)
}
if (t.isVar && nodeName == "set" + t.name.capitalize) {
processMethod(t.getTypedDefinitionWrapper(isStatic, isInterface, role = SETTER, cClass))
processName("set" + t.getName.capitalize)
}
} else if (booleanBeanProperty) {
if (nodeName == "is" + t.name.capitalize) {
processMethod(t.getTypedDefinitionWrapper(isStatic, isInterface, role = IS_GETTER, cClass))
processName("is" + t.getName.capitalize)
}
if (t.isVar && nodeName == "set" + t.name.capitalize) {
processMethod(t.getTypedDefinitionWrapper(isStatic, isInterface, role = SETTER, cClass))
processName("set" + t.getName.capitalize)
}
}
case _ =>
}
}
private[light] def parameterListText(td: ScTypedDefinition, role: DefinitionRole, staticTrait: Option[PsiClassWrapper]): String = {
val thisParam = staticTrait.map { trt =>
val qualName = trt.getQualifiedName
qualName.stripSuffix("$class") + " This"
}
val params =
if (!DefinitionRole.isSetter(role)) Nil
else {
val paramType = typeFor(td, DefinitionRole.SIMPLE_ROLE)
val typeText = JavaConversionUtil.typeText(paramType)(td.elementScope)
val name = td.getName
Seq(s"$typeText $name")
}
(thisParam ++: params).mkString("(", ", ", ")")
}
def typeFor(typedDefinition: ScTypedDefinition, role: DefinitionRole): ScType = {
import typedDefinition.projectContext
if (role == SETTER || role == EQ) Unit
else typedDefinition.getType().getOrElse(AnyRef)
}
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/light/PsiTypedDefinitionWrapper.scala | Scala | apache-2.0 | 6,624 |
package play.api.cache.redis.impl
import scala.concurrent.{ExecutionContext, Future}
/**
 * Invocation policy determines whether to wait for the result of an operation or not.
 * This applies only to a limited number of operations; the best examples are `getOrElse`
 * and `getOrFuture`. Both methods first invoke `get` and then, on a miss, compute the `orElse` clause.
 * Finally, `set` is invoked; however, in some scenarios it is not necessary to wait for the
 * result of the `set` operation and the value can be returned earlier. This is the difference
 * between the `Eager` (not waiting) and `Lazy` (waiting) invocation policies.
*/
sealed trait InvocationPolicy {
def invoke[T](f: => Future[Any], thenReturn: T)(implicit context: ExecutionContext): Future[T]
}
object EagerInvocation extends InvocationPolicy {
def invoke[T](f: => Future[Any], thenReturn: T)(implicit context: ExecutionContext) = { f; Future successful thenReturn }
}
object LazyInvocation extends InvocationPolicy {
def invoke[T](f: => Future[Any], thenReturn: T)(implicit context: ExecutionContext) = f.map(_ => thenReturn)
}
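// Illustrative sketch of how a caller would use the two policies (the `setToRedis` helper and
// the surrounding method are assumptions for this example, not part of the file): with
// LazyInvocation the returned future completes only after the write has finished, whereas
// EagerInvocation completes it immediately and lets the write continue in the background.
//
//   def cacheAndReturn[T](key: String, value: T, policy: InvocationPolicy)
//                        (implicit ec: ExecutionContext): Future[T] =
//     policy.invoke(setToRedis(key, value), thenReturn = value)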
| KarelCemus/play-redis | src/main/scala/play/api/cache/redis/impl/InvocationPolicy.scala | Scala | mpl-2.0 | 1,119 |
package com.twitter.finagle.util
import org.scalatest.FunSuite
import com.twitter.finagle.core.util.InetAddressUtil
import java.net.InetAddress
class InetAddressUtilTest extends FunSuite {
test("InetAddressUtil should isPrivateAddress") {
import InetAddressUtil.isPrivateAddress
assert(!isPrivateAddress(InetAddress.getByName("0.0.0.0")))
assert(!isPrivateAddress(InetAddress.getByName("199.59.148.13")))
assert(isPrivateAddress(InetAddress.getByName("10.0.0.0")))
assert(isPrivateAddress(InetAddress.getByName("10.255.255.255")))
assert(isPrivateAddress(InetAddress.getByName("172.16.0.0")))
assert(isPrivateAddress(InetAddress.getByName("172.31.255.255")))
assert(isPrivateAddress(InetAddress.getByName("192.168.0.0")))
assert(isPrivateAddress(InetAddress.getByName("192.168.255.255")))
}
test("InetAddressUtil should getByName") {
import InetAddressUtil.getByName
assert(getByName("69.55.236.117").getHostAddress == "69.55.236.117")
assert(getByName("0.0.0.0").getHostAddress == "0.0.0.0")
assert(getByName("255.0.0.0").getHostAddress == "255.0.0.0")
assert(getByName("0.255.0.0").getHostAddress == "0.255.0.0")
assert(getByName("0.0.255.0").getHostAddress == "0.0.255.0")
assert(getByName("0.0.0.255").getHostAddress == "0.0.0.255")
assert(getByName("255.255.255.255").getHostAddress == "255.255.255.255")
}
}
| luciferous/finagle | finagle-core/src/test/scala/com/twitter/finagle/util/InetAddressUtilTest.scala | Scala | apache-2.0 | 1,394 |
package com.datamountaineer.streamreactor.connect.converters.source
import java.util
import org.apache.kafka.connect.data.Schema
import org.apache.kafka.connect.data.SchemaAndValue
import org.apache.kafka.connect.json.JsonConverter
/**
 * A JSON converter built for resilience: malformed JSON messages are ignored instead of failing the task.
*/
class JsonResilientConverter extends JsonConverter {
override def configure(configs: util.Map[String, _], isKey: Boolean) {
super.configure(configs, isKey)
}
override def fromConnectData(topic: String, schema: Schema, value: Object): Array[Byte] = {
try {
super.fromConnectData(topic, schema, value)
} catch {
case t: Throwable =>
t.printStackTrace()
// Ignore exceptions
null
}
}
override def toConnectData(topic: String, value: Array[Byte]): SchemaAndValue = {
try {
super.toConnectData(topic, value)
} catch {
case t: Throwable =>
t.printStackTrace()
// Ignore exceptions
SchemaAndValue.NULL
}
}
}
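// Illustrative behaviour sketch (assumes the stock Kafka Connect JsonConverter defaults):
//
//   val converter = new JsonResilientConverter
//   converter.configure(new java.util.HashMap[String, String](), false)
//   converter.toConnectData("some-topic", "{not valid json".getBytes)
//   // returns SchemaAndValue.NULL (after printing the stack trace) instead of propagating the error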
| datamountaineer/kafka-connect-common | src/main/scala/com/datamountaineer/streamreactor/connect/converters/source/JsonResilientConverter.scala | Scala | apache-2.0 | 1,058 |
package samples.scalaexchange.step2
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.server._
import akka.stream.ActorMaterializer
import samples.scalaexchange.utils.SampleApp
import scala.io.StdIn
object HttpServiceApp extends SampleApp
with HelloWorldService {
val myExceptionHandler = ExceptionHandler {
case ex: Exception =>
complete {
<html>
<body>
{ex.getMessage}
</body>
</html>
}
}
// our routes:
val route: Route = handleExceptions(myExceptionHandler) {
helloRoutes
}
// start the http server:
val bindingFuture = Http().bindAndHandle(route, "127.0.0.1", 8080)
} | ktoso/akka-scala-exchange | src/main/scala/samples/scalaexchange/step2/HttpServiceApp.scala | Scala | apache-2.0 | 700 |
package com.github.jarlakxen.reactive.ftp
import scala.concurrent._
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl._
import akka.testkit._
import akka.util.ByteString
import org.junit.runner.RunWith
import org.specs2.concurrent.ExecutionEnv
import org.specs2.mutable.SpecificationLike
import org.specs2.runner.JUnitRunner
import org.specs2.specification.AfterAll
@RunWith(classOf[JUnitRunner])
class FtpSpecs(implicit ee: ExecutionEnv) extends TestKit(ActorSystem("FtpProtocolManagerSpec")) with DockerFTPSpec with ImplicitSender with SpecificationLike with AfterAll {
import FtpSpecs._
sequential
import system.dispatcher
implicit val materializer = ActorMaterializer()
override def afterAll(): Unit = {
super.afterAll()
TestKit.shutdownActorSystem(system)
}
"Ftp" should {
"list files by pattern" in {
      val files = Ftp().filesFrom("localhost", ftpPort, "test2", "test", "/", "^.*\\.txt$".r).runWith(sinkRemoteFileNames)
files must be_==(List("file1.txt", "file2.txt")).awaitFor(5 seconds)
}
"download files by pattern" in {
      val filesContent = Ftp().filesFrom("localhost", ftpPort, "test2", "test", "/", "^.*\\.txt$".r).runWith(sinkRemoteFileContents).flatMap(contents => Future.sequence(contents))
filesContent.map(_.map(_.utf8String)) must be_==(List("", "something")).awaitFor(5 seconds)
}
}
}
object FtpSpecs {
val sinkRemoteFileNames =
Flow[Ftp.RemoteFile]
.map(_.name)
.toMat(Sink.fold(List.empty[String])(_ :+ _))(Keep.right)
def sinkRemoteFileContents(implicit materializer: ActorMaterializer) =
Flow[Ftp.RemoteFile]
.map(_.stream.runFold(ByteString.empty)(_ ++ _))
.toMat(Sink.fold(List.empty[Future[ByteString]])(_ :+ _))(Keep.right)
} | Jarlakxen/reactive-ftp | src/test/scala/com/github/jarlakxen/reactive/ftp/FtpSpecs.scala | Scala | apache-2.0 | 1,844 |
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js tools             **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013-2014, LAMP/EPFL   **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.core.tools.javascript
import scala.annotation.tailrec
import scala.collection.mutable
import java.io._
import java.util.regex.Pattern
import java.net.{ URI, URISyntaxException }
import org.scalajs.core.ir.Position
import org.scalajs.core.tools.io._
/** An abstract builder taking IR or JSTrees */
trait JSTreeBuilder {
/** Add a JavaScript tree representing a statement.
* The tree must be a valid JavaScript tree (typically obtained by
* desugaring a full-fledged IR tree).
*/
def addJSTree(tree: Trees.Tree): Unit
/** Completes the builder. */
def complete(): Unit = ()
}
class JSFileBuilder(val name: String,
protected val outputWriter: Writer) extends JSTreeBuilder {
def addLine(line: String): Unit = {
outputWriter.write(line)
outputWriter.write('\\n')
}
def addLines(lines: Seq[String]): Unit =
lines.foreach(addLine)
def addFile(file: VirtualJSFile): Unit =
addPartsOfFile(file)(!_.startsWith("//# sourceMappingURL="))
def addPartsOfFile(file: VirtualJSFile)(selector: String => Boolean): Unit = {
for (line <- file.readLines() if selector(line))
addLine(line)
}
/** Add a JavaScript tree representing a statement.
* The tree must be a valid JavaScript tree (typically obtained by
* desugaring a full-fledged IR tree).
*/
def addJSTree(tree: Trees.Tree): Unit = {
val printer = new Printers.JSTreePrinter(outputWriter)
printer.printTopLevelTree(tree)
// Do not close the printer: we do not have ownership of the writers
}
/** Closes the underlying writer(s).
*/
def closeWriters(): Unit = {
outputWriter.close()
}
}
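// Minimal usage sketch for JSFileBuilder above (illustrative): write two lines of JavaScript
// into an in-memory writer using only the methods defined in this file.
//
//   val out = new java.io.StringWriter
//   val builder = new JSFileBuilder("bundle.js", out)
//   builder.addLine("'use strict';")
//   builder.addLine("console.log('hello');")
//   builder.complete()
//   builder.closeWriters()
//   // out.toString now contains the two lines, each terminated by a newline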
class JSFileBuilderWithSourceMapWriter(n: String, ow: Writer,
protected val sourceMapWriter: SourceMapWriter)
extends JSFileBuilder(n, ow) {
override def addLine(line: String): Unit = {
super.addLine(line)
sourceMapWriter.nextLine()
}
private final val NotSelected = -1
override def addPartsOfFile(file: VirtualJSFile)(
selector: String => Boolean): Unit = {
val br = new BufferedReader(file.reader)
try {
// Select lines, and remember offsets
val offsets = new mutable.ArrayBuffer[Int] // (maybe NotSelected)
val selectedLineLengths = new mutable.ArrayBuffer[Int]
var line: String = br.readLine()
var selectedCount = 0
while (line != null) {
if (selector(line)) {
super.addLine(line) // super call not to advance line in source map
offsets += selectedCount
selectedLineLengths += line.length
selectedCount += 1
} else {
offsets += NotSelected
}
line = br.readLine()
}
/* We ignore a potential source map.
* This happens typically for corejslib.js and other helper files
* written directly in JS.
* We generate a fake line-by-line source map for these on the fly
*/
val sourceFile = file.toURI
for (lineNumber <- 0 until offsets.size) {
val offset = offsets(lineNumber)
if (offset != NotSelected) {
val originalPos = Position(sourceFile, lineNumber, 0)
sourceMapWriter.startNode(0, originalPos, None)
sourceMapWriter.endNode(selectedLineLengths(offset))
sourceMapWriter.nextLine()
}
}
} finally {
br.close()
}
}
override def addJSTree(tree: Trees.Tree): Unit = {
val printer = new Printers.JSTreePrinterWithSourceMap(
outputWriter, sourceMapWriter)
printer.printTopLevelTree(tree)
// Do not close the printer: we do not have ownership of the writers
}
override def complete(): Unit = {
super.complete()
sourceMapWriter.complete()
}
}
class JSFileBuilderWithSourceMap(n: String, ow: Writer,
sourceMapOutputWriter: Writer,
relativizeSourceMapBasePath: Option[URI] = None)
extends JSFileBuilderWithSourceMapWriter(
n, ow,
new SourceMapWriter(sourceMapOutputWriter, n,
relativizeSourceMapBasePath)) {
override def complete(): Unit = {
addLine("//# sourceMappingURL=" + name + ".map")
super.complete()
}
override def closeWriters(): Unit = {
super.closeWriters()
sourceMapOutputWriter.close()
}
}
| mdedetrich/scala-js | tools/shared/src/main/scala/org/scalajs/core/tools/javascript/JSBuilders.scala | Scala | bsd-3-clause | 4,783 |
package scala.collection.mutable
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
import org.junit.Assert._
import scala.tools.testkit.AssertUtil._
@RunWith(classOf[JUnit4])
class BitSetTest {
// Test for scala/bug#8910
@Test def capacityExpansionTest(): Unit = {
val bitSet = BitSet.empty
val size = bitSet.toBitMask.length
bitSet ^= bitSet
assert(bitSet.toBitMask.length == size, "Capacity of bitset changed after ^=")
bitSet |= bitSet
assert(bitSet.toBitMask.length == size, "Capacity of bitset changed after |=")
bitSet &= bitSet
assert(bitSet.toBitMask.length == size, "Capacity of bitset changed after &=")
bitSet &~= bitSet
assert(bitSet.toBitMask.length == size, "Capacity of bitset changed after &~=")
}
@Test def test_SI8917(): Unit = {
val bigBitSet = BitSet(1, 100, 10000)
val littleBitSet = BitSet(100)
bigBitSet &= littleBitSet
assert(!(bigBitSet contains 10000), "&= not applied to the full bitset")
littleBitSet &= bigBitSet
assert(littleBitSet.toBitMask.length < bigBitSet.toBitMask.length, "Needlessly extended the size of bitset on &=")
}
@Test def test_SI8647(): Unit = {
val bs = BitSet()
bs.map(_ + 1) // Just needs to compile
val xs = bs: SortedSet[Int]
xs.map(_ + 1) // Also should compile (did before)
}
@Test def t10164(): Unit = {
val bs = BitSet()
val last = (bs ++ (0 to 128)).last // Just needs not to throw
assert(last == 128)
}
@Test def t10399(): Unit = {
val bsFromEmptyBitMask = BitSet.fromBitMask(Array.empty[Long])
assert(bsFromEmptyBitMask.add(0))
val bsFromEmptyBitMaskNoCopy = BitSet.fromBitMaskNoCopy(Array.empty[Long])
assert(bsFromEmptyBitMaskNoCopy.add(0))
}
@Test def strawman_508: Unit = {
val m = BitSet(1)
assert(m.map(i => i.toLong).isInstanceOf[TreeSet[Long]])
assert(m.map(i => i + 1).isInstanceOf[BitSet])
val im = collection.immutable.BitSet(1)
assert(im.map(i=> i.toLong).isInstanceOf[collection.immutable.TreeSet[Long]])
assert(im.map(i=> i + 1).isInstanceOf[collection.immutable.BitSet])
// SI-10879
assert(m.flatMap(i => Seq(i.toLong)).isInstanceOf[TreeSet[Long]])
assert(m.flatMap(i => Seq(i + 1)).isInstanceOf[BitSet])
assert(im.flatMap(i => Seq(i.toLong)).isInstanceOf[collection.immutable.TreeSet[Long]])
assert(im.flatMap(i => Seq(i + 1)).isInstanceOf[collection.immutable.BitSet])
assert(m.collect { case i => i.toLong }.isInstanceOf[TreeSet[Long]])
assert(m.collect { case i => i + 1 }.isInstanceOf[BitSet])
assert(im.collect { case i => i.toLong }.isInstanceOf[collection.immutable.TreeSet[Long]])
assert(im.collect { case i => i + 1 }.isInstanceOf[collection.immutable.BitSet])
}
@Test def strawman_507: Unit = {
val m = BitSet(1,2,3)
assert(m.collect{case i if i%2 == 1 => i.toLong}.isInstanceOf[TreeSet[Long]])
assert(m.collect{case i if i%2 == 1 => i.toLong} == TreeSet(1L, 3L))
assert(m.collect{case i if i%2 == 1 => i}.isInstanceOf[BitSet])
assert(m.collect{case i if i%2 == 1 => i} == BitSet(1, 3))
val im = collection.immutable.BitSet(1,2,3)
assert(im.collect{case i if i%2 == 1 => i.toLong}.isInstanceOf[collection.immutable.TreeSet[Long]])
assert(im.collect{case i if i%2 == 1 => i.toLong} == collection.immutable.TreeSet(1L, 3L))
assert(im.collect{case i if i%2 == 1 => i}.isInstanceOf[collection.immutable.BitSet])
assert(im.collect{case i if i%2 == 1 => i} == collection.immutable.BitSet(1, 3))
}
@Test def concat(): Unit = {
val a = BitSet(1, 2, 3)
val b = BitSet(2, 4, 6)
assert(a.concat(b) == BitSet(1, 2, 3, 4, 6))
assert(a.union(b) == BitSet(1, 2, 3, 4, 6))
assert(a.concat(BitSet()) == BitSet(1, 2, 3))
assert(BitSet().concat(a) == BitSet(1, 2, 3))
assert(BitSet().concat(BitSet()) == BitSet())
}
@Test def intersect(): Unit = {
val a = BitSet(1, 2, 3)
val b = BitSet(2, 4, 6)
assert(a.intersect(b) == BitSet(2))
assert(a.intersect(BitSet(4, 6)) == BitSet())
assert(a.intersect(BitSet()) == BitSet())
assert(BitSet().intersect(a) == BitSet())
assert(BitSet().intersect(BitSet()) == BitSet())
}
@Test def diff(): Unit = {
val a = BitSet(1, 2, 3)
val b = BitSet(2, 4, 6)
assertEquals(BitSet(1, 3), a.diff(b))
assert(b.diff(a) == BitSet(4, 6))
assert(a.diff(BitSet(4, 6)) == BitSet(1, 2, 3))
assert(a.diff(BitSet()) == BitSet(1, 2, 3))
assert(BitSet().diff(a) == BitSet())
assert(BitSet().diff(BitSet()) == BitSet())
}
@Test def buildFromRange(): Unit = {
import scala.util.chaining._
assert((1 to 1000).to(BitSet) == BitSet().tap(bs => (1 to 1000).foreach(bs.addOne)))
}
}
| martijnhoekstra/scala | test/junit/scala/collection/mutable/BitSetTest.scala | Scala | apache-2.0 | 4,770 |
package org.positronicnet.sample.contacts
import org.positronicnet.ui._
import org.positronicnet.notifications.Actions._
import org.positronicnet.notifications.Future
import org.positronicnet.content.PositronicContentResolver
import android.util.Log
import android.os.Bundle
import android.content.{Context, Intent}
import android.view.{View, LayoutInflater}
class ViewContactActivity
extends AggregatedContactActivity( layoutResourceId = R.layout.view_contact )
{
onCreate {
findView( TR.edit_button ).onClick {
val rawContacts = contactState.rawContacts
val intent = new Intent( this, classOf[ EditContactActivity ])
intent.putExtra( "raw_contacts",
rawContacts.asInstanceOf[ java.io.Serializable ])
startActivity( intent )
// I *never* want to view again after a save here, so...
finish
}
findView( TR.join_split_button ).onClick {
toastLong( "not yet ...")
}
}
def bindContactState = {
val contactSlug = getIntent.getSerializableExtra( "contact" )
val contact = contactSlug.asInstanceOf[ Contact ]
ContactsActivityUiBinder.show( contact, findView( TR.contact_general ) )
findView( TR.contact_data_items ).bind( contactState.aggregatedData )
}
def syncContactState = () // no change...
}
| rst/positronic_net | sample/contacts_app/src/main/scala/ViewContactActivity.scala | Scala | bsd-3-clause | 1,324 |
import scala.tools.partest.ReplTest
object Test extends ReplTest {
override def extraSettings = s"-Yrepl-outdir ${testOutput.path}"
def code = """
case class Bippy(x: Int)
val x = Bippy(1)
$intp.reporter.withoutUnwrapping {
println($intp.showDirectory)
}
"""
}
| lrytz/scala | test/files/run/repl-out-dir.scala | Scala | apache-2.0 | 272 |
import javax.servlet.ServletContext
import com.example.www.servlet.TestServlet
import org.scalatra.LifeCycle
class ScalatraBootstrap extends LifeCycle {
override def init(context: ServletContext) {
context.mount(new TestServlet, "/*")
}
}
| shyknight786/demo-scalatra-angularapp | src/main/scala/ScalatraBootstrap.scala | Scala | apache-2.0 | 253 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.zk
import org.I0Itec.zkclient.ZkClient
import kafka.utils.{ZkUtils, CoreUtils}
import org.junit.{After, Before}
import org.scalatest.junit.JUnitSuite
trait ZooKeeperTestHarness extends JUnitSuite {
var zkPort: Int = -1
var zookeeper: EmbeddedZookeeper = null
var zkClient: ZkClient = null
val zkConnectionTimeout = 6000
val zkSessionTimeout = 6000
def zkConnect: String = "127.0.0.1:" + zkPort
@Before
def setUp() {
zookeeper = new EmbeddedZookeeper()
zkPort = zookeeper.port
zkClient = ZkUtils.createZkClient(zkConnect, zkSessionTimeout, zkConnectionTimeout)
}
@After
def tearDown() {
CoreUtils.swallow(zkClient.close())
CoreUtils.swallow(zookeeper.shutdown())
}
}
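// Typical usage sketch (illustrative; assumes an org.junit.Test import in the test file):
// mixing the harness into a test class gives each test a fresh embedded ZooKeeper plus a
// connected zkClient, both torn down again after the test.
//
//   class MyZkTest extends ZooKeeperTestHarness {
//     @Test
//     def createsPath(): Unit = {
//       zkClient.createPersistent("/some-path")
//       assert(zkClient.exists("/some-path"))
//     }
//   }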
| usakey/kafka | core/src/test/scala/unit/kafka/zk/ZooKeeperTestHarness.scala | Scala | apache-2.0 | 1,534 |
package io.drakon.icarus
import cpw.mods.fml.common.Mod
import cpw.mods.fml.common.Mod.EventHandler
import cpw.mods.fml.common.event.{FMLPostInitializationEvent, FMLInitializationEvent, FMLPreInitializationEvent}
import io.drakon.icarus.jetty.JettyHandler
import io.drakon.icarus.lib.Config
import io.drakon.icarus.prometheus.PrometheusHandler
import org.apache.logging.log4j.LogManager
/**
* Icarus - Prometheus stat tracker for Minecraft Forge servers.
*
* @author Arkan <[email protected]>
*/
@Mod(modid = "icarus", name = "Icarus", version = "@VERSION@", acceptableRemoteVersions = "*", modLanguage = "scala")
object Icarus {
val log = LogManager.getLogger("Icarus")
@EventHandler
def preinit(evt:FMLPreInitializationEvent) {
log.info("Preinit.")
Config.loadConfig(evt.getSuggestedConfigurationFile)
PrometheusHandler.init()
JettyHandler.init()
}
@EventHandler
def init(evt:FMLInitializationEvent) {
log.info("Init.")
}
@EventHandler
def postinit(evt:FMLPostInitializationEvent) {
log.info("Postinit.")
}
}
| Emberwalker/Icarus | src/main/scala/io/drakon/icarus/Icarus.scala | Scala | mit | 1,064 |
/*
@meta {
"processorId": "org.helgoboss.scala_bundle:1.0.0",
"projectId": "org.helgoboss:drop-cms-groovy-request-processor:1.0",
"dependencies": [
"com.weiglewilczek.scala-lang-osgi:scala-library:2.9.1",
"javax.servlet:servlet-api:2.5",
"org.helgoboss:drop-cms-request-processor:1.0",
"org.helgoboss:drop-cms-meta-snippet-evaluator:1.0",
"org.helgoboss:drop-cms-meta-snippet-extractor:1.0",
"org.helgoboss:drop-cms-script-context:1.0",
"org.helgoboss:storage:1.0-SNAPSHOT",
"org.helgoboss:commons-scala-osgi:1.0-SNAPSHOT",
"org.codehaus.groovy:groovy:1.7.3"
],
"transformers": [
"org.helgoboss.my_oss:1.0.0"
]
}
*/
package org.helgoboss.drop_cms_groovy_request_processor
import javax.servlet.http.{ HttpServletRequest, HttpServletResponse }
import org.helgoboss.drop_cms_request_processor.{ RequestProcessor, ProcessingContext }
import org.helgoboss.commons_scala_osgi.ConvenientBundleActivator
import reflect.BeanProperty
import groovy.lang.{ Binding, GroovyShell }
import org.helgoboss.meta_snippet._
import org.helgoboss.registry._
import org.helgoboss.drop_cms_meta_snippet_evaluator._
import org.helgoboss.drop_cms_script_context._
import org.helgoboss.storage._
import org.helgoboss.drop_cms_meta_snippet_extractor._
class Activator extends ConvenientBundleActivator {
whenBundleActive {
whenServicesPresent[StorageReader, MetaSnippetExtractor, Registry[MetaSnippetEvaluator]] {
new GroovyRequestProcessor(_, _, _).providesService[RequestProcessor, MetaSnippetEvaluator]
}
}
}
class GroovyRequestProcessor(
storageReader: StorageReader,
metaSnippetExtractor: MetaSnippetExtractor,
metaSnippetEvaluatorRegistry: Registry[MetaSnippetEvaluator]) extends RequestProcessor with MetaSnippetEvaluator {
val id = "groovy"
def process(c: ProcessingContext) {
// Content type by default text/html
c.response.setContentType("text/html")
val scriptContext = new SimpleRequestProcessingScriptContext(c, storageReader, metaSnippetExtractor, metaSnippetEvaluatorRegistry)
val result = evaluate(scriptContext)
c.response.getWriter.write(result)
}
def evaluate(metaSnippet: MetaSnippet) = {
val scriptContext = new SimpleMetaSnippetEvaluatingScriptContext(metaSnippet, storageReader, metaSnippetExtractor, metaSnippetEvaluatorRegistry)
evaluate(scriptContext)
}
private def evaluate(scriptContext: ScriptContext) = {
val binding = new Binding
binding.setVariable("context", scriptContext)
/* Init Groovy */
val groovyShell = new GroovyShell(binding)
/* Execute script */
groovyShell.evaluate(scriptContext.metaSnippet.content).toString
}
} | helgoboss/drop-cms-groovy-request-processor | org.helgoboss.drop-cms-groovy-request-processor.scala | Scala | mit | 2,734 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io.File
import java.nio.charset.StandardCharsets._
import com.google.common.io.Files
import org.apache.spark._
import org.apache.spark.util.Utils
class HistoryServerArgumentsSuite extends SparkFunSuite {
private val logDir = new File("src/test/resources/spark-events")
private val conf = new SparkConf()
.set("spark.history.fs.logDirectory", logDir.getAbsolutePath)
.set("spark.history.fs.updateInterval", "1")
.set("spark.testing", "true")
test("No Arguments Parsing") {
val argStrings = Array.empty[String]
val hsa = new HistoryServerArguments(conf, argStrings)
assert(conf.get("spark.history.fs.logDirectory") === logDir.getAbsolutePath)
assert(conf.get("spark.history.fs.updateInterval") === "1")
assert(conf.get("spark.testing") === "true")
}
test("Directory Arguments Parsing --dir or -d") {
val argStrings = Array("--dir", "src/test/resources/spark-events1")
val hsa = new HistoryServerArguments(conf, argStrings)
assert(conf.get("spark.history.fs.logDirectory") === "src/test/resources/spark-events1")
}
test("Directory Param can also be set directly") {
val argStrings = Array("src/test/resources/spark-events2")
val hsa = new HistoryServerArguments(conf, argStrings)
assert(conf.get("spark.history.fs.logDirectory") === "src/test/resources/spark-events2")
}
test("Properties File Arguments Parsing --properties-file") {
val tmpDir = Utils.createTempDir()
val outFile = File.createTempFile("test-load-spark-properties", "test", tmpDir)
try {
      Files.write("spark.test.CustomPropertyA blah\n" +
        "spark.test.CustomPropertyB notblah\n", outFile, UTF_8)
val argStrings = Array("--properties-file", outFile.getAbsolutePath)
val hsa = new HistoryServerArguments(conf, argStrings)
assert(conf.get("spark.test.CustomPropertyA") === "blah")
assert(conf.get("spark.test.CustomPropertyB") === "notblah")
} finally {
Utils.deleteRecursively(tmpDir)
}
}
}
| bravo-zhang/spark | core/src/test/scala/org/apache/spark/deploy/history/HistoryServerArgumentsSuite.scala | Scala | apache-2.0 | 2,853 |
package se.lu.nateko.cp.data.test.formats.delimitedheadercsv
import java.io.File
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl.{Sink, StreamConverters}
import org.scalatest.BeforeAndAfterAll
import org.scalatest.funsuite.AnyFunSuite
import se.lu.nateko.cp.data.formats._
import se.lu.nateko.cp.data.formats.bintable.BinTableSink
import se.lu.nateko.cp.data.formats.delimitedheadercsv.SitesDelimitedHeaderCsvStreams
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
class SitesDailyDelimitedHeaderCsvStreamsTests extends AnyFunSuite with BeforeAndAfterAll {
private implicit val system: ActorSystem = ActorSystem("sitesdailydelimitedheadercsvstreamstest")
import system.dispatcher
override def afterAll(): Unit = {
Await.ready(system.terminate(), 3.seconds)
}
private val nRows = 6
private val binTableSink = BinTableSink(
new File(getClass.getResource("/").getFile + "/sites_daily_delimiter.cpb"),
overwrite = true
)
private val formats = ColumnsMetaWithTsCol(
new ColumnsMeta(Seq(
PlainColumn(Iso8601Date, "TIMESTAMP", isOptional = false),
PlainColumn(FloatValue, "SR_IN", isOptional = false),
PlainColumn(FloatValue, "PPFD", isOptional = false),
PlainColumn(FloatValue, "TA", isOptional = true)
)),
"TEMP_UTC_TIMESTAMP_FOR_EXTRACTING_DATES"
)
private val rowsSource = StreamConverters
.fromInputStream(() => getClass.getResourceAsStream("/sites_daily_delimiter.csv"))
.via(TimeSeriesStreams.linesFromUtf8Binary)
.via(new SitesDelimitedHeaderCsvStreams(formats.colsMeta).standardCsvParser(nRows, formats))
test("Parsing a SITES time series with delimited header example") {
val rowsFut = rowsSource.runWith(Sink.seq)
val rows = Await.result(rowsFut, 1.second)
assert(rows.size === nRows)
}
test("Timestamp column is injected into the table") {
val rowFut = rowsSource
.runWith(Sink.head[TableRow])
val row = Await.result(rowFut, 1.second)
assert(row.header.columnNames.contains(formats.timeStampColumn))
assert(row.cells.contains("2014-12-31T23:00:00Z"))
}
test("Parsing a SITES time series with delimited header example and streaming to bintable") {
val converter = new TimeSeriesToBinTableConverter(formats.colsMeta)
val graph = rowsSource
.wireTapMat(Sink.head[TableRow])(_ zip _)
.map(converter.parseRow)
.toMat(binTableSink)(_ zip _)
val ((readResult, firstRow), nRowsWritten) = Await.result(graph.run(), 1.second)
assert(readResult.count === 1135)
assert(firstRow.header.nRows === nRows)
assert(nRowsWritten === nRows)
assert(formats.colsMeta.plainCols.keySet.diff(firstRow.header.columnNames.toSet) ===
Set())
assert(formats.colsMeta.findMissingColumns(firstRow.header.columnNames.toSeq).toSet === Set())
}
}
| ICOS-Carbon-Portal/data | src/test/scala/se/lu/nateko/cp/data/test/formats/delimitedheadercsv/SitesDailyDelimitedHeaderCsvStreamsTests.scala | Scala | gpl-3.0 | 2,795 |
package messages
import messages.parser.MessageConstants._
import messages.parser._
import tests.TestWordSpec
class MessageParserSpec extends TestWordSpec {
val parser = new MessageParser()
"Parser" should {
"parse simple send with text" in {
parser.parse("send to \\"user\\" message \\"sample-text\\"").toOption should equal(
Some(Send(Seq(Content(Entity("sample-text"), MessageOperator)), To(Seq(Entity("user"))))))
}
"fail if message does not contains subject" in {
parser.parse("send message \\"sample-text\\"").toOption should equal(None)
}
"parse send request with multiple users and text" in {
parser.parse("send to \\"user-1\\", \\"user-2\\", \\"user-3\\" message \\"sample-text\\"").toOption should equal(
Some(Send(Seq(Content(Entity("sample-text"), MessageOperator)),
To(Seq(Entity("user-1"), Entity("user-2"), Entity("user-3"))))))
}
"parse send request with multiple users and content" in {
parser
.parse(
"send to \\"user-1\\", \\"user-2\\", \\"user-3\\" " +
"message \\"sample-text-1\\", message \\"sample-text-2\\", " +
"attachment \\"../some-path/some-file\\"")
.toOption should equal(
Some(Send(
Seq(
Content(Entity("sample-text-1"), MessageOperator),
Content(Entity("sample-text-2"), MessageOperator),
Content(Entity("../some-path/some-file"), AttachmentOperator)
),
To(Seq(Entity("user-1"), Entity("user-2"), Entity("user-3")))
)))
}
}
}
| lymr/fun-chat | fun-chat-server/src/test/scala/messages/MessageParserSpec.scala | Scala | mit | 1,569 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.io.IOException
import java.util.Locale
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning._
import org.apache.spark.sql.catalyst.plans.logical.{InsertIntoDir, InsertIntoTable, LogicalPlan,
ScriptTransformation}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.command.{CreateTableCommand, DDLUtils}
import org.apache.spark.sql.execution.datasources.{CreateTable, LogicalRelation}
import org.apache.spark.sql.execution.datasources.parquet.{ParquetFileFormat, ParquetOptions}
import org.apache.spark.sql.hive.execution._
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
/**
* Determine the database, serde/format and schema of the Hive serde table, according to the storage
* properties.
*/
class ResolveHiveSerdeTable(session: SparkSession) extends Rule[LogicalPlan] {
private def determineHiveSerde(table: CatalogTable): CatalogTable = {
if (table.storage.serde.nonEmpty) {
table
} else {
if (table.bucketSpec.isDefined) {
throw new AnalysisException("Creating bucketed Hive serde table is not supported yet.")
}
val defaultStorage = HiveSerDe.getDefaultStorage(session.sessionState.conf)
val options = new HiveOptions(table.storage.properties)
val fileStorage = if (options.fileFormat.isDefined) {
HiveSerDe.sourceToSerDe(options.fileFormat.get) match {
case Some(s) =>
CatalogStorageFormat.empty.copy(
inputFormat = s.inputFormat,
outputFormat = s.outputFormat,
serde = s.serde)
case None =>
throw new IllegalArgumentException(s"invalid fileFormat: '${options.fileFormat.get}'")
}
} else if (options.hasInputOutputFormat) {
CatalogStorageFormat.empty.copy(
inputFormat = options.inputFormat,
outputFormat = options.outputFormat)
} else {
CatalogStorageFormat.empty
}
val rowStorage = if (options.serde.isDefined) {
CatalogStorageFormat.empty.copy(serde = options.serde)
} else {
CatalogStorageFormat.empty
}
val storage = table.storage.copy(
inputFormat = fileStorage.inputFormat.orElse(defaultStorage.inputFormat),
outputFormat = fileStorage.outputFormat.orElse(defaultStorage.outputFormat),
serde = rowStorage.serde.orElse(fileStorage.serde).orElse(defaultStorage.serde),
properties = options.serdeProperties)
table.copy(storage = storage)
}
}
override def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case c @ CreateTable(t, _, query) if DDLUtils.isHiveTable(t) =>
      // Fills in the database name if it is not specified.
val dbName = t.identifier.database.getOrElse(session.catalog.currentDatabase)
val table = t.copy(identifier = t.identifier.copy(database = Some(dbName)))
// Determines the serde/format of Hive tables
val withStorage = determineHiveSerde(table)
// Infers the schema, if empty, because the schema could be determined by Hive
// serde.
val withSchema = if (query.isEmpty) {
val inferred = HiveUtils.inferSchema(withStorage)
if (inferred.schema.length <= 0) {
throw new AnalysisException("Unable to infer the schema. " +
s"The schema specification is required to create the table ${inferred.identifier}.")
}
inferred
} else {
withStorage
}
c.copy(tableDesc = withSchema)
}
}
class DetermineTableStats(session: SparkSession) extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case relation: HiveTableRelation
if DDLUtils.isHiveTable(relation.tableMeta) && relation.tableMeta.stats.isEmpty =>
val table = relation.tableMeta
val sizeInBytes = if (session.sessionState.conf.fallBackToHdfsForStatsEnabled) {
try {
val hadoopConf = session.sessionState.newHadoopConf()
val tablePath = new Path(table.location)
val fs: FileSystem = tablePath.getFileSystem(hadoopConf)
fs.getContentSummary(tablePath).getLength
} catch {
case e: IOException =>
logWarning("Failed to get table size from hdfs.", e)
session.sessionState.conf.defaultSizeInBytes
}
} else {
session.sessionState.conf.defaultSizeInBytes
}
val withStats = table.copy(stats = Some(CatalogStatistics(sizeInBytes = BigInt(sizeInBytes))))
relation.copy(tableMeta = withStats)
}
}
/**
* Replaces generic operations with specific variants that are designed to work with Hive.
*
 * Note that this rule must be run after `PreprocessTableCreation` and
* `PreprocessTableInsertion`.
*/
object HiveAnalysis extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case InsertIntoTable(r: HiveTableRelation, partSpec, query, overwrite, ifPartitionNotExists)
if DDLUtils.isHiveTable(r.tableMeta) =>
InsertIntoHiveTable(r.tableMeta, partSpec, query, overwrite,
ifPartitionNotExists, query.output)
case CreateTable(tableDesc, mode, None) if DDLUtils.isHiveTable(tableDesc) =>
DDLUtils.checkDataColNames(tableDesc)
CreateTableCommand(tableDesc, ignoreIfExists = mode == SaveMode.Ignore)
case CreateTable(tableDesc, mode, Some(query)) if DDLUtils.isHiveTable(tableDesc) =>
DDLUtils.checkDataColNames(tableDesc)
CreateHiveTableAsSelectCommand(tableDesc, query, query.output, mode)
case InsertIntoDir(isLocal, storage, provider, child, overwrite)
if DDLUtils.isHiveTable(provider) =>
val outputPath = new Path(storage.locationUri.get)
if (overwrite) DDLUtils.verifyNotReadPath(child, outputPath)
InsertIntoHiveDirCommand(isLocal, storage, child, overwrite, child.output)
}
}
/**
* Relation conversion from metastore relations to data source relations for better performance
*
* - When writing to non-partitioned Hive-serde Parquet/Orc tables
* - When scanning Hive-serde Parquet/ORC tables
*
* This rule must be run before all other DDL post-hoc resolution rules, i.e.
* `PreprocessTableCreation`, `PreprocessTableInsertion`, `DataSourceAnalysis` and `HiveAnalysis`.
*/
case class RelationConversions(
conf: SQLConf,
sessionCatalog: HiveSessionCatalog) extends Rule[LogicalPlan] {
private def isConvertible(relation: HiveTableRelation): Boolean = {
val serde = relation.tableMeta.storage.serde.getOrElse("").toLowerCase(Locale.ROOT)
serde.contains("parquet") && conf.getConf(HiveUtils.CONVERT_METASTORE_PARQUET) ||
serde.contains("orc") && conf.getConf(HiveUtils.CONVERT_METASTORE_ORC)
}
private def convert(relation: HiveTableRelation): LogicalRelation = {
val serde = relation.tableMeta.storage.serde.getOrElse("").toLowerCase(Locale.ROOT)
if (serde.contains("parquet")) {
val options = relation.tableMeta.storage.properties + (ParquetOptions.MERGE_SCHEMA ->
conf.getConf(HiveUtils.CONVERT_METASTORE_PARQUET_WITH_SCHEMA_MERGING).toString)
sessionCatalog.metastoreCatalog
.convertToLogicalRelation(relation, options, classOf[ParquetFileFormat], "parquet")
} else {
val options = relation.tableMeta.storage.properties
if (conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "native") {
sessionCatalog.metastoreCatalog.convertToLogicalRelation(
relation,
options,
classOf[org.apache.spark.sql.execution.datasources.orc.OrcFileFormat],
"orc")
} else {
sessionCatalog.metastoreCatalog.convertToLogicalRelation(
relation,
options,
classOf[org.apache.spark.sql.hive.orc.OrcFileFormat],
"orc")
}
}
}
override def apply(plan: LogicalPlan): LogicalPlan = {
plan transformUp {
// Write path
case InsertIntoTable(r: HiveTableRelation, partition, query, overwrite, ifPartitionNotExists)
// Inserting into partitioned table is not supported in Parquet/Orc data source (yet).
if query.resolved && DDLUtils.isHiveTable(r.tableMeta) &&
!r.isPartitioned && isConvertible(r) =>
InsertIntoTable(convert(r), partition, query, overwrite, ifPartitionNotExists)
// Read path
case relation: HiveTableRelation
if DDLUtils.isHiveTable(relation.tableMeta) && isConvertible(relation) =>
convert(relation)
}
}
}
private[hive] trait HiveStrategies {
// Possibly being too clever with types here... or not clever enough.
self: SparkPlanner =>
val sparkSession: SparkSession
object Scripts extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case ScriptTransformation(input, script, output, child, ioschema) =>
val hiveIoSchema = HiveScriptIOSchema(ioschema)
ScriptTransformationExec(input, script, output, planLater(child), hiveIoSchema) :: Nil
case _ => Nil
}
}
/**
* Retrieves data using a HiveTableScan. Partition pruning predicates are also detected and
* applied.
*/
object HiveTableScans extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case PhysicalOperation(projectList, predicates, relation: HiveTableRelation) =>
        // Filter out all predicates that only deal with partition keys; these are given to the
        // Hive table scan operator to be used for partition pruning.
val partitionKeyIds = AttributeSet(relation.partitionCols)
val (pruningPredicates, otherPredicates) = predicates.partition { predicate =>
!predicate.references.isEmpty &&
predicate.references.subsetOf(partitionKeyIds)
}
pruneFilterProject(
projectList,
otherPredicates,
identity[Seq[Expression]],
HiveTableScanExec(_, relation, pruningPredicates)(sparkSession)) :: Nil
case _ =>
Nil
}
}
}
| brad-kaiser/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveStrategies.scala | Scala | apache-2.0 | 11,059 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical.Partitioning
import org.apache.spark.sql.catalyst.rules.Rule
/**
* Converts Java-object-based rows into [[UnsafeRow]]s.
*/
case class ConvertToUnsafe(child: SparkPlan) extends UnaryNode {
require(UnsafeProjection.canSupport(child.schema), s"Cannot convert ${child.schema} to Unsafe")
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override def outputsUnsafeRows: Boolean = true
override def canProcessUnsafeRows: Boolean = false
override def canProcessSafeRows: Boolean = true
override protected def doExecute(): RDD[InternalRow] = {
child.execute().mapPartitions { iter =>
val convertToUnsafe = UnsafeProjection.create(child.schema)
iter.map(convertToUnsafe)
}
}
}
/**
* Converts [[UnsafeRow]]s back into Java-object-based rows.
*/
case class ConvertToSafe(child: SparkPlan) extends UnaryNode {
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override def outputsUnsafeRows: Boolean = false
override def canProcessUnsafeRows: Boolean = true
override def canProcessSafeRows: Boolean = false
override protected def doExecute(): RDD[InternalRow] = {
child.execute().mapPartitions { iter =>
val convertToSafe = FromUnsafeProjection(child.output.map(_.dataType))
iter.map(convertToSafe)
}
}
}
private[sql] object EnsureRowFormats extends Rule[SparkPlan] {
private def onlyHandlesSafeRows(operator: SparkPlan): Boolean =
operator.canProcessSafeRows && !operator.canProcessUnsafeRows
private def onlyHandlesUnsafeRows(operator: SparkPlan): Boolean =
operator.canProcessUnsafeRows && !operator.canProcessSafeRows
private def handlesBothSafeAndUnsafeRows(operator: SparkPlan): Boolean =
operator.canProcessSafeRows && operator.canProcessUnsafeRows
override def apply(operator: SparkPlan): SparkPlan = operator.transformUp {
case operator: SparkPlan if onlyHandlesSafeRows(operator) =>
if (operator.children.exists(_.outputsUnsafeRows)) {
operator.withNewChildren {
operator.children.map {
c => if (c.outputsUnsafeRows) ConvertToSafe(c) else c
}
}
} else {
operator
}
case operator: SparkPlan if onlyHandlesUnsafeRows(operator) =>
if (operator.children.exists(!_.outputsUnsafeRows)) {
operator.withNewChildren {
operator.children.map {
c => if (!c.outputsUnsafeRows) ConvertToUnsafe(c) else c
}
}
} else {
operator
}
case operator: SparkPlan if handlesBothSafeAndUnsafeRows(operator) =>
if (operator.children.map(_.outputsUnsafeRows).toSet.size != 1) {
        // If this operator's children produce both unsafe and safe rows,
        // convert everything to unsafe rows if all of their schemas are supported by UnsafeRow.
if (operator.children.forall(c => UnsafeProjection.canSupport(c.schema))) {
operator.withNewChildren {
operator.children.map {
c => if (!c.outputsUnsafeRows) ConvertToUnsafe(c) else c
}
}
} else {
operator.withNewChildren {
operator.children.map {
c => if (c.outputsUnsafeRows) ConvertToSafe(c) else c
}
}
}
} else {
operator
}
}
}
| pronix/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/rowFormatConverters.scala | Scala | apache-2.0 | 4,591 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.serving.core.models
object StreamingContextStatusEnum extends Enumeration {
type Status = Value
val GettingPolicyFromZookeeper, Initializing, Initialized, Error, ConfigurationError, Stopped, Removed = Value
}
| danielcsant/sparta | serving-core/src/main/scala/com/stratio/sparta/serving/core/models/StreamingContextStatusEnum.scala | Scala | apache-2.0 | 861 |
/*
* SVMAdapter.scala is part of grado_informatica_tfg_naturallanguageprocessing (grado_informatica_TFG_NaturalLanguageProcessing).
*
* grado_informatica_TFG_NaturalLanguageProcessing is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* grado_informatica_TFG_NaturalLanguageProcessing is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with grado_informatica_TFG_NaturalLanguageProcessing. If not, see <http://www.gnu.org/licenses/>.
*/
package com.elbauldelprogramador.nlp.svm
import com.elbauldelprogramador.nlp.svm.SVMTypes.DblArray
import libsvm._
import scala.collection.mutable.ArrayBuffer
/**
* Partially borrowed from https://github.com/prnicolas/ScalaMl/blob/master/src/main/scala/org/scalaml/libraries/libsvm/SVMAdapter.scala
*
*/
object SVMAdapter {
type SVMNodes = Array[Array[svm_node]]
def createNode(features: Vector[Int]): Array[svm_node] = {
    // Create a new row for SVM, with the format x -> [ ] -> (2,0.1) (3,0.2) (-1,?),
    // where each tuple corresponds to a feature index and its value.
val newNode = new Array[svm_node](features.size)
features.zipWithIndex.foreach { case (f, i) =>
val node = new svm_node
node.index = f
node.value = 1.0
newNode(i) = node
}
newNode
}
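  // Illustrative sketch: for a feature vector such as Vector(2, 5, 7), createNode produces three
  // svm_node entries, (index 2, value 1.0), (index 5, value 1.0) and (index 7, value 1.0), i.e. a
  // binary sparse representation where listing a feature index implies the value 1.0.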
def trainSVM(problem: SVMProblem, param: svm_parameter): svm_model =
svm.svm_train(problem.problem, param)
def predictSVM(model: svm_model, x: Vector[Int]): Double =
svm.svm_predict(model, toNodes(x))
private def toNodes(x: Vector[Int]): Array[svm_node] =
x.view.zipWithIndex./:(new ArrayBuffer[svm_node])((xs, f) => {
val node = new svm_node
node.index = f._1
node.value = 1.0
xs.append(node)
xs
}).toArray
class SVMProblem(numObs: Int, labels: DblArray) {
val problem = new svm_problem
    // Size of the problem (how many rows of data we have)
problem.l = numObs
// Values of each class for the rows of data
problem.y = labels
    // Feature vectors, stored in sparse form (size l x nFeatures)
problem.x = new SVMNodes(numObs)
def update(n: Int, node: Array[svm_node]): Unit = {
problem.x(n) = node
}
}
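  // Usage sketch (hypothetical values): new SVMProblem(3, Array(1.0, -1.0, 1.0)) sets problem.l = 3
  // and problem.y to the given labels; each subsequent update(n, createNode(...)) call fills row n
  // of the sparse feature matrix problem.x before the problem is handed to trainSVM.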
}
| algui91/NLP_Dependency_Parsing | src/main/scala/com/elbauldelprogramador/nlp/svm/SVMAdapter.scala | Scala | gpl-3.0 | 2,669 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009 Mark Harrah
*/
package xsbt
import xsbti.Logger
import scala.tools.nsc.{GenericRunnerCommand, Interpreter, InterpreterLoop, ObjectRunner, Settings}
import scala.tools.nsc.interpreter.InteractiveReader
import scala.tools.nsc.reporters.Reporter
import scala.tools.nsc.util.ClassPath
class ConsoleInterface
{
def run(args: Array[String], bootClasspathString: String, classpathString: String, initialCommands: String, cleanupCommands: String, loader: ClassLoader, bindNames: Array[String], bindValues: Array[Any], log: Logger)
{
val options = args.toList
lazy val interpreterSettings = MakeSettings.sync(options, log)
val compilerSettings = MakeSettings.sync(options, log)
if(!bootClasspathString.isEmpty)
compilerSettings.bootclasspath.value = bootClasspathString
compilerSettings.classpath.value = classpathString
log.info(Message("Starting scala interpreter..."))
log.debug(Message(" Boot classpath: " + compilerSettings.bootclasspath.value))
log.debug(Message(" Classpath: " + compilerSettings.classpath.value))
log.info(Message(""))
val loop = new InterpreterLoop {
override def createInterpreter() = {
if(loader ne null)
{
in = InteractiveReader.createDefault()
interpreter = new Interpreter(settings)
{
override protected def parentClassLoader = if(loader eq null) super.parentClassLoader else loader
override protected def newCompiler(settings: Settings, reporter: Reporter) = super.newCompiler(compilerSettings, reporter)
}
interpreter.setContextClassLoader()
}
else
super.createInterpreter()
for( (id, value) <- bindNames zip bindValues)
interpreter.beQuietDuring(interpreter.bind(id, value.asInstanceOf[AnyRef].getClass.getName, value))
if(!initialCommands.isEmpty)
interpreter.interpret(initialCommands)
}
override def closeInterpreter()
{
if(!cleanupCommands.isEmpty)
interpreter.interpret(cleanupCommands)
super.closeInterpreter()
}
}
loop.main(if(loader eq null) compilerSettings else interpreterSettings)
}
}
object MakeSettings
{
def apply(args: List[String], log: Logger) =
{
val command = new GenericRunnerCommand(args, message => log.error(Message(message)))
if(command.ok)
command.settings
else
throw new InterfaceCompileFailed(Array(), Array(), command.usageMsg)
}
def sync(options: List[String], log: Logger) =
{
val settings = apply(options, log)
// -Yrepl-sync is only in 2.9.1+
final class Compat {
def Yreplsync = settings.BooleanSetting("-Yrepl-sync", "For compatibility only.")
}
implicit def compat(s: Settings): Compat = new Compat
settings.Yreplsync.value = true
settings
}
}
| olove/xsbt | compile/interface/src/main/scala/xsbt/ConsoleInterface.scala | Scala | bsd-3-clause | 2,753 |
package pl.touk.nussknacker.engine.requestresponse
import cats.Monad
import cats.data.Validated.{Invalid, Valid}
import cats.data.{NonEmptyList, Validated, ValidatedNel, WriterT}
import cats.implicits.toFunctorOps
import io.circe.Json
import io.circe.syntax._
import pl.touk.nussknacker.engine.Interpreter.InterpreterShape
import pl.touk.nussknacker.engine.ModelData
import pl.touk.nussknacker.engine.api._
import pl.touk.nussknacker.engine.api.context.ProcessCompilationError
import pl.touk.nussknacker.engine.api.process.ComponentUseCase
import pl.touk.nussknacker.engine.api.typed.typing
import pl.touk.nussknacker.engine.graph.EspProcess
import pl.touk.nussknacker.engine.lite.ScenarioInterpreterFactory.ScenarioInterpreterWithLifecycle
import pl.touk.nussknacker.engine.lite.TestRunner.EffectUnwrapper
import pl.touk.nussknacker.engine.lite.api.commonTypes.ErrorType
import pl.touk.nussknacker.engine.lite.api.customComponentTypes.CapabilityTransformer
import pl.touk.nussknacker.engine.lite.api.interpreterTypes.{EndResult, ScenarioInputBatch}
import pl.touk.nussknacker.engine.lite.api.runtimecontext.{LiteEngineRuntimeContext, LiteEngineRuntimeContextPreparer}
import pl.touk.nussknacker.engine.lite.{InterpreterTestRunner, ScenarioInterpreterFactory, TestRunner}
import pl.touk.nussknacker.engine.requestresponse.api.RequestResponseSource
import pl.touk.nussknacker.engine.requestresponse.openapi.RequestResponseOpenApiGenerator
import pl.touk.nussknacker.engine.requestresponse.openapi.RequestResponseOpenApiGenerator.OutputSchemaProperty
import pl.touk.nussknacker.engine.resultcollector.ResultCollector
import scala.concurrent.ExecutionContext
import scala.language.higherKinds
/*
  This is the request-response-specific part of the engine:
  - Future as the effect type
  - only one source, with a single input variable
  - if any error occurs, the whole computation fails
  - handling of the OpenAPI definition
*/
object RequestResponseInterpreter {
type RequestResponseResultType[T] = ValidatedNel[ErrorType, T]
def apply[Effect[_]:Monad:InterpreterShape:CapabilityTransformer](process: EspProcess, processVersion: ProcessVersion, context: LiteEngineRuntimeContextPreparer, modelData: ModelData,
additionalListeners: List[ProcessListener], resultCollector: ResultCollector, componentUseCase: ComponentUseCase)
(implicit ec: ExecutionContext):
Validated[NonEmptyList[ProcessCompilationError], RequestResponseScenarioInterpreter[Effect]] = {
ScenarioInterpreterFactory.createInterpreter[Effect, Any, AnyRef](process, modelData, additionalListeners, resultCollector, componentUseCase)
.map(new RequestResponseScenarioInterpreter(context.prepare(JobData(process.metaData, processVersion)), _))
}
// TODO: Some smarter type in Input than Context?
class RequestResponseScenarioInterpreter[Effect[_]:Monad](val context: LiteEngineRuntimeContext,
statelessScenarioInterpreter: ScenarioInterpreterWithLifecycle[Effect, Any, AnyRef])
(implicit ec: ExecutionContext) extends AutoCloseable {
val id: String = context.jobData.metaData.id
val sinkTypes: Map[NodeId, typing.TypingResult] = statelessScenarioInterpreter.sinkTypes
val (sourceId, source) = statelessScenarioInterpreter.sources.toList match {
case Nil => throw new IllegalArgumentException("No source found")
case (sourceId, source) :: Nil => (sourceId, source.asInstanceOf[RequestResponseSource[Any]])
case more => throw new IllegalArgumentException(s"More than one source for request-response: ${more.map(_._1)}")
}
private def invoke(input: Any): Effect[ValidatedNel[ErrorType, List[EndResult[AnyRef]]]] = {
val inputBatch = ScenarioInputBatch((sourceId -> input) :: Nil)
statelessScenarioInterpreter.invoke(inputBatch).map { case WriterT((errors, results)) =>
NonEmptyList.fromList(errors).map(Invalid(_)).getOrElse(Valid(results))
}
}
def invokeToOutput(input: Any): Effect[ValidatedNel[ErrorType, List[Any]]] = {
invoke(input).map(_.map(_.map(_.result)))
}
def open(): Unit = statelessScenarioInterpreter.open(context)
def close(): Unit = {
statelessScenarioInterpreter.close()
context.close()
}
/*
* TODO : move inputSchema and outputSchema to one place
* It is better to have both schemas in one place (properties or some new/custom place)
* */
def getSchemaOutputProperty: Json = {
context.jobData.metaData.additionalFields.flatMap(_.properties.get(OutputSchemaProperty)) match {
case None => Map("type" -> "object".asJson, "properties" -> Json.Null).asJson
case Some(outputSchemaStr) => CirceUtil.decodeJsonUnsafe[Json](outputSchemaStr, "Provided json is not valid")
}
}
def generateOpenApiDefinition(): Option[Json] = {
for {
sourceDefinition <- source.openApiDefinition
responseDefinition = getSchemaOutputProperty
} yield {
RequestResponseOpenApiGenerator.generateScenarioDefinition(
id,
sourceDefinition.definition,
responseDefinition,
sourceDefinition.description,
sourceDefinition.tags
)
}
}
}
def testRunner[Effect[_]:InterpreterShape:CapabilityTransformer:EffectUnwrapper]: TestRunner = new InterpreterTestRunner[Effect, Context, AnyRef]
}
| TouK/nussknacker | engine/lite/request-response/runtime/src/main/scala/pl/touk/nussknacker/engine/requestresponse/RequestResponseInterpreter.scala | Scala | apache-2.0 | 5,404 |
package lila.socket
import akka.actor.ActorRef
import akka.pattern.{ ask, pipe }
import play.api.libs.iteratee.{ Iteratee, Enumerator }
import play.api.libs.json._
import actorApi._
import lila.common.PimpedJson._
import lila.hub.actorApi.relation.ReloadOnlineFriends
import makeTimeout.large
object Handler {
type Controller = PartialFunction[(String, JsObject), Unit]
type Connecter = PartialFunction[Any, (Controller, JsEnumerator, SocketMember)]
def apply(
hub: lila.hub.Env,
socket: ActorRef,
uid: String,
join: Any,
userId: Option[String])(connecter: Connecter): Fu[JsSocketHandler] = {
def baseController(member: SocketMember): Controller = {
case ("p", _) => socket ! Ping(uid)
case ("following_onlines", _) => userId foreach { u =>
hub.actor.relation ! ReloadOnlineFriends(u)
}
case ("anaMove", o) =>
AnaMove parse o foreach { anaMove =>
anaMove.step match {
case scalaz.Success(step) =>
member push lila.socket.Socket.makeMessage("step", Json.obj(
"step" -> step.toJson,
"path" -> anaMove.path
))
case scalaz.Failure(err) =>
member push lila.socket.Socket.makeMessage("stepFailure", err.toString)
}
}
case _ => // logwarn("Unhandled msg: " + msg)
}
def iteratee(controller: Controller, member: SocketMember): JsIteratee = {
val control = controller orElse baseController(member)
Iteratee.foreach[JsValue](jsv =>
jsv.asOpt[JsObject] foreach { obj =>
obj str "t" foreach { t =>
control.lift(t -> obj)
}
}
).map(_ => socket ! Quit(uid))
}
socket ? join map connecter map {
case (controller, enum, member) => iteratee(controller, member) -> enum
}
}
}
| danilovsergey/i-bur | modules/socket/src/main/Handler.scala | Scala | mit | 1,862 |
/*
* Copyright (c) 2014-2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package adapters
package registry
// Iglu
import iglu.client.{
SchemaKey,
Resolver
}
// Scalaz
import scalaz._
import Scalaz._
// Joda-Time
import org.joda.time.DateTime
// json4s
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
// This project
import loaders.{
CollectorPayload,
CollectorContext
}
import utils.ConversionUtils
/**
* Transforms a Cloudfront access log into raw events
*/
object CloudfrontAccessLogAdapter {
/**
* Adapter for Cloudfront web distribution access log files
*/
object WebDistribution extends Adapter {
private val FieldNames = List(
"dateTime",
"xEdgeLocation",
"scBytes",
"cIp",
"csMethod",
"csHost",
"csUriStem",
"scStatus",
"csReferer",
"csUserAgent",
"csUriQuery",
"csCookie",
"xEdgeResultType",
"xEdgeRequestId",
"xHostHeader",
"csProtocol",
"csBytes",
"timeTaken",
"xForwardedFor",
"sslProtocol",
"sslCipher",
"xEdgeResponseResultType"
)
// Tracker version for Cloudfront access log
private val TrackerVersion = "com.amazon.aws.cloudfront/wd_access_log"
/**
* Converts a CollectorPayload instance into raw events.
* Chooses a wd_access_log schema version based on the length of the TSV
* Extracts the collector timestamp and IP address from the TSV
*
* @param payload Generated by the TsvLoader. Its body is the raw TSV.
* @param resolver (implicit) The Iglu resolver used for
* schema lookup and validation. Not used
* @return a validation boxing either a NEL of raw events or a NEL of failure strings
*/
def toRawEvents(payload: CollectorPayload)(implicit resolver: Resolver): ValidatedRawEvents =
payload.body match {
case Some(p) => {
          val fields = p.split("\t", -1)
val schemaVersion = fields.size match {
case 12 => "1-0-0".successNel // Before 12 Sep 2012
case 15 => "1-0-1".successNel // 12 Sep 2012
case 18 => "1-0-2".successNel // 21 Oct 2013
case 19 => "1-0-3".successNel // 29 Apr 2014
case 23 => "1-0-4".successNel // 01 Jul 2015
case n => s"Access log TSV line contained $n fields, expected 12, 15, 18, 19, or 23".failNel
}
schemaVersion.flatMap(v => {
// Combine the first two fields into a timestamp
val schemaCompatibleFields = "%sT%sZ".format(fields(0), fields(1)) :: fields.toList.tail.tail
// Attempt to build the json, accumulating errors from unparseable fields
def buildJson(errors: List[String], fields: List[(String, String)], json: JObject): (List[String], JObject) = {
fields match {
case Nil => (errors, json)
case head :: tail => head match {
case (name, "") => buildJson(errors, tail, json ~ (name, null))
case ("timeTaken", field) => try {
buildJson(errors, tail, json ~ ("timeTaken", field.toDouble))
} catch {
case e: NumberFormatException => buildJson("Field [timeTaken]: cannot convert [%s] to Double".format(field) :: errors, tail, json)
}
case (name, field) if name == "csBytes" || name == "scBytes" => try {
buildJson(errors, tail, json ~ (name, field.toInt))
} catch {
case e: NumberFormatException => buildJson("Field [%s]: cannot convert [%s] to Int".format(name, field) :: errors, tail, json)
}
case (name, field) if name == "csReferer" || name == "csUserAgent" => ConversionUtils.doubleDecode(name, field).fold(
e => buildJson(e :: errors, tail, json),
s => buildJson(errors, tail, json ~ (name, s))
)
case ("csUriQuery", field) => buildJson(errors, tail, json ~ ("csUriQuery", ConversionUtils.singleEncodePcts(field)))
case (name, field) => buildJson(errors, tail, json ~ (name, field))
}
}
}
val (errors, ueJson) = buildJson(Nil, FieldNames zip schemaCompatibleFields, JObject())
val failures = errors match {
case Nil => None.successNel
case h :: t => (NonEmptyList(h) :::> t).fail // list to nonemptylist
}
val validatedTstamp = toTimestamp(fields(0), fields(1)).map(Some(_)).toValidationNel
(validatedTstamp |@| failures) {(tstamp, e) =>
val ip = schemaCompatibleFields(3) match {
case "" => None
case nonempty => nonempty.some
}
val qsParams: Map[String, String] = schemaCompatibleFields(8) match {
case "" => Map()
case url => Map("url" -> url)
}
val userAgent = schemaCompatibleFields(9) match {
case "" => None
case nonempty => ConversionUtils.singleEncodePcts(nonempty).some
}
val parameters = toUnstructEventParams(
TrackerVersion,
qsParams,
s"iglu:com.amazon.aws.cloudfront/wd_access_log/jsonschema/$v",
ueJson,
"srv"
)
NonEmptyList(RawEvent(
api = payload.api,
parameters = parameters,
contentType = payload.contentType,
source = payload.source,
context = CollectorContext(tstamp, ip, userAgent, None, Nil, None)
))
}
})
}
case None => "Cloudfront TSV has no body - this should be impossible".failNel
}
/**
* Converts a CloudFront log-format date and
* a time to a timestamp.
*
* @param date The CloudFront log-format date
* @param time The CloudFront log-format time
* @return the timestamp as a Joda DateTime
* or an error String, all wrapped in
* a Scalaz Validation
*/
def toTimestamp(date: String, time: String): Validation[String, DateTime] =
try {
DateTime.parse("%sT%s+00:00".format(date, time)).success // Construct a UTC ISO date from CloudFront date and time
} catch {
case e => "Unexpected exception converting Cloudfront web distribution access log date [%s] and time [%s] to timestamp: [%s]".format(date, time, e.getMessage).fail
}
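    // Illustrative sketch (hypothetical values): toTimestamp("2015-07-01", "12:30:00") should yield
    // a successful Validation wrapping the UTC instant 2015-07-01T12:30:00Z, while an unparseable
    // pair such as ("not-a-date", "12:30:00") takes the failure branch above.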
}
}
| mdavid/lessig-bigdata | lib/snowplow/3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/adapters/registry/CloudfrontAccessLogAdapter.scala | Scala | mit | 7,497 |
package com.plasmaconduit.framework.middleware
import com.plasmaconduit.edge.http._
import com.plasmaconduit.framework.mvc.controllers.AdhocController
import com.plasmaconduit.framework.mvc.{Controller, Middleware}
import com.plasmaconduit.waterhouse.{Hash, HashAlgorithm}
import io.netty.buffer.{ByteBuf, Unpooled}
final case class EtagCacheMiddleware(algorithm: HashAlgorithm) extends Middleware {
def intercept(next: Controller): Controller = AdhocController({ implicit request =>
next.action(request).flatMap({response =>
val body = toByteArray(response.body)
val hash = Hash.digest(algorithm, body.toArray).toString()
val matched = request
.headers
.get("If-None-Match")
.filter(_ == hash)
matched match {
case Some(x) => response.withStatus(NotModified).withBody("")
case None => response.withBody(Unpooled.wrappedBuffer(body)).withHeader(Etag(hash))
}
})
})
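  // Usage sketch (hypothetical hash value): if the rendered body hashes to "abc123", a request
  // carrying `If-None-Match: abc123` is answered with 304 Not Modified and an empty body, while
  // any other request receives the full body together with an `Etag: abc123` header.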
def toByteArray(buf: ByteBuf): Array[Byte] = {
if (buf.hasArray) {
buf.array()
} else {
val bytes = new Array[Byte](buf.readableBytes())
buf.getBytes(buf.readerIndex(), bytes)
bytes
}
}
}
| plasmaconduit/etag-cache-middleware | src/main/scala/com/plasmaconduit/framework/middleware/EtagCacheMiddleware.scala | Scala | mit | 1,189 |
package com.kostassoid.materialist
case class Route(source: Source, target: Target, operationPredicate: StorageOperation ⇒ Boolean) | Kostassoid/materialist | src/main/scala/com/kostassoid/materialist/Route.scala | Scala | apache-2.0 | 134 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic
import org.scalatest._
import scala.util.Failure
import scala.util.Success
import scala.util.Try
// SKIP-SCALATESTJS,NATIVE-START
import SharedHelpers.serializeRoundtrip
// SKIP-SCALATESTJS,NATIVE-END
class OrSpec extends UnitSpec with Accumulation with TypeCheckedTripleEquals {
def isRound(i: Int): Validation[ErrorMessage] =
if (i % 10 != 0) Fail(i + " was not a round number") else Pass
def isDivBy3(i: Int): Validation[ErrorMessage] =
if (i % 3 != 0) Fail(i + " was not divisible by 3") else Pass
"An Or" can "be either Good or Bad" in {
Good(7).isGood shouldBe true
Bad("oops").isBad shouldBe true
Good(7) shouldBe an [Or[_, _]]
Good(7) shouldBe an [Good[_]]
Bad("oops") shouldBe an [Or[_, _]]
Bad("oops") shouldBe an [Bad[_]]
}
it can "have its non-inferred type widened by an apply call with a type param" in {
/*
scala> Good[Int].orBad("hi")
res0: org.scalautils.Bad[Int,String] = Bad(hi)
scala> Good(3).orBad[String]
res1: org.scalautils.Good[Int,String] = Good(3)
scala> Good(3).orBad[ErrorMessage]
res2: org.scalautils.Good[Int,org.scalautils.ErrorMessage] = Good(3)
scala> Good(3).orBad("oops")
<console>:11: error: type mismatch;
found : String("oops")
required: <:<[Nothing,?]
Good(3).orBad("oops")
^
scala> Good[Int].orBad[String]
<console>:11: error: missing arguments for method orBad in class GoodieGoodieGumdrop;
follow this method with `_' if you want to treat it as a partially applied function
Good[Int].orBad[String]
^
*/
// If the expected type is known, then you can just say Good or Bad:
Good(3) shouldBe Good(3)
Bad("oops") shouldBe Bad("oops")
// But if the expected type is not known, the inferred type of the other side will be Nothing:
// Good(3) will be a Good[Int, Nothing]
// Bad("oops") will be a Bad[Nothing, String]
// If you want to specify a more specific type than Nothing, you can use this syntax:
Good(3).orBad[String] shouldBe Good(3)
Good[Int].orBad("oops") shouldBe Bad("oops")
// You could also do it this way:
Good[Int](3) shouldBe Good(3)
Bad[String]("oops") shouldBe Bad("oops")
// But that requires that you also give a type that would be inferred from the value. This
// would only be necessary if you wanted a more general type than that which
// would otherwise be inferred from the given value, such as:
Good[AnyVal](3) shouldBe Good(3)
Bad[AnyRef]("oops") shouldBe Bad("oops")
// In that case, though, I recommend a type ascription, because I think it is easier to read:
(Good(3): AnyVal Or String) shouldBe Good(3)
(Bad("oops"): Int Or AnyRef) shouldBe Bad("oops")
}
it can "be used in infix notation" in {
def div(a: Int, b: Int): Int Or ArithmeticException = {
// Division by zero results in SIGFPE (Floating point exception)
// SKIP-SCALATESTNATIVE-START
try Good(a / b)
catch { case ae: ArithmeticException => Bad(ae) }
// SKIP-SCALATESTNATIVE-END
if (b == 0)
Bad(new ArithmeticException("/ by zero"))
else
Good(a / b)
}
div(1, 1) shouldEqual Good(1)
div(6, 2) shouldEqual Good(3)
div(6, 2) shouldEqual Good(3)
div(1, 0).isBad shouldBe true
val ae = div(1, 0) match {
case Bad(ae) => ae
case result => fail("didn't get an Bad" + result)
}
ae should have message "/ by zero"
}
it can "be used with map" in {
Good(8) map (_ + 1) should equal (Good(9))
Good[Int].orBad("eight") map (_ + 1) should equal (Bad("eight"))
}
it can "be used with badMap" in {
Good(8).orBad[ErrorMessage] badMap (_.toUpperCase) should equal (Good(8))
Good[Int].orBad("eight") badMap (_.toUpperCase) should equal (Bad("EIGHT"))
}
it can "be used with recover" in {
Good(8).orBad[Throwable] recover {
case iae: IllegalArgumentException => 9
} should equal (Good(8))
Good[Int].orBad(new IllegalArgumentException) recover {
case iae: IllegalArgumentException => 9
} should equal (Good(9))
}
it can "be used with recoverWith" in {
Good(8).orBad[Throwable] recoverWith {
case iae: IllegalArgumentException => Good(9)
} should equal (Good(8))
Good[Int].orBad(new IllegalArgumentException) recoverWith {
case iae: IllegalArgumentException => Good(9)
} should equal (Good(9))
}
it can "be used with foreach" in {
var vCount = 0
var eCount = 0
Good(8) foreach { vCount += _ }
vCount should equal (8)
Good[Int].orBad("eight") foreach { eCount += _ }
eCount should equal (0)
}
it can "be used with flatMap" in {
Good(8).orBad[String] flatMap ((x: Int) => Good(x + 1)) should equal (Good(9))
Good[Int].orBad("eight") flatMap ((x: Int) => Good(x + 1)) should equal (Bad("eight"))
}
it can "be used with filter" in {
Good(12).filter(isRound) shouldBe Bad("12 was not a round number")
Good(10).filter(isRound) shouldBe Good(10)
Good[Int].orBad(12).filter(isRound) shouldBe Bad(12)
(for (i <- Good(10) if isRound(i)) yield i) shouldBe Good(10)
(for (i <- Good(12) if isRound(i)) yield i) shouldBe Bad("12 was not a round number")
(for (i <- Good(12) if isRound(i)) yield i) shouldBe Bad("12 was not a round number")
(for (i <- Good(30) if isRound(i) && isDivBy3(i)) yield i) shouldBe Good(30)
(for (i <- Good(10) if isRound(i) && isDivBy3(i)) yield i) shouldBe Bad("10 was not divisible by 3")
(for (i <- Good(3) if isRound(i) && isDivBy3(i)) yield i) shouldBe Bad("3 was not a round number")
(for (i <- Good(2) if isRound(i) && isDivBy3(i)) yield i) shouldBe Bad("2 was not a round number")
}
it can "be used with exists" in {
Good(12).exists(_ == 12) shouldBe true
Good(12).exists(_ == 13) shouldBe false
Good[Int].orBad(12).exists(_ == 12) shouldBe false
}
it can "be used with forall" in {
Good(12).forall(_ > 10) shouldBe true
Good(7).forall(_ > 10) shouldBe false
Good[Int].orBad(12).forall(_ > 10) shouldBe true
Good[Int].orBad(7).forall(_ > 10) shouldBe true
}
it can "be used with getOrElse, which takes a by-name" in {
Good(12).getOrElse(17) shouldBe 12
Good[Int].orBad(12).getOrElse(17) shouldBe 17
var x = 16 // should not increment if Good
Good(12) getOrElse { x += 1; x } shouldBe 12
x shouldBe 16
Good[Int].orBad(12) getOrElse { x += 1; x } shouldBe 17
x shouldBe 17
}
it can "be used with orElse, which takes a by-name" in {
Good(12).orElse(Good(13)) shouldBe Good(12)
Bad(12).orElse(Good(13)) shouldBe Good(13)
Good(12).orElse(Bad(13)) shouldBe Good(12)
Bad(12).orElse(Bad(13)) shouldBe Bad(13)
var x = 16 // should not increment if Good
Good(12) orElse { x += 1; Good(x) } shouldBe Good(12)
x shouldBe 16
Good[Int].orBad(12) orElse { x += 1; Good(x) } shouldBe Good(17)
x shouldBe 17
var y = 16 // should not increment if Good
Good(12) orElse { y += 1; Bad(y) } shouldBe Good(12)
y shouldBe 16
Good[Int].orBad(12) orElse { y += 1; Bad(y) } shouldBe Bad(17)
y shouldBe 17
}
it can "be used with toOption" in {
Good(12).toOption shouldBe Some(12)
Good[Int].orBad(12).toOption shouldBe None
}
it can "be used with toSeq" in {
Good(12).toSeq shouldEqual Seq(12)
Good[Int].orBad(12).toSeq shouldEqual Seq.empty
}
// toArray, toBuffer, toIndexedSeq, toIterable, toIterator, toList,
// toSeq, toStream, toTraversable, toVector
it can "be used with toEither" in {
Good(12).toEither shouldBe Right(12)
Bad(12).toEither shouldBe Left(12)
}
it can "be used with accumulating" in {
Good(12).orBad[Int].accumulating shouldBe Good(12).orBad[Every[Int]]
Good[Int].orBad(12).accumulating shouldBe Good[Int].orBad(One(12))
}
it can "be used with toTry, if the error type is a subtype of Throwable" in {
Good(12).orBad[Throwable].toTry shouldBe Success(12)
Good(12).orBad[RuntimeException].toTry shouldBe Success(12)
val ex = new RuntimeException("oops")
Good[Int].orBad(ex).toTry shouldBe Failure(ex)
Good[Int].orBad(ex).toTry shouldBe Failure(ex)
// Does not compile: Good[Int, Int](12).toTry shouldBe Success(12)
}
it can "be used with transform" in {
Good(12).orBad[String].transform((i: Int) => Good(i + 1), (s: String) => Bad(s.toUpperCase)) should === (Good(13))
Good[Int].orBad("hi").transform((i: Int) => Good(i + 1), (s: String) => Bad(s.toUpperCase)) should === (Bad("HI"))
Good(12).orBad[String].transform((i: Int) => Bad(i + 1), (s: String) => Good(s.toUpperCase)) should === (Bad(13))
Good[Int].orBad("hi").transform((i: Int) => Bad(i + 1), (s: String) => Good(s.toUpperCase)) should === (Good("HI"))
}
it can "be used with swap" in {
Good(12).orBad[String].swap should === (Good[String].orBad(12))
Good[Int].orBad("hi").swap should === (Good("hi").orBad[Int])
}
it can "be used with zip" in {
Good(12).orBad[Every[ErrorMessage]] zip Good("hi").orBad[Every[ErrorMessage]] should === (Good((12, "hi")).orBad[Every[ErrorMessage]])
Good[Int].orBad(One("so")) zip Good[String].orBad(One("ho")) should === (Bad(Many("so", "ho")))
(Good(12): Int Or Every[ErrorMessage]) zip Bad[Every[ErrorMessage]](One("ho")) should === (Bad(One("ho")))
Bad[Every[ErrorMessage]](One("so")) zip Good[String]("hi") should === (Bad(One("so")))
Good[Int](12) zip Good[String]("hi") should === (Good[(Int, String)]((12, "hi")))
Bad[One[ErrorMessage]](One("so")) zip Bad[Every[ErrorMessage]](One("ho")) should === (Bad(Many("so", "ho")))
Good[Int](12) zip Bad[Every[ErrorMessage]](One("ho")) should === (Bad(One("ho")))
Bad[One[ErrorMessage]](One("so")) zip Good[String]("hi") should === (Bad(One("so")))
Good[Int](12) zip Good[String]("hi") should === (Good[(Int, String)]((12, "hi")))
Bad[Every[ErrorMessage]](One("so")) zip Bad[One[ErrorMessage]](One("ho")) should === (Bad(Many("so", "ho")))
Good[Int](12) zip Bad[One[ErrorMessage]](One("ho")) should === (Bad(One("ho")))
Bad[Every[ErrorMessage]](One("so")) zip Good[String]("hi") should === (Bad(One("so")))
// Works when right hand side ERR type is a supertype of left hand side ERR type, because that's what Every's ++ does.
Good[Int].orBad(One("oops")) zip Good[Int].orBad(One(-1: Any)) shouldBe Bad(Many("oops", -1))
Good[Int].orBad(One("oops": Any)) zip Good[Int].orBad(One(-1)) shouldBe Bad(Many("oops", -1))
Good[Int].orBad(One("oops")) zip Good[Int].orBad(One(-1)) shouldBe Bad(Many("oops", -1))
Good[Int].orBad(One(-1)) zip Good[Int].orBad(One("oops": Any)) shouldBe Bad(Many(-1, "oops"))
}
it can "be used with when" in {
Good[Int](12).when(
(i: Int) => if (i > 0) Pass else Fail(i + " was not greater than 0"),
(i: Int) => if (i < 100) Pass else Fail(i + " was not less than 100"),
(i: Int) => if (i % 2 == 0) Pass else Fail(i + " was not even")
) shouldBe Good(12)
Good[Int](12).when(
(i: Int) => if (i > 0) Pass else Fail(i + " was not greater than 0"),
(i: Int) => if (i < 3) Pass else Fail(i + " was not less than 3"),
(i: Int) => if (i % 2 == 0) Pass else Fail(i + " was not even")
) shouldBe Bad(One("12 was not less than 3"))
Good[Int](12).when(
(i: Int) => if (i > 0) Pass else Fail(i + " was not greater than 0"),
(i: Int) => if (i < 3) Pass else Fail(i + " was not less than 3"),
(i: Int) => if (i % 2 == 1) Pass else Fail(i + " was not odd")
) shouldBe Bad(Many("12 was not less than 3", "12 was not odd"))
Good[Int](12).when(
(i: Int) => if (i > 99) Pass else Fail(i + " was not greater than 99"),
(i: Int) => if (i < 3) Pass else Fail(i + " was not less than 3"),
(i: Int) => if (i % 2 == 1) Pass else Fail(i + " was not odd")
) shouldBe Bad(Many("12 was not greater than 99", "12 was not less than 3", "12 was not odd"))
Good[Int].orBad[Every[ErrorMessage]](One("original error")).when(
(i: Int) => if (i > 0) Pass else Fail(i + " was not greater than 0"),
(i: Int) => if (i < 3) Pass else Fail(i + " was not less than 3"),
(i: Int) => if (i % 2 == 0) Pass else Fail(i + " was not even")
) shouldBe Bad(One("original error"))
Good[Int].orBad[Every[ErrorMessage]](Many("original error 1", "original error 2")).when(
(i: Int) => if (i > 0) Pass else Fail(i + " was not greater than 0"),
(i: Int) => if (i < 3) Pass else Fail(i + " was not less than 3"),
(i: Int) => if (i % 2 == 0) Pass else Fail(i + " was not even")
) shouldBe Bad(Many("original error 1", "original error 2"))
Good("hi").orBad[Every[Int]].when((i: String) => Fail(2.0)) shouldBe Bad(One(2.0))
(for (i <- Good(10) when isRound) yield i) shouldBe Good(10)
(for (i <- Good(12) when isRound) yield i) shouldBe Bad(One("12 was not a round number"))
(for (i <- Good(12) when isRound) yield i) shouldBe Bad(One("12 was not a round number"))
(for (i <- Good(30) when (isRound, isDivBy3)) yield i) shouldBe Good(30)
(for (i <- Good(10) when (isRound, isDivBy3)) yield i) shouldBe Bad(One("10 was not divisible by 3"))
(for (i <- Good(3) when (isRound, isDivBy3)) yield i) shouldBe Bad(One("3 was not a round number"))
(for (i <- Good(2) when (isRound, isDivBy3)) yield i) shouldBe Bad(Many("2 was not a round number", "2 was not divisible by 3"))
}
it can "be created with the attempt helper method" in {
attempt { 2 / 1 } should === (Good(2))
val divByZero = attempt { throw new ArithmeticException("/ by zero") }
divByZero.isBad shouldBe true
divByZero match {
case Bad(ex) =>
ex shouldBe an [ArithmeticException]
ex.getMessage shouldBe "/ by zero"
case _ => fail()
}
divByZero.isBad shouldBe true
intercept[VirtualMachineError] {
attempt { throw new VirtualMachineError {} }
}
}
it can "be created from a Try via the from(Try) factory method" in {
Or.from(Success(12)) shouldBe Good(12)
Or.from(Success(12): Try[Int]) shouldBe Good(12)
val ex = new Exception("oops")
Or.from(Failure(ex)) shouldBe Bad(ex)
Or.from(Failure(ex): Try[Int]) shouldBe Bad(ex)
}
it can "be created with the from(Either) factory method" in {
Or.from(Right(12)) shouldBe Good(12)
Or.from(Right(12): Either[String, Int]) shouldBe Good(12)
val ex = new Exception("oops")
Or.from(Left(ex)) shouldBe Bad(ex)
Or.from(Left("oops")) shouldBe Bad("oops")
Or.from(Left("oops"): Either[String, String]) shouldBe Bad("oops")
}
it can "be created with the from(Option, BadIfNone) factory method" in {
Or.from(Some(12), "won't be used") shouldBe Good(12)
Or.from(Some(12): Option[Int], "won't be used") shouldBe Good(12)
val ex = new Exception("oops")
Or.from(None, ex) shouldBe Bad(ex)
Or.from(None, "oops") shouldBe Bad("oops")
Or.from(None: Option[String], "oops") shouldBe Bad("oops")
}
it can "be validated with collection.validatedBy" in {
def isOdd(i: Int): Int Or One[ErrorMessage] =
if (i % 2 == 1) Good(i) else Bad(One(s"$i was not odd"))
// List
List.empty[Int].validatedBy(isOdd) shouldBe Good(List.empty[Int])
List(3).validatedBy(isOdd) shouldBe Good(List(3))
List(4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
List(3, 5).validatedBy(isOdd) shouldBe Good(List(3, 5))
List(4, 6).validatedBy(isOdd) shouldBe Bad(Every("4 was not odd", "6 was not odd"))
List(3, 4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
List(4, 3).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
List(3, 5, 7).validatedBy(isOdd) shouldBe Good(List(3, 5, 7))
// Vector
Vector.empty[Int].validatedBy(isOdd) shouldBe Good(Vector.empty[Int])
Vector(3).validatedBy(isOdd) shouldBe Good(Vector(3))
Vector(4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Vector(3, 5).validatedBy(isOdd) shouldBe Good(Vector(3, 5))
Vector(4, 6).validatedBy(isOdd) shouldBe Bad(Every("4 was not odd", "6 was not odd"))
Vector(3, 4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Vector(4, 3).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Vector(3, 5, 7).validatedBy(isOdd) shouldBe Good(Vector(3, 5, 7))
// Iterator
List.empty[Int].iterator.validatedBy(isOdd).map(_.toStream) shouldBe Good(List.empty[Int].iterator).map(_.toStream)
List(3).iterator.validatedBy(isOdd).map(_.toStream) shouldBe Good(List(3).iterator).map(_.toStream)
List(4).iterator.validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
List(3, 5).iterator.validatedBy(isOdd).map(_.toStream) shouldBe Good(List(3, 5).iterator).map(_.toStream)
List(4, 6).iterator.validatedBy(isOdd) shouldBe Bad(Every("4 was not odd", "6 was not odd"))
List(3, 4).iterator.validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
List(4, 3).iterator.validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
List(3, 5, 7).iterator.validatedBy(isOdd).map(_.toStream) shouldBe Good(List(3, 5, 7).iterator).map(_.toStream)
// Set
Set.empty[Int].validatedBy(isOdd) shouldBe Good(Set.empty[Int])
Set(3).validatedBy(isOdd) shouldBe Good(Set(3))
Set(4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Set(3, 5).validatedBy(isOdd) shouldBe Good(Set(3, 5))
Set(4, 6).validatedBy(isOdd) shouldBe Bad(Every("4 was not odd", "6 was not odd"))
Set(3, 4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Set(4, 3).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Set(3, 5, 7).validatedBy(isOdd) shouldBe Good(Set(3, 5, 7))
Set.empty[Int].validatedBy(isOdd) shouldBe Good(Set.empty[Int])
// Every
One(3).validatedBy(isOdd) shouldBe Good(One(3))
Every(3).validatedBy(isOdd) shouldBe Good(One(3))
One(4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Every(4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Many(3, 5).validatedBy(isOdd) shouldBe Good(Many(3, 5))
Every(3, 5).validatedBy(isOdd) shouldBe Good(Many(3, 5))
Many(4, 6).validatedBy(isOdd) shouldBe Bad(Every("4 was not odd", "6 was not odd"))
Every(4, 6).validatedBy(isOdd) shouldBe Bad(Every("4 was not odd", "6 was not odd"))
Many(3, 4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Every(3, 4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Many(4, 3).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Every(4, 3).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
Many(3, 5, 7).validatedBy(isOdd) shouldBe Good(Every(3, 5, 7))
Every(3, 5, 7).validatedBy(isOdd) shouldBe Good(Many(3, 5, 7))
// Option
Some(3).validatedBy(isOdd) shouldBe Good(Some(3))
(None: Option[Int]).validatedBy(isOdd) shouldBe Good(None)
Some(4).validatedBy(isOdd) shouldBe Bad(One("4 was not odd"))
}
it can "be validated with collection.validatedBy when the map goes to a different type" in {
def parseAge(input: String): Int Or One[ErrorMessage] = {
try {
val age = input.trim.toInt
if (age >= 0) Good(age) else Bad(One(s""""${age}" is not a valid age"""))
}
catch {
case _: NumberFormatException => Bad(One(s""""${input}" is not a valid integer"""))
}
}
Some("29").validatedBy(parseAge) shouldBe Good(Some(29))
Some("-30").validatedBy(parseAge) shouldBe Bad(One("\\"-30\\" is not a valid age"))
Every("29", "30", "31").validatedBy(parseAge) shouldBe Good(Many(29, 30, 31))
Every("29", "-30", "31").validatedBy(parseAge) shouldBe Bad(One("\\"-30\\" is not a valid age"))
Every("29", "-30", "-31").validatedBy(parseAge) shouldBe Bad(Many("\\"-30\\" is not a valid age", "\\"-31\\" is not a valid age"))
List("29", "30", "31").validatedBy(parseAge) shouldBe Good(List(29, 30, 31))
List("29", "-30", "31").validatedBy(parseAge) shouldBe Bad(One("\\"-30\\" is not a valid age"))
List("29", "-30", "-31").validatedBy(parseAge) shouldBe Bad(Many("\\"-30\\" is not a valid age", "\\"-31\\" is not a valid age"))
}
it can "be combined with collection.combined" in {
// List
      // Is this the right answer? It has to be, because an empty collection cannot produce an error anyway.
List.empty[Int Or Every[String]].combined shouldBe Good(List.empty[Int])
// def combine[G, ELE, EVERY[b] <: Every[b], SEQ[s]](xs: SEQ[G Or EVERY[ELE]])(implicit seq: Sequenceable[SEQ]): SEQ[G] Or Every[ELE] =
// G = Int, ELE = Nothing, SEQ = List
List(Good(3)).combined shouldBe Good(List(3))
List(Bad(One("oops"))).combined shouldBe Bad(One("oops"))
List(Good(3), Good(4)).combined shouldBe Good(List(3, 4))
List(Bad(One("darn")), Bad(One("oops"))).combined shouldBe Bad(Every("darn", "oops"))
List(Good(3), Bad(One("oops"))).combined shouldBe Bad(One("oops"))
List(Bad(One("oops")), Good(3)).combined shouldBe Bad(One("oops"))
List(Good(3), Good(4), Good(5)).combined shouldBe Good(List(3, 4, 5))
// Vector
Vector.empty[Int Or Every[String]].combined shouldBe Good(Vector.empty[Int])
Vector(Good(3)).combined shouldBe Good(Vector(3))
Vector(Bad(One("oops"))).combined shouldBe Bad(One("oops"))
Vector(Good(3), Good(4)).combined shouldBe Good(Vector(3, 4))
Vector(Bad(One("darn")), Bad(One("oops"))).combined shouldBe Bad(Every("darn", "oops"))
Vector(Good(3), Bad(One("oops"))).combined shouldBe Bad(One("oops"))
Vector(Bad(One("oops")), Good(3)).combined shouldBe Bad(One("oops"))
Vector(Good(3), Good(4), Good(5)).combined shouldBe Good(Vector(3, 4, 5))
// Do the same thing with Iterator
(List.empty[Int Or Every[String]].iterator).combined.map(_.toStream) shouldEqual (Good(List.empty[Int].iterator).map(_.toStream))
List(Good(3)).iterator.combined.map(_.toStream) shouldEqual (Good(List(3).iterator).map(_.toStream))
List(Bad(One("oops"))).iterator.combined shouldEqual (Bad(One("oops")))
List(Good(3), Good(4)).iterator.combined.map(_.toStream) shouldEqual (Good(List(3, 4).iterator).map(_.toStream))
List(Bad(One("darn")), Bad(One("oops"))).iterator.combined shouldEqual (Bad(Every("darn", "oops")))
List(Good(3), Bad(One("oops"))).iterator.combined shouldEqual (Bad(One("oops")))
List(Bad(One("oops")), Good(3)).iterator.combined shouldEqual (Bad(One("oops")))
List(Good(3), Good(4), Good(5)).iterator.combined.map(_.toStream) shouldEqual (Good(List(3, 4, 5).iterator).map(_.toStream))
// Set
Set.empty[Int Or Every[String]].combined shouldBe Good(Set.empty[Int])
Set(Good[Int](3), Bad[Every[String]](Every("oops"))).asInstanceOf[Set[Int Or Every[String]]].combined shouldBe Bad(One("oops"))
Set(Good[Int](3), Bad[Every[String]](Every("oops"))).combined shouldBe Bad(One("oops"))
Set(Good(3)).combined shouldBe Good(Set(3))
convertGenSetOnceToCombinable3(Set(Bad(One("oops")))).combined shouldBe Bad(One("oops"))
Set(Good(3), Good(4)).combined shouldBe Good(Set(3, 4))
Set(Bad(One("darn")), Bad(One("oops"))).combined shouldBe Bad(Every("darn", "oops"))
Set(Good(3), Bad(One("oops"))).combined shouldBe Bad(One("oops"))
Set(Bad(One("oops")), Good(3)).combined shouldBe Bad(One("oops"))
Set(Good(3), Good(4), Good(5)).combined shouldBe Good(Set(3, 4, 5))
// Every
Every(Good(3).orBad[Every[String]], Good[Int].orBad(Every("oops"))).combined shouldBe Bad(One("oops"))
Every(Good(3)).combined shouldBe Good(Every(3))
One(Good(3)).combined shouldBe Good(Every(3))
Every(Bad(One("oops"))).combined shouldBe Bad(One("oops"))
One(Bad(One("oops"))).combined shouldBe Bad(One("oops"))
Every(Good(3), Good(4)).combined shouldBe Good(Every(3, 4))
Many(Good(3), Good(4)).combined shouldBe Good(Every(3, 4))
Every(Bad(One("darn")), Bad(One("oops"))).combined shouldBe Bad(Every("darn", "oops"))
Many(Bad(One("darn")), Bad(One("oops"))).combined shouldBe Bad(Every("darn", "oops"))
Every(Good(3), Bad(One("oops"))).combined shouldBe Bad(One("oops"))
Every(Bad(One("oops")), Good(3)).combined shouldBe Bad(One("oops"))
Every(Good(3), Good(4), Good(5)).combined shouldBe Good(Every(3, 4, 5))
// Option
Some(Good(3)).combined shouldBe Good(Some(3))
(None: Option[Int Or Every[ErrorMessage]]).combined shouldBe Good(None)
Some(Bad(One("oops"))).combined shouldBe Bad(One("oops"))
Some(Bad(Many("oops", "idoops"))).combined shouldBe Bad(Many("oops", "idoops"))
}
it can "be folded with fold" in {
Good(3).orBad[String].fold(_ + 1, _.length) shouldBe 4
Good[Int].orBad("howdy").fold(_ + 1, _.length) shouldBe 5
}
// SKIP-SCALATESTJS,NATIVE-START
it can "be serialized correctly" in {
    serializeRoundtrip(Or.from(Success(12))) shouldBe Good(12)
val ex = new Exception("oops")
    serializeRoundtrip(Or.from(Failure(ex))) shouldBe Bad(ex)
}
// SKIP-SCALATESTJS,NATIVE-END
"A Good" can "be widened to an Or type via .asOr" in {
Good(1).asOr shouldBe Good(1)
/*
scala> xs.foldLeft(Good(6).orBad[ErrorMessage]) { (acc, x) => acc orElse (if (x % 2 == 0) Good(x) else acc) }
<console>:12: error: type mismatch;
found : org.scalautils.Or[Int,org.scalautils.ErrorMessage]
required: org.scalautils.Good[Int,org.scalautils.ErrorMessage]
xs.foldLeft(Good(6).orBad[ErrorMessage]) { (acc, x) => acc orElse (if (x % 2 == 0) Good(x) else acc) }
^
scala> xs.foldLeft(Good(6).orBad[ErrorMessage].asOr) { (acc, x) => acc orElse (if (x % 2 == 0) Good(x) else acc) }
res2: org.scalautils.Or[Int,org.scalautils.ErrorMessage] = Good(6)
*/
val xs = List(1, 2, 3)
xs.foldLeft(Good(6).orBad[ErrorMessage].asOr) {
(acc, x) => acc orElse (if (x % 2 == 0) Good(x) else acc)
} shouldBe Good(6)
}
// SKIP-SCALATESTJS,NATIVE-START
it can "be serialized correctly" in {
serializeRoundtrip(Good(1)) shouldBe Good(1)
}
// SKIP-SCALATESTJS,NATIVE-END
"A Bad" can "be widened to an Or type via .asOr" in {
Bad("oops").asOr shouldBe Bad("oops")
/*
scala> xs.foldLeft(Good[Int].orBad("no evens")) { (acc, x) => acc orElse (if (x % 2 == 0) Good(x) else acc) }
<console>:12: error: type mismatch;
found : org.scalautils.Or[Int,String]
required: org.scalautils.Bad[Int,String]
xs.foldLeft(Good[Int].orBad("no evens")) { (acc, x) => acc orElse (if (x % 2 == 0) Good(x) else acc) }
^
scala> xs.foldLeft(Good[Int].orBad("no evens").asOr) { (acc, x) => acc orElse (if (x % 2 == 0) Good(x) else acc) }
res7: org.scalautils.Or[Int,String] = Good(2)
*/
val xs = List(1, 2, 3)
xs.foldLeft(Good[Int].orBad("no evens").asOr) { (acc, x) =>
acc orElse (if (x % 2 == 0) Good(x) else acc)
} shouldBe Good(2)
val ys = List(1, 3, 5)
ys.foldLeft(Good[Int].orBad("no evens").asOr) { (acc, x) =>
acc orElse (if (x % 2 == 0) Good(x) else acc)
} shouldBe Bad("no evens")
}
// SKIP-SCALATESTJS,NATIVE-START
it can "be serialized correctly" in {
serializeRoundtrip(Bad("oops")) shouldBe Bad("oops")
}
// SKIP-SCALATESTJS,NATIVE-END
"The Or companion" should "offer a concise type lambda syntax" in {
// One way:
"""
trait Functor[Context[_]] {
def map[A, B](ca: Context[A])(f: A => B): Context[B]
}
class OrFunctor[BAD] extends Functor[Or.B[BAD]#G] {
override def map[G, H](ca: G Or BAD)(f: G => H): H Or BAD = ca.map(f)
}
class BadOrFunctor[GOOD] extends Functor[Or.G[GOOD]#B] {
override def map[B, C](ca: GOOD Or B)(f: B => C): GOOD Or C = ca.badMap(f)
}
""" should compile
/*
// Other way:
class OrFunctor[B] extends Functor[Or.BAD[B]#GOOD] {
override def map[G, H](ca: G Or B)(f: G => H): H Or B = ca.map(f)
}
class BadOrFunctor[G] extends Functor[Or.GOOD[G]#BAD] {
override def map[B, C](ca: G Or B)(f: B => C): G Or C = ca.badMap(f)
}
*/
}
} | scalatest/scalatest | jvm/scalactic-test/src/test/scala/org/scalactic/OrSpec.scala | Scala | apache-2.0 | 28,773 |
package com.arcusys.learn.web
import com.arcusys.learn.controllers.api.BaseApiController
import com.arcusys.valamis.file.service.FileService
import com.arcusys.valamis.util.FileSystemUtil
import com.escalatesoft.subcut.inject.BindingModule
import com.arcusys.learn.ioc.Configuration
import org.scalatra.servlet.FileUploadSupport
import org.scalatra.SinatraRouteMatcher
class FileStorageFilter(configuration: BindingModule) extends BaseApiController(configuration) with ServletBase with FileUploadSupport {
def this() = this(Configuration)
//next line fixes 404
implicit override def string2RouteMatcher(path: String) = new SinatraRouteMatcher(path)
private val fileService = inject[FileService]
get("/*.*") {
val filename = multiParams("splat").mkString(".")
val extension = multiParams("splat").last.split('.').last
contentType = extension match {
case "css" => "text/css"
case "htm" => "text/html"
case "html" => "text/html"
case "js" => "application/javascript"
case "png" => "image/png"
case "jpg" => "image/jpeg"
case "jpeg" => "image/jpeg"
case "gif" => "image/gif"
case "swf" => "application/x-shockwave-flash"
case _ => FileSystemUtil.getMimeType(filename)
}
val fileContentOption = fileService.getFileContentOption(filename)
if (fileContentOption.isDefined) {
response.getOutputStream.write(fileContentOption.getOrElse(halt(405)))
} else halt(404)
}
}
| ViLPy/Valamis | learn-portlet/src/main/scala/com/arcusys/learn/web/FileStorageFilter.scala | Scala | lgpl-3.0 | 1,522 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Wed May 28 16:06:12 EDT 2014
* @see LICENSE (MIT style license file).
*
* @see www.math.pitt.edu/~sussmanm//2071Spring08/lab09/index.html
* @see www.netlib.org/lapack/lawnspdf/lawn03.pdf
* @see www.netlib.org/lapack/lawns/lawn11.ps
* @see fortranwiki.org/fortran/show/svd
*
* Code translated from LAPACK Fortran code
*/
// U N D E R D E V E L O P M E N T
// FIX: Q and P incorrectly permuted
package scalation.linalgebra
import scala.math.{abs, max, min, sqrt}
import scala.util.control.Breaks.{break, breakable}
import scalation.linalgebra.Rotation._
import scalation.math.{double_exp, sign}
import scalation.math.ExtremeD.{EPSILON, MIN_NORMAL, TOL}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD3` class is used to solve Singular Value Decomposition for bidiagonal matrices.
*
* It computes the singular values and, optionally, the right and/or left singular vectors
* from the singular value decomposition 'SVD' of a real n-by-n (upper) bidiagonal matrix B
* using the implicit zero-shift 'QR' algorithm. The 'SVD' of B has the form
*
* B = Q * S * P.t
*
* where S is the diagonal matrix of singular values, Q is an orthogonal matrix of
* left singular vectors, and P is an orthogonal matrix of right singular vectors.
* If left singular vectors are requested, this subroutine actually returns U*Q
* instead of Q, and, if right singular vectors are requested, this subroutine
 *  returns P.t * VT instead of P.t, for given real input matrices U and VT.  When
* U and VT are the orthogonal matrices that reduce a general matrix A to bidiagonal
* form: A = U*B*VT, as computed by DGEBRD, then
*
* A = (U*Q) * S * (P.t*VT)
*
* is the 'SVD' of the general matrix A. A positive tolerance 'TOL' gives relative accuracy;
* for absolute accuracy negate it.
*
* @see "Computing Small Singular Values of Bidiagonal Matrices With Guaranteed High Relative Accuracy,"
* @see J. Demmel and W. Kahan, LAPACK Working Note #3 (or SIAM J. Sci. Statist. Comput. 11:5, pp. 873-912, Sept 1990)
*
* @see "Accurate singular values and differential qd algorithms," B. Parlett and V. Fernando,
* @see Technical Report CPAM-554, Mathematics Department, University of California at Berkeley, July 1992
*
* @see fortranwiki.org/fortran/show/svd
* @see LAPACK SUBROUTINE DBDSQR (UPLO, N, NCVT, NRU, NCC, D, E, VT, LDVT, U, LDU, C, LDC, WORK, INFO)
*
* @param a the bidiagonal matrix A consisting of a diagonal and super-diagonal
* @param vt the right orthogonal matrix from b = bidiagonalize (a)
* @param u the left orthogonal matrix from b = bidiagonalize (a)
*/
class SVD3 (b: BidMatrixD, vt: MatrixD = new MatrixD (0, 0),
u: MatrixD = new MatrixD (0, 0))
extends SVDecomp
{
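    // Worked example (a sketch mirroring SVD3Test at the bottom of this file): for the bidiagonal
    // matrix B = [[1, 1], [0, 2]], deflate () returns the singular values (2.28825, 0.87403).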
private val DEBUG = true // debug flag
private val ONLY_S = false // only interested in singular values, not vectors
private val DO_SORT = false // do sort the singular values
    private val MAXITR  = 6                        // iteration factor
private val n = b.dim1 // the size (rows and columns) of the bidiagonal matrix B
private val ncvt = vt.dim2 // the number of columns in matrix VT
private val nru = u.dim1 // the number of rows in matrix U
private val work = Array.ofDim [Double] (4*n) // workspace -- FIX: replace
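    // work is used as four consecutive slices (layout inferred from the QR-step code below):
    // rotation cosines, then sines at offset NM1, then a second set of cosines at NM12 and sines at NM13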
private val NM1 = n - 1 // one less than n
private val NM12 = NM1 + NM1 // 2 * NM1
private val NM13 = NM12 + NM1 // 3 * NM1
private val maxit = MAXITR * n * n // maximum number of iterations allowed
private val d = b.dg // the main diagonal
private val e = b.sd // the super-diagonal (one above main)
private var notflat = true // whether matrix B is yet to be deflated
private var oldll = -1 // old saved lower index
private var oldm = -1 // old saved upper index
private var m = n-1 // m points to last element of unconverged part of matrix
private var idir = 0 // the bulge chasing direction
private var smax = d.mag max e.mag // estimate for largest singular value
private var smin = 0.0 // estimate for smallest singular value
private var smin_l = 0.0 // lower bound on smallest singular value
private var cs = 1.0 // cosine in rotation matrix
private var sn = 0.0 // sine in rotation matrix
private var r = 1.0 // remaining nonzero value
private var oldcs = 1.0 // old saved cosine in rotation matrix
private var oldsn = 0.0 // old saved sine in rotation matrix
private var sigmn = 0.0 // minimum singular value
private var sigmx = 0.0 // maximum singular value
private var sinr = 0.0 // sine right
private var cosr = 1.0 // cosine right
private var sinl = 0.0 // sine left
    private var cosl   = 1.0                      // cosine left
private val thresh = calcThreshold () // threshold for setting values to zero
def max3 (x: Double, y: Double, z: Double) = (x max y) max z
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Calculate the desired threshold for setting elements to zero.
*/
def calcThreshold (): Double =
{
if (TOL >= 0.0) { // relative accuracy desired
var smin_oa = abs (d(0))
if (smin_oa > 0.0) {
var mu = smin_oa
breakable { for (i <- 1 until n) {
mu = abs (d(i)) * (mu / (mu + abs(e(i-1))))
smin_oa = min (smin_oa, mu)
if (smin_oa =~ 0.0) break
}} // breakable for
} // if
smin_oa = smin_oa / sqrt (n.toDouble)
return max (TOL * smin_oa, MAXITR * n * n * MIN_NORMAL)
} // if
max (abs (TOL) * smax, MAXITR * n * n * MIN_NORMAL) // absolute accuracy desired
} // calcThreshold
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Factor matrix 'a' forming a diagonal matrix consisting of singular
* values and return the singular values in a vector.
*/
override def factor (): Tuple3 [MatrixD, VectorD, MatrixD] =
{
(null, null, null) // FIX
} // factor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Deflate the bidiagonal matrix by iteratively turning super-diagonal elements
* to zero. Then return the vector of singular values (i.e., the main diagonal).
*/
def deflate (): VectorD =
{
var go = true // go flag, continue deflation
var iter = 0 // cumulative iterations of inner loop
do { // begin main iteration loop
            // note: idir here is the class-level field (bulge chasing direction) set by chooseDirection below
if (m == 0) go = false // upper index m is at lower limit, done
if (go) { // find block (ll, m) to work on
val ll = findBlock () // e(ll) through e(m-1) are nonzero, e(ll-1) is zero
if (DEBUG) trace (iter, ll)
if (ll >= 0) {
if (ll == m-1) { // block is 2-by-2, handle as a special case
deflate_2by2 (ll)
go = false // no blocks left, done
} else { // block >= 3-by-3
chooseDirection (ll) // choose bulge chasing direction
if (idir == 1) convergeForward (ll) // apply convergence tests (set almost zero to zero)
else convergeBackward (ll)
oldll = ll // save ll and m
oldm = m
val shift = computeShift (ll) // compute amount of shift
take_QRstep (ll, shift, idir) // take one QR step (use rotation to clear an e-value)
iter += m - ll // increment iteration count
} // if
} // if
} // if
} while (go && iter < maxit)
if (go) { // loop exited due to iteration limit
val nz = countNonzeroElements ()
if (nz > 0) {
println ("deflate: failed to converge - " + nz + " nonzero elements in super-diagonal")
return null
} // if
} // if
        if (DEBUG) println ("diagonal d = " + d + "\nsup-diag e = " + e)
makePositive () // make singular values positive
if (DO_SORT) sortValues () // sort singular values into decreasing order
notflat = false // matrix B is now deflated
d // return the singular values
} // deflate
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Deflate the bidiagonal matrix by iteratively turning super-diagonal elements
* to zero. Then return the vector of singular values and the matrices of
* singular vectors.
*/
def deflateV (): Tuple3 [VectorD, MatrixD, MatrixD] =
{
if (ONLY_S) {
println ("deflateV: cannot be called when ONLY_S is true, set it to false")
return null
} // if
if (notflat) deflate ()
if (vt.dim1 < 1 || u.dim1 < 1) println ("deflateV: warning matrix vt or u is empty")
(d, vt, u)
} // deflateV
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Deflate 2 by 2 block, handle separately.
* @param ll the lower index
*/
def deflate_2by2 (ll: Int)
{
val svd2 = new SVD_2by2 (d(m-1), e(m-1), d(m))
val d1 = svd2.deflateV ()
sigmn = d1._1; sigmx = d1._2; sinr = d1._3; cosr = d1._4; sinl = d1._5; cosl = d1._6
// CALL DLASV2 (d(m-1), e(m-1), d(m), sigmn, sigmx, sinr, cosr, sinl, cosl)
d(m-1) = sigmx
e(m-1) = 0.0
d(m) = sigmn
if (! ONLY_S) { // compute singular vectors, if desired
if (ncvt > 0) rot (ncvt, vt(m-1), vt(m), cosr, sinr)
if (nru > 0) rotCol (nru, u, m-1, m, cosl, sinl)
// if (ncvt > 0) CALL DROT (ncvt, vt(m-1, 1), ldvt, vt(m, 1), ldvt, cosr, sinr)
// if (nru > 0) CALL DROT (nru, u(1, m-1), 1, u(1, m) , 1, cosl, sinl)
} // if
m -= 2
} // deflate_2by2
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Count the number of nonzero elements in the super-diagonal. Call if the
     *  maximum number of iterations is exceeded (failure to converge).
*/
def countNonzeroElements (): Int =
{
var nzero = 0
for (i <- 0 until n-1 if ! (e(i) =~ 0.0)) nzero += 1
if (nzero > 0) println ("deflate failed: nzero = " + nzero)
nzero
} // countNonzeroElements
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Trace this outer iteration.
* @param iter the total iteration count
* @param ll the lower index
*/
private def trace (iter: Int, ll: Int)
{
println ("iter = " + iter)
println ("diagonal d = " + d)
println ("sup-diag e = " + e)
println ("block (ll, m) = " + (ll, m))
println ("-------------------------------------------")
} // trace
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Find diagonal block '(ll, m)' of matrix to work on, returning the lower
     *  index 'll'.  Also decrements the upper index 'm', if needed.  'e(j)' must be zero
* before and after the block.
*/
private def findBlock (): Int =
{
var ll = 0 // lower index of block
if (TOL < 0.0 && abs (d(m)) <= thresh) d(m) = 0.0
smax = abs (d(m))
smin = smax
// for (i <- 1 to m-1) {
for (i <- 1 to m) {
ll = m - i
val abs_d = abs (d(ll))
val abs_e = abs (e(ll))
if (TOL < 0.0 && abs_d <= thresh) d(ll) = 0.0
if (abs_e <= thresh) {
e(ll) = 0.0 // matrix splits since e(ll) = 0
if (ll == m-1) {
m -= 1 // reduce upper index by 1
return -1 // return and try again to find block
} // if
return ll // return and try again
} // if
smin = min (smin, abs_d)
smax = max3 (smax, abs_d, abs_e)
} // for
ll // return the lower index ll
} // findBlock
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Working on new submatrix, choose shift direction
* (from larger end diagonal element towards smaller).
* @param ll the lower index
*/
private def chooseDirection (ll: Int)
{
if (ll > oldm || m < oldll) {
if (abs (d(ll) ) >= abs (d(m))) {
idir = 1 // chase bulge from top to bottom
} else {
idir = 2 // chase bulge from bottom to top
} // if
} // if
} // chooseDirection
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Run convergence test in forward direction. First apply standard test to
* bottom of matrix
* @param ll the lower index
*/
private def convergeForward (ll: Int)
{
if (abs (e(m-1)) <= abs (TOL) * abs (d(m)) || (TOL < 0.0 && abs (e(m-1)) <= thresh)) {
e(m-1) = 0.0
return
} // if
if (TOL >= 0.0 ) { // if relative accuracy desired, apply convergence criterion forward
var mu = abs (d(ll))
smin_l = mu
for (i <- ll to m-1) {
if (abs (e(i)) <= TOL * mu) {
e(i) = 0.0
return
} // if
mu = abs (d(i+1)) * (mu / (mu + abs (e(i))))
smin_l = min (smin_l, mu)
} // for
} // if
} // convergeForward
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Run convergence test in backward direction. First apply standard test to
* top of matrix.
* @param ll the lower index
*/
private def convergeBackward (ll: Int)
{
if (abs (e(ll)) <= abs (TOL) * abs (d(ll)) || (TOL < 0.0 && abs (e(ll)) <= thresh)) {
e(ll) = 0.0
return
} // if
if (TOL >= 0.0) { // if relative accuracy desired, apply convergence criterion backward
var mu = abs (d(m))
smin_l = mu
for (i <- m-1 to ll by -1) {
if (abs (e(i)) <= TOL * mu) {
e(i) = 0.0
return
} // if
mu = abs (d(i)) * (mu / (mu + abs (e(i))))
smin_l = min (smin_l, mu)
} // for
} // if
    } // convergeBackward
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Compute the shift.  First test whether shifting would ruin relative accuracy,
* and if so set the shift to zero.
* @param ll the lower index
*/
private def computeShift (ll: Int): Double =
{
var shft = 0.0
var sll = 0.0
if (TOL >= 0.0 && n * TOL * (smin_l / smax ) <= max (EPSILON, 0.01 * TOL)) {
return shft // use a zero shift to avoid loss of relative accuracy
} // if
// Compute the shift from 2-by-2 block at end of matrix
if (idir == 1) {
sll = abs (d(ll))
val svd2 = new SVD_2by2 (d(m-1), e(m-1), d(m))
val d1 = svd2.deflate (); shft = d1(0); r = d1(1)
// CALL DLAS2 (d(m-1), e(m-1), d(m), shift, r)
} else {
sll = abs (d(m))
val svd2 = new SVD_2by2 (d(ll), e(ll), d(ll+1))
val d1 = svd2.deflate (); shft = d1(0); r = d1(1)
// CALL DLAS2 (d(ll), e(ll), d(ll+1), shift, r)
} // if
// Test if shift negligible, and if so set to zero
        if (sll > 0.0 && (shft / sll) * (shft / sll) < EPSILON) shft = 0.0    // i.e., (shft/sll)^2 negligible
shft
} // computeShift
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Take one 'QR' step to push a super-diagonal element toward zero.
* @param ll the lower index
* @param shift the amount of shift
* @param idir the direction, t2b or b2t
*/
private def take_QRstep (ll: Int, shift: Double, idir: Int)
{
if (shift =~ 0.0) {
if (idir == 1) {
zeroShiftQR_t2b (ll)
} else {
zeroShiftQR_b2t (ll)
} // if
} else {
if (idir == 1) {
shiftedQR_t2b (ll, shift)
} else {
shiftedQR_b2t (ll, shift)
} // if
} // if
} // take_QRstep
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Chase bulge from top to bottom.  Save cos's and sin's for later singular
* vector updates.
* @param ll the lower index
*/
private def zeroShiftQR_t2b (ll: Int)
{
cs = 1.0
oldcs = 1.0
for (i <- ll to m-1) {
val q1 = rotate (d(i) * cs, e(i)); cs = q1.cs; sn = q1.sn; r = q1.r
// CALL DLARTG (d(i) * cs, e(i), cs, sn, r)
if (i > ll) e(i-1) = oldsn * r
val q2 = rotate (oldcs * r, d(i+1) * sn); oldcs = q2.cs; oldsn = q2.sn; d(i) = q2.r
// CALL DLARTG (oldcs * r, d(i+1) * sn, oldcs, oldsn, d(i))
work(i-ll+1) = cs
work(i-ll+1+NM1) = sn
work(i-ll+1+NM12) = oldcs
work(i-ll+1+NM13) = oldsn
} // for
val h = d(m) * cs
d(m) = h * oldcs
e(m-1) = h * oldsn
if (abs (e(m-1)) <= thresh) e(m-1) = 0.0 // test convergence
if (! ONLY_S) { // update singular vectors, if desired
if (ncvt > 0) rotateV (true, true, m-ll+1, ncvt, work, 0, work, n-1, vt.slice(ll, vt.dim1))
if (nru > 0) rotateV (false, true, nru, m-ll+1, work, NM12, work, NM13, u.sliceCol(ll, u.dim2))
// if (ncvt > 0) CALL DLASR ('l', 'v', 'f', m-ll+1, ncvt, work(1), work(n), vt(ll, 1), ldvt)
// if (nru > 0) CALL DLASR ('r', 'v', 'f', nru, m-ll+1, work(NM12+1), work(NM13+1), u(1, ll), ldu)
} // if
} // zeroShiftQR_t2b
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Chase bulge from bottom to top. Save cos's and sin's for later singular
* vector updates.
* @param ll the lower index
*/
private def zeroShiftQR_b2t (ll: Int)
{
cs = 1.0
oldcs = 1.0
for (i <- m to ll+1 by -1) {
val q1 = rotate (d(i) * cs, e(i-1)); cs = q1.cs; sn = q1.sn; r = q1.r
// CALL DLARTG (d(i) * cs, e(i-1), cs, sn, r)
if( i < m ) e(i) = oldsn * r
val q2 = rotate (oldcs * r, d(i-1) * sn); oldcs = q2.cs; oldsn = q2.sn; d(i) = q2.r
// CALL DLARTG (oldcs * r, d(i-1) * sn, oldcs, oldsn, d(i))
work(i-ll) = cs
work(i-ll+NM1) = -sn
work(i-ll+NM12) = oldcs
work(i-ll+NM13) = -oldsn
} // for
val h = d(ll) * cs
d(ll) = h * oldcs
e(ll) = h * oldsn
if (abs (e(ll)) <= thresh) e(ll) = 0.0 // test convergence
if (! ONLY_S) { // update singular vectors, if desired
if (ncvt > 0) rotateV (true, false, m-ll+1, ncvt, work, NM12, work, NM13, vt.slice(ll, vt.dim1))
if (nru > 0) rotateV (false, false, nru, m-ll+1, work, 0, work, n-1, u.sliceCol(ll, u.dim2))
// if (ncvt > 0) CALL DLASR ('l', 'v', 'b', m-ll+1, ncvt, work(NM12+1), work(NM13+1), vt(ll, 1), ldvt)
// if (nru > 0) CALL DLASR ('r', 'v', 'b', nru, m-ll+1, work(1), work(n), u(1, ll), ldu)
} // if
} // zeroShiftQR_b2t
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Using nonzero shift, chase bulge from top to bottom. Save cos's and
* sin's for later singular vector updates
* @param ll the lower index
* @param shift the amount of shift
*/
private def shiftedQR_t2b (ll: Int, shift: Double)
{
var f = (abs (d(ll)) - shift) * (sign (1.0, d(ll)) + shift / d(ll))
var g = e(ll)
for (i <- ll to m-1) {
val q1 = rotate (f, g); cosr = q1.cs; sinr = q1.sn; r = q1.r
// CALL DLARTG (f, g, cosr, sinr, r)
if (i > ll) e(i-1) = r
f = cosr * d(i) + sinr * e(i)
e(i) = cosr * e(i) - sinr * d(i)
g = sinr * d(i+1)
d(i+1) = cosr * d(i+1)
val q2 = rotate (f, g); cosl = q2.cs; sinl = q2.sn; r = q2.r
// CALL DLARTG (f, g, cosl, sinl, r)
d(i) = r
f = cosl * e(i) + sinl * d(i+1)
d(i+1) = cosl * d(i+1) - sinl * e(i)
if (i < m-1) {
g = sinl * e(i+1)
e(i+1) = cosl * e(i+1)
} // if
work(i-ll+1) = cosr
work(i-ll+1+NM1) = sinr
work(i-ll+1+NM12) = cosl
work(i-ll+1+NM13) = sinl
} // for
e(m-1) = f
if (abs (e( m-1)) <= thresh ) e(m-1) = 0.0 // test convergence
if (! ONLY_S) { // update singular vectors, if desired
if (ncvt > 0) rotateV (true, true, m-ll+1, ncvt, work, 0, work, n-1, vt.slice(ll, vt.dim1))
if (nru > 0) rotateV (false, true, nru, m-ll+1, work, NM12, work, NM13, u.sliceCol(ll, u.dim2))
// if (ncvt > 0) CALL DLASR ('l', 'v', 'f', m-ll+1, ncvt, work(1), work(n), vt(ll, 1), ldvt)
// if (nru > 0) CALL DLASR ('r', 'v', 'f', nru, m-ll+1, work(NM12+1), work(NM13+1), u(1, ll), ldu)
} // if
} // shiftedQR_t2b
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Using nonzero shift, chase bulge from bottom to top. Save cos's and
* sin's for later singular vector updates
* @param ll the lower index
* @param shift the amount of shift
*/
private def shiftedQR_b2t (ll: Int, shift: Double)
{
var f = (abs (d(m)) - shift) * (sign(1.0, d(m)) + shift / d(m))
var g = e(m-1)
for (i <- m to ll+1 by -1) {
val q1 = rotate (f, g); cosr = q1.cs; sinr = q1.sn; r = q1.r
// CALL DLARTG (f, g, cosr, sinr, r)
if (i < m) e(i) = r
f = cosr * d(i) + sinr * e(i-1)
e(i-1) = cosr * e(i-1) - sinr * d(i)
g = sinr * d(i-1)
d(i-1) = cosr * d(i-1)
val q2 = rotate (f, g); cosl = q2.cs; sinl = q2.sn; r = q2.r
// CALL DLARTG (F, G, COSL, SINL, R)
d(i) = r
f = cosl * e(i-1) + sinl * d(i-1)
d(i-1) = cosl * d(i-1) - sinl * e(i-1)
if (i > ll+1) {
g = sinl * e(i-2)
e(i-2) = cosl * e(i-2)
} // if
work(i-ll) = cosr
work(i-ll+NM1) = -sinr
work(i-ll+NM12) = cosl
work(i-ll+NM13) = -sinl
} // for
e(ll) = f
if (abs (e(ll)) <= thresh) e(ll) = 0.0 // test convergence
if (! ONLY_S) { // update singular vectors, if desired
if (ncvt > 0) rotateV (true, false, m-ll+1, ncvt, work, NM12, work, NM13, vt.slice(ll, vt.dim1))
if (nru > 0) rotateV (false, false, nru, m-ll+1, work, 0, work, n-1, u.sliceCol(ll, u.dim2))
// if (ncvt > 0) CALL DLASR ('l', 'v', 'b', m-ll+1, ncvt, work(NM12+1), work(NM13+1), vt(ll, 1), ldvt)
// if (nru > 0) CALL DLASR ('r', 'v', 'b', nru, m-ll+1, work(1), work(n), u(1, ll), ldu)
} // if
} // shiftedQR_b2t
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** All singular values converged, so make them positive.
*/
private def makePositive ()
{
for (i <- 0 until n) {
if (d(i) < 0.0) {
d(i) = -d(i)
if (! ONLY_S) { // change sign of singular vectors, if desired
if (ncvt > 0) vt(i) *= -1.0
// if (ncvt > 0) CALL DSCAL (ncvt, -1.0, vt(i, 1), ldvt)
} // if
} // if
} // for
} // makePositive
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Sort the singular values into decreasing order. Selection sort is used
* to minimize the swapping of singular vectors. If only sorting singular
* values use 'sort2' that uses 'quicksort'.
*/
private def sortValues ()
{
if (ONLY_S) { // only interested in singular values
d.sort2 () // sort vector d in descending order (use if ignoring vectors)
return
} // if
for (i <- 0 until n-1) {
var k = i
for (j <- i+1 until n if d(j) > d(k)) k = j
if (i != k) {
d.swap (i, k) // swap singular values in vector d
if (ncvt > 0) vt.swap (i, k) // swap singular vectors (rows) in matrix vt
if (nru > 0) u.swapCol (i, k) // swap singular vectors (columns) in matrix u
// if (ncvt > 0) CALL DSWAP (ncvt, vt(isub, 1), ldvt, vt(n+1-i, 1), ldvt)
// if (nru > 0) CALL DSWAP (nru, u(1, isub), 1, u(1, n+1-i), 1)
} // if
} // for
} // sortValues
} // SVD3 class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD3Test` is used to test the `SVD3` class.
* Answer: singular values = (2.28825, 0.87403)
* @see http://comnuan.com/cmnn01004/
*/
object SVD3Test extends App
{
import MatrixD.eye
val a = new MatrixD ((2, 2), 1.0, 1.0,
0.0, 2.0)
    val b  = new BidMatrixD (a)                       // a is already bidiagonal
val vt0 = eye (b.dim1)
val u0 = eye (b.dim1)
val svd = new SVD3 (b, vt0, u0)
println ("----------------------------------------")
println ("Test SVD3")
println ("----------------------------------------")
println ("a = " + a)
println ("b = " + b)
println ("----------------------------------------")
println ("singular values = " + svd.deflate ())
val (s, vt, u) = svd.deflateV ()
println ("----------------------------------------")
println ("singular val/vec = " + (s, vt, u))
println ("----------------------------------------")
println ("u ** s * vt = " + u ** s * vt)
} // SVD3Test
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SVD3Test2` is used to test the `SVD3` class.
* Answer: singular values = (3.82983, 1.91368, 0.81866)
*/
object SVD3Test2 extends App
{
import MatrixD.eye
val a = new MatrixD ((3, 3), 1.0, 1.0, 0.0,
0.0, 2.0, 2.0,
0.0, 0.0, 3.0)
    val b  = new BidMatrixD (a)                       // a is already bidiagonal
val vt = eye (a.dim1)
val u = eye (a.dim1)
val svd = new SVD3 (b, vt, u)
println ("----------------------------------------")
println ("Test SVD3")
println ("----------------------------------------")
println ("a = " + a)
println ("b = " + b)
println ("----------------------------------------")
println ("singular values = " + svd.deflate ())
// println ("----------------------------------------")
// println ("singular val/vec = " + svd.deflateV ())
} // SVD3Test2
| NBKlepp/fda | scalation_1.3/scalation_mathstat/src/main/scala/scalation/linalgebra/SVD3.scala | Scala | mit | 29,510 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort
import java.io.{DataInputStream, File, FileInputStream, FileOutputStream}
import org.mockito.{Mock, MockitoAnnotations}
import org.mockito.Answers.RETURNS_SMART_NULLS
import org.mockito.ArgumentMatchers.any
import org.mockito.Mockito._
import org.mockito.invocation.InvocationOnMock
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.shuffle.{IndexShuffleBlockResolver, ShuffleBlockInfo}
import org.apache.spark.storage._
import org.apache.spark.util.Utils
class IndexShuffleBlockResolverSuite extends SparkFunSuite with BeforeAndAfterEach {
@Mock(answer = RETURNS_SMART_NULLS) private var blockManager: BlockManager = _
@Mock(answer = RETURNS_SMART_NULLS) private var diskBlockManager: DiskBlockManager = _
private var tempDir: File = _
private val conf: SparkConf = new SparkConf(loadDefaults = false)
override def beforeEach(): Unit = {
super.beforeEach()
tempDir = Utils.createTempDir()
MockitoAnnotations.initMocks(this)
when(blockManager.diskBlockManager).thenReturn(diskBlockManager)
when(diskBlockManager.getFile(any[BlockId])).thenAnswer(
(invocation: InvocationOnMock) => new File(tempDir, invocation.getArguments.head.toString))
when(diskBlockManager.localDirs).thenReturn(Array(tempDir))
}
override def afterEach(): Unit = {
try {
Utils.deleteRecursively(tempDir)
} finally {
super.afterEach()
}
}
test("commit shuffle files multiple times") {
val shuffleId = 1
val mapId = 2
val idxName = s"shuffle_${shuffleId}_${mapId}_0.index"
val resolver = new IndexShuffleBlockResolver(conf, blockManager)
val lengths = Array[Long](10, 0, 20)
val dataTmp = File.createTempFile("shuffle", null, tempDir)
val out = new FileOutputStream(dataTmp)
Utils.tryWithSafeFinally {
out.write(new Array[Byte](30))
} {
out.close()
}
resolver.writeIndexFileAndCommit(shuffleId, mapId, lengths, dataTmp)
val indexFile = new File(tempDir.getAbsolutePath, idxName)
val dataFile = resolver.getDataFile(shuffleId, mapId)
assert(indexFile.exists())
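    // The index file holds (numPartitions + 1) long offsets, 8 bytes each, the first of which is always 0.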
assert(indexFile.length() === (lengths.length + 1) * 8)
assert(dataFile.exists())
assert(dataFile.length() === 30)
assert(!dataTmp.exists())
val lengths2 = new Array[Long](3)
val dataTmp2 = File.createTempFile("shuffle", null, tempDir)
val out2 = new FileOutputStream(dataTmp2)
Utils.tryWithSafeFinally {
out2.write(Array[Byte](1))
out2.write(new Array[Byte](29))
} {
out2.close()
}
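    // Committing again for the same map output should keep the first commit: the resolver copies the
    // already-committed lengths back into `lengths2` and discards dataTmp2, as the assertions below verify.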
resolver.writeIndexFileAndCommit(shuffleId, mapId, lengths2, dataTmp2)
assert(indexFile.length() === (lengths.length + 1) * 8)
assert(lengths2.toSeq === lengths.toSeq)
assert(dataFile.exists())
assert(dataFile.length() === 30)
assert(!dataTmp2.exists())
// The dataFile should be the previous one
val firstByte = new Array[Byte](1)
val dataIn = new FileInputStream(dataFile)
Utils.tryWithSafeFinally {
dataIn.read(firstByte)
} {
dataIn.close()
}
assert(firstByte(0) === 0)
// The index file should not change
val indexIn = new DataInputStream(new FileInputStream(indexFile))
Utils.tryWithSafeFinally {
indexIn.readLong() // the first offset is always 0
assert(indexIn.readLong() === 10, "The index file should not change")
} {
indexIn.close()
}
// remove data file
dataFile.delete()
val lengths3 = Array[Long](7, 10, 15, 3)
val dataTmp3 = File.createTempFile("shuffle", null, tempDir)
val out3 = new FileOutputStream(dataTmp3)
Utils.tryWithSafeFinally {
out3.write(Array[Byte](2))
out3.write(new Array[Byte](34))
} {
out3.close()
}
resolver.writeIndexFileAndCommit(shuffleId, mapId, lengths3, dataTmp3)
assert(indexFile.length() === (lengths3.length + 1) * 8)
assert(lengths3.toSeq != lengths.toSeq)
assert(dataFile.exists())
assert(dataFile.length() === 35)
assert(!dataTmp3.exists())
// The dataFile should be the new one, since we deleted the dataFile from the first attempt
val dataIn2 = new FileInputStream(dataFile)
Utils.tryWithSafeFinally {
dataIn2.read(firstByte)
} {
dataIn2.close()
}
assert(firstByte(0) === 2)
// The index file should be updated, since we deleted the dataFile from the first attempt
val indexIn2 = new DataInputStream(new FileInputStream(indexFile))
Utils.tryWithSafeFinally {
indexIn2.readLong() // the first offset is always 0
assert(indexIn2.readLong() === 7, "The index file should be updated")
} {
indexIn2.close()
}
}
test("SPARK-33198 getMigrationBlocks should not fail at missing files") {
val resolver = new IndexShuffleBlockResolver(conf, blockManager)
assert(resolver.getMigrationBlocks(ShuffleBlockInfo(Int.MaxValue, Long.MaxValue)).isEmpty)
}
}
| shuangshuangwang/spark | core/src/test/scala/org/apache/spark/shuffle/sort/IndexShuffleBlockResolverSuite.scala | Scala | apache-2.0 | 5,788 |
package shapes.untouchable
sealed trait Shape
final case class Square(a: Double) extends Shape
final case class Circle(r: Double) extends Shape
| tupol/scala-patterns-tc-pml | src/main/scala/shapes/untouchable/Shape.scala | Scala | apache-2.0 | 148 |
package com.typesafe.slick.docs
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{Future, Await}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
//#imports
import slick.jdbc.H2Profile.api._
//#imports
import slick.jdbc.GetResult
import slick.jdbc.JdbcProfile
import slick.basic.{DatabaseConfig, StaticDatabaseConfig}
/** A simple example that uses plain SQL queries against an in-memory
* H2 database. The example data comes from Oracle's JDBC tutorial at
* http://docs.oracle.com/javase/tutorial/jdbc/basics/tables.html. */
object PlainSQL extends App {
var out = new ArrayBuffer[String]()
def println(s: String): Unit = out += s
//#getresult
// Case classes for our data
case class Supplier(id: Int, name: String, street: String, city: String, state: String, zip: String)
case class Coffee(name: String, supID: Int, price: Double, sales: Int, total: Int)
// Result set getters
implicit val getSupplierResult = GetResult(r => Supplier(r.nextInt(), r.nextString(), r.nextString(),
r.nextString(), r.nextString(), r.nextString()))
implicit val getCoffeeResult = GetResult(r => Coffee(r.<<, r.<<, r.<<, r.<<, r.<<))
//#getresult
val db = Database.forConfig("h2mem1")
try {
val f: Future[_] = {
val a: DBIO[Unit] = DBIO.seq(
createSuppliers,
createCoffees,
insertSuppliers,
insertCoffees,
printAll,
printParameterized,
coffeeByName("Colombian").map { s =>
println(s"Coffee Colombian: $s")
},
deleteCoffee("Colombian").map { rows =>
println(s"Deleted $rows rows")
},
coffeeByName("Colombian").map { s =>
println(s"Coffee Colombian: $s")
}
)
db.run(a)
}
Await.result(f, Duration.Inf)
} finally db.close
out.foreach(Console.out.println)
//#sqlu
def createCoffees: DBIO[Int] =
sqlu"""create table coffees(
name varchar not null,
sup_id int not null,
price double not null,
sales int not null,
total int not null,
foreign key(sup_id) references suppliers(id))"""
def createSuppliers: DBIO[Int] =
sqlu"""create table suppliers(
id int not null primary key,
name varchar not null,
street varchar not null,
city varchar not null,
state varchar not null,
zip varchar not null)"""
def insertSuppliers: DBIO[Unit] = DBIO.seq(
// Insert some suppliers
sqlu"insert into suppliers values(101, 'Acme, Inc.', '99 Market Street', 'Groundsville', 'CA', '95199')",
sqlu"insert into suppliers values(49, 'Superior Coffee', '1 Party Place', 'Mendocino', 'CA', '95460')",
sqlu"insert into suppliers values(150, 'The High Ground', '100 Coffee Lane', 'Meadows', 'CA', '93966')"
)
//#sqlu
def insertCoffees: DBIO[Unit] = {
//#bind
def insert(c: Coffee): DBIO[Int] =
sqlu"insert into coffees values (${c.name}, ${c.supID}, ${c.price}, ${c.sales}, ${c.total})"
//#bind
// Insert some coffees. The SQL statement is the same for all calls:
// "insert into coffees values (?, ?, ?, ?, ?)"
//#sequence
val inserts: Seq[DBIO[Int]] = Seq(
Coffee("Colombian", 101, 7.99, 0, 0),
Coffee("French_Roast", 49, 8.99, 0, 0),
Coffee("Espresso", 150, 9.99, 0, 0),
Coffee("Colombian_Decaf", 101, 8.99, 0, 0),
Coffee("French_Roast_Decaf", 49, 9.99, 0, 0)
).map(insert)
val combined: DBIO[Seq[Int]] = DBIO.sequence(inserts)
combined.map(_.sum)
//#sequence
}
def printAll: DBIO[Unit] =
// Iterate through all coffees and output them
sql"select * from coffees".as[Coffee].map { cs =>
println("Coffees:")
for(c <- cs)
println("* " + c.name + "\\t" + c.supID + "\\t" + c.price + "\\t" + c.sales + "\\t" + c.total)
}
def namesByPrice(price: Double): DBIO[Seq[(String, String)]] = {
//#sql
sql"""select c.name, s.name
from coffees c, suppliers s
where c.price < $price and s.id = c.sup_id""".as[(String, String)]
//#sql
}
def supplierById(id: Int): DBIO[Seq[Supplier]] =
sql"select * from suppliers where id = $id".as[Supplier]
def printParameterized: DBIO[Unit] = {
// Perform a join to retrieve coffee names and supplier names for
// all coffees costing less than $9.00
namesByPrice(9.0).flatMap { l2 =>
println("Parameterized StaticQuery:")
for (t <- l2)
println("* " + t._1 + " supplied by " + t._2)
supplierById(49).map(s => println(s"Supplier #49: $s"))
}
}
def coffeeByName(name: String): DBIO[Option[Coffee]] = {
//#literal
val table = "coffees"
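    // Note: #$ splices `table` into the statement literally (no bind parameter), so it must be a
    // trusted value; $name below is still passed as a bind parameter.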
sql"select * from #$table where name = $name".as[Coffee].headOption
//#literal
}
def deleteCoffee(name: String): DBIO[Int] =
sqlu"delete from coffees where name = $name"
}
/* Can't test this properly because reference.conf is not on the compiler class path when it
doesn't come from a JAR:
@StaticDatabaseConfig("file:common-test-resources/application.conf#tsql")
object TypedSQL extends App {
//#staticdatabaseconfig
val dc = DatabaseConfig.forAnnotation[JdbcProfile]
import dc.profile.api._
//#staticdatabaseconfig
//#tsql
def getSuppliers(id: Int): DBIO[Seq[(Int, String, String, String, String, String)]] =
tsql"select * from suppliers where id > $id"
//#tsql
//#staticdatabaseconfig
val db = dc.db
//#staticdatabaseconfig
try {
val a: DBIO[Unit] =
getSuppliers(50).map { s =>
println("All suppliers > 50:")
s.foreach(println)
}
val f: Future[Unit] = db.run(a)
Await.result(f, Duration.Inf)
} finally db.close
}
*/
| slick/slick | doc/code/PlainSQL.scala | Scala | bsd-2-clause | 5,715 |
trait X { def foo: PartialFunction[Int, Int] }
trait Y extends X {
// Inferred type was AbstractPartialFunction[Int, Int] with Serializable
abstract override def foo = { case i => super.foo(i) * 2 }
}
trait Z extends X {
// ditto
abstract override def foo = { case i => super.foo(i) + 3 }
}
trait Comb extends Y with Z {
// ... which led to a type error here.
abstract override def foo: PartialFunction[Int, Int] = { case i => super.foo(i) - 2 }
}
| som-snytt/dotty | tests/pos/t6575a.scala | Scala | apache-2.0 | 462 |
package domain.user
import domain.{ Entity, Enum, EnumEntry, ValueObject }
/**
 * User information.
 * @param userId user ID
 * @param loginId login ID
 * @param name user name
 * @param authority user authority
 * @param lockVersion lockVersion
*/
case class User(
userId: Option[UserId],
loginId: String,
name: String,
authority: UserAuthority,
lockVersion: Long = 1L
) extends Entity[User] {
/**
* @inheritdoc
*/
override def sameIdentityAs(other: User): Boolean = (for {
thisId <- this.userId
otherId <- other.userId
} yield thisId.sameValueAs(otherId)) getOrElse false
  /** Returns the user's authorities. */
def authentications: Seq[UserAuthority] = Seq(authority)
}
// Authorities that can be granted to a user
sealed abstract class UserAuthority(override val code: String) extends EnumEntry
object UserAuthority extends Enum[UserAuthority] {
  /** Application administrator. */
case object ApplicationAdministrator extends UserAuthority("1")
  /** Normal user. */
case object Normal extends UserAuthority("0")
protected val values = Seq(ApplicationAdministrator, Normal)
}
| nemuzuka/vss-kanban | src/main/scala/domain/user/User.scala | Scala | mit | 1,115 |
/*
* Copyright 2013 - 2020 Outworkers Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.outworkers.phantom.finagle.query.prepared
import com.outworkers.phantom.PhantomSuite
import com.outworkers.phantom.builder.query.prepared.ExecutablePreparedQuery
import com.outworkers.phantom.finagle._
import com.outworkers.phantom.tables.Recipe
import com.outworkers.util.samplers._
class BatchablePreparedInsertQueryTest extends PhantomSuite with TwitterFutures {
override def beforeAll(): Unit = {
super.beforeAll()
val _ = database.recipes.createSchema()
}
it should "serialize an prepared batch query" in {
val sample1 = gen[Recipe]
val sample2 = gen[Recipe]
val query = database.recipes.insert
.p_value(_.uid, ?)
.p_value(_.url, ?)
.p_value(_.servings, ?)
.p_value(_.ingredients, ?)
.p_value(_.description, ?)
.p_value(_.lastcheckedat, ?)
.p_value(_.props, ?)
.prepare()
def storePrepared(recipe: Recipe): ExecutablePreparedQuery = query.bind(
recipe.uid,
recipe.url,
recipe.servings,
recipe.ingredients,
recipe.description,
recipe.lastCheckedAt,
recipe.props
)
val exec1 = storePrepared(sample1)
val exec2 = storePrepared(sample2)
val chain = for {
truncate <- database.recipes.truncate.future()
store <- Batch.unlogged.add(exec1, exec2).future()
get <- database.recipes.select.fetch()
} yield get
whenReady(chain) { res =>
res should contain theSameElementsAs Seq(sample1, sample2)
}
}
}
| outworkers/phantom | phantom-finagle/src/test/scala/com/outworkers/phantom/finagle/query/prepared/BatchablePreparedInsertQueryTest.scala | Scala | apache-2.0 | 2,098 |
package tastytest
object AnythingIsPossible {
class Box[A](val a: A)
class Class extends Box({ class X { final val x = Map(("", 3)) } ; val foo = new X(); foo.x: foo.x.type })
class Lambda extends Box((x: Int) => (y: String) => y.length == x)
object Zero {
def unapply[A: Numeric](a: A): Boolean = implicitly[Numeric[A]].zero == a
}
type IntSpecial = Int @unchecked
class Match extends Box((0: @unchecked) match {
case n if n > 50 => "big"
case 26 | 24 => "26 | 24"
case a @ _ if a > 0 => "small"
case Zero() => "zero"
case -1 => throw new IllegalArgumentException("-1")
case _: IntSpecial => "negative"
})
class While extends Box(while (false) {})
class Try extends Box(try throw new IllegalArgumentException("nothing") catch { case e: Exception => e.getMessage() })
class Assign extends Box({ var t = 0; t = t + 1 })
trait SomeSuper {
def foo: Double = 23.451
}
object Within extends SomeSuper {
class Super extends Box(super[SomeSuper].foo)
}
}
| lrytz/scala | test/tasty/run/src-3/tastytest/AnythingIsPossible.scala | Scala | apache-2.0 | 1,058 |
package com.twitter.finatra.example
import java.util.concurrent.atomic.AtomicInteger
import javax.inject.Singleton
@Singleton
class TestQueue extends Queue(Integer.MAX_VALUE) {
val addCounter: AtomicInteger = new AtomicInteger(0)
override def add(value: String): Boolean = {
addCounter.incrementAndGet()
super.add(value)
}
}
| twitter/finatra | examples/injectable-twitter-server/scala/src/test/scala/com/twitter/finatra/example/TestQueue.scala | Scala | apache-2.0 | 342 |
/*
* Copyright 2017-2020 47 Degrees, LLC. <http://www.47deg.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package higherkindness.mu.format
trait Deserialiser[A] {
def deserialise(a: Array[Byte]): A
}
| frees-io/freestyle-rpc | modules/kafka/src/main/scala/higherkindness/mu/format/Deserialiser.scala | Scala | apache-2.0 | 726 |
package scalaz.stream.mongodb.query
import com.mongodb.{BasicDBObject, DBObject}
import collection.JavaConverters._
/**
* Simple key-value query to mongo
*/
case class BasicQuery(o: DBObject = new BasicDBObject) {
/**
   * Normalizes the query: when there is no explicit operator between predicates, `$and` is injected.
   * Returns a tuple of the top-level operator and its predicate documents.
*/
def normalize: (String, Iterable[DBObject]) = o.keySet().asScala.find(_ match {
case "$and" | "$or" | "$nor" => true
case _ => false
}) match {
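    // Illustrative example (not part of the original source): {"a": 1, "b": 2} has no top-level
    // operator, so it takes the None branch and becomes ("$and", [{"a": 1}, {"b": 2}]), while
    // {"$or": [p1, p2]} is returned as ("$or", [p1, p2]).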
case Some(op) => o.get(op) match {
case l: java.util.List[DBObject@unchecked] => {
(op, l.asScala)
}
case o => sys.error(s"Unexpected content of $op : $o ")
}
case None =>
("$and", o.keySet().asScala.map(k => new BasicDBObject().append(k, o.get(k))))
}
}
/**
 * Helper to build a BasicQuery when there are multiple conditions on the same key.
 * In that case we must enforce `$and` between the predicates.
*/
object BasicQuery {
def apply[A](pairs: QueryPair[A]*): BasicQuery = {
val o = new BasicDBObject()
if (pairs.map(_.key).distinct.size != pairs.size) {
      // duplicate keys indicate multiple conditions on the same key, so we have to force $and here
o.append("$and", pairs.map { case qp: QueryPair[_] => qp.append(new BasicDBObject()) }.asJava)
} else {
pairs.foreach { case qp: QueryPair[_] => qp.append(o) }
}
BasicQuery(o)
}
}
| Spinoco/scalaz-stream-mongodb | core/src/main/scala/scalaz/stream/mongodb/query/BasicQuery.scala | Scala | mit | 1,409 |
package filodb.query.exec.aggregator
import filodb.core.metadata.Column.ColumnType
import filodb.core.query._
import filodb.memory.format.RowReader
/**
  * Map: Every sample is mapped to three values: (a) a stddev of "0", (b) the value itself,
  *      and (c) a count of "1"
  * ReduceAggregate: Similar to reduceAggregate for stdvar, since stddev = sqrt(stdvar)
  * ReduceMappedRow: Same as ReduceAggregate
  * Present: The current stddev is presented. The mean and count values are dropped from the presentation
*/
object StddevRowAggregator extends RowAggregator {
class StddevHolder(var timestamp: Long = 0L,
var stddev: Double = Double.NaN,
var mean: Double = Double.NaN,
var count: Long = 0) extends AggregateHolder {
val row = new StdValAggTransientRow()
def toRowReader: MutableRowReader = {
row.setLong(0, timestamp)
row.setDouble(1, stddev)
row.setDouble(2, mean)
row.setLong(3, count)
row
}
def resetToZero(): Unit = { count = 0; mean = Double.NaN; stddev = Double.NaN }
}
type AggHolderType = StddevHolder
def zero: StddevHolder = new StddevHolder()
def newRowToMapInto: MutableRowReader = new StdValAggTransientRow()
def map(rvk: RangeVectorKey, item: RowReader, mapInto: MutableRowReader): RowReader = {
mapInto.setLong(0, item.getLong(0))
mapInto.setDouble(1, 0L)
mapInto.setDouble(2, item.getDouble(1))
mapInto.setLong(3, if (item.getDouble(1).isNaN) 0L else 1L)
mapInto
}
def reduceAggregate(acc: StddevHolder, aggRes: RowReader): StddevHolder = {
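    // Sketch of the algebra used below (comment added, not in the original): each side is summarized by
    // (stddev, mean, count), so its mean of squares is stddev^2 + mean^2. The combined mean of squares is
    //   (countAcc * (stddevAcc^2 + meanAcc^2) + countAgg * (stddevAgg^2 + meanAgg^2)) / (countAcc + countAgg)
    // and the pooled stddev is sqrt(combined mean of squares - newMean^2).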
acc.timestamp = aggRes.getLong(0)
if (!aggRes.getDouble(1).isNaN && !aggRes.getDouble(2).isNaN) {
if (acc.mean.isNaN) acc.mean = 0d
if (acc.stddev.isNaN) acc.stddev = 0d
val aggStddev = aggRes.getDouble(1)
val aggMean = aggRes.getDouble(2)
val aggCount = aggRes.getLong(3)
val newMean = (acc.mean * acc.count + aggMean * aggCount) / (acc.count + aggCount)
val accSquareSum = (Math.pow(acc.stddev, 2) + math.pow(acc.mean, 2)) * acc.count
val aggSquareSum = (Math.pow(aggStddev, 2) + math.pow(aggMean, 2)) * aggCount
val newStddev = Math.pow((accSquareSum + aggSquareSum) / (acc.count + aggCount) - math.pow(newMean, 2), 0.5)
acc.stddev = newStddev
acc.mean = newMean
acc.count += aggCount
}
acc
}
  // ignore the last two columns; we rely on the schema change
def present(aggRangeVector: RangeVector, limit: Int, rangeParams: RangeParams): Seq[RangeVector] = Seq(aggRangeVector)
def reductionSchema(source: ResultSchema): ResultSchema = {
source.copy(source.columns :+ ColumnInfo("mean", ColumnType.DoubleColumn)
:+ ColumnInfo("count", ColumnType.LongColumn))
}
def presentationSchema(reductionSchema: ResultSchema): ResultSchema = {
    // drop the last two columns (mean and count)
reductionSchema.copy(reductionSchema.columns.filterNot(c => (c.name.equals("mean") || c.name.equals("count"))))
}
} | filodb/FiloDB | query/src/main/scala/filodb/query/exec/aggregator/StddevRowAggregator.scala | Scala | apache-2.0 | 3,015 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.framework.operator
import org.apache.spark.streaming.Seconds
import org.apache.spark.streaming.dstream.DStream
abstract class AbstractOperator extends Serializable {
def process(stream: DStream[Event])
def windowStream[U](stream: DStream[U],
window: (Option[Long], Option[Long])) = {
window match {
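      // (Some(length), Some(slide)) -> window of `length` seconds sliding every `slide` seconds;
      // (Some(length), None)        -> window of `length` seconds with the stream's default slide interval;
      // anything else               -> the stream is returned unchanged.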
case (Some(a), Some(b)) =>
stream.window(Seconds(a), Seconds(b))
case (Some(a), None) =>
stream.window(Seconds(a))
case _ =>
stream
}
}
}
| linzhe/matrix | src/main/scala/org/apache/spark/framework/operator/AbstractOperator.scala | Scala | apache-2.0 | 1,322 |