code | repo_name | path | language | license | size
---|---|---|---|---|---|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.cluster
import kafka.log.Log
import kafka.utils.Logging
import kafka.server.{LogOffsetMetadata, LogReadResult}
import kafka.common.KafkaException
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.OffsetOutOfRangeException
import org.apache.kafka.common.utils.Time
class Replica(val brokerId: Int,
val topicPartition: TopicPartition,
time: Time = Time.SYSTEM,
initialHighWatermarkValue: Long = 0L,
@volatile var log: Option[Log] = None) extends Logging {
// the high watermark offset value; for non-leader replicas only its message offset is kept
@volatile private[this] var highWatermarkMetadata = new LogOffsetMetadata(initialHighWatermarkValue)
// the log end offset value, kept in all replicas;
// for local replica it is the log's end offset, for remote replicas its value is only updated by follower fetch
@volatile private[this] var logEndOffsetMetadata = LogOffsetMetadata.UnknownOffsetMetadata
// the log start offset value, kept in all replicas;
// for local replica it is the log's start offset, for remote replicas its value is only updated by follower fetch
@volatile private[this] var _logStartOffset = Log.UnknownLogStartOffset
// The log end offset value at the time the leader received the last FetchRequest from this follower
// This is used to determine the lastCaughtUpTimeMs of the follower
@volatile private[this] var lastFetchLeaderLogEndOffset = 0L
// The time when the leader received the last FetchRequest from this follower
// This is used to determine the lastCaughtUpTimeMs of the follower
@volatile private[this] var lastFetchTimeMs = 0L
// lastCaughtUpTimeMs is the largest time t such that the offset of most recent FetchRequest from this follower >=
// the LEO of leader at time t. This is used to determine the lag of this follower and ISR of this partition.
@volatile private[this] var _lastCaughtUpTimeMs = 0L
def isLocal: Boolean = log.isDefined
def lastCaughtUpTimeMs = _lastCaughtUpTimeMs
val epochs = log.map(_.leaderEpochCache)
info(s"Replica loaded for partition $topicPartition with initial high watermark $initialHighWatermarkValue")
log.foreach(_.onHighWatermarkIncremented(initialHighWatermarkValue))
/*
* If the FetchRequest reads up to the log end offset of the leader when the current fetch request is received,
* set `lastCaughtUpTimeMs` to the time when the current fetch request was received.
*
* Else if the FetchRequest reads up to the log end offset of the leader when the previous fetch request was received,
* set `lastCaughtUpTimeMs` to the time when the previous fetch request was received.
*
* This is needed to enforce the semantics of ISR, i.e. a replica is in ISR if and only if it lags behind leader's LEO
* by at most `replicaLagTimeMaxMs`. These semantics allow a follower to be added to the ISR even if the offset of its
* fetch request is always smaller than the leader's LEO, which can happen if small produce requests are received at
* high frequency.
*/
def updateLogReadResult(logReadResult: LogReadResult) {
if (logReadResult.info.fetchOffsetMetadata.messageOffset >= logReadResult.leaderLogEndOffset)
_lastCaughtUpTimeMs = math.max(_lastCaughtUpTimeMs, logReadResult.fetchTimeMs)
else if (logReadResult.info.fetchOffsetMetadata.messageOffset >= lastFetchLeaderLogEndOffset)
_lastCaughtUpTimeMs = math.max(_lastCaughtUpTimeMs, lastFetchTimeMs)
logStartOffset = logReadResult.followerLogStartOffset
logEndOffset = logReadResult.info.fetchOffsetMetadata
lastFetchLeaderLogEndOffset = logReadResult.leaderLogEndOffset
lastFetchTimeMs = logReadResult.fetchTimeMs
}
def resetLastCaughtUpTime(curLeaderLogEndOffset: Long, curTimeMs: Long, lastCaughtUpTimeMs: Long) {
lastFetchLeaderLogEndOffset = curLeaderLogEndOffset
lastFetchTimeMs = curTimeMs
_lastCaughtUpTimeMs = lastCaughtUpTimeMs
}
private def logEndOffset_=(newLogEndOffset: LogOffsetMetadata) {
if (isLocal) {
throw new KafkaException(s"Should not set log end offset on partition $topicPartition's local replica $brokerId")
} else {
logEndOffsetMetadata = newLogEndOffset
trace(s"Setting log end offset for replica $brokerId for partition $topicPartition to [$logEndOffsetMetadata]")
}
}
def logEndOffset: LogOffsetMetadata =
if (isLocal)
log.get.logEndOffsetMetadata
else
logEndOffsetMetadata
/**
* Increment the log start offset if the new offset is greater than the previous log start offset. The replica
* must be local and the new log start offset must be lower than the current high watermark.
*/
def maybeIncrementLogStartOffset(newLogStartOffset: Long) {
if (isLocal) {
if (newLogStartOffset > highWatermark.messageOffset)
throw new OffsetOutOfRangeException(s"Cannot increment the log start offset to $newLogStartOffset of partition $topicPartition " +
s"since it is larger than the high watermark ${highWatermark.messageOffset}")
log.get.maybeIncrementLogStartOffset(newLogStartOffset)
} else {
throw new KafkaException(s"Should not try to delete records on partition $topicPartition's non-local replica $brokerId")
}
}
private def logStartOffset_=(newLogStartOffset: Long) {
if (isLocal) {
throw new KafkaException(s"Should not set log start offset on partition $topicPartition's local replica $brokerId " +
s"without attempting to delete records of the log")
} else {
_logStartOffset = newLogStartOffset
trace(s"Setting log start offset for remote replica $brokerId for partition $topicPartition to [$newLogStartOffset]")
}
}
def logStartOffset: Long =
if (isLocal)
log.get.logStartOffset
else
_logStartOffset
def highWatermark_=(newHighWatermark: LogOffsetMetadata) {
if (isLocal) {
if (newHighWatermark.messageOffset < 0)
throw new IllegalArgumentException("High watermark offset should be non-negative")
highWatermarkMetadata = newHighWatermark
log.foreach(_.onHighWatermarkIncremented(newHighWatermark.messageOffset))
trace(s"Setting high watermark for replica $brokerId partition $topicPartition to [$newHighWatermark]")
} else {
throw new KafkaException(s"Should not set high watermark on partition $topicPartition's non-local replica $brokerId")
}
}
def highWatermark: LogOffsetMetadata = highWatermarkMetadata
/**
* The last stable offset (LSO) is defined as the first offset such that all lower offsets have been "decided."
* Non-transactional messages are considered decided immediately, but transactional messages are only decided when
* the corresponding COMMIT or ABORT marker is written. This implies that the last stable offset will be equal
* to the high watermark if there are no transactional messages in the log. Note also that the LSO cannot advance
* beyond the high watermark.
*/
def lastStableOffset: LogOffsetMetadata = {
log.map { log =>
log.firstUnstableOffset match {
case Some(offsetMetadata) if offsetMetadata.messageOffset < highWatermark.messageOffset => offsetMetadata
case _ => highWatermark
}
}.getOrElse(throw new KafkaException(s"Cannot fetch last stable offset on partition $topicPartition's " +
s"non-local replica $brokerId"))
}
/*
* Convert hw to local offset metadata by reading the log at the hw offset.
* If the hw offset is out of range, return the first offset of the first log segment as the offset metadata.
*/
def convertHWToLocalOffsetMetadata() {
if (isLocal) {
highWatermarkMetadata = log.get.convertToOffsetMetadata(highWatermarkMetadata.messageOffset).getOrElse {
log.get.convertToOffsetMetadata(logStartOffset).getOrElse {
val firstSegmentOffset = log.get.logSegments.head.baseOffset
new LogOffsetMetadata(firstSegmentOffset, firstSegmentOffset, 0)
}
}
} else {
throw new KafkaException(s"Should not construct complete high watermark on partition $topicPartition's non-local replica $brokerId")
}
}
override def equals(that: Any): Boolean = that match {
case other: Replica => brokerId == other.brokerId && topicPartition == other.topicPartition
case _ => false
}
override def hashCode: Int = 31 + topicPartition.hashCode + 17 * brokerId
override def toString: String = {
val replicaString = new StringBuilder
replicaString.append("ReplicaId: " + brokerId)
replicaString.append("; Topic: " + topicPartition.topic)
replicaString.append("; Partition: " + topicPartition.partition)
replicaString.append("; isLocal: " + isLocal)
replicaString.append("; lastCaughtUpTimeMs: " + lastCaughtUpTimeMs)
if (isLocal) {
replicaString.append("; Highwatermark: " + highWatermark)
replicaString.append("; LastStableOffset: " + lastStableOffset)
}
replicaString.toString
}
}
| sebadiaz/kafka | core/src/main/scala/kafka/cluster/Replica.scala | Scala | apache-2.0 | 9,893 |
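// Illustrative sketch, not part of the original source: the `lastCaughtUpTimeMs` rule
// documented above `updateLogReadResult`, reduced to plain Scala with hypothetical names
// (FollowerState/onFetch are stand-ins, not the real Kafka types). A follower is caught up
// to the current fetch time if it read up to the current leader LEO, or to the previous
// fetch time if it only read up to the LEO recorded at the previous fetch.
object CaughtUpRuleSketch {
  final case class FollowerState(lastCaughtUpTimeMs: Long = 0L,
                                 lastFetchLeaderLogEndOffset: Long = 0L,
                                 lastFetchTimeMs: Long = 0L)
  def onFetch(s: FollowerState, fetchOffset: Long, leaderLogEndOffset: Long, fetchTimeMs: Long): FollowerState = {
    val caughtUp =
      if (fetchOffset >= leaderLogEndOffset) math.max(s.lastCaughtUpTimeMs, fetchTimeMs)
      else if (fetchOffset >= s.lastFetchLeaderLogEndOffset) math.max(s.lastCaughtUpTimeMs, s.lastFetchTimeMs)
      else s.lastCaughtUpTimeMs
    FollowerState(caughtUp, leaderLogEndOffset, fetchTimeMs)
  }
  def main(args: Array[String]): Unit = {
    val s1 = onFetch(FollowerState(), fetchOffset = 100, leaderLogEndOffset = 100, fetchTimeMs = 1000)
    val s2 = onFetch(s1, fetchOffset = 100, leaderLogEndOffset = 150, fetchTimeMs = 2000)
    println(s1.lastCaughtUpTimeMs) // 1000: the follower read up to the current leader LEO
    println(s2.lastCaughtUpTimeMs) // 1000: it only reached the LEO of the previous fetch
  }
}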
package ml.combust.mleap.xgboost.runtime
import biz.k11i.xgboost.util.FVec
import ml.combust.mleap.core.types.NodeShape
import ml.combust.mleap.runtime.function.UserDefinedFunction
import ml.combust.mleap.tensor.Tensor
import ml.combust.mleap.runtime.frame.{MultiTransformer, Row, Transformer}
import ml.combust.mleap.core.util.VectorConverters._
import XgbConverters._
case class XGBoostPredictorClassification(
override val uid: String = Transformer.uniqueName("xgboost.classification"),
override val shape: NodeShape,
override val model: XGBoostPredictorClassificationModel) extends MultiTransformer {
override val exec: UserDefinedFunction = {
// Since the Predictor is our performant implementation, we only compute probability for performance reasons.
val probability = shape.getOutput("probability").map {
_ => (data: FVec) => Some(model.predictProbabilities(data): Tensor[Double])
}.getOrElse((_: FVec) => None)
val f = (features: Tensor[Double]) => {
val data: FVec = features.asXGBPredictor
val rowData = Seq(probability(data).get)
Row(rowData: _*)
}
UserDefinedFunction(f, outputSchema, inputSchema)
}
}
| combust/mleap | mleap-xgboost-runtime/src/main/scala/ml/combust/mleap/xgboost/runtime/XGBoostPredictorClassification.scala | Scala | apache-2.0 | 1,273 |
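// Illustrative sketch, not part of the original source: the transformer above only wires up
// probability computation when the NodeShape declares a "probability" output. The same
// Option-based pattern in plain Scala, with hypothetical names and no MLeap/XGBoost dependency.
object OptionalOutputSketch {
  def makeScorer(probabilityRequested: Boolean)(predict: Double => Double): Double => Option[Double] =
    if (probabilityRequested) (x: Double) => Some(predict(x))
    else (_: Double) => None
  def main(args: Array[String]): Unit = {
    val sigmoid = (x: Double) => 1.0 / (1.0 + math.exp(-x))
    println(makeScorer(probabilityRequested = true)(sigmoid)(0.3))  // Some(0.574...)
    println(makeScorer(probabilityRequested = false)(sigmoid)(0.3)) // None
  }
}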
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ly.stealth.mesos.exhibitor
import com.google.protobuf.ByteString
import org.apache.mesos.Protos
import org.apache.mesos.Protos._
import play.api.libs.functional.syntax._
import play.api.libs.json.{JsValue, Json, Writes, _}
import scala.collection.JavaConversions._
import scala.collection.mutable
case class TaskConfig(exhibitorConfig: mutable.Map[String, String], sharedConfigOverride: mutable.Map[String, String], id: String, var hostname: String = "", var sharedConfigChangeBackoff: Long = 10000, var cpus: Double = 0.2, var mem: Double = 256)
object TaskConfig {
implicit val reader = (
(__ \ 'exhibitorConfig).read[Map[String, String]].map(m => mutable.Map(m.toSeq: _*)) and
(__ \ 'sharedConfigOverride).read[Map[String, String]].map(m => mutable.Map(m.toSeq: _*)) and
(__ \ 'id).read[String] and
(__ \ 'hostname).read[String] and
(__ \ 'sharedConfigChangeBackoff).read[Long] and
(__ \ 'cpu).read[Double] and
(__ \ 'mem).read[Double])(TaskConfig.apply _)
implicit val writer = new Writes[TaskConfig] {
def writes(tc: TaskConfig): JsValue = {
Json.obj(
"exhibitorConfig" -> tc.exhibitorConfig.toMap[String, String],
"sharedConfigOverride" -> tc.sharedConfigOverride.toMap[String, String],
"id" -> tc.id,
"hostname" -> tc.hostname,
"cpu" -> tc.cpus,
"mem" -> tc.mem,
"sharedConfigChangeBackoff" -> tc.sharedConfigChangeBackoff
)
}
}
}
case class ExhibitorServer(id: String) {
private[exhibitor] var task: ExhibitorServer.Task = null
val config = TaskConfig(new mutable.HashMap[String, String](), new mutable.HashMap[String, String](), id)
private[exhibitor] val constraints: mutable.Map[String, Constraint] = new mutable.HashMap[String, Constraint]
private[exhibitor] var state: ExhibitorServer.State = ExhibitorServer.Added
def createTask(offer: Offer): TaskInfo = {
val port = getPort(offer).getOrElse(throw new IllegalStateException("No suitable port"))
val id = s"exhibitor-${this.id}-${offer.getHostname}-$port"
this.config.exhibitorConfig.put("port", port.toString)
this.config.hostname = offer.getHostname
val taskId = TaskID.newBuilder().setValue(id).build
TaskInfo.newBuilder().setName(taskId.getValue).setTaskId(taskId).setSlaveId(offer.getSlaveId)
.setExecutor(newExecutor(id))
.setData(ByteString.copyFromUtf8(Json.stringify(Json.toJson(this.config))))
.addResources(Protos.Resource.newBuilder().setName("cpus").setType(Protos.Value.Type.SCALAR).setScalar(Protos.Value.Scalar.newBuilder().setValue(this.config.cpus)))
.addResources(Protos.Resource.newBuilder().setName("mem").setType(Protos.Value.Type.SCALAR).setScalar(Protos.Value.Scalar.newBuilder().setValue(this.config.mem)))
.addResources(Protos.Resource.newBuilder().setName("ports").setType(Protos.Value.Type.RANGES).setRanges(
Protos.Value.Ranges.newBuilder().addRange(Protos.Value.Range.newBuilder().setBegin(port).setEnd(port))
)).build
}
def matches(offer: Offer, otherAttributes: String => List[String]): Option[String] = {
val offerResources = offer.getResourcesList.toList.map(res => res.getName -> res).toMap
if (getPort(offer).isEmpty) return Some("no suitable port")
offerResources.get("cpus") match {
case Some(cpusResource) => if (cpusResource.getScalar.getValue < config.cpus) return Some(s"cpus ${cpusResource.getScalar.getValue} < ${config.cpus}")
case None => return Some("no cpus")
}
offerResources.get("mem") match {
case Some(memResource) => if (memResource.getScalar.getValue < config.mem) return Some(s"mem ${memResource.getScalar.getValue} < ${config.mem}")
case None => return Some("no mem")
}
val offerAttributes = offer.getAttributesList.toList.foldLeft(Map("hostname" -> offer.getHostname)) { case (attributes, attribute) =>
if (attribute.hasText) attributes.updated(attribute.getName, attribute.getText.getValue)
else attributes
}
for ((name, constraint) <- constraints) {
offerAttributes.get(name) match {
case Some(attribute) => if (!constraint.matches(attribute, otherAttributes(name))) return Some(s"$name doesn't match $constraint")
case None => return Some(s"no $name")
}
}
None
}
private def newExecutor(id: String): ExecutorInfo = {
val cmd = s"java -cp ${HttpServer.jar.getName}${if (Config.debug) " -Ddebug" else ""} ly.stealth.mesos.exhibitor.Executor"
val commandBuilder = CommandInfo.newBuilder()
commandBuilder
.addUris(CommandInfo.URI.newBuilder().setValue(s"${Config.api}/exhibitor/" + HttpServer.exhibitorDist.getName))
.addUris(CommandInfo.URI.newBuilder().setValue(s"${Config.api}/zookeeper/" + HttpServer.zookeeperDist.getName).setExtract(true))
.addUris(CommandInfo.URI.newBuilder().setValue(s"${Config.api}/jar/" + HttpServer.jar.getName))
.setValue(cmd)
this.config.exhibitorConfig.get("s3credentials").foreach { creds =>
commandBuilder
.addUris(CommandInfo.URI.newBuilder().setValue(s"${Config.api}/s3credentials/" + creds))
}
ExecutorInfo.newBuilder()
.setExecutorId(ExecutorID.newBuilder().setValue(id))
.setCommand(commandBuilder)
.setName(s"exhibitor-$id")
.build
}
private def getPort(offer: Offer): Option[Long] = {
val ports = Util.getRangeResources(offer, "ports")
ports.headOption.map(_.getBegin)
}
def url: String = s"http://${config.hostname}:${config.exhibitorConfig("port")}"
}
object ExhibitorServer {
def idFromTaskId(taskId: String): String = {
taskId.split("-") match {
case Array(_, id, _, _) => id
case _ => throw new IllegalArgumentException(taskId)
}
}
sealed trait State
case object Unknown extends State
case object Added extends State
case object Stopped extends State
case object Staging extends State
case object Running extends State
implicit val writer = new Writes[ExhibitorServer] {
def writes(es: ExhibitorServer): JsValue = {
Json.obj(
"id" -> es.id,
"state" -> es.state.toString,
"constraints" -> Util.formatMap(es.constraints),
"config" -> es.config
)
}
}
implicit val reader = (
(__ \ 'id).read[String] and
(__ \ 'state).read[String] and
(__ \ 'constraints).read[String].map(Util.parseMap(_).mapValues(Constraint(_))) and
(__ \ 'config).read[TaskConfig])((id, state, constraints, config) => {
val server = ExhibitorServer(id)
state match {
case "Unknown" => server.state = Unknown
case "Added" => server.state = Added
case "Stopped" => server.state = Stopped
case "Staging" => server.state = Staging
case "Running" => server.state = Running
}
constraints.foreach(server.constraints += _)
config.exhibitorConfig.foreach(server.config.exhibitorConfig += _)
config.sharedConfigOverride.foreach(server.config.sharedConfigOverride += _)
server.config.cpus = config.cpus
server.config.mem = config.mem
server.config.sharedConfigChangeBackoff = config.sharedConfigChangeBackoff
server.config.hostname = config.hostname
server
})
case class Task(id: String, slaveId: String, executorId: String, attributes: Map[String, String])
}
| samklr/exhibitor-mesos-framework | src/main/scala/ly/stealth/mesos/exhibitor/ExhibitorServer.scala | Scala | apache-2.0 | 8,135 |
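// Illustrative sketch, not part of the original source: the resource half of
// `ExhibitorServer.matches` above, reduced to plain Scala so it runs without Mesos protos.
// `Need` and the offered-resource Map are hypothetical stand-ins; as in the original,
// None means the offer is acceptable and Some(reason) explains the rejection.
object OfferMatchSketch {
  final case class Need(cpus: Double, mem: Double)
  def matches(offered: Map[String, Double], need: Need): Option[String] = {
    offered.get("cpus") match {
      case Some(cpus) if cpus < need.cpus => return Some(s"cpus $cpus < ${need.cpus}")
      case None => return Some("no cpus")
      case _ =>
    }
    offered.get("mem") match {
      case Some(mem) if mem < need.mem => Some(s"mem $mem < ${need.mem}")
      case None => Some("no mem")
      case _ => None
    }
  }
  def main(args: Array[String]): Unit = {
    println(matches(Map("cpus" -> 1.0, "mem" -> 512.0), Need(0.2, 256))) // None: offer fits
    println(matches(Map("cpus" -> 0.1, "mem" -> 512.0), Need(0.2, 256))) // Some(cpus 0.1 < 0.2)
  }
}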
/*
* Copyright © 2011-2012 Sattvik Software & Technology Resources, Ltd. Co.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package com.sattvik.baitha.test
import android.content.SharedPreferences
import android.content.SharedPreferences.Editor
import org.scalatest.WordSpec
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.mock.MockitoSugar
import com.sattvik.baitha._
import com.sattvik.baitha.EnhancedPreferences.EnhancedEditor
import com.sattvik.baitha.TypedPreference._
import org.mockito.Matchers.any
import org.mockito.Mockito.{never, verify, when}
import org.mockito.ArgumentCaptor
/** Test suite for the `EnhancedPreferences` class.
*
* @author Daniel Solano Gómez */
class EnhancedPreferencesSpec
extends WordSpec with ShouldMatchers with MockitoSugar {
/** Basic fixture for the tests. */
trait Fixture {
val mockPrefs = mock[SharedPreferences]
val prefs = new EnhancedPreferences(mockPrefs)
val prefKey = "Foo"
}
/** Fixture for tests that do retrieval */
trait GetPreferenceFixture[A] extends Fixture {
def pref: TypedPreference[A]
def verifyGetFrom()
}
/** Fixture for tests that use an editor. */
trait EditorFixture extends Fixture {
val mockEditor = mock[SharedPreferences.Editor]
when(mockPrefs.edit()).thenReturn(mockEditor)
def verifyCommit() = verify(mockEditor).commit()
def verifyNoCommit() = verify(mockEditor, never()).commit()
}
/** Fixture for tests that do modification */
trait PutPreferenceFixture[A] extends Fixture with EditorFixture {
def pref: TypedPreference[A]
def value: A
def verifyPutInto()
}
/** All of the basic preference retrieval tests. */
def preferenceGetBehaviours(fixture: GetPreferenceFixture[_]) {
import fixture._
"allow checking for existence" in {
prefs.contains(pref)
verify(mockPrefs).contains(prefKey)
}
"retrieve a value out of the preferences" in {
prefs.get(pref)
verifyGetFrom()
}
"retrieve None when getOption is used and there is no preference" in {
when(mockPrefs.contains(prefKey)).thenReturn(false)
prefs.getOption(pref) should equal(None)
}
"retrieve something when getOption is used and there is a preference" in {
when(mockPrefs.contains(prefKey)).thenReturn(true)
prefs.getOption(pref) should be('defined)
}
"get the preference using an extractor" in {
val Pref = pref
prefs match {
case Pref(_) => // successful match
case _ => fail("Match failed")
}
}
}
/** All of the basic preference modification tests. */
def preferencePutBehaviours[A](newFixture: => PutPreferenceFixture[A]) {
trait PutFixture {
val fixture = newFixture
val putValue = fixture.value
val pref = fixture.pref
def performTest(editor: EnhancedPreferences.EnhancedEditor)
}
trait CommitPutIntoFixture extends PutFixture {
fixture.prefs.withEditor(performTest)
fixture.verifyPutInto()
fixture.verifyCommit()
}
trait CommitRemoveFixture extends PutFixture {
fixture.prefs.withEditor(performTest)
verify(fixture.mockEditor).remove(pref.key)
fixture.verifyCommit()
}
trait AbortFixture extends PutFixture {
fixture.prefs.withEditor(performTest)
fixture.verifyNoCommit()
}
"put in a value" in {
new CommitPutIntoFixture {
def performTest(editor: EnhancedEditor) {
editor.put(pref, putValue)
}
}
}
"remove the preference" in {
new CommitRemoveFixture {
def performTest(editor: EnhancedEditor) {
editor.remove(pref)
}
}
}
"put in a value passed in as a defined option" in {
new CommitPutIntoFixture {
def performTest(editor: EnhancedEditor) {
editor.put(pref, Some(putValue))
}
}
}
"remove the preference when given None" in {
new CommitRemoveFixture {
def performTest(editor: EnhancedEditor) {
editor.put(pref, None)
}
}
}
"put the preference if given as a value" in {
new CommitPutIntoFixture {
def performTest(editor: EnhancedEditor) {
val appliedValue = pref(putValue)
editor.put(appliedValue)
}
}
}
"not commit if aborted after a put" in {
new AbortFixture {
def performTest(editor: EnhancedEditor) {
editor.put(pref, putValue)
editor.abort()
}
}
}
"not commit if aborted before a put" in {
new AbortFixture {
def performTest(editor: EnhancedEditor) {
editor.abort()
editor.put(pref, Some(putValue))
}
}
}
}
"An EnhancedPreferences" when {
"working with a boolean preference" should {
behave like preferenceGetBehaviours(
new GetPreferenceFixture[Boolean] {
val pref = BooleanPreference(prefKey, true)
def verifyGetFrom() {
verify(mockPrefs).getBoolean(prefKey, true)
}
}
)
}
"working with a boolean preference (without a default)" should {
behave like preferenceGetBehaviours(
new GetPreferenceFixture[Boolean] {
val pref = TypedPreference[Boolean](prefKey)
def verifyGetFrom() {
verify(mockPrefs).getBoolean(prefKey, false)
}
}
)
behave like preferencePutBehaviours {
new PutPreferenceFixture[Boolean] {
val pref = TypedPreference[Boolean](prefKey)
val value = true
def verifyPutInto() {verify(mockEditor).putBoolean(prefKey, value)}
}
}
}
"working with a float preference" should {
behave like preferenceGetBehaviours(
new GetPreferenceFixture[Float] {
val pref = FloatPreference(prefKey, 1.0f)
def verifyGetFrom() {
verify(mockPrefs).getFloat(prefKey, 1.0f)
}
}
)
}
"working with a float preference (without a default)" should {
behave like preferenceGetBehaviours(
new GetPreferenceFixture[Float] {
val pref = TypedPreference[Float](prefKey)
def verifyGetFrom() {
verify(mockPrefs).getFloat(prefKey, 0.0f)
}
}
)
behave like preferencePutBehaviours {
new PutPreferenceFixture[Float] {
val pref = TypedPreference[Float](prefKey)
val value = 3.14f
def verifyPutInto() {verify(mockEditor).putFloat(prefKey, value)}
}
}
}
"working with a int preference" should {
behave like preferenceGetBehaviours(
new GetPreferenceFixture[Int] {
val pref = IntPreference(prefKey, 42)
def verifyGetFrom() {
verify(mockPrefs).getInt(prefKey, 42)
}
}
)
}
"working with a int preference (without a default)" should {
behave like preferenceGetBehaviours(
new GetPreferenceFixture[Int] {
val pref = TypedPreference[Int](prefKey)
def verifyGetFrom() {
verify(mockPrefs).getInt(prefKey, 0)
}
}
)
behave like preferencePutBehaviours {
new PutPreferenceFixture[Int] {
val pref = TypedPreference[Int](prefKey)
val value = 42
def verifyPutInto() {verify(mockEditor).putInt(prefKey, value)}
}
}
}
"working with a long preference" should {
behave like preferenceGetBehaviours(
new GetPreferenceFixture[Long] {
val pref = LongPreference(prefKey, 42L)
def verifyGetFrom() {
verify(mockPrefs).getLong(prefKey, 42L)
}
}
)
}
"working with a long preference (without a default)" should {
behave like preferenceGetBehaviours(
new GetPreferenceFixture[Long] {
val pref = TypedPreference[Long](prefKey)
def verifyGetFrom() {
verify(mockPrefs).getLong(prefKey, 0L)
}
}
)
behave like preferencePutBehaviours {
new PutPreferenceFixture[Long] {
val pref = TypedPreference[Long](prefKey)
val value = 42L
def verifyPutInto() {verify(mockEditor).putLong(prefKey, value)}
}
}
}
"working with a string preference" should {
behave like preferenceGetBehaviours(
new GetPreferenceFixture[String] {
val pref = StringPreference(prefKey, "bar")
def verifyGetFrom() {
verify(mockPrefs).getString(prefKey, "bar")
}
}
)
}
"working with a string preference (without a default)" should {
behave like preferenceGetBehaviours(
new GetPreferenceFixture[String] {
val pref = TypedPreference[String](prefKey)
def verifyGetFrom() {
verify(mockPrefs).getString(prefKey, "")
}
}
)
behave like preferencePutBehaviours {
new PutPreferenceFixture[String] {
val pref = TypedPreference[String](prefKey)
val value = "bar"
def verifyPutInto() {verify(mockEditor).putString(prefKey, "bar")}
}
}
}
"working with a complex preference type" should {
case class Complex(real: Float, imaginary: Float)
case class ComplexPreference(key: String)
extends TypedPreference[Complex] {
def getFrom(preferences: SharedPreferences) = {
Complex(
preferences.getFloat(key, 0.0f),
preferences.getFloat(key + ".imaginary", 0.0f))
}
def putInto(editor: Editor, value: Complex) {
editor.putFloat(key, value.real)
editor.putFloat(key + ".imaginary", value.imaginary)
}
}
behave like preferenceGetBehaviours(
new GetPreferenceFixture[Complex] {
val pref = ComplexPreference(prefKey)
def verifyGetFrom() {
verify(mockPrefs).getFloat(prefKey, 0.0f)
verify(mockPrefs).getFloat(prefKey + ".imaginary", 0.0f)
}
}
)
behave like preferencePutBehaviours {
new PutPreferenceFixture[Complex] {
val pref = ComplexPreference(prefKey)
val value = Complex(-1.0f, 3.0f)
def verifyPutInto() {
verify(mockEditor).putFloat(prefKey, -1.0f)
verify(mockEditor).putFloat(prefKey + ".imaginary", 3.0f)
}
}
}
"cannot use TypedReference apply method" in {
val ex = intercept[IllegalArgumentException] {
TypedPreference[Complex]("Foo")
}
ex.getMessage should include("supported")
}
}
"working with nothing in particular" should {
"commit, even in an empty block" in {
new EditorFixture {
prefs.withEditor {_ =>}
verifyCommit()
}
}
"clear and commit" in {
new EditorFixture {
prefs.withEditor {_.clear()}
verify(mockEditor).clear()
verifyCommit()
}
}
"not commit when abort is called" in {
new EditorFixture {
prefs.withEditor {_.abort()}
verifyNoCommit()
}
}
"implicit conversion works" in {
new Fixture {
import EnhancedPreferences._
mockPrefs.contains(BooleanPreference(prefKey))
}
}
"can register a listener" in {
new Fixture {
val captor = ArgumentCaptor.forClass(
classOf[SharedPreferences.OnSharedPreferenceChangeListener])
val token = prefs.onChange() { (_, _) => }
verify(mockPrefs).registerOnSharedPreferenceChangeListener(
captor.capture())
token should be theSameInstanceAs (captor.getValue)
}
}
"can unregister a listener" in {
new Fixture {
val token = prefs.onChange() { (_, _) => }
prefs.unregisterOnChange(token)
verify(mockPrefs).unregisterOnSharedPreferenceChangeListener(
token)
}
}
"listeners do filtering" in {
new Fixture {
val pref = TypedPreference[Int](prefKey)
val token = prefs.onChange(pref) { (tpref, value) =>
val OkKey = prefKey
tpref.key match {
case OkKey => // expected
case _ => fail("%s should not have been received".format(tpref))
}
}
token.onSharedPreferenceChanged(null, "FOOBAR")
token.onSharedPreferenceChanged(null, prefKey)
token.onSharedPreferenceChanged(null, "BARFOO")
}
}
}
}
}
| sattvik/baitha | src/test/scala/com/sattvik/baitha/test/EnhancedPreferencesSpec.scala | Scala | apache-2.0 | 13,292 |
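// Illustrative sketch, not part of the original spec: how the API exercised by these tests
// is presumably used from production code. `sharedPrefs` (an android.content.SharedPreferences
// instance obtained from a Context) is hypothetical; the calls mirror those verified above.
//
//   import com.sattvik.baitha._
//   import com.sattvik.baitha.TypedPreference._
//
//   val fontSize = IntPreference("fontSize", 12)
//   val prefs    = new EnhancedPreferences(sharedPrefs)
//
//   val size: Int          = prefs.get(fontSize)        // falls back to the default, 12
//   val maybe: Option[Int] = prefs.getOption(fontSize)  // None when the key is absent
//   prefs.withEditor { e => e.put(fontSize, 16) }       // commits unless e.abort() is called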
package com.eigenvektor.priorityqueue
/** A common supertrait for all heaps
*
* @tparam E the element type.
*/
trait Heap[E] {
/** Tells if the heap is empty */
val isEmpty:Boolean
/** The size of the heap */
val size:Int
/** Adds an element to this heap
*
* @param x The element to add.
* @return A copy of this heap with x added.
*/
def +(x:E):Heap[E]
/** Adds a series of elements to this.
*
* @param t The elements to add.
*/
def ++(t:Traversable[E]) = t.foldLeft(this)((x,y) => x + y)
/** Gets the minimum value in the heap */
val min:E
/** Removes the minimum element of the heap
*
* @return a pair of the removed element and the heap without the element.
*/
val removeMin:Pair[E, Heap[E]]
}
/** Companion object for heaps */
object Heap {
/** Creates a new, empty heap.
*
* Default implementation is skew binomial
*
* @param order the element ordering.
*/
def apply[E](implicit order:Ordering[E]):Heap[E] = SkewBinomialHeap(order);
/** Creates a heap populated from a traversable.
*
* @param x The traversable to populate with.
* @param order The ordering to use.
*/
def apply[E](x:Traversable[E])(implicit order:Ordering[E]):Heap[E] = {
x.foldLeft(Heap[E](order))(_ + _)
}
}
| Vyzen/trout | src/main/scala/com/eigenvektor/priorityqueue/Heap.scala | Scala | gpl-3.0 | 1,328 |
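// Illustrative sketch, not part of the original source: a small usage example of the Heap
// API above, assuming SkewBinomialHeap behaves as an immutable min-heap (shown in comments
// because that implementation lives in another file).
//
//   import com.eigenvektor.priorityqueue.Heap
//
//   val h           = Heap(List(5, 1, 3))      // build from a Traversable
//   val (min, rest) = h.removeMin              // min == 1, rest.size == 2
//   val h2          = rest + 0 ++ List(7, 2)   // non-destructive inserts
//   h2.min                                     // == 0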
package dotty.tools
package dotc
package reporting
import core.Contexts._
import util.{SourcePosition, NoSourcePosition}
import util.{SourceFile, NoSource}
import core.Decorators.PhaseListDecorator
import collection.mutable
import config.Settings.Setting
import config.Printers
import java.lang.System.currentTimeMillis
import typer.ErrorReporting.DiagnosticString
object Reporter {
private val ERROR = 2
private val WARNING = 1
private val INFO = 0
class Diagnostic(msgFn: => String, val pos: SourcePosition, val level: Int) extends Exception {
import DiagnosticString._
private var myMsg: String = null
private var myIsNonSensical: Boolean = false
/** The message to report */
def msg: String = {
if (myMsg == null) {
myMsg = msgFn
if (myMsg.contains(nonSensicalStartTag)) {
myIsNonSensical = true
// myMsg might be composed of several d"..." invocations -> nested nonsensical tags possible
myMsg = myMsg.replaceAllLiterally(nonSensicalStartTag, "").replaceAllLiterally(nonSensicalEndTag, "")
}
}
myMsg
}
/** Report in current reporter */
def report(implicit ctx: Context) = ctx.reporter.report(this)
def isNonSensical = { msg; myIsNonSensical }
def isSuppressed(implicit ctx: Context): Boolean = !ctx.settings.YshowSuppressedErrors.value && isNonSensical
override def toString = s"$getClass at $pos: $msg"
override def getMessage() = msg
def checkingStr: String = msgFn
}
class Error(msgFn: => String, pos: SourcePosition) extends Diagnostic(msgFn, pos, ERROR)
class Warning(msgFn: => String, pos: SourcePosition) extends Diagnostic(msgFn, pos, WARNING)
class Info(msgFn: => String, pos: SourcePosition) extends Diagnostic(msgFn, pos, INFO)
abstract class ConditionalWarning(msgFn: => String, pos: SourcePosition) extends Warning(msgFn, pos) {
def enablingOption(implicit ctx: Context): Setting[Boolean]
}
class FeatureWarning(msgFn: => String, pos: SourcePosition) extends ConditionalWarning(msgFn, pos) {
def enablingOption(implicit ctx: Context) = ctx.settings.feature
}
class UncheckedWarning(msgFn: => String, pos: SourcePosition) extends ConditionalWarning(msgFn, pos) {
def enablingOption(implicit ctx: Context) = ctx.settings.unchecked
}
class DeprecationWarning(msgFn: => String, pos: SourcePosition) extends ConditionalWarning(msgFn, pos) {
def enablingOption(implicit ctx: Context) = ctx.settings.deprecation
}
}
import Reporter._
trait Reporting { this: Context =>
/** For sending messages that are printed only if -verbose is set */
def inform(msg: => String, pos: SourcePosition = NoSourcePosition): Unit =
if (this.settings.verbose.value) echo(msg, pos)
def echo(msg: => String, pos: SourcePosition = NoSourcePosition): Unit =
reporter.report(new Info(msg, pos))
def deprecationWarning(msg: => String, pos: SourcePosition = NoSourcePosition): Unit =
reporter.report(new DeprecationWarning(msg, pos))
def uncheckedWarning(msg: => String, pos: SourcePosition = NoSourcePosition): Unit =
reporter.report(new UncheckedWarning(msg, pos))
def featureWarning(msg: => String, pos: SourcePosition = NoSourcePosition): Unit =
reporter.report(new FeatureWarning(msg, pos))
def warning(msg: => String, pos: SourcePosition = NoSourcePosition): Unit =
reporter.report(new Warning(msg, pos))
def error(msg: => String, pos: SourcePosition = NoSourcePosition): Unit = {
// println("*** ERROR: " + msg) // !!! DEBUG
reporter.report(new Error(msg, pos))
}
def restrictionError(msg: => String, pos: SourcePosition = NoSourcePosition): Unit =
error(s"Implementation restriction: $msg", pos)
def incompleteInputError(msg: String, pos: SourcePosition = NoSourcePosition)(implicit ctx: Context): Unit =
reporter.incomplete(new Error(msg, pos))(ctx)
/** Log msg if settings.log contains the current phase.
* See [[config.CompilerCommand#explainAdvanced]] for the exact meaning of
* "contains" here.
*/
def log(msg: => String): Unit =
if (this.settings.log.value.containsPhase(phase))
echo(s"[log ${ctx.phasesStack.reverse.mkString(" -> ")}] $msg")
def debuglog(msg: => String): Unit =
if (ctx.debug) log(msg)
def informTime(msg: => String, start: Long): Unit = {
def elapsed = s" in ${currentTimeMillis - start}ms"
informProgress(msg + elapsed)
}
def informProgress(msg: => String) =
inform("[" + msg + "]")
def trace[T](msg: => String)(value: T) = {
log(msg + " " + value)
value
}
def debugwarn(msg: => String, pos: SourcePosition = NoSourcePosition): Unit =
if (this.settings.debug.value) warning(msg, pos)
def debugTraceIndented[T](question: => String, printer: Printers.Printer = Printers.default, show: Boolean = false)(op: => T): T =
conditionalTraceIndented(this.settings.debugTrace.value, question, printer, show)(op)
def conditionalTraceIndented[T](cond: Boolean, question: => String, printer: Printers.Printer = Printers.default, show: Boolean = false)(op: => T): T =
if (cond) traceIndented(question, printer, show)(op)
else op
def traceIndented[T](question: => String, printer: Printers.Printer = Printers.default, show: Boolean = false)(op: => T): T = {
def resStr(res: Any): String = res match {
case res: printing.Showable if show => res.show
case _ => String.valueOf(res)
}
if (printer eq config.Printers.noPrinter) op
else traceIndented[T](s"==> $question?", (res: Any) => s"<== $question = ${resStr(res)}")(op)
}
def traceIndented[T](leading: => String, trailing: Any => String)(op: => T): T = {
var finalized = false
var logctx = this
while (logctx.reporter.isInstanceOf[StoreReporter]) logctx = logctx.outer
def finalize(result: Any, note: String) =
if (!finalized) {
base.indent -= 1
logctx.log(s"${base.indentTab * base.indent}${trailing(result)}$note")
finalized = true
}
try {
logctx.log(s"${base.indentTab * base.indent}$leading")
base.indent += 1
val res = op
finalize(res, "")
res
} catch {
case ex: Throwable =>
finalize("<missing>", s" (with exception $ex)")
throw ex
}
}
def errorsReported: Boolean = outersIterator exists (_.reporter.hasErrors)
}
/**
* This interface provides methods to issue information, warning and
* error messages.
*/
abstract class Reporter {
/** Report a diagnostic */
protected def doReport(d: Diagnostic)(implicit ctx: Context): Unit
/** Whether very long lines can be truncated. This exists so important
* debugging information (like printing the classpath) is not rendered
* invisible due to the max message length.
*/
private var _truncationOK: Boolean = true
def truncationOK = _truncationOK
def withoutTruncating[T](body: => T): T = {
val saved = _truncationOK
_truncationOK = false
try body
finally _truncationOK = saved
}
type ErrorHandler = Diagnostic => Context => Unit
private var incompleteHandler: ErrorHandler = d => c => report(d)(c)
def withIncompleteHandler[T](handler: ErrorHandler)(op: => T): T = {
val saved = incompleteHandler
incompleteHandler = handler
try op
finally incompleteHandler = saved
}
var errorCount = 0
var warningCount = 0
def hasErrors = errorCount > 0
def hasWarnings = warningCount > 0
val unreportedWarnings = new mutable.HashMap[String, Int] {
override def default(key: String) = 0
}
def report(d: Diagnostic)(implicit ctx: Context): Unit = if (!isHidden(d)) {
doReport(d)
d match {
case d: ConditionalWarning if !d.enablingOption.value => unreportedWarnings(d.enablingOption.name) += 1
case d: Warning => warningCount += 1
case d: Error => errorCount += 1
case d: Info => // nothing to do here
// match error if d is something else
}
}
def incomplete(d: Diagnostic)(implicit ctx: Context): Unit =
incompleteHandler(d)(ctx)
/** Print a summary */
def printSummary(implicit ctx: Context): Unit = {
if (warningCount > 0) ctx.echo(countString(warningCount, "warning") + " found")
if (errorCount > 0) ctx.echo(countString(errorCount, "error") + " found")
for ((settingName, count) <- unreportedWarnings)
ctx.echo(s"there were $count ${settingName.tail} warning(s); re-run with $settingName for details")
}
/** Returns a string meaning "n elements". */
private def countString(n: Int, elements: String): String = n match {
case 0 => "no " + elements + "s"
case 1 => "one " + elements
case 2 => "two " + elements + "s"
case 3 => "three " + elements + "s"
case 4 => "four " + elements + "s"
case _ => n + " " + elements + "s"
}
/** Should this diagnostic not be reported at all? */
def isHidden(d: Diagnostic)(implicit ctx: Context): Boolean = false
/** Does this reporter contain not yet reported errors or warnings? */
def hasPending: Boolean = false
/** Issue all error messages in this reporter to next outer one, or make sure they are written. */
def flush()(implicit ctx: Context): Unit = {}
}
| AlexSikia/dotty | src/dotty/tools/dotc/reporting/Reporter.scala | Scala | bsd-3-clause | 9,204 |
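// Illustrative sketch, not part of the original source: the single abstract method a
// concrete reporter must provide (see `doReport` above). Shown in comments because it
// only compiles inside the dotc codebase, where Context and Diagnostic are available.
//
//   class PrintingReporter extends Reporter {
//     protected def doReport(d: Diagnostic)(implicit ctx: Context): Unit =
//       println(s"${d.pos}: ${d.msg}")
//   }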
package backend.data.mongodb.service
import backend.data.service.ArchiveDataService
import backend.data.service.PostDataService
/**
* The ArchiveDataService using MongoDB
*
* @author Stefan Bleibinhaus
*
*/
object ArchiveDataServiceMongo extends ArchiveDataService {
override protected def postDataService: PostDataService = PostDataServiceMongo
updateArchive()
}
| ExNexu/scablo | app/backend/data/mongodb/service/ArchiveDataServiceMongo.scala | Scala | bsd-2-clause | 380 |
/* Title: Pure/System/options.scala
Author: Makarius
System options with external string representation.
*/
package isabelle
import java.util.Calendar
object Options
{
type Spec = (String, Option[String])
val empty: Options = new Options()
/* representation */
sealed abstract class Type
{
def print: String = Word.lowercase(toString)
}
case object Bool extends Type
case object Int extends Type
case object Real extends Type
case object String extends Type
case object Unknown extends Type
case class Opt(
public: Boolean,
pos: Position.T,
name: String,
typ: Type,
value: String,
default_value: String,
description: String,
section: String)
{
private def print(default: Boolean): String =
{
val x = if (default) default_value else value
"option " + name + " : " + typ.print + " = " +
(if (typ == Options.String) quote(x) else x) +
(if (description == "") "" else "\n -- " + quote(description))
}
def print: String = print(false)
def print_default: String = print(true)
def title(strip: String = ""): String =
{
val words = Word.explode('_', name)
val words1 =
words match {
case word :: rest if word == strip => rest
case _ => words
}
Word.implode(words1.map(Word.perhaps_capitalize(_)))
}
def unknown: Boolean = typ == Unknown
}
/* parsing */
private val SECTION = "section"
private val PUBLIC = "public"
private val OPTION = "option"
private val OPTIONS = Path.explode("etc/options")
private val PREFS_DIR = Path.explode("$ISABELLE_HOME_USER/etc")
private val PREFS = PREFS_DIR + Path.basic("preferences")
lazy val options_syntax =
Outer_Syntax.init() + ":" + "=" + "--" +
(SECTION, Keyword.DOCUMENT_HEADING) + (PUBLIC, Keyword.THY_DECL) + (OPTION, Keyword.THY_DECL)
lazy val prefs_syntax = Outer_Syntax.init() + "="
object Parser extends Parse.Parser
{
val option_name = atom("option name", _.is_xname)
val option_type = atom("option type", _.is_ident)
val option_value =
opt(token("-", tok => tok.is_sym_ident && tok.content == "-")) ~ atom("nat", _.is_nat) ^^
{ case s ~ n => if (s.isDefined) "-" + n else n } |
atom("option value", tok => tok.is_name || tok.is_float)
val option_entry: Parser[Options => Options] =
{
command(SECTION) ~! text ^^
{ case _ ~ a => (options: Options) => options.set_section(a) } |
opt(command(PUBLIC)) ~ command(OPTION) ~! (option_name ~ $$$(":") ~ option_type ~
$$$("=") ~ option_value ~ ($$$("--") ~! text ^^ { case _ ~ x => x } | success(""))) ^^
{ case a ~ pos ~ (b ~ _ ~ c ~ _ ~ d ~ e) =>
(options: Options) => options.declare(a.isDefined, pos, b, c, d, e) }
}
val prefs_entry: Parser[Options => Options] =
{
option_name ~ ($$$("=") ~! option_value) ^^
{ case a ~ (_ ~ b) => (options: Options) => options.add_permissive(a, b) }
}
def parse_file(syntax: Outer_Syntax, parser: Parser[Options => Options],
options: Options, file: Path): Options =
{
val toks = Token.explode(syntax.keywords, File.read(file))
val ops =
parse_all(rep(parser), Token.reader(toks, file.implode)) match {
case Success(result, _) => result
case bad => error(bad.toString)
}
try { (options.set_section("") /: ops) { case (opts, op) => op(opts) } }
catch { case ERROR(msg) => error(msg + Position.here(file.position)) }
}
}
def init_defaults(): Options =
{
var options = empty
for {
dir <- Isabelle_System.components()
file = dir + OPTIONS if file.is_file
} { options = Parser.parse_file(options_syntax, Parser.option_entry, options, file) }
options
}
def init(): Options = init_defaults().load_prefs()
/* encode */
val encode: XML.Encode.T[Options] = (options => options.encode)
/* command line entry point */
def main(args: Array[String])
{
Command_Line.tool0 {
args.toList match {
case get_option :: export_file :: more_options =>
val options = (Options.init() /: more_options)(_ + _)
if (get_option != "")
Console.println(options.check_name(get_option).value)
if (export_file != "")
File.write(Path.explode(export_file), YXML.string_of_body(options.encode))
if (get_option == "" && export_file == "")
Console.println(options.print)
case _ => error("Bad arguments:\n" + cat_lines(args))
}
}
}
}
final class Options private(
val options: Map[String, Options.Opt] = Map.empty,
val section: String = "")
{
override def toString: String = options.iterator.mkString("Options (", ",", ")")
private def print_opt(opt: Options.Opt): String =
if (opt.public) "public " + opt.print else opt.print
def print: String = cat_lines(options.toList.sortBy(_._1).map(p => print_opt(p._2)))
def description(name: String): String = check_name(name).description
/* check */
def check_name(name: String): Options.Opt =
options.get(name) match {
case Some(opt) if !opt.unknown => opt
case _ => error("Unknown option " + quote(name))
}
private def check_type(name: String, typ: Options.Type): Options.Opt =
{
val opt = check_name(name)
if (opt.typ == typ) opt
else error("Ill-typed option " + quote(name) + " : " + opt.typ.print + " vs. " + typ.print)
}
/* basic operations */
private def put[A](name: String, typ: Options.Type, value: String): Options =
{
val opt = check_type(name, typ)
new Options(options + (name -> opt.copy(value = value)), section)
}
private def get[A](name: String, typ: Options.Type, parse: String => Option[A]): A =
{
val opt = check_type(name, typ)
parse(opt.value) match {
case Some(x) => x
case None =>
error("Malformed value for option " + quote(name) +
" : " + typ.print + " =\n" + quote(opt.value))
}
}
/* internal lookup and update */
class Bool_Access
{
def apply(name: String): Boolean = get(name, Options.Bool, Properties.Value.Boolean.unapply)
def update(name: String, x: Boolean): Options =
put(name, Options.Bool, Properties.Value.Boolean(x))
}
val bool = new Bool_Access
class Int_Access
{
def apply(name: String): Int = get(name, Options.Int, Properties.Value.Int.unapply)
def update(name: String, x: Int): Options =
put(name, Options.Int, Properties.Value.Int(x))
}
val int = new Int_Access
class Real_Access
{
def apply(name: String): Double = get(name, Options.Real, Properties.Value.Double.unapply)
def update(name: String, x: Double): Options =
put(name, Options.Real, Properties.Value.Double(x))
}
val real = new Real_Access
class String_Access
{
def apply(name: String): String = get(name, Options.String, s => Some(s))
def update(name: String, x: String): Options = put(name, Options.String, x)
}
val string = new String_Access
class Seconds_Access
{
def apply(name: String): Time = Time.seconds(real(name))
}
val seconds = new Seconds_Access
/* external updates */
private def check_value(name: String): Options =
{
val opt = check_name(name)
opt.typ match {
case Options.Bool => bool(name); this
case Options.Int => int(name); this
case Options.Real => real(name); this
case Options.String => string(name); this
case Options.Unknown => this
}
}
def declare(
public: Boolean,
pos: Position.T,
name: String,
typ_name: String,
value: String,
description: String): Options =
{
options.get(name) match {
case Some(other) =>
error("Duplicate declaration of option " + quote(name) + Position.here(pos) +
Position.here(other.pos))
case None =>
val typ =
typ_name match {
case "bool" => Options.Bool
case "int" => Options.Int
case "real" => Options.Real
case "string" => Options.String
case _ =>
error("Unknown type for option " + quote(name) + " : " + quote(typ_name) +
Position.here(pos))
}
val opt = Options.Opt(public, pos, name, typ, value, value, description, section)
(new Options(options + (name -> opt), section)).check_value(name)
}
}
def add_permissive(name: String, value: String): Options =
{
if (options.isDefinedAt(name)) this + (name, value)
else {
val opt = Options.Opt(false, Position.none, name, Options.Unknown, value, value, "", "")
new Options(options + (name -> opt), section)
}
}
def + (name: String, value: String): Options =
{
val opt = check_name(name)
(new Options(options + (name -> opt.copy(value = value)), section)).check_value(name)
}
def + (name: String, opt_value: Option[String]): Options =
{
val opt = check_name(name)
opt_value match {
case Some(value) => this + (name, value)
case None if opt.typ == Options.Bool => this + (name, "true")
case None => error("Missing value for option " + quote(name) + " : " + opt.typ.print)
}
}
def + (str: String): Options =
{
str.indexOf('=') match {
case -1 => this + (str, None)
case i => this + (str.substring(0, i), str.substring(i + 1))
}
}
def ++ (specs: List[Options.Spec]): Options =
(this /: specs)({ case (x, (y, z)) => x + (y, z) })
/* sections */
def set_section(new_section: String): Options =
new Options(options, new_section)
def sections: List[(String, List[Options.Opt])] =
options.groupBy(_._2.section).toList.map({ case (a, opts) => (a, opts.toList.map(_._2)) })
/* encode */
def encode: XML.Body =
{
val opts =
for ((_, opt) <- options.toList; if !opt.unknown)
yield (opt.pos, (opt.name, (opt.typ.print, opt.value)))
import XML.Encode.{string => string_, _}
list(pair(properties, pair(string_, pair(string_, string_))))(opts)
}
/* user preferences */
def load_prefs(): Options =
if (Options.PREFS.is_file)
Options.Parser.parse_file(
Options.prefs_syntax, Options.Parser.prefs_entry, this, Options.PREFS)
else this
def save_prefs()
{
val defaults = Options.init_defaults()
val changed =
(for {
(name, opt2) <- options.iterator
opt1 = defaults.options.get(name)
if (opt1.isEmpty || opt1.get.value != opt2.value)
} yield (name, opt2.value, if (opt1.isEmpty) " (* unknown *)" else "")).toList
val prefs =
changed.sortBy(_._1)
.map({ case (x, y, z) => x + " = " + Outer_Syntax.quote_string(y) + z + "\n" }).mkString
Isabelle_System.mkdirs(Options.PREFS_DIR)
File.write_backup(Options.PREFS,
"(* generated by Isabelle " + Calendar.getInstance.getTime + " *)\n\n" + prefs)
}
}
class Options_Variable
{
private var options = Options.empty
def value: Options = synchronized { options }
def update(new_options: Options): Unit = synchronized { options = new_options }
def + (name: String, x: String): Unit = synchronized { options = options + (name, x) }
class Bool_Access
{
def apply(name: String): Boolean = synchronized { options.bool(name) }
def update(name: String, x: Boolean): Unit =
synchronized { options = options.bool.update(name, x) }
}
val bool = new Bool_Access
class Int_Access
{
def apply(name: String): Int = synchronized { options.int(name) }
def update(name: String, x: Int): Unit =
synchronized { options = options.int.update(name, x) }
}
val int = new Int_Access
class Real_Access
{
def apply(name: String): Double = synchronized { options.real(name) }
def update(name: String, x: Double): Unit =
synchronized { options = options.real.update(name, x) }
}
val real = new Real_Access
class String_Access
{
def apply(name: String): String = synchronized { options.string(name) }
def update(name: String, x: String): Unit =
synchronized { options = options.string.update(name, x) }
}
val string = new String_Access
class Seconds_Access
{
def apply(name: String): Time = synchronized { options.seconds(name) }
}
val seconds = new Seconds_Access
}
| MerelyAPseudonym/isabelle | src/Pure/System/options.scala | Scala | bsd-3-clause | 12,398 |
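// Illustrative sketch, not part of the original source: what Parser.option_entry above
// accepts in an etc/options file, and how a value is then read and updated from Scala.
// The option name "threads" is hypothetical.
//
//   section "Parallel Processing"
//   public option threads : int = 0
//     -- "maximum number of worker threads (0 means hardware default)"
//
//   val options = Options.init()
//   val n: Int  = options.int("threads")   // typed lookup; error if unknown or ill-typed
//   val updated = options + "threads=4"    // same as options + ("threads", "4")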
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.spark.sql
import scala.collection.Map
import org.apache.commons.logging.Log
import org.apache.commons.logging.LogFactory
import org.apache.spark.Partition
import org.apache.spark.SparkContext
import org.apache.spark.TaskContext
import org.apache.spark.sql.Row
import org.elasticsearch.hadoop.cfg.Settings
import org.elasticsearch.hadoop.rest.InitializationUtils
import org.elasticsearch.hadoop.rest.PartitionDefinition
import org.elasticsearch.spark.rdd.AbstractEsRDD
import org.elasticsearch.spark.rdd.AbstractEsRDDIterator
import org.elasticsearch.spark.rdd.EsPartition
import scala.annotation.meta.param
// While we could have just wrapped the ScalaEsRDD and unpacked the top-level data into a Row, the issue is that the underlying Maps are StructTypes
// and as such need to be mapped as Rows, resulting in either nested wrapping or using a ValueReader, at which point wrapping becomes unwieldy since the class signatures clash
private[spark] class ScalaEsRowRDD(
@(transient @param) sc: SparkContext,
params: Map[String, String] = Map.empty,
schema: SchemaUtils.Schema)
extends AbstractEsRDD[Row](sc, params) {
override def compute(split: Partition, context: TaskContext): ScalaEsRowRDDIterator = {
new ScalaEsRowRDDIterator(context, split.asInstanceOf[EsPartition].esPartition, schema)
}
}
private[spark] class ScalaEsRowRDDIterator(
context: TaskContext,
partition: PartitionDefinition,
schema: SchemaUtils.Schema)
extends AbstractEsRDDIterator[Row](context, partition) {
override def getLogger() = LogFactory.getLog(classOf[ScalaEsRowRDD])
override def initReader(settings: Settings, log: Log) = {
InitializationUtils.setValueReaderIfNotSet(settings, classOf[ScalaRowValueReader], log)
// parse the structure and save the order (requested by Spark) for each Row (root and nested)
// since the data returned from Elastic is likely to not be in the same order
SchemaUtils.setRowInfo(settings, schema.struct)
}
override def createValue(value: Array[Object]): Row = {
// drop the ID
value(1).asInstanceOf[ScalaEsRow]
}
}
| takezoe/elasticsearch-hadoop | spark/sql-20/src/main/scala/org/elasticsearch/spark/sql/ScalaEsRowRDD.scala | Scala | apache-2.0 | 2,899 |
package io.buoyant.interpreter.fs
import com.fasterxml.jackson.annotation.JsonIgnore
import com.twitter.finagle.Dtab
import com.twitter.finagle.Stack.Params
import com.twitter.finagle.naming.NameInterpreter
import com.twitter.io.Buf
import com.twitter.util.Activity
import io.buoyant.config.types.File
import io.buoyant.namer.{Param, ConfiguredDtabNamer, InterpreterConfig}
import io.buoyant.namer.fs.Watcher
case class FsInterpreterConfig(dtabFile: File) extends InterpreterConfig {
@JsonIgnore
private[this] val path = dtabFile.path
@JsonIgnore
private[this] def dtab: Activity[Dtab] =
Watcher(path.getParent).children.flatMap { children =>
children.get(path.getFileName.toString) match {
case Some(file: Watcher.File.Reg) => file.data
case _ => Activity.pending
}
}.map {
case Buf.Utf8(dtab) =>
Dtab.read(dtab)
}
@JsonIgnore
override def newInterpreter(params: Params): NameInterpreter = {
val Param.Namers(namers) = params[Param.Namers]
ConfiguredDtabNamer(dtab, namers)
}
}
| denverwilliams/linkerd | interpreter/fs/src/main/scala/io/buoyant/interpreter/fs/FsInterpreterConfig.scala | Scala | apache-2.0 | 1,060 |
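// Illustrative sketch, not part of the original source: the watched dtabFile is expected to
// contain a Finagle dtab, e.g. a single delegation line such as
//
//   /svc => /$/inet/127.1/9990
//
// which the Activity above turns into a Dtab via Dtab.read. A standalone parse check:
//
//   import com.twitter.finagle.Dtab
//   val dtab = Dtab.read("/svc => /$/inet/127.1/9990")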
package services
import org.joda.time.DateTime
import scala.concurrent.Future
import play.api.libs.json._
/**
* The interface of the Toggl service, which retrieves the list of projects in a Toggl workspace.
*/
trait TogglService {
/**
* Get a list of the project names in a Toggl workspace
* @param ApiToken the Toggl API token used to authenticate the request
* @return a Future list of project names
*/
def getTogglProjects(ApiToken: String): Future[List[String]]
def getTogglWorkspace(ApiToken: String): Future[List[String]]
}
/**
* Companion object including case class model for Toggl projects, and Reads/Writes to convert from/to json format.
*/
object TogglService {
/**
* Model case class for Toggl Project
*
* @param id Project ID
* @param wid Workspace ID, where the project belongs to
* @param cid Client ID
* @param name Name of the Toggl project in a specific workspace
* @param billable Whether the project is billable or not
* @param is_private Whether the project is accessible for only project users or for all workspace users
* @param active Whether the project is archived or not
* @param template Whether the project can be used as a template
* @param at Indicates the time task was last updated
* @param created_at Timestamp indicating when the project was created
* @param color ID of the color selected for the project
* @param auto_estimates Whether the estimated hours are automatically calculated based on task estimations or
* manually fixed based on the value of 'estimated_hours'
* @param actual_hours Hours that has been spent on the project
* @param hex_color Project tag color
**/
case class Project(id: Long, wid: Long, cid: Long, name: String,
billable: Boolean, is_private: Boolean, active: Boolean,
template: Boolean, at: DateTime, created_at: DateTime,
color: String, auto_estimates: Boolean, actual_hours: Int, hex_color: String)
case class Workspace(id: Long,
name: String,
premium: Boolean,
admin: Boolean,
default_hourly_rate: Int,
default_currency: String,
only_admins_may_create_projects: Boolean,
only_admins_see_billable_rates: Boolean,
rounding: Int,
rounding_minutes: Int,
at: DateTime,
logo_url: String)
implicit val dateReads = Reads.jodaDateReads("yyyy-MM-dd'T'HH:mm:ssZZ")
implicit val dateWrites = Writes.jodaDateWrites("yyyy-MM-dd'T'HH:mm:ss")
implicit val projectFormat: Format[Project] = Json.format[Project]
implicit val workspaceFormat: Format[Workspace] = Json.format[Workspace]
}
| dcharoulis/teahub | app/services/TogglService.scala | Scala | apache-2.0 | 2,959 |
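// Illustrative sketch, not part of the original source: a trivial stub implementation of the
// TogglService trait above, the kind of thing a test might inject instead of a real
// HTTP-backed service. Shown in comments because it assumes the `services` package is on
// the classpath.
//
//   import scala.concurrent.Future
//   import services.TogglService
//
//   class StubTogglService extends TogglService {
//     def getTogglProjects(ApiToken: String): Future[List[String]] =
//       Future.successful(List("Project A", "Project B"))
//     def getTogglWorkspace(ApiToken: String): Future[List[String]] =
//       Future.successful(List("Workspace 1"))
//   }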
package org.storrent
import akka.actor.{ Actor, ActorRef, Props, ActorSystem }
object Storrent {
val system = ActorSystem("storrent")
def main(args: Array[String]) {
if(args.isEmpty) {
println("Usage: Storrent [torrent file] [torrent file] ...")
} else {
args.foreach { f => system.actorOf(Props(new Torrent(f)), s"Torrent${f}") }
}
}
}
| bowlofstew/storrent | src/main/scala/Storrent.scala | Scala | apache-2.0 | 369 |
package mesosphere.marathon.core.launcher
import mesosphere.marathon.core.launcher.impl.TaskLabels
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.Task.LocalVolume
import mesosphere.marathon.tasks.ResourceUtil
import org.apache.mesos.{ Protos => MesosProtos }
/**
 * An operation which relates to a task and is sent to Mesos for execution in an `acceptOffers` API call.
*/
sealed trait TaskOp {
/** The ID of the affected task. */
def taskId: Task.Id
/** The MarathonTask state before this operation has been applied. */
def oldTask: Option[Task]
/**
* The MarathonTask state after this operation has been applied.
* `None` means that the associated task should be expunged.
*/
def maybeNewTask: Option[Task]
/** How would the offer change when Mesos executes this op? */
def applyToOffer(offer: MesosProtos.Offer): MesosProtos.Offer
/** To which Offer.Operations does this task op relate? */
def offerOperations: Iterable[org.apache.mesos.Protos.Offer.Operation]
}
object TaskOp {
/** Launch a task on the offer. */
case class Launch(
taskInfo: MesosProtos.TaskInfo,
newTask: Task,
oldTask: Option[Task] = None,
offerOperations: Iterable[MesosProtos.Offer.Operation]) extends TaskOp {
override def taskId: Task.Id = newTask.taskId
override def maybeNewTask: Option[Task] = Some(newTask)
def applyToOffer(offer: MesosProtos.Offer): MesosProtos.Offer = {
import scala.collection.JavaConverters._
ResourceUtil.consumeResourcesFromOffer(offer, taskInfo.getResourcesList.asScala)
}
}
case class ReserveAndCreateVolumes(
newTask: Task,
resources: Iterable[MesosProtos.Resource],
localVolumes: Iterable[LocalVolume],
oldTask: Option[Task] = None,
offerOperations: Iterable[MesosProtos.Offer.Operation]) extends TaskOp {
override def taskId: Task.Id = newTask.taskId
override def maybeNewTask: Option[Task] = Some(newTask)
override def applyToOffer(offer: MesosProtos.Offer): MesosProtos.Offer =
ResourceUtil.consumeResourcesFromOffer(offer, resources)
}
case class UnreserveAndDestroyVolumes(
taskId: Task.Id,
maybeNewTask: Option[Task] = None,
resources: Iterable[MesosProtos.Resource],
oldTask: Option[Task] = None) extends TaskOp {
override lazy val offerOperations: Iterable[MesosProtos.Offer.Operation] = {
val (withDisk, withoutDisk) = resources.partition(_.hasDisk)
val reservationsForDisks = withDisk.map(_.toBuilder.clearDisk().build())
import scala.collection.JavaConverters._
val maybeDestroyVolumes: Option[MesosProtos.Offer.Operation] =
if (withDisk.nonEmpty) {
val destroyOp =
MesosProtos.Offer.Operation.Destroy.newBuilder()
.addAllVolumes(withDisk.asJava)
val op =
MesosProtos.Offer.Operation.newBuilder()
.setType(MesosProtos.Offer.Operation.Type.DESTROY)
.setDestroy(destroyOp)
.build()
Some(op)
}
else None
val maybeUnreserve: Option[MesosProtos.Offer.Operation] =
if (withDisk.nonEmpty || reservationsForDisks.nonEmpty) {
val unreserveOp = MesosProtos.Offer.Operation.Unreserve.newBuilder()
.addAllResources(withoutDisk.asJava)
.addAllResources(reservationsForDisks.asJava)
.build()
val op =
MesosProtos.Offer.Operation.newBuilder()
.setType(MesosProtos.Offer.Operation.Type.UNRESERVE)
.setUnreserve(unreserveOp)
.build()
Some(op)
}
else None
Iterable(maybeDestroyVolumes, maybeUnreserve).flatten
}
override def applyToOffer(offer: MesosProtos.Offer): MesosProtos.Offer =
ResourceUtil.consumeResourcesFromOffer(offer, resources)
}
}
| pgkelley4/marathon | src/main/scala/mesosphere/marathon/core/launcher/TaskOp.scala | Scala | apache-2.0 | 3,900 |
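Because `TaskOp` is a sealed trait, call sites can match exhaustively on the three operations defined above. The helper below is not part of Marathon; it is only a sketch showing how the fields each case exposes could be summarized, for example for logging.

import mesosphere.marathon.core.launcher.TaskOp

object TaskOpSummary {
  // Purely illustrative: turns a TaskOp into a short human-readable line.
  def describe(op: TaskOp): String = op match {
    case launch: TaskOp.Launch =>
      s"launch ${launch.taskId} (${launch.offerOperations.size} offer operation(s))"
    case reserve: TaskOp.ReserveAndCreateVolumes =>
      s"reserve for ${reserve.taskId}: ${reserve.resources.size} resource(s), " +
        s"${reserve.localVolumes.size} local volume(s)"
    case unreserve: TaskOp.UnreserveAndDestroyVolumes =>
      s"unreserve for ${unreserve.taskId}: ${unreserve.resources.size} resource(s)"
  }
}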
package com.chrisomeara.pillar
import org.scalatest.{FunSpec, BeforeAndAfter}
import org.scalatest.matchers.ShouldMatchers
import java.io.{ByteArrayInputStream, FileInputStream}
import java.util.Date
class ParserSpec extends FunSpec with BeforeAndAfter with ShouldMatchers {
describe("#parse") {
describe("1370028262000_creates_events_table.cql") {
val migrationPath = "src/test/resources/pillar/migrations/faker/1370028262000_creates_events_table.cql"
it("returns a migration object") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).getClass should be(classOf[IrreversibleMigration])
}
it("assigns authoredAt") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).authoredAt should equal(new Date(1370023262000L))
}
it("assigns description") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).description should equal("creates events table")
}
it("assigns up") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).up should contain(
"""CREATE TABLE events (
| batch_id text,
| occurred_at uuid,
| event_type text,
| payload blob,
| PRIMARY KEY (batch_id, occurred_at, event_type)
|)""".stripMargin)
}
}
describe("1469630066000_creates_users_groups_table.cql") {
val migrationPath = "src/test/resources/pillar/migrations/faker/1469630066000_creates_users_groups_table.cql"
it("returns a migration object") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).getClass should be(classOf[ReversibleMigration])
}
it("assigns authoredAt") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).authoredAt should equal(new Date(1469630066000L))
}
it("assigns description") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).description should equal("creates users and groups tables")
}
it("assigns two up stages") {
val resource = new FileInputStream(migrationPath)
val migration = Parser().parse(resource)
migration.up should contain(
"""CREATE TABLE groups (
| id uuid,
| name text,
| PRIMARY KEY (id)
|)""".stripMargin)
migration.up should contain(
"""CREATE TABLE users (
| id uuid,
| group_id uuid,
| username text,
| password text,
| PRIMARY KEY (id)
|)""".stripMargin)
}
it("assigns two down stages") {
val resource = new FileInputStream(migrationPath)
val migration = Parser().parse(resource).asInstanceOf[ReversibleMigration]
migration.down should contain("""DROP TABLE users""".stripMargin)
migration.down should contain("""DROP TABLE groups""".stripMargin)
}
}
describe("1370028263000_creates_views_table.cql") {
val migrationPath = "src/test/resources/pillar/migrations/faker/1370028263000_creates_views_table.cql"
it("returns a migration object") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).getClass should be(classOf[ReversibleMigration])
}
it("assigns down") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).asInstanceOf[ReversibleMigration].down should contain("DROP TABLE views")
}
}
describe("1370028264000_adds_user_agent_to_views_table.cql") {
val migrationPath = "src/test/resources/pillar/migrations/faker/1370028264000_adds_user_agent_to_views_table.cql"
it("returns a migration object") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).getClass should be(classOf[ReversibleMigrationWithNoOpDown])
}
}
describe("a migration missing an up stanza") {
val migrationContent =
"""-- description: creates events table
|-- authoredAt: 1370023262""".stripMargin
it("raises an InvalidMigrationException") {
val resource = new ByteArrayInputStream(migrationContent.getBytes)
val thrown = intercept[InvalidMigrationException] {
Parser().parse(resource)
}
thrown.errors("up") should equal("must be present")
}
}
describe("a migration missing a description stanza") {
val migrationContent = "-- authoredAt: 1370023262"
it("raises an InvalidMigrationException") {
val resource = new ByteArrayInputStream(migrationContent.getBytes)
val thrown = intercept[InvalidMigrationException] {
Parser().parse(resource)
}
thrown.errors("description") should equal("must be present")
}
}
describe("a migration missing an authoredAt stanza") {
val migrationContent = "-- description: creates events table"
it("raises an InvalidMigrationException") {
val resource = new ByteArrayInputStream(migrationContent.getBytes)
val thrown = intercept[InvalidMigrationException] {
Parser().parse(resource)
}
thrown.errors("authoredAt") should equal("must be present")
}
}
describe("a migration with a bogus authored at stanza") {
val migrationContent = "-- authoredAt: a long, long time ago"
it("raises an InvalidMigrationException") {
val resource = new ByteArrayInputStream(migrationContent.getBytes)
val thrown = intercept[InvalidMigrationException] {
Parser().parse(resource)
}
thrown.errors("authoredAt") should equal("must be a number greater than zero")
}
}
}
}
| comeara/pillar | src/test/scala/com/chrisomeara/pillar/ParserSpec.scala | Scala | mit | 5,892 |
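For readers unfamiliar with pillar, the fixtures referenced by this spec are plain CQL files with comment stanzas that the parser turns into migration objects. The sketch below reconstructs what such a file plausibly looks like from the expectations above; the `-- description:` and `-- authoredAt:` stanzas are confirmed by the error cases in the spec, while the `-- up:` / `-- down:` markers are assumptions.

object ExampleMigrationFixture {
  // Hypothetical migration file content, reconstructed for illustration only.
  val text: String =
    """-- description: creates events table
      |-- authoredAt: 1370023262
      |-- up:
      |CREATE TABLE events (
      |  batch_id text,
      |  occurred_at uuid,
      |  PRIMARY KEY (batch_id, occurred_at)
      |)
      |-- down:
      |DROP TABLE events""".stripMargin
}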
package com.evojam.nlp.util
import java.io.File
trait ObjectLoader {
def load[T](file: File, gzipped: Boolean = true): Option[T]
def loadResource[T](name: String, gzipped: Boolean = true): Option[T]
}
| evojam/scala-common | src/main/scala/com/evojam/nlp/util/ObjectLoader.scala | Scala | apache-2.0 | 207 |
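The trait above only fixes the loading contract; it says nothing about the serialization format. As a sketch, an implementation based on plain Java serialization plus optional GZIP decompression could look like the following — the choice of Java serialization is an assumption made here purely for illustration.

import java.io.{File, FileInputStream, InputStream, ObjectInputStream}
import java.util.zip.GZIPInputStream
import scala.util.Try

import com.evojam.nlp.util.ObjectLoader

// Hypothetical implementation, not part of the library: deserializes objects
// written with java.io.ObjectOutputStream, optionally wrapped in GZIP.
object JavaSerializationObjectLoader extends ObjectLoader {

  private def read[T](in: InputStream, gzipped: Boolean): Option[T] =
    Try {
      val stream = new ObjectInputStream(if (gzipped) new GZIPInputStream(in) else in)
      try stream.readObject().asInstanceOf[T]
      finally stream.close()
    }.toOption

  override def load[T](file: File, gzipped: Boolean): Option[T] =
    if (file.canRead) read[T](new FileInputStream(file), gzipped) else None

  override def loadResource[T](name: String, gzipped: Boolean): Option[T] =
    Option(getClass.getClassLoader.getResourceAsStream(name)).flatMap(read[T](_, gzipped))
}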
/*
* Copyright 2013 zhongl
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.zhongl.test
/**
 * @author <a href="mailto:[email protected]">zhongl</a>
*/
final class F extends I {
def m() {}
}
| chosen0ne/HouseMD | src/test/scala/com/github/zhongl/test/F.scala | Scala | apache-2.0 | 738 |
package com.ebay.neutrino
import com.ebay.neutrino.balancer.CNameResolver
import com.ebay.neutrino.config._
import org.scalatest.{FlatSpec, Matchers}
class PoolResolverTest extends FlatSpec with Matchers with NeutrinoTestSupport {
behavior of "Creating Pool Resolvers"
implicit val core = new NeutrinoCore(NeutrinoSettings.Empty)
def createAddresses(): Seq[CanonicalAddress] = {
Seq(CanonicalAddress("www.ebay.com", 80))
}
it should "create static resolvers using factor" in {
// None
PoolResolver("") should be (NoResolver)
PoolResolver("none") should be (NoResolver)
PoolResolver("NoNe") should be (NoResolver)
// Default; static
PoolResolver("default") should be (DefaultResolver)
PoolResolver("defaulT") should be (DefaultResolver)
// CNAMEs should be unique
val cname1 = PoolResolver("cname")
val cname2 = PoolResolver("cname")
cname1 shouldBe a [CNameResolver]
cname2 shouldBe a [CNameResolver]
cname1 should not be (cname2)
// Classname: Test valid
val classname = classOf[TestPoolResolver].getName
PoolResolver(classname) shouldBe a [TestPoolResolver]
// Classname: Test invalid classname
// This can be either ClassNotFound or NoDefFound on local for some reason
try {
PoolResolver(classname.toLowerCase)
}
catch {
case ex: ClassNotFoundException =>
case ex: NoClassDefFoundError =>
case th: Throwable => fail(th)
}
    // Classname: Should catch anything else
an [ClassNotFoundException] should be thrownBy PoolResolver("asdfjklab")
}
it should "resolve pools using 'none' resolver" in {
val resolver = PoolResolver("none")
val pools = neutrinoPools()
val passpool = VirtualPool(id="passpool1")
val testpool = VirtualPool(id="testpool1")
// Initial configuration; all our pools; no resolution
pools.update(passpool, testpool)
resolver.resolve(pools, request("/")) should be (None)
// Same with combinations
pools.update(passpool)
resolver.resolve(pools, request("/")) should be (None)
}
it should "resolve pools using 'default' resolver" in {
val resolver = PoolResolver("default")
val pools = neutrinoPools()
val passpool = VirtualPool(id="passpool1")
val testpool = VirtualPool(id="default")
// Initial configuration; includes testpool
pools.update(passpool, testpool)
resolver.resolve(pools, request("/")) shouldBe defined
resolver.resolve(pools, request("/")).get.settings should be (testpool)
// Clear the valid pool from the config
pools.update(passpool)
resolver.resolve(pools, request("/")) should be (None)
}
it should "resolve pools using 'classname' resolver" in {
val resolver = PoolResolver(classOf[TestPoolResolver].getName)
val pools = neutrinoPools()
val passpool = VirtualPool(id="passpool1")
val testpool = VirtualPool(id="testpool1")
// Initial configuration; includes testpool
pools.update(passpool, testpool)
resolver.resolve(pools, request("/")) shouldBe defined
resolver.resolve(pools, request("/")).get.settings should be (testpool)
// Clear the valid pool from the config
pools.update(passpool)
resolver.resolve(pools, request("/")) should be (None)
}
}
class NamedResolverTest extends FlatSpec with Matchers with NeutrinoTestSupport {
behavior of "Named pool-resolver"
implicit val core = new NeutrinoCore(NeutrinoSettings.Empty)
it should "resolve pools using a configured named resolver" in {
val resolver = new NamedResolver("test-name")
val pools = neutrinoPools()
val passpool = VirtualPool(id="passpool1")
val testpool = VirtualPool(id="test-name")
// Initial configuration; includes testpool
pools.update(passpool, testpool)
resolver.resolve(pools, request("/")) shouldBe defined
resolver.resolve(pools, request("/")).get.settings should be (testpool)
// Clear the valid pool from the config
pools.update(passpool)
resolver.resolve(pools, request("/")) should be (None)
}
it should "resolve pool using static get()" in {
val pools = neutrinoPools()
val poolA = VirtualPool(id="some-pool_A")
val poolB = VirtualPool(id="poolb")
// Initial configuration; includes testpool
pools.update(poolA, poolB)
    // Check non-matching and matching lookups; names are case-sensitive
NamedResolver.get(pools, "poola") should be (None)
NamedResolver.get(pools, "poolb").get.settings should be (poolB)
NamedResolver.get(pools, "poolB") should be (None)
}
}
/**
* Test resolver; will match any pool with an ID that starts with "test..."
*/
class TestPoolResolver extends PoolResolver {
/**
* Attempt to resolve a pool using this request.
*/
override def resolve(pools: NeutrinoPools, request: NeutrinoRequest): Option[NeutrinoPool] =
pools() find (_.settings.id.startsWith("test"))
}
| eBay/Neutrino | src/test/scala/com/ebay/neutrino/PoolResolverTest.scala | Scala | apache-2.0 | 4,920 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{InterruptedIOException, IOException, UncheckedIOException}
import java.nio.channels.ClosedByInterruptException
import java.util.UUID
import java.util.concurrent.{CountDownLatch, ExecutionException, TimeoutException, TimeUnit}
import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.locks.ReentrantLock
import scala.collection.JavaConverters._
import scala.collection.mutable.{Map => MutableMap}
import scala.util.control.NonFatal
import com.google.common.util.concurrent.UncheckedExecutionException
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkContext, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.connector.catalog.{SupportsWrite, Table}
import org.apache.spark.sql.connector.metric.CustomMetric
import org.apache.spark.sql.connector.read.streaming.{Offset => OffsetV2, ReadLimit, SparkDataStream}
import org.apache.spark.sql.connector.write.{LogicalWriteInfoImpl, SupportsTruncate}
import org.apache.spark.sql.connector.write.streaming.StreamingWrite
import org.apache.spark.sql.execution.command.StreamingExplainCommand
import org.apache.spark.sql.execution.datasources.v2.StreamWriterCommitProgress
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.connector.SupportsStreamingUpdateAsAppend
import org.apache.spark.sql.streaming._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.{Clock, UninterruptibleThread, Utils}
/** States for [[StreamExecution]]'s lifecycle. */
trait State
case object INITIALIZING extends State
case object ACTIVE extends State
case object TERMINATED extends State
case object RECONFIGURING extends State
/**
* Manages the execution of a streaming Spark SQL query that is occurring in a separate thread.
* Unlike a standard query, a streaming query executes repeatedly each time new data arrives at any
* [[Source]] present in the query plan. Whenever new data arrives, a [[QueryExecution]] is created
* and the results are committed transactionally to the given [[Sink]].
*
* @param deleteCheckpointOnStop whether to delete the checkpoint if the query is stopped without
* errors. Checkpoint deletion can be forced with the appropriate
* Spark configuration.
*/
abstract class StreamExecution(
override val sparkSession: SparkSession,
override val name: String,
val resolvedCheckpointRoot: String,
val analyzedPlan: LogicalPlan,
val sink: Table,
val trigger: Trigger,
val triggerClock: Clock,
val outputMode: OutputMode,
deleteCheckpointOnStop: Boolean)
extends StreamingQuery with ProgressReporter with Logging {
import org.apache.spark.sql.streaming.StreamingQueryListener._
protected val pollingDelayMs: Long = sparkSession.sessionState.conf.streamingPollingDelay
protected val minLogEntriesToMaintain: Int = sparkSession.sessionState.conf.minBatchesToRetain
require(minLogEntriesToMaintain > 0, "minBatchesToRetain has to be positive")
/**
* A lock used to wait/notify when batches complete. Use a fair lock to avoid thread starvation.
*/
protected val awaitProgressLock = new ReentrantLock(true)
protected val awaitProgressLockCondition = awaitProgressLock.newCondition()
private val initializationLatch = new CountDownLatch(1)
private val startLatch = new CountDownLatch(1)
private val terminationLatch = new CountDownLatch(1)
def logicalPlan: LogicalPlan
/**
* Tracks how much data we have processed and committed to the sink or state store from each
* input source.
* Only the scheduler thread should modify this field, and only in atomic steps.
* Other threads should make a shallow copy if they are going to access this field more than
* once, since the field's value may change at any time.
*/
@volatile
var committedOffsets = new StreamProgress
/**
* Tracks the offsets that are available to be processed, but have not yet be committed to the
* sink.
* Only the scheduler thread should modify this field, and only in atomic steps.
* Other threads should make a shallow copy if they are going to access this field more than
* once, since the field's value may change at any time.
*/
@volatile
var availableOffsets = new StreamProgress
/**
* Tracks the latest offsets for each input source.
* Only the scheduler thread should modify this field, and only in atomic steps.
* Other threads should make a shallow copy if they are going to access this field more than
* once, since the field's value may change at any time.
*/
@volatile
var latestOffsets = new StreamProgress
@volatile
var sinkCommitProgress: Option[StreamWriterCommitProgress] = None
/** The current batchId or -1 if execution has not yet been initialized. */
protected var currentBatchId: Long = -1
/** Metadata associated with the whole query */
protected val streamMetadata: StreamMetadata = {
val metadataPath = new Path(checkpointFile("metadata"))
val hadoopConf = sparkSession.sessionState.newHadoopConf()
StreamMetadata.read(metadataPath, hadoopConf).getOrElse {
val newMetadata = new StreamMetadata(UUID.randomUUID.toString)
StreamMetadata.write(newMetadata, metadataPath, hadoopConf)
newMetadata
}
}
/** Metadata associated with the offset seq of a batch in the query. */
protected var offsetSeqMetadata = OffsetSeqMetadata(
batchWatermarkMs = 0, batchTimestampMs = 0, sparkSession.conf)
/**
* A map of current watermarks, keyed by the position of the watermark operator in the
* physical plan.
*
* This state is 'soft state', which does not affect the correctness and semantics of watermarks
* and is not persisted across query restarts.
* The fault-tolerant watermark state is in offsetSeqMetadata.
*/
protected val watermarkMsMap: MutableMap[Int, Long] = MutableMap()
override val id: UUID = UUID.fromString(streamMetadata.id)
override val runId: UUID = UUID.randomUUID
/**
   * Pretty identifier string for printing in logs. Format is
* If name is set "queryName [id = xyz, runId = abc]" else "[id = xyz, runId = abc]"
*/
protected val prettyIdString =
Option(name).map(_ + " ").getOrElse("") + s"[id = $id, runId = $runId]"
/**
* A list of unique sources in the query plan. This will be set when generating logical plan.
*/
@volatile protected var uniqueSources: Map[SparkDataStream, ReadLimit] = Map.empty
/** Defines the internal state of execution */
protected val state = new AtomicReference[State](INITIALIZING)
@volatile
var lastExecution: IncrementalExecution = _
/** Holds the most recent input data for each source. */
protected var newData: Map[SparkDataStream, LogicalPlan] = _
@volatile
protected var streamDeathCause: StreamingQueryException = null
/* Get the call site in the caller thread; will pass this into the micro batch thread */
private val callSite = Utils.getCallSite()
/** Used to report metrics to coda-hale. This uses id for easier tracking across restarts. */
lazy val streamMetrics = new MetricsReporter(
this, s"spark.streaming.${Option(name).getOrElse(id)}")
/** Isolated spark session to run the batches with. */
private val sparkSessionForStream = sparkSession.cloneSession()
/**
* The thread that runs the micro-batches of this stream. Note that this thread must be
   * [[org.apache.spark.util.UninterruptibleThread]] to work around KAFKA-1894: interrupting a
   * running `KafkaConsumer` may cause an endless loop.
*/
val queryExecutionThread: QueryExecutionThread =
new QueryExecutionThread(s"stream execution thread for $prettyIdString") {
override def run(): Unit = {
// To fix call site like "run at <unknown>:0", we bridge the call site from the caller
// thread to this micro batch thread
sparkSession.sparkContext.setCallSite(callSite)
runStream()
}
}
/**
* A write-ahead-log that records the offsets that are present in each batch. In order to ensure
* that a given batch will always consist of the same data, we write to this log *before* any
   * processing is done. Thus, the Nth record in this log indicates data that is currently being
* processed and the N-1th entry indicates which offsets have been durably committed to the sink.
*/
val offsetLog = new OffsetSeqLog(sparkSession, checkpointFile("offsets"))
/**
* A log that records the batch ids that have completed. This is used to check if a batch was
* fully processed, and its output was committed to the sink, hence no need to process it again.
* This is used (for instance) during restart, to help identify which batch to run next.
*/
val commitLog = new CommitLog(sparkSession, checkpointFile("commits"))
/** Whether all fields of the query have been initialized */
private def isInitialized: Boolean = state.get != INITIALIZING
/** Whether the query is currently active or not */
override def isActive: Boolean = state.get != TERMINATED
/** Returns the [[StreamingQueryException]] if the query was terminated by an exception. */
override def exception: Option[StreamingQueryException] = Option(streamDeathCause)
/** Returns the path of a file with `name` in the checkpoint directory. */
protected def checkpointFile(name: String): String =
new Path(new Path(resolvedCheckpointRoot), name).toString
/** All checkpoint file operations should be performed through `CheckpointFileManager`. */
private val fileManager = CheckpointFileManager.create(new Path(resolvedCheckpointRoot),
sparkSession.sessionState.newHadoopConf)
/**
* Starts the execution. This returns only after the thread has started and [[QueryStartedEvent]]
* has been posted to all the listeners.
*/
def start(): Unit = {
logInfo(s"Starting $prettyIdString. Use $resolvedCheckpointRoot to store the query checkpoint.")
queryExecutionThread.setDaemon(true)
queryExecutionThread.start()
startLatch.await() // Wait until thread started and QueryStart event has been posted
}
/**
* Run the activated stream until stopped.
*/
protected def runActivatedStream(sparkSessionForStream: SparkSession): Unit
/**
* Activate the stream and then wrap a callout to runActivatedStream, handling start and stop.
*
* Note that this method ensures that [[QueryStartedEvent]] and [[QueryTerminatedEvent]] are
* posted such that listeners are guaranteed to get a start event before a termination.
* Furthermore, this method also ensures that [[QueryStartedEvent]] event is posted before the
* `start()` method returns.
*/
private def runStream(): Unit = {
try {
sparkSession.sparkContext.setJobGroup(runId.toString, getBatchDescriptionString,
interruptOnCancel = true)
sparkSession.sparkContext.setLocalProperty(StreamExecution.QUERY_ID_KEY, id.toString)
if (sparkSession.sessionState.conf.streamingMetricsEnabled) {
sparkSession.sparkContext.env.metricsSystem.registerSource(streamMetrics)
}
// `postEvent` does not throw non fatal exception.
val startTimestamp = triggerClock.getTimeMillis()
postEvent(new QueryStartedEvent(id, runId, name, formatTimestamp(startTimestamp)))
// Unblock starting thread
startLatch.countDown()
// While active, repeatedly attempt to run batches.
sparkSessionForStream.withActive {
// Adaptive execution can change num shuffle partitions, disallow
sparkSessionForStream.conf.set(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key, "false")
// Disable cost-based join optimization as we do not want stateful operations
// to be rearranged
sparkSessionForStream.conf.set(SQLConf.CBO_ENABLED.key, "false")
updateStatusMessage("Initializing sources")
// force initialization of the logical plan so that the sources can be created
logicalPlan
offsetSeqMetadata = OffsetSeqMetadata(
batchWatermarkMs = 0, batchTimestampMs = 0, sparkSessionForStream.conf)
if (state.compareAndSet(INITIALIZING, ACTIVE)) {
// Unblock `awaitInitialization`
initializationLatch.countDown()
runActivatedStream(sparkSessionForStream)
updateStatusMessage("Stopped")
} else {
// `stop()` is already called. Let `finally` finish the cleanup.
}
}
} catch {
case e if isInterruptedByStop(e, sparkSession.sparkContext) =>
// interrupted by stop()
updateStatusMessage("Stopped")
case e: IOException if e.getMessage != null
&& e.getMessage.startsWith(classOf[InterruptedException].getName)
&& state.get == TERMINATED =>
// This is a workaround for HADOOP-12074: `Shell.runCommand` converts `InterruptedException`
// to `new IOException(ie.toString())` before Hadoop 2.8.
updateStatusMessage("Stopped")
case e: Throwable =>
streamDeathCause = new StreamingQueryException(
toDebugString(includeLogicalPlan = isInitialized),
s"Query $prettyIdString terminated with exception: ${e.getMessage}",
e,
committedOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString,
availableOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString)
logError(s"Query $prettyIdString terminated with error", e)
updateStatusMessage(s"Terminated with exception: ${e.getMessage}")
// Rethrow the fatal errors to allow the user using `Thread.UncaughtExceptionHandler` to
// handle them
if (!NonFatal(e)) {
throw e
}
} finally queryExecutionThread.runUninterruptibly {
// The whole `finally` block must run inside `runUninterruptibly` to avoid being interrupted
      // when a query is stopped by the user. We need to make sure the following code finishes,
// otherwise it may throw `InterruptedException` to `UncaughtExceptionHandler` (SPARK-21248).
// Release latches to unblock the user codes since exception can happen in any place and we
// may not get a chance to release them
startLatch.countDown()
initializationLatch.countDown()
try {
stopSources()
state.set(TERMINATED)
currentStatus = status.copy(isTriggerActive = false, isDataAvailable = false)
// Update metrics and status
sparkSession.sparkContext.env.metricsSystem.removeSource(streamMetrics)
// Notify others
sparkSession.streams.notifyQueryTermination(StreamExecution.this)
postEvent(
new QueryTerminatedEvent(id, runId, exception.map(_.cause).map(Utils.exceptionString)))
// Delete the temp checkpoint when either force delete enabled or the query didn't fail
if (deleteCheckpointOnStop &&
(sparkSession.sessionState.conf
.getConf(SQLConf.FORCE_DELETE_TEMP_CHECKPOINT_LOCATION) || exception.isEmpty)) {
val checkpointPath = new Path(resolvedCheckpointRoot)
try {
logInfo(s"Deleting checkpoint $checkpointPath.")
fileManager.delete(checkpointPath)
} catch {
case NonFatal(e) =>
// Deleting temp checkpoint folder is best effort, don't throw non fatal exceptions
// when we cannot delete them.
logWarning(s"Cannot delete $checkpointPath", e)
}
}
} finally {
awaitProgressLock.lock()
try {
// Wake up any threads that are waiting for the stream to progress.
awaitProgressLockCondition.signalAll()
} finally {
awaitProgressLock.unlock()
}
terminationLatch.countDown()
}
}
}
private def isInterruptedByStop(e: Throwable, sc: SparkContext): Boolean = {
if (state.get == TERMINATED) {
StreamExecution.isInterruptionException(e, sc)
} else {
false
}
}
override protected def postEvent(event: StreamingQueryListener.Event): Unit = {
sparkSession.streams.postListenerEvent(event)
}
/** Stops all streaming sources safely. */
protected def stopSources(): Unit = {
uniqueSources.foreach { case (source, _) =>
try {
source.stop()
} catch {
case NonFatal(e) =>
logWarning(s"Failed to stop streaming source: $source. Resources may have leaked.", e)
}
}
}
/**
   * Interrupts the query execution thread and awaits its termination until it exceeds the
* timeout. The timeout can be set on "spark.sql.streaming.stopTimeout".
*
* @throws TimeoutException If the thread cannot be stopped within the timeout
*/
@throws[TimeoutException]
protected def interruptAndAwaitExecutionThreadTermination(): Unit = {
val timeout = math.max(
sparkSession.sessionState.conf.getConf(SQLConf.STREAMING_STOP_TIMEOUT), 0)
queryExecutionThread.interrupt()
queryExecutionThread.join(timeout)
if (queryExecutionThread.isAlive) {
val stackTraceException = new SparkException("The stream thread was last executing:")
stackTraceException.setStackTrace(queryExecutionThread.getStackTrace)
val timeoutException = new TimeoutException(
s"Stream Execution thread for stream $prettyIdString failed to stop within $timeout " +
s"milliseconds (specified by ${SQLConf.STREAMING_STOP_TIMEOUT.key}). See the cause on " +
s"what was being executed in the streaming query thread.")
timeoutException.initCause(stackTraceException)
throw timeoutException
}
}
/**
* Blocks the current thread until processing for data from the given `source` has reached at
* least the given `Offset`. This method is intended for use primarily when writing tests.
*/
private[sql] def awaitOffset(sourceIndex: Int, newOffset: OffsetV2, timeoutMs: Long): Unit = {
assertAwaitThread()
def notDone = {
val localCommittedOffsets = committedOffsets
if (sources == null) {
// sources might not be initialized yet
false
} else {
val source = sources(sourceIndex)
!localCommittedOffsets.contains(source) || localCommittedOffsets(source) != newOffset
}
}
while (notDone) {
awaitProgressLock.lock()
try {
awaitProgressLockCondition.await(timeoutMs, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
}
} finally {
awaitProgressLock.unlock()
}
}
logDebug(s"Unblocked at $newOffset for ${sources(sourceIndex)}")
}
/** A flag to indicate that a batch has completed with no new data available. */
@volatile protected var noNewData = false
/**
* Assert that the await APIs should not be called in the stream thread. Otherwise, it may cause
* dead-lock, e.g., calling any await APIs in `StreamingQueryListener.onQueryStarted` will block
* the stream thread forever.
*/
private def assertAwaitThread(): Unit = {
if (queryExecutionThread eq Thread.currentThread) {
throw new IllegalStateException(
"Cannot wait for a query state from the same thread that is running the query")
}
}
/**
* Await until all fields of the query have been initialized.
*/
def awaitInitialization(timeoutMs: Long): Unit = {
assertAwaitThread()
require(timeoutMs > 0, "Timeout has to be positive")
if (streamDeathCause != null) {
throw streamDeathCause
}
initializationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
}
}
override def processAllAvailable(): Unit = {
assertAwaitThread()
if (streamDeathCause != null) {
throw streamDeathCause
}
if (!isActive) return
awaitProgressLock.lock()
try {
noNewData = false
while (true) {
awaitProgressLockCondition.await(10000, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
}
if (noNewData || !isActive) {
return
}
}
} finally {
awaitProgressLock.unlock()
}
}
override def awaitTermination(): Unit = {
assertAwaitThread()
terminationLatch.await()
if (streamDeathCause != null) {
throw streamDeathCause
}
}
override def awaitTermination(timeoutMs: Long): Boolean = {
assertAwaitThread()
require(timeoutMs > 0, "Timeout has to be positive")
terminationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
} else {
!isActive
}
}
/** Expose for tests */
def explainInternal(extended: Boolean): String = {
if (lastExecution == null) {
"No physical plan. Waiting for data."
} else {
val explain = StreamingExplainCommand(lastExecution, extended = extended)
sparkSession.sessionState.executePlan(explain).executedPlan.executeCollect()
        .map(_.getString(0)).mkString("\n")
}
}
override def explain(extended: Boolean): Unit = {
// scalastyle:off println
println(explainInternal(extended))
// scalastyle:on println
}
override def explain(): Unit = explain(extended = false)
override def toString: String = {
s"Streaming Query $prettyIdString [state = $state]"
}
private def toDebugString(includeLogicalPlan: Boolean): String = {
val debugString =
s"""|=== Streaming Query ===
|Identifier: $prettyIdString
|Current Committed Offsets: $committedOffsets
|Current Available Offsets: $availableOffsets
|
|Current State: $state
|Thread State: ${queryExecutionThread.getState}""".stripMargin
if (includeLogicalPlan) {
      debugString + s"\n\nLogical Plan:\n$logicalPlan"
} else {
debugString
}
}
protected def getBatchDescriptionString: String = {
val batchDescription = if (currentBatchId < 0) "init" else currentBatchId.toString
s"""|${Option(name).getOrElse("")}
|id = $id
|runId = $runId
|batch = $batchDescription""".stripMargin
}
protected def createStreamingWrite(
table: SupportsWrite,
options: Map[String, String],
inputPlan: LogicalPlan): (StreamingWrite, Seq[CustomMetric]) = {
val info = LogicalWriteInfoImpl(
queryId = id.toString,
inputPlan.schema,
new CaseInsensitiveStringMap(options.asJava))
val writeBuilder = table.newWriteBuilder(info)
val write = outputMode match {
case Append =>
writeBuilder.build()
case Complete =>
// TODO: we should do this check earlier when we have capability API.
require(writeBuilder.isInstanceOf[SupportsTruncate],
table.name + " does not support Complete mode.")
writeBuilder.asInstanceOf[SupportsTruncate].truncate().build()
case Update =>
require(writeBuilder.isInstanceOf[SupportsStreamingUpdateAsAppend],
table.name + " does not support Update mode.")
writeBuilder.asInstanceOf[SupportsStreamingUpdateAsAppend].build()
}
(write.toStreaming, write.supportedCustomMetrics().toSeq)
}
protected def purge(threshold: Long): Unit = {
logDebug(s"Purging metadata at threshold=$threshold")
offsetLog.purge(threshold)
commitLog.purge(threshold)
}
}
object StreamExecution {
val QUERY_ID_KEY = "sql.streaming.queryId"
val IS_CONTINUOUS_PROCESSING = "__is_continuous_processing"
@scala.annotation.tailrec
def isInterruptionException(e: Throwable, sc: SparkContext): Boolean = e match {
// InterruptedIOException - thrown when an I/O operation is interrupted
// ClosedByInterruptException - thrown when an I/O operation upon a channel is interrupted
case _: InterruptedException | _: InterruptedIOException | _: ClosedByInterruptException =>
true
// The cause of the following exceptions may be one of the above exceptions:
//
// UncheckedIOException - thrown by codes that cannot throw a checked IOException, such as
// BiFunction.apply
// ExecutionException - thrown by codes running in a thread pool and these codes throw an
// exception
// UncheckedExecutionException - thrown by codes that cannot throw a checked
// ExecutionException, such as BiFunction.apply
case e2 @ (_: UncheckedIOException | _: ExecutionException | _: UncheckedExecutionException)
if e2.getCause != null =>
isInterruptionException(e2.getCause, sc)
case se: SparkException =>
val jobGroup = sc.getLocalProperty("spark.jobGroup.id")
if (jobGroup == null) return false
val errorMsg = se.getMessage
if (errorMsg.contains("cancelled") && errorMsg.contains(jobGroup) && se.getCause == null) {
true
} else if (se.getCause != null) {
isInterruptionException(se.getCause, sc)
} else {
false
}
case _ =>
false
}
/** Whether the path contains special chars that will be escaped when converting to a `URI`. */
def containsSpecialCharsInPath(path: Path): Boolean = {
path.toUri.getPath != new Path(path.toUri.toString).toUri.getPath
}
}
/**
 * A special thread to run the stream query. Some code is required to run in the QueryExecutionThread
* and will use `classOf[QueryExecutionThread]` to check.
*/
abstract class QueryExecutionThread(name: String) extends UninterruptibleThread(name)
| WeichenXu123/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala | Scala | apache-2.0 | 26,614 |
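The comment on `QueryExecutionThread` above alludes to code that checks which thread it is running on. A minimal sketch of such a check (not taken from Spark itself) might look like this:

import org.apache.spark.sql.execution.streaming.QueryExecutionThread

object StreamThreadCheck {
  // Illustrative helper: fails fast when called from a thread other than the
  // stream's QueryExecutionThread.
  def assertOnQueryExecutionThread(): Unit = {
    val current = Thread.currentThread()
    require(
      current.isInstanceOf[QueryExecutionThread],
      s"expected a QueryExecutionThread but was ${current.getClass.getName} (${current.getName})")
  }
}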
package s1.sjakki
import o1.grid._
import scala.collection.mutable.Buffer
/**
 - different rules and twists to be added at some point.
*/
class ChessBoard(boardWidth: Int = 8, boardLength: Int = 8) extends Grid[Square](boardWidth, boardLength) {
  // assumed element type: Piece (not defined in this file)
  private var whites = Buffer[Piece]() // white pieces
  private var blacks = Buffer[Piece]() // black pieces
for (x <- 0 until this.width; y <- 0 until this.height) { // Note to students: this is one way of looping through both x and y coordinates. You could achieve the same by using two separate for loops and nesting the "y loop" within the "x loop".
if (y == 1) {
this.update(new Coords(x,y), Pawn(black))
} else if (y == 6) {
this.update(new Coords(x,y), Pawn(white))
}
}
  // Back ranks: black pieces on row 0 (behind the black pawns on row 1),
  // white pieces on row 7 (behind the white pawns on row 6).
  this.update(new Coords(0,0), Tower(black))
  this.update(new Coords(7,0), Tower(black))
  this.update(new Coords(0,7), Tower(white))
  this.update(new Coords(7,7), Tower(white))
  this.update(new Coords(1,0), Knight(black))
  this.update(new Coords(6,0), Knight(black))
  this.update(new Coords(1,7), Knight(white))
  this.update(new Coords(6,7), Knight(white))
  this.update(new Coords(2,0), Bishop(black))
  this.update(new Coords(5,0), Bishop(black))
  this.update(new Coords(2,7), Bishop(white))
  this.update(new Coords(5,7), Bishop(white))
  this.update(new Coords(3,0), Queen(black))
  this.update(new Coords(4,0), King(black))
  this.update(new Coords(3,7), Queen(white))
  this.update(new Coords(4,7), King(white))
/** Causes the next robot to take a turn. The turn then immediately passes to the next robot.
* @see [[nextRobot]]
* @see [[RobotBody.takeTurn]] */
def advanceTurn() = {
robots(turn).takeTurn
if (turn == robots.length - 1) {
turn = 0
} else {
turn += 1
}
this.nextRobot
}
/** Causes all the robots in the world to take a turn, starting with the one whose turn it is next.
* (After this is done, the robot who originally was next up, will be that once again.)
* @see [[nextRobot]]
* @see [[advanceTurn]]
* @see [[RobotBody.takeTurn]] */
def advanceFullTurn() = {
var length = robots.length
while (length > 0) {
this.advanceTurn
length -= 1
}
}
}
| Trosterud/sjakki | src/s1/sjakki/sjakki.scala | Scala | mit | 2,207 |
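The board setup above calls piece constructors (`Pawn`, `Tower`, `Knight`, `Bishop`, `Queen`, `King`) and uses `white`/`black` values that are not defined in this file. A minimal sketch of definitions with the right shape follows; none of it is taken from the original repository, and the lower-case colour names simply mirror the call sites above even though Scala convention would capitalize them.

package s1.sjakki

// Hypothetical definitions matching the call sites in ChessBoard above.
sealed trait Color
case object white extends Color
case object black extends Color

sealed trait Piece { def color: Color }
case class Pawn(color: Color) extends Piece
case class Tower(color: Color) extends Piece   // i.e. a rook, named as in the board code
case class Knight(color: Color) extends Piece
case class Bishop(color: Color) extends Piece
case class Queen(color: Color) extends Piece
case class King(color: Color) extends Piece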
/* vim: set ts=2 et sw=2 sts=2 fileencoding=utf-8: */
import scala.util.control.Exception._
import scala.util.{Try, Success, Failure}
import scala.collection.JavaConverters._
import scala.xml.Elem
import org.apache.poi.ss.usermodel._
import org.apache.poi.xssf.usermodel.XSSFWorkbook
import org.scalatra._
import java.io._
import java.nio.file._
import java.util.Properties
import javax.servlet.http.HttpServlet
import exceler.common._
import exceler.excel._
import excellib.ImplicitConversions._
import exceler.abc.{AbcTableQuery}
import exceler.xls.{XlsRect,XlsTable}
import CommonLib._
class ExcelerServlet extends ScalatraServlet {
lazy val excelerConfig = ExcelerConfig
lazy val exceler = new Exceler(excelerConfig.dir)
before() {
contentType = "text/xml"
}
get("/:book/:sheet/:table") {
val result = this.exceler.query(
params("book"),
params("sheet"),
params("table"),
params.getOrElse("row", ""),
params.getOrElse("column", ""),
params.getOrElse("block", "")
)
<html>
<body>
<p>Result: {result.fold("")(
        _.map(_.xml.text).mkString(","))}</p>
</body>
</html>
}
get("/") {
val bookList = this.exceler.getBookList
<books>
{ for (book <- bookList) yield <book name={book} /> }
</books>
}
}
case class ExcelerBook(val workbook:Workbook) {
}
class Exceler(excelDir:String) {
private val bookMap =
getListOfFiles(this.excelDir)
.filter(_.canRead)
.filter(_.getName.endsWith(".xlsx"))
.map((f:File)=>(f.getName, ExcelerBook(
WorkbookFactory.create(f, null ,true)))).toMap
def getBookList() = bookMap.keys.toList
def getBook(bookName:String) = bookMap.get(bookName).map(_.workbook)
def query(
filename:String,
sheetname:String,
tablename:String,
rowKeys:String,
colKeys:String,
blockKey:String)
(implicit conv:(XlsRect=>String)) = {
for {
book <- getBook(filename)
sheet <- book.getSheetOption(sheetname)
table <- XlsTable(sheet).get(tablename)
} yield AbcTableQuery[XlsRect](table).queryByString(rowKeys, colKeys, blockKey)
}
}
object Exceler {
def query(
filename:String,
sheetname:String,
tablename:String,
rowKeys:String,
colKeys:String,
blockKey:String)
(implicit conv:(XlsRect=>String)) = {
for {
book <- getBook(filename)
sheet <- book.getSheetOption(sheetname)
table <- XlsTable(sheet).get(tablename)
} yield AbcTableQuery[XlsRect](table).queryByString(rowKeys, colKeys, blockKey)
}
lazy private val bookMap =
getListOfFiles(ExcelerConfig.dir)
.filter(_.canRead)
.filter(_.getName.endsWith(".xlsx"))
.map((f:File)=>(f.getName, ExcelerBook(
WorkbookFactory.create(f, null ,true)))).toMap
def getBookList() = bookMap.keys.toList
def getBook(bookName:String) = bookMap.get(bookName).map(_.workbook)
}
}
| wak109/exceler | jvm/src/main/scala/ExcelerServlet.scala | Scala | mit | 2,965 |
package examples
import problemtype.ExactCover
object ExactCoverExample0 extends App {
val U: Array[Int] = Array(1, 2, 3, 4, 5, 6, 7)
val S: Map[String, Array[Int]] = Map[String, Array[Int]](
"A" -> Array(3, 5, 6),
"B" -> Array(1, 4, 7),
"C" -> Array(2, 3, 6),
"D" -> Array(1, 4),
"E" -> Array(2, 7),
"F" -> Array(4, 5, 7)
)
val exactCover: ExactCover[Int] = new ExactCover[Int](U, S)
exactCover.solveAndPrint()
}
}
| csurfer/scala-dlx | src/main/scala/examples/ExactCoverExample0.scala | Scala | mit | 451 |
package mesosphere.marathon
package core.appinfo.impl
import mesosphere.marathon.core.appinfo.{ AppInfo, EnrichedTask, TaskCounts, TaskStatsByVersion }
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.deployment.{ DeploymentPlan, DeploymentStepInfo }
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.core.health.{ Health, HealthCheckManager }
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.pod.PodDefinition
import mesosphere.marathon.core.readiness.ReadinessCheckResult
import mesosphere.marathon.core.task.tracker.InstanceTracker
import mesosphere.marathon.raml.{ PodInstanceState, PodInstanceStatus, PodState, PodStatus, Raml }
import mesosphere.marathon.state._
import mesosphere.marathon.storage.repository.TaskFailureRepository
import org.slf4j.LoggerFactory
import scala.async.Async.{ async, await }
import scala.collection.immutable.{ Map, Seq }
import scala.concurrent.Future
import scala.util.control.NonFatal
// TODO(jdef) pods rename this to something like ResourceInfoBaseData
class AppInfoBaseData(
clock: Clock,
instanceTracker: InstanceTracker,
healthCheckManager: HealthCheckManager,
deploymentService: DeploymentService,
taskFailureRepository: TaskFailureRepository,
groupManager: GroupManager) {
import AppInfoBaseData._
import mesosphere.marathon.core.async.ExecutionContexts.global
if (log.isDebugEnabled) log.debug(s"new AppInfoBaseData $this")
lazy val runningDeployments: Future[Seq[DeploymentStepInfo]] = deploymentService.listRunningDeployments()
lazy val readinessChecksByAppFuture: Future[Map[PathId, Seq[ReadinessCheckResult]]] = {
runningDeployments.map { infos =>
infos.foldLeft(Map.empty[PathId, Vector[ReadinessCheckResult]].withDefaultValue(Vector.empty)) { (result, info) =>
result ++ info.readinessChecksByApp.map {
case (appId, checkResults) => appId -> (result(appId) ++ checkResults)
}
}
}
}
lazy val runningDeploymentsByAppFuture: Future[Map[PathId, Seq[Identifiable]]] = {
log.debug("Retrieving running deployments")
val allRunningDeploymentsFuture: Future[Seq[DeploymentPlan]] = runningDeployments.map(_.map(_.plan))
allRunningDeploymentsFuture.map { allDeployments =>
val byApp = Map.empty[PathId, Vector[DeploymentPlan]].withDefaultValue(Vector.empty)
val deploymentsByAppId = allDeployments.foldLeft(byApp) { (result, deploymentPlan) =>
deploymentPlan.affectedRunSpecIds.foldLeft(result) { (result, appId) =>
val newEl = appId -> (result(appId) :+ deploymentPlan)
result + newEl
}
}
deploymentsByAppId
.map { case (id, deployments) => id -> deployments.map(deploymentPlan => Identifiable(deploymentPlan.id)) }
.withDefaultValue(Seq.empty)
}
}
lazy val instancesByRunSpecFuture: Future[InstanceTracker.InstancesBySpec] = {
log.debug("Retrieve tasks")
instanceTracker.instancesBySpec()
}
def appInfoFuture(app: AppDefinition, embed: Set[AppInfo.Embed]): Future[AppInfo] = {
val appData = new AppData(app)
embed.foldLeft(Future.successful(AppInfo(app))) { (infoFuture, embed) =>
infoFuture.flatMap { info =>
embed match {
case AppInfo.Embed.Counts =>
appData.taskCountsFuture.map(counts => info.copy(maybeCounts = Some(counts)))
case AppInfo.Embed.Readiness =>
readinessChecksByAppFuture.map(checks => info.copy(maybeReadinessCheckResults = Some(checks(app.id))))
case AppInfo.Embed.Deployments =>
runningDeploymentsByAppFuture.map(deployments => info.copy(maybeDeployments = Some(deployments(app.id))))
case AppInfo.Embed.LastTaskFailure =>
appData.maybeLastTaskFailureFuture.map { maybeLastTaskFailure =>
info.copy(maybeLastTaskFailure = maybeLastTaskFailure)
}
case AppInfo.Embed.Tasks =>
appData.enrichedTasksFuture.map(tasks => info.copy(maybeTasks = Some(tasks)))
case AppInfo.Embed.TaskStats =>
appData.taskStatsFuture.map(taskStats => info.copy(maybeTaskStats = Some(taskStats)))
}
}
}
}
/**
   * Contains app-specific data that we need to retrieve.
*
* All data is lazy such that only data that is actually needed for the requested embedded information
* gets retrieved.
*/
private[this] class AppData(app: AppDefinition) {
lazy val now: Timestamp = clock.now()
lazy val instancesByIdFuture: Future[Map[Instance.Id, Instance]] = instancesByRunSpecFuture.map(_.specInstances(app.id)
.foldLeft(Map.newBuilder[Instance.Id, Instance]) { (result, instance) => result += instance.instanceId -> instance }
.result()
)
lazy val instancesFuture: Future[Seq[Instance]] = instancesByIdFuture.map(_.values.to[Seq])
lazy val healthByInstanceIdFuture: Future[Map[Instance.Id, Seq[Health]]] = {
log.debug(s"retrieving health counts for app [${app.id}]")
healthCheckManager.statuses(app.id)
}.recover {
case NonFatal(e) => throw new RuntimeException(s"while retrieving health counts for app [${app.id}]", e)
}
lazy val tasksForStats: Future[Seq[TaskForStatistics]] = {
for {
instances <- instancesFuture
healthCounts <- healthByInstanceIdFuture
} yield TaskForStatistics.forInstances(now, instances, healthCounts)
}.recover {
case NonFatal(e) => throw new RuntimeException(s"while calculating tasksForStats for app [${app.id}]", e)
}
lazy val taskCountsFuture: Future[TaskCounts] = {
log.debug(s"calculating task counts for app [${app.id}]")
for {
tasks <- tasksForStats
} yield TaskCounts(tasks)
}.recover {
case NonFatal(e) => throw new RuntimeException(s"while calculating task counts for app [${app.id}]", e)
}
lazy val taskStatsFuture: Future[TaskStatsByVersion] = {
log.debug(s"calculating task stats for app [${app.id}]")
for {
tasks <- tasksForStats
} yield TaskStatsByVersion(app.versionInfo, tasks)
}
lazy val enrichedTasksFuture: Future[Seq[EnrichedTask]] = {
log.debug(s"assembling rich tasks for app [${app.id}]")
def statusesToEnrichedTasks(instances: Seq[Instance], statuses: Map[Instance.Id, collection.Seq[Health]]): Seq[EnrichedTask] = {
instances.map { instance =>
EnrichedTask(app.id, instance.appTask, instance.agentInfo, statuses.getOrElse(instance.instanceId, Seq.empty[Health]).to[Seq])
}
}
for {
instances: Seq[Instance] <- instancesFuture
statuses <- healthByInstanceIdFuture
} yield statusesToEnrichedTasks(instances, statuses)
}.recover {
case NonFatal(e) => throw new RuntimeException(s"while assembling rich tasks for app [${app.id}]", e)
}
lazy val maybeLastTaskFailureFuture: Future[Option[TaskFailure]] = {
log.debug(s"retrieving last task failure for app [${app.id}]")
taskFailureRepository.get(app.id)
}.recover {
case NonFatal(e) => throw new RuntimeException(s"while retrieving last task failure for app [${app.id}]", e)
}
}
@SuppressWarnings(Array("all")) // async/await
def podStatus(podDef: PodDefinition): Future[PodStatus] =
async { // linter:ignore UnnecessaryElseBranch
val now = clock.now().toOffsetDateTime
val instances = await(instancesByRunSpecFuture).specInstances(podDef.id)
val specByVersion: Map[Timestamp, Option[PodDefinition]] = await(Future.sequence(
// TODO(jdef) if repositories ever support a bulk-load interface, use it here
instances.map(_.runSpecVersion).distinct.map { version =>
groupManager.podVersion(podDef.id, version.toOffsetDateTime).map(version -> _)
}
)).toMap
val instanceStatus = instances.flatMap { inst => podInstanceStatus(inst)(specByVersion.apply) }
val statusSince = if (instances.isEmpty) now else instanceStatus.map(_.statusSince).max
val state = await(podState(podDef.instances, instanceStatus, isPodTerminating(podDef.id)))
// TODO(jdef) pods need termination history
PodStatus(
id = podDef.id.toString,
spec = Raml.toRaml(podDef),
instances = instanceStatus,
status = state,
statusSince = statusSince,
lastUpdated = now,
lastChanged = statusSince
)
}
def podInstanceStatus(instance: Instance)(f: Timestamp => Option[PodDefinition]): Option[PodInstanceStatus] = {
val maybePodSpec: Option[PodDefinition] = f(instance.runSpecVersion)
if (maybePodSpec.isEmpty)
log.warn(s"failed to generate pod instance status for instance ${instance.instanceId}, " +
s"pod version ${instance.runSpecVersion} failed to load from persistent store")
maybePodSpec.map { pod => Raml.toRaml(pod -> instance) }
}
protected def isPodTerminating(id: PathId): Future[Boolean] =
runningDeployments.map { infos =>
infos.exists(_.plan.deletedPods.contains(id))
}
@SuppressWarnings(Array("all")) // async/await
protected def podState(
expectedInstanceCount: Integer,
instanceStatus: Seq[PodInstanceStatus],
isPodTerminating: Future[Boolean]): Future[PodState] =
async { // linter:ignore UnnecessaryElseBranch
val terminal = await(isPodTerminating)
val state = if (terminal) {
PodState.Terminal
} else if (instanceStatus.count(_.status == PodInstanceState.Stable) >= expectedInstanceCount) {
// TODO(jdef) add an "oversized" condition, or related message of num-current-instances > expected?
PodState.Stable
} else {
PodState.Degraded
}
state
}
}
object AppInfoBaseData {
private val log = LoggerFactory.getLogger(getClass)
}
| natemurthy/marathon | src/main/scala/mesosphere/marathon/core/appinfo/impl/AppInfoBaseData.scala | Scala | apache-2.0 | 9,855 |
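Each `AppInfo.Embed` value requested in `appInfoFuture` switches on exactly one of the lazy fields above, so callers pay only for the data they ask for. A hypothetical call site (not from Marathon) requesting counts and tasks only:

import scala.concurrent.Future
import mesosphere.marathon.core.appinfo.AppInfo
import mesosphere.marathon.core.appinfo.impl.AppInfoBaseData
import mesosphere.marathon.state.AppDefinition

object AppInfoUsageSketch {
  // Only Counts and Tasks are embedded, so deployments, readiness checks and
  // task failures are never fetched for this request.
  def countsAndTasks(baseData: AppInfoBaseData, app: AppDefinition): Future[AppInfo] =
    baseData.appInfoFuture(app, Set(AppInfo.Embed.Counts, AppInfo.Embed.Tasks))
}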
/* Copyright 2009-2021 EPFL, Lausanne */
import stainless.lang._
import stainless.annotation._
object Extern1 {
@extern
def plop(a: BigInt): BigInt = {
require(a > 0)
a + scala.math.abs(-3)
} ensuring {
_ > 0
}
def test(b: BigInt): BigInt = {
plop(if (b <= 0) -b+1 else b)
} ensuring {
_ > 0
}
def test2 = test(42)
def test3 = test(-2)
}
| epfl-lara/stainless | sbt-plugin/src/sbt-test/sbt-plugin/simple/success/src/main/scala/Extern1.scala | Scala | apache-2.0 | 380 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.models.json.workflow
import scala.collection.immutable.ListMap
import org.mockito.Mockito._
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{FlatSpec, Matchers}
import spray.json._
import io.deepsense.deeplang.DOperation
import io.deepsense.deeplang.catalogs.doperations.{DOperationCategory, DOperationCategoryNode, DOperationDescriptor}
import io.deepsense.models.json.workflow.DOperationCategoryNodeJsonProtocol._
class DOperationCategoryNodeJsonProtocolSpec extends FlatSpec with Matchers with MockitoSugar {
"DOperationCategoryNode" should "be correctly serialized to json" in {
val childCategory =
new DOperationCategory(DOperationCategory.Id.randomId, "mock child name", None) {}
val childNode = DOperationCategoryNode(Some(childCategory))
val operationDescriptor = mock[DOperationDescriptor]
when(operationDescriptor.id) thenReturn DOperation.Id.randomId
when(operationDescriptor.name) thenReturn "mock operation descriptor name"
when(operationDescriptor.description) thenReturn "mock operator descriptor description"
val node = DOperationCategoryNode(
None,
successors = ListMap(childCategory -> childNode),
operations = List(operationDescriptor))
val expectedJson = JsObject(
"catalog" -> JsArray(
JsObject(
"id" -> JsString(childCategory.id.toString),
"name" -> JsString(childCategory.name),
"catalog" -> JsArray(),
"items" -> JsArray())
),
"items" -> JsArray(
JsObject(
"id" -> JsString(operationDescriptor.id.toString),
"name" -> JsString(operationDescriptor.name),
"description" -> JsString(operationDescriptor.description)
)
)
)
node.toJson shouldBe expectedJson
}
}
| deepsense-io/seahorse-workflow-executor | workflowjson/src/test/scala/io/deepsense/models/json/workflow/DOperationCategoryNodeJsonProtocolSpec.scala | Scala | apache-2.0 | 2,411 |
/*
* The MIT License
*
* Copyright (c) 2016 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.fulcrumgenomics.fastq
import com.fulcrumgenomics.testing.UnitSpec
import com.fulcrumgenomics.commons.CommonsDef.PathToFastq
object TrimFastqTest {
private def fq(name:String, r1: String, r2:String): (FastqRecord, FastqRecord) = {
(FastqRecord(name=name, bases=r1, quals=r1.replaceAll(".", "C"), comment=None, readNumber=None),
FastqRecord(name=name, bases=r2, quals=r2.replaceAll(".", "C"), comment=None, readNumber=None))
}
// A set of test fastq records
val FastqRecords : Seq[(FastqRecord, FastqRecord)] = Seq(
fq("10x10", "ACGTACGTGT", "ACTGATCGAT"),
fq("10x20", "ACGTACGTGT", "ACTGATCGATACTGATCGAT"),
fq("20x20", "ACGTACGTGTACGTACGTGT", "ACTGATCGATACTGATCGAT")
)
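  // Naming convention used above (illustrative): "NxM" means read 1 is N bases long and
  // read 2 is M bases long, e.g. "10x20" pairs a 10bp R1 with a 20bp R2.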
}
/**
* Runs TrimFastq to test that it works!
*/
class TrimFastqTest extends UnitSpec {
/** Writes the test records to a pair of files and returns them. */
def fqFiles: (PathToFastq, PathToFastq) = {
val (r1, r2) = (makeTempFile("r1.", ".fq"), makeTempFile("r2.", ".fq"))
val (w1, w2) = (FastqWriter(r1), FastqWriter(r2))
TrimFastqTest.FastqRecords.foreach(fqs => {
w1.write(fqs._1)
w2.write(fqs._2)
})
w1.close()
w2.close()
(r1, r2)
}
"TrimFastq" should "trim a single file and not discard any records" in {
val (r1, r2) = fqFiles
val out = makeTempFile("trimmed.", ".fq")
new TrimFastq(input=Seq(r1), output=Seq(out), length=15, exclude=false).execute()
val r1Map = FastqSource(out).map(r => r.name -> r).toMap
r1Map.size shouldBe 3
r1Map("10x10").length shouldBe 10
r1Map("10x20").length shouldBe 10
r1Map("20x20").length shouldBe 15
}
it should "trim a single file and discard 2 records" in {
val (r1, r2) = fqFiles
val out = makeTempFile("trimmed.", ".fq")
new TrimFastq(input=Seq(r1), output=Seq(out), length=15, exclude=true).execute()
val r1Map = FastqSource(out).map(r => r.name -> r).toMap
r1Map.size shouldBe 1
r1Map("20x20").length shouldBe 15
}
it should "trim a single file and discard 0 records because they are all long enough" in {
val (r1, r2) = fqFiles
val out = makeTempFile("trimmed.", ".fq")
new TrimFastq(input=Seq(r1), output=Seq(out), length=5, exclude=true).execute()
val r1Map = FastqSource(out).map(r => r.name -> r).toMap
r1Map.size shouldBe 3
r1Map("10x10").length shouldBe 5
r1Map("10x20").length shouldBe 5
r1Map("20x20").length shouldBe 5
}
it should "not trim or discard any reads" in {
val (r1, r2) = fqFiles
val (r1Out, r2Out) = (makeTempFile("r1out.", ".fq"), makeTempFile("r2out.", ".fq"))
new TrimFastq(input=Seq(r1, r2), output=Seq(r1Out, r2Out), length=25, exclude=false).execute()
val r1Map = FastqSource(r1Out).map(r => r.name -> r).toMap
val r2Map = FastqSource(r2Out).map(r => r.name -> r).toMap
r1Map.size shouldBe 3
r2Map.size shouldBe r1Map.size
r1Map("10x10").length shouldBe 10
r1Map("10x20").length shouldBe 10
r1Map("20x20").length shouldBe 20
r2Map("10x10").length shouldBe 10
r2Map("10x20").length shouldBe 20
r2Map("20x20").length shouldBe 20
}
it should "trim but not discard some reads" in {
val (r1, r2) = fqFiles
val (r1Out, r2Out) = (makeTempFile("r1out.", ".fq"), makeTempFile("r2out.", ".fq"))
new TrimFastq(input=Seq(r1, r2), output=Seq(r1Out, r2Out), length=15, exclude=false).execute()
val r1Map = FastqSource(r1Out).map(r => r.name -> r).toMap
val r2Map = FastqSource(r2Out).map(r => r.name -> r).toMap
r1Map.size shouldBe 3
r2Map.size shouldBe r1Map.size
r1Map("10x10").length shouldBe 10
r1Map("10x20").length shouldBe 10
r1Map("20x20").length shouldBe 15
r2Map("10x10").length shouldBe 10
r2Map("10x20").length shouldBe 15
r2Map("20x20").length shouldBe 15
}
it should "trim some reads and discard others by pair in" in {
val (r1, r2) = fqFiles
val (r1Out, r2Out) = (makeTempFile("r1out.", ".fq"), makeTempFile("r2out.", ".fq"))
new TrimFastq(input=Seq(r1, r2), output=Seq(r1Out, r2Out), length=15, exclude=true).execute()
val r1Map = FastqSource(r1Out).map(r => r.name -> r).toMap
val r2Map = FastqSource(r2Out).map(r => r.name -> r).toMap
r1Map.size shouldBe 1
r2Map.size shouldBe r1Map.size
r1Map("20x20").length shouldBe 15
r2Map("20x20").length shouldBe 15
}
}
| fulcrumgenomics/fgbio | src/test/scala/com/fulcrumgenomics/fastq/TrimFastqTest.scala | Scala | mit | 5,507 |
/*
* Copyright 2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex
import java.io.{File, FileInputStream, InputStreamReader, Reader}
import java.net.URL
import java.util.concurrent.TimeUnit
import java.util.jar.JarFile
import java.util.{Timer, TimerTask}
import akka.actor._
import akka.pattern.ask
import akka.routing.FromConfig
import akka.util.Timeout
import com.typesafe.config._
import com.typesafe.scalalogging.LazyLogging
import org.squbs.lifecycle.ExtensionLifecycle
import org.squbs.unicomplex.ConfigUtil._
import org.squbs.unicomplex.UnicomplexBoot.CubeInit
import scala.annotation.tailrec
import scala.collection.concurrent.TrieMap
import scala.collection.mutable
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success, Try}
object UnicomplexBoot extends LazyLogging {
final val extConfigDirKey = "squbs.external-config-dir"
final val extConfigNameKey = "squbs.external-config-files"
final val actorSystemNameKey = "squbs.actorsystem-name"
val startupTimeout: Timeout =
Try(System.getProperty("startup.timeout").toLong) map { millis =>
akka.util.Timeout(millis, TimeUnit.MILLISECONDS)
} getOrElse (1 minute)
object StartupType extends Enumeration {
type StartupType = Value
val
// Identifies extensions
EXTENSIONS,
// Identifies actors as startup type
ACTORS,
// Identifies service as startup type
SERVICES = Value
}
case class CubeInit(info: Cube, components: Map[StartupType.Value, Seq[Config]])
val actorSystems = TrieMap.empty[String, ActorSystem]
def apply(addOnConfig: Config): UnicomplexBoot = {
val startTime = Timestamp(System.nanoTime, System.currentTimeMillis)
UnicomplexBoot(startTime, Option(addOnConfig), getFullConfig(Option(addOnConfig)))
}
def apply(actorSystemCreator : (String, Config) => ActorSystem): UnicomplexBoot = {
val startTime = Timestamp(System.nanoTime, System.currentTimeMillis)
UnicomplexBoot(startTime, None, getFullConfig(None), actorSystemCreator)
}
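  // Configuration layering, in brief: an explicit add-on Config, when supplied, is loaded on top
  // of ConfigFactory.load(); otherwise the files named by squbs.external-config-files (plus
  // "application") under squbs.external-config-dir are parsed and folded onto the base
  // configuration as fallbacks.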
def getFullConfig(addOnConfig: Option[Config]): Config = {
val baseConfig = ConfigFactory.load()
// 1. See whether add-on config is there.
addOnConfig match {
case Some(config) =>
ConfigFactory.load(config withFallback baseConfig)
case None =>
        // The config dir itself is needed to locate the external files, so it cannot be set in those files.
val configDir = new File(baseConfig.getString(extConfigDirKey))
import collection.JavaConversions._
val configNames = baseConfig.getStringList(extConfigNameKey)
configNames.add("application")
val parseOptions = ConfigParseOptions.defaults().setAllowMissing(true)
val addConfigs = configNames map {
name => ConfigFactory.parseFileAnySyntax(new File(configDir, name), parseOptions)
}
if (addConfigs.isEmpty) baseConfig
        else ConfigFactory.load((addConfigs :\ baseConfig) (_ withFallback _))
}
}
private[unicomplex] def scan(jarNames: Seq[String])(boot: UnicomplexBoot): UnicomplexBoot = {
val configEntries = jarNames map readConfigs
val jarConfigs = jarNames zip configEntries collect { case (jar, Some(cfg)) => (jar, cfg) }
resolveCubes(jarConfigs, boot.copy(jarNames = jarNames))
}
private[unicomplex] def scanResources(resources: Seq[URL])(boot: UnicomplexBoot): UnicomplexBoot = {
val jarConfigs = resources map readConfigs collect { case Some(jarCfg) => jarCfg }
resolveCubes(jarConfigs, boot)
}
private[unicomplex] def scanResources(boot: UnicomplexBoot): UnicomplexBoot = {
val loader = getClass.getClassLoader
import scala.collection.JavaConversions._
val resources = Seq("conf", "json", "properties") flatMap { ext => loader.getResources(s"META-INF/squbs-meta.$ext") }
scanResources(resources)(boot)
}
private[this] def resolveCubes(jarConfigs: Seq[(String, Config)], boot: UnicomplexBoot) = {
val cubeList = resolveAliasConflicts(jarConfigs map { case (jar, config) => readCube(jar, config) } collect {
case Some(cube) => cube
})
// Read listener and alias information.
val (activeAliases, activeListeners, missingAliases) = findListeners(boot.config, cubeList)
missingAliases foreach { name => logger.warn(s"Requested listener $name not found!") }
boot.copy(cubes = cubeList, jarConfigs = jarConfigs, listeners = activeListeners, listenerAliases = activeAliases)
}
private[this] def readConfigs(jarName: String): Option[Config] = {
// Make it extra lazy, so that we do not create the next File if the previous one succeeds.
val configExtensions = Stream("conf", "json", "properties")
val jarFile = new File(jarName)
var fileName: String = "" // Contains the evaluated config file name, used for reporting errors.
var configReader: Option[Reader] = None
try {
configReader =
if (jarFile.isDirectory) {
def getConfFile(ext: String) = {
fileName = "META-INF/squbs-meta." + ext
val confFile = new File(jarFile, fileName)
if (confFile.isFile) Option(new InputStreamReader(new FileInputStream(confFile), "UTF-8"))
else None
}
(configExtensions map getConfFile find { _ != None }).flatten
} else if (jarFile.isFile) {
val jar = new JarFile(jarFile)
def getConfFile(ext: String) = {
fileName = "META-INF/squbs-meta." + ext
val confFile = jar.getEntry(fileName)
if (confFile != null && !confFile.isDirectory)
Option(new InputStreamReader(jar.getInputStream(confFile), "UTF-8"))
else None
}
(configExtensions map getConfFile find { _ != None }).flatten
} else None
configReader map ConfigFactory.parseReader
} catch {
case e: Exception =>
logger.info(s"${e.getClass.getName} reading configuration from $jarName : $fileName.\\n ${e.getMessage}")
None
} finally {
configReader match {
case Some(reader) => reader.close()
case None =>
}
}
}
private[this] def readConfigs(resource: URL): Option[(String, Config)] = {
// Taking the best guess at the jar name or classpath entry. Should work most of the time.
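    // For illustration (hypothetical paths):
    //   jar:file:/opt/app/lib/mycube.jar!/META-INF/squbs-meta.conf -> /opt/app/lib/mycube.jar
    //   file:/opt/app/classes/META-INF/squbs-meta.conf             -> /opt/app/classes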
val jarName = resource.getProtocol match {
case "jar" =>
val jarURL = new URL(resource.getPath.split('!')(0))
jarURL.getProtocol match {
case "file" => jarURL.getPath
case _ => jarURL.toString
}
case "file" => // We assume the classpath entry ends before the last /META-INF/
val path = resource.getPath
val endIdx = path.lastIndexOf("/META-INF/")
if (endIdx > 0) path.substring(0, endIdx) else path
case _ =>
val path = resource.toString
val endIdx = path.lastIndexOf("/META-INF/")
if (endIdx > 0) path.substring(0, endIdx) else path
}
try {
val config = ConfigFactory.parseURL(resource)
Some((jarName, config))
} catch {
case e: Exception =>
logger.info(s"${e.getClass.getName} reading configuration from $jarName.\\n ${e.getMessage}")
None
}
}
private[this] def readCube(jarPath: String, config: Config): Option[CubeInit] = {
val cubeName =
try {
config.getString("cube-name")
} catch {
case e: ConfigException => return None
}
val cubeVersion =
try {
config.getString("cube-version")
} catch {
case e: ConfigException => return None
}
val cubeAlias = cubeName.substring(cubeName.lastIndexOf('.') + 1)
val c =
Seq(config.getOptionalConfigList("squbs-actors") map ((StartupType.ACTORS, _)),
config.getOptionalConfigList("squbs-services") map ((StartupType.SERVICES, _)),
config.getOptionalConfigList("squbs-extensions") map ((StartupType.EXTENSIONS, _)))
.collect { case Some((sType, configs)) => (sType, configs) } .toMap
Some(CubeInit(Cube(cubeAlias, cubeName, cubeVersion, jarPath), c))
}
// Resolve cube alias conflict by making it longer on demand.
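  // For example (hypothetical cube names): if "org.acme.search" and "org.other.search" both
  // shorten to the alias "search", they are re-aliased to "acme.search" and "other.search".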
@tailrec
private[unicomplex] def resolveAliasConflicts(cubeList: Seq[CubeInit]): Seq[CubeInit] = {
val aliasConflicts = cubeList map { cube =>
(cube.info.name, cube.info.fullName)
} groupBy (_._1) mapValues { seq =>
(seq map (_._2)).toSet
} filter { _._2.size > 1 }
if (aliasConflicts.isEmpty) cubeList
else {
var updated = false
val newAliases = (aliasConflicts map { case (alias, conflicts) =>
conflicts.toSeq map { symName =>
val idx = symName.lastIndexOf('.', symName.length - alias.length - 2)
if (idx > 0) {
updated = true
(symName, symName.substring(idx + 1))
}
else (symName, symName)
}
}).flatten.toSeq
if (updated) {
val updatedList = cubeList map { cube =>
newAliases find { case (symName, alias) => symName == cube.info.fullName } match {
case Some((symName, alias)) => cube.copy(info = cube.info.copy(name = alias))
case None => cube
}
}
resolveAliasConflicts(updatedList)
}
else sys.error("Duplicate cube names: " + (aliasConflicts flatMap (_._2) mkString ", "))
}
}
private [unicomplex] def startComponents(cube: CubeInit, aliases: Map[String, String])
(implicit actorSystem: ActorSystem) = {
import cube.components
import cube.info.{fullName, jarPath, name, version}
val cubeSupervisor = actorSystem.actorOf(Props[CubeSupervisor], name)
Unicomplex(actorSystem).uniActor ! CubeRegistration(cube.info, cubeSupervisor)
def startActor(actorConfig: Config): Option[(String, String, String, Class[_])] = {
val className = actorConfig getString "class-name"
val name = actorConfig getOptionalString "name" getOrElse (className substring (className.lastIndexOf('.') + 1))
val withRouter = actorConfig getOptionalBoolean "with-router" getOrElse false
val initRequired = actorConfig getOptionalBoolean "init-required" getOrElse false
try {
val clazz = Class.forName(className, true, getClass.getClassLoader)
clazz asSubclass classOf[Actor]
// Create and the props for this actor to be started, optionally enabling the router.
val props = if (withRouter) Props(clazz) withRouter FromConfig() else Props(clazz)
// Send the props to be started by the cube.
        cubeSupervisor ! StartCubeActor(props, name, initRequired)
Some((fullName, name, version, clazz))
} catch {
case e: Exception =>
val t = getRootCause(e)
logger.warn(s"Can't load actor: $className.\\n" +
s"Cube: $fullName $version\\n" +
s"Path: $jarPath\\n" +
s"${t.getClass.getName}: ${t.getMessage}")
t.printStackTrace()
None
}
}
def startServiceRoute(clazz: Class[_], proxyName : Option[String], webContext: String, listeners: Seq[String]) = {
try {
val routeClass = clazz asSubclass classOf[RouteDefinition]
val props = Props(classOf[RouteActor], webContext, routeClass)
val className = clazz.getSimpleName
val actorName =
if (webContext.length > 0) s"${webContext.replace('/', '_')}-$className-route"
else s"root-$className-route"
cubeSupervisor ! StartCubeService(webContext, listeners, props, actorName, proxyName, initRequired = true)
Some((fullName, name, version, clazz))
} catch {
case e: ClassCastException => None
}
}
// This same creator class is available in Akka's Props.scala but it is inaccessible to us.
class TypedCreatorFunctionConsumer(clz: Class[_ <: Actor], creator: () => Actor) extends IndirectActorProducer {
override def actorClass = clz
override def produce() = creator()
}
def startServiceActor(clazz: Class[_], proxyName : Option[String], webContext: String, listeners: Seq[String],
initRequired: Boolean) = {
try {
val actorClass = clazz asSubclass classOf[Actor]
def actorCreator: Actor = WebContext.createWithContext[Actor](webContext) { actorClass.newInstance() }
val props = Props(classOf[TypedCreatorFunctionConsumer], clazz, actorCreator _)
val className = clazz.getSimpleName
val actorName =
if (webContext.length > 0) s"${webContext.replace('/', '_')}-$className-handler"
else s"root-$className-handler"
cubeSupervisor ! StartCubeService(webContext, listeners, props, actorName, proxyName, initRequired)
Some((fullName, name, version, actorClass))
} catch {
case e: ClassCastException => None
}
}
def getProxyName(serviceConfig: Config): Option[String] = {
Try {
serviceConfig.getString("proxy-name").trim
} match {
case Success(proxyName) => Some(proxyName)
case Failure(t) => None // not defined
}
}
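    // Listener resolution in startService below, in brief: explicit listener names are mapped
    // through the supplied aliases, "*" expands to all aliased listeners, and names that are not
    // configured are logged and ignored.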
def startService(serviceConfig: Config): Option[(String, String, String, Class[_])] =
try {
val className = serviceConfig.getString("class-name")
val clazz = Class.forName(className, true, getClass.getClassLoader)
val proxyName = getProxyName(serviceConfig)
val webContext = serviceConfig.getString("web-context")
val listeners = serviceConfig.getOptionalStringList("listeners").fold(Seq("default-listener"))({ list =>
val listenerMapping = list collect {
case entry if entry != "*" => (entry, aliases get entry)
}
listenerMapping foreach {
// Make sure we report any missing listeners
case (entry, None) =>
logger.warn(s"Listener $entry required by $fullName is not configured. Ignoring.")
case _ =>
}
if (list contains "*") aliases.values.toSeq.distinct
else listenerMapping collect { case (entry, Some(listener)) => listener }
})
        val service = startServiceRoute(clazz, proxyName, webContext, listeners) orElse startServiceActor(
clazz, proxyName, webContext, listeners, serviceConfig getOptionalBoolean "init-required" getOrElse false)
if (service == None) throw new ClassCastException(s"Class $className is neither a RouteDefinition nor an Actor.")
service
} catch {
case e: Exception =>
val t = getRootCause(e)
logger.warn(s"Can't load service definition $serviceConfig.\\n" +
s"Cube: $fullName $version\\n" +
s"Path: $jarPath\\n" +
s"${t.getClass.getName}: ${t.getMessage}")
t.printStackTrace()
None
}
val actorConfigs = components.getOrElse(StartupType.ACTORS, Seq.empty)
val routeConfigs = components.getOrElse(StartupType.SERVICES, Seq.empty)
val actorInfo = actorConfigs map startActor
val routeInfo = routeConfigs map startService
cubeSupervisor ! Started // Tell the cube all actors to be started are started.
logger.info(s"Started cube $fullName $version")
(actorInfo ++ routeInfo) collect { case Some(component) => component }
}
def configuredListeners(config: Config): Map[String, Config] = {
import collection.JavaConversions._
val listeners = config.root.toSeq collect {
case (n, v: ConfigObject) if v.toConfig.getOptionalString("type") == Some("squbs.listener") => (n, v.toConfig)
}
// Check for duplicates
val listenerMap = mutable.Map.empty[String, Config]
listeners foreach { case (name, cfg) =>
listenerMap.get(name) match {
case Some(_) => logger.warn(s"Duplicate listener $name already declared. Ignoring.")
case None => listenerMap += name -> cfg
}
}
listenerMap.toMap
}
def findListenerAliases(listeners: Map[String, Config]): Map[String, String] = {
val aliases =
for ((name, config) <- listeners) yield {
val aliasNames = config.getOptionalStringList("aliases") getOrElse Seq.empty[String]
(name, name) +: (aliasNames map ((_, name)))
}
val aliasMap = mutable.Map.empty[String, String]
// Check for duplicate aliases
for {
group <- aliases
(alias, listener) <- group
} {
aliasMap.get(alias) match {
case Some(l) =>
logger.warn(s"Duplicate alias $alias for listener $listener already declared for listener $l. Ignoring.")
case None => aliasMap += alias -> listener
}
}
aliasMap.toMap
}
def findListeners(config: Config, cubes: Seq[CubeInit]) = {
val demandedListeners =
for {
routes <- cubes.map { _.components.get(StartupType.SERVICES) } .collect { case Some(routes) => routes } .flatten
routeListeners <- routes getOptionalStringList "listeners" getOrElse Seq("default-listener")
if routeListeners != "*" // Filter out wildcard listener bindings, not starting those.
} yield {
routeListeners
}
val listeners = configuredListeners(config)
val aliases = findListenerAliases(listeners)
val activeAliases = aliases filter { case (n, _) => demandedListeners contains n }
val missingAliases = demandedListeners filterNot { l => activeAliases exists { case (n, _) => n == l } }
val activeListenerNames = activeAliases.values
val activeListeners = listeners filter { case (n, c) => activeListenerNames exists (_ == n) }
(activeAliases, activeListeners, missingAliases)
}
def startServiceInfra(boot: UnicomplexBoot)(implicit actorSystem: ActorSystem) {
import actorSystem.dispatcher
val startTime = System.nanoTime
implicit val timeout = Timeout((boot.listeners.size * 5) seconds)
val ackFutures =
for ((listenerName, config) <- boot.listeners) yield {
Unicomplex(actorSystem).uniActor ? StartListener(listenerName, config)
}
// Block for the web service to be started.
Await.ready(Future.sequence(ackFutures), timeout.duration)
val elapsed = (System.nanoTime - startTime) / 1000000
logger.info(s"Web Service started in $elapsed milliseconds")
}
private[unicomplex] def getRootCause(e: Exception) = {
var t : Throwable = e
var cause = e.getCause
while (cause != null) {
t = cause
cause = t.getCause
}
t
}
}
case class UnicomplexBoot private[unicomplex] (startTime: Timestamp,
addOnConfig: Option[Config] = None,
config: Config,
actorSystemCreator: (String, Config) => ActorSystem =
{(name, config) => ActorSystem(name, config)},
cubes: Seq[CubeInit] = Seq.empty,
listeners: Map[String, Config] = Map.empty,
listenerAliases: Map[String, String] = Map.empty,
jarConfigs: Seq[(String, Config)] = Seq.empty,
jarNames: Seq[String] = Seq.empty,
actors: Seq[(String, String, String, Class[_])] = Seq.empty,
extensions: Seq[Extension] = Seq.empty,
started: Boolean = false,
stopJVM: Boolean = false) extends LazyLogging {
import UnicomplexBoot._
def actorSystemName = config.getString(actorSystemNameKey)
def actorSystem = UnicomplexBoot.actorSystems(actorSystemName)
def externalConfigDir = config.getString(extConfigDirKey)
def createUsing(actorSystemCreator: (String, Config) => ActorSystem) = copy(actorSystemCreator = actorSystemCreator)
def scanComponents(jarNames: Seq[String]): UnicomplexBoot = scan(jarNames)(this)
def scanResources(resources: Seq[URL]): UnicomplexBoot = UnicomplexBoot.scanResources(resources)(this)
def scanResources(): UnicomplexBoot = UnicomplexBoot.scanResources(this)
def initExtensions: UnicomplexBoot = {
val initSeq = cubes.flatMap { cube =>
cube.components.getOrElse(StartupType.EXTENSIONS, Seq.empty) map { config =>
val className = config getString "class-name"
val seqNo = config getOptionalInt "sequence" getOrElse Int.MaxValue
(seqNo, className, cube)
}
} .sortBy (_._1)
// load extensions
val extensions = initSeq map (loadExtension _).tupled
// preInit extensions
val preInitExtensions = extensions map extensionOp("preInit", _.preInit())
// Init extensions
val initExtensions = preInitExtensions map extensionOp("init", _.init())
copy(extensions = initExtensions)
}
def stopJVMOnExit: UnicomplexBoot = copy(stopJVM = true)
def start(): UnicomplexBoot = synchronized {
if (started) throw new IllegalStateException("Unicomplex already started!")
// Extensions may have changed the config. So we need to reload the config here.
val newConfig = UnicomplexBoot.getFullConfig(addOnConfig)
val newName = config.getString(UnicomplexBoot.actorSystemNameKey)
implicit val actorSystem = {
val system = actorSystemCreator(newName, newConfig)
system.registerExtension(Unicomplex)
Unicomplex(system).setScannedComponents(jarNames)
system
}
UnicomplexBoot.actorSystems += actorSystem.name -> actorSystem
actorSystem.registerOnTermination { UnicomplexBoot.actorSystems -= actorSystem.name }
registerExtensionShutdown(actorSystem)
val uniActor = Unicomplex(actorSystem).uniActor
// Send start time to Unicomplex
uniActor ! startTime
// Register extensions in Unicomplex actor
uniActor ! Extensions(extensions)
val startServices = listeners.nonEmpty && cubes.exists(_.components.contains(StartupType.SERVICES))
// Notify Unicomplex that services will be started.
if (startServices) uniActor ! PreStartWebService(listeners)
// Signal started to Unicomplex.
uniActor ! Started
// Start all actors
val actors = cubes.map(startComponents(_, listenerAliases)).flatten
// Start the service infrastructure if services are enabled and registered.
if (startServices) startServiceInfra(this)
val postInitExtensions = extensions map extensionOp("postInit", _.postInit())
// Update the extension errors in Unicomplex actor, in case there are errors.
uniActor ! Extensions(postInitExtensions)
{
// Tell Unicomplex we're done.
implicit val timeout = startupTimeout
val stateFuture = Unicomplex(actorSystem).uniActor ? Activate
Try(Await.result(stateFuture, timeout.duration)) match {
case Success(Active) => logger.info(s"[$actorSystemName] activated")
case Success(Failed) => logger.info(s"[$actorSystemName] initialization failed.")
case e => logger.warn(s"[$actorSystemName] awaiting confirmation, $e.")
}
}
val boot = copy(config = actorSystem.settings.config, actors = actors, extensions = postInitExtensions, started = true)
Unicomplex(actorSystem).boot send boot
boot
}
def registerExtensionShutdown(actorSystem: ActorSystem) {
if (extensions.nonEmpty) {
actorSystem.registerOnTermination {
// Run the shutdown in a different thread, not in the ActorSystem's onTermination thread.
import scala.concurrent.Future
// Kill the JVM if the shutdown takes longer than the timeout.
if (stopJVM) {
val shutdownTimer = new Timer(true)
shutdownTimer.schedule(new TimerTask {
def run() {
System.exit(0)
}
}, 5000)
}
// Then run the shutdown in the global execution context.
import scala.concurrent.ExecutionContext.Implicits.global
Future {
extensions.reverse foreach { e =>
import e.info._
e.extLifecycle foreach (_.shutdown())
logger.info(s"Shutting down extension ${e.extLifecycle.getClass.getName} in $fullName $version")
}
} onComplete {
case Success(result) =>
logger.info(s"ActorSystem ${actorSystem.name} shutdown complete")
if (stopJVM) System.exit(0)
case Failure(e) =>
logger.error(s"Error occurred during shutdown extensions: $e", e)
if (stopJVM) System.exit(-1)
}
}
}
}
def loadExtension(seqNo: Int, className: String, cube: CubeInit): Extension = {
try {
val clazz = Class.forName(className, true, getClass.getClassLoader)
val extLifecycle = ExtensionLifecycle(this) { clazz.asSubclass(classOf[ExtensionLifecycle]).newInstance }
Extension(cube.info, Some(extLifecycle), Seq.empty)
} catch {
case e: Exception =>
import cube.info._
val t = getRootCause(e)
logger.warn(s"Can't load extension $className.\\n" +
s"Cube: $fullName $version\\n" +
s"Path: $jarPath\\n" +
s"${t.getClass.getName}: ${t.getMessage}")
t.printStackTrace()
Extension(cube.info, None, Seq("load" -> t))
}
}
def extensionOp(opName: String, opFn: ExtensionLifecycle => Unit)
(extension: Extension): Extension = {
import extension.info._
extension.extLifecycle match {
case None => extension
case Some(l) =>
try {
opFn(l)
logger.info(s"Success $opName extension ${l.getClass.getName} in $fullName $version")
extension
} catch {
case e: Exception =>
val t = getRootCause(e)
logger.warn(s"Error on $opName extension ${l.getClass.getName}\\n" +
s"Cube: $fullName $version\\n" +
s"${t.getClass.getName}: ${t.getMessage}")
t.printStackTrace()
extension.copy(exceptions = extension.exceptions :+ (opName -> t))
}
}
}
}
| keshin/squbs | squbs-unicomplex/src/main/scala/org/squbs/unicomplex/UnicomplexBoot.scala | Scala | apache-2.0 | 26,505 |
package org.jetbrains.sbt
import java.io.{BufferedInputStream, File, FileInputStream}
import java.net.URI
import java.util.Properties
import java.util.jar.JarFile
import com.intellij.openapi.externalSystem.model.{DataNode, ProjectKeys}
import com.intellij.openapi.externalSystem.model.project.ModuleData
import com.intellij.openapi.externalSystem.service.project.manage.ProjectDataManager
import com.intellij.openapi.externalSystem.util.ExternalSystemApiUtil
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import com.intellij.util.BooleanFunction
import org.jetbrains.plugins.scala.project.Version
import org.jetbrains.sbt.project.SbtProjectSystem
import org.jetbrains.sbt.project.data.SbtModuleData
/**
* Created by jast on 2017-02-20.
*/
object SbtUtil {
/** Directory for global sbt plugins given sbt version */
def globalPluginsDirectory(sbtVersion: Version): File =
getFileProperty(globalPluginsProperty).getOrElse {
val base = globalBase(sbtVersion.presentation)
new File(base, "plugins")
}
private val globalPluginsProperty = "sbt.global.plugins"
private val globalBaseProperty = "sbt.global.base"
/** Base directory for global sbt settings. */
def globalBase(version: String): File =
getFileProperty(globalBaseProperty).getOrElse(defaultVersionedGlobalBase(version))
private def getFileProperty(name: String): Option[File] = Option(System.getProperty(name)) flatMap { path =>
if (path.isEmpty) None else Some(new File(path))
}
private def fileProperty(name: String): File = new File(System.getProperty(name))
private def defaultGlobalBase = fileProperty("user.home") / ".sbt"
private def defaultVersionedGlobalBase(sbtVersion: String): File = defaultGlobalBase / sbtVersion
def majorVersion(sbtVersion: Version): Version = sbtVersion.major(2)
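  // Detection falls back in order: project/build.properties, the launcher's sbt.boot.properties
  // [app] section, the launcher jar's Implementation-Version manifest attribute, and finally
  // Sbt.LatestVersion.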
def detectSbtVersion(directory: File, sbtLauncher: => File): String =
sbtVersionIn(directory)
.orElse(sbtVersionInBootPropertiesOf(sbtLauncher))
.orElse(implementationVersionOf(sbtLauncher))
.getOrElse(Sbt.LatestVersion)
  def numbersOf(version: String): Seq[String] = version.split("\\D").toSeq
private def implementationVersionOf(jar: File): Option[String] =
readManifestAttributeFrom(jar, "Implementation-Version")
private def readManifestAttributeFrom(file: File, name: String): Option[String] = {
val jar = new JarFile(file)
try {
Option(jar.getJarEntry("META-INF/MANIFEST.MF")).flatMap { entry =>
val input = new BufferedInputStream(jar.getInputStream(entry))
val manifest = new java.util.jar.Manifest(input)
val attributes = manifest.getMainAttributes
Option(attributes.getValue(name))
}
}
finally {
jar.close()
}
}
private def sbtVersionInBootPropertiesOf(jar: File): Option[String] = {
val appProperties = readSectionFromBootPropertiesOf(jar, sectionName = "app")
for {
name <- appProperties.get("name")
if name == "sbt"
versionStr <- appProperties.get("version")
version <- "\\\\d+(\\\\.\\\\d+)+".r.findFirstIn(versionStr)
} yield version
}
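  // The launcher's sbt/sbt.boot.properties is INI-like; the section read here looks roughly like
  // this (illustrative values):
  //   [app]
  //     name: sbt
  //     version: 0.13.15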
private def readSectionFromBootPropertiesOf(launcherFile: File, sectionName: String): Map[String, String] = {
val Property = "^\\\\s*(\\\\w+)\\\\s*:(.+)".r.unanchored
def findProperty(line: String): Option[(String, String)] = {
line match {
case Property(name, value) => Some((name, value.trim))
case _ => None
}
}
val jar = new JarFile(launcherFile)
try {
Option(jar.getEntry("sbt/sbt.boot.properties")).fold(Map.empty[String, String]) { entry =>
val lines = scala.io.Source.fromInputStream(jar.getInputStream(entry)).getLines()
val sectionLines = lines
.dropWhile(_.trim != s"[$sectionName]").drop(1)
.takeWhile(!_.trim.startsWith("["))
sectionLines.flatMap(findProperty).toMap
}
} finally {
jar.close()
}
}
private def sbtVersionIn(directory: File): Option[String] = {
val propertiesFile = directory / "project" / "build.properties"
if (propertiesFile.exists()) readPropertyFrom(propertiesFile, "sbt.version") else None
}
private def readPropertyFrom(file: File, name: String): Option[String] = {
using(new BufferedInputStream(new FileInputStream(file))) { input =>
val properties = new Properties()
properties.load(input)
Option(properties.getProperty(name))
}
}
def getSbtModuleData(module: Module): Option[SbtModuleData] = {
val project = module.getProject
val moduleId = ExternalSystemApiUtil.getExternalProjectId(module) // nullable, but that's okay for use in predicate
getSbtModuleData(project, moduleId)
}
def getSbtModuleData(project: Project, moduleId: String): Option[SbtModuleData] = {
    // Seems hacky, but there doesn't appear to be a better way to get the data for the selected module yet.
val predicate = new BooleanFunction[DataNode[ModuleData]] {
override def fun(s: DataNode[ModuleData]): Boolean = s.getData.getId == moduleId
}
val emptyURI = new URI("")
val dataManager = ProjectDataManager.getInstance()
// TODO instead of silently not running a task, collect failures, report to user
for {
projectInfo <- Option(dataManager.getExternalProjectData(project, SbtProjectSystem.Id, project.getBasePath))
projectStructure <- Option(projectInfo.getExternalProjectStructure)
moduleDataNode <- Option(ExternalSystemApiUtil.find(projectStructure, ProjectKeys.MODULE, predicate))
moduleSbtDataNode <- Option(ExternalSystemApiUtil.find(moduleDataNode, SbtModuleData.Key))
data = {
dataManager.ensureTheDataIsReadyToUse(moduleSbtDataNode)
moduleSbtDataNode.getData
}
// buildURI should never be empty for true sbt projects, but filtering here handles synthetic projects
// created from AAR files. Should implement a more elegant solution for AARs.
if data.buildURI != emptyURI
} yield {
data
}
}
def getSbtProjectIdSeparated(module: Module): (Option[String], Option[String]) =
getSbtModuleData(module) match {
case Some(data) => (Some(data.buildURI.toString), Some(data.id))
case _ => (None, None)
}
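  // Illustrative result, assuming a build URI of file:/home/dev/proj/ and module id "core":
  // makeSbtProjectId yields "{file:/home/dev/proj/}core".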
def makeSbtProjectId(data: SbtModuleData): String = {
val uri = data.buildURI
val id = data.id
s"{$uri}$id"
}
}
| ilinum/intellij-scala | src/org/jetbrains/sbt/SbtUtil.scala | Scala | apache-2.0 | 6,409 |
/*
* Copyright (C) 2015 Language Technology Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package utils
import com.typesafe.scalalogging.slf4j.Logger
import org.slf4j.{Logger => Underlying}
import testFactories.FlatSpecWithCommonTraits
class BenchmarkTest extends FlatSpecWithCommonTraits {
final class BenchmarkableWithMockedLogger[B](mocked: Underlying)(op: => B) extends Benchmarkable(op) {
override lazy val logger = Logger(mocked)
}
val benchmarkFunctionMock = mockFunction[Int, Int]
it should "execute the given method once with correct parameter" in {
benchmarkFunctionMock.expects(2).onCall { arg: Int => arg + 1 }.once()
val uut = new Benchmarkable(benchmarkFunctionMock(2))
uut.withBenchmark("Message")
}
it should "return the result of computation" in {
benchmarkFunctionMock.expects(*).onCall { arg: Int => arg + 1 }.once()
val uut = new Benchmarkable(benchmarkFunctionMock(2))
val actual = uut.withBenchmark("Message")
assert(actual == 3)
}
it should "log the execution time in seconds" in {
benchmarkFunctionMock.expects(*).onCall { arg: Int => Thread.sleep(1000); arg }.once()
val loggerMock = mock[Underlying]
(loggerMock.isInfoEnabled: () => Boolean).expects().returning(true).twice()
(loggerMock.info(_: String)).expects("Message...").once()
(loggerMock.info(_: String)).expects("Message... Done. [1 seconds]").once()
val uut = new BenchmarkableWithMockedLogger(loggerMock)(benchmarkFunctionMock(2))
uut.withBenchmark("Message")
}
}
| tudarmstadt-lt/newsleak | common/src/test/scala/utils/BenchmarkTest.scala | Scala | agpl-3.0 | 2,177 |
///////////////////////////////////////////////////////////////////////////////
// TextGrounderInfo.scala
//
// Copyright (C) 2011-2014 Ben Wing, The University of Texas at Austin
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///////////////////////////////////////////////////////////////////////////////
package opennlp.textgrounder
package gridlocate
import util.print.errprint
/**
TextGrounder-specific information (e.g. env vars).
*/
object TextGrounderInfo {
var textgrounder_dir: String = null
def set_textgrounder_dir(dir: String) {
textgrounder_dir = dir
}
def get_textgrounder_dir = {
if (textgrounder_dir == null)
textgrounder_dir = System.getenv("TEXTGROUNDER_DIR")
if (textgrounder_dir == null) {
errprint("""TEXTGROUNDER_DIR must be set to the top-level directory where
Textgrounder is installed.""")
require(textgrounder_dir != null)
}
textgrounder_dir
}
}
| utcompling/textgrounder | src/main/scala/opennlp/textgrounder/gridlocate/TextGrounderInfo.scala | Scala | apache-2.0 | 1,451 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package toplevel
package synthetic
import _root_.javax.swing.Icon
import com.intellij.navigation.ItemPresentation
import com.intellij.openapi.editor.colors.TextAttributesKey
import com.intellij.openapi.progress.ProcessCanceledException
import com.intellij.openapi.startup.StartupManager
import com.intellij.psi._
import com.intellij.psi.impl.light.LightElement
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.util.IncorrectOperationException
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFun
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScTypeParam
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScObject
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.Parameter
import org.jetbrains.plugins.scala.lang.psi.types.result.Success
import org.jetbrains.plugins.scala.lang.resolve.processor.{BaseProcessor, ImplicitProcessor, ResolveProcessor, ResolverEnv}
import org.jetbrains.plugins.scala.util.ScalaUtils
import scala.collection.mutable.ArrayBuffer
import scala.collection.{Seq, mutable}
abstract class SyntheticNamedElement(val manager: PsiManager, name: String)
extends LightElement(manager, ScalaFileType.SCALA_LANGUAGE) with PsiNameIdentifierOwner {
override def getName = name
override def getText = ""
def setName(newName: String) : PsiElement = throw new IncorrectOperationException("nonphysical element")
override def copy = throw new IncorrectOperationException("nonphysical element")
override def accept(v: PsiElementVisitor) {
throw new IncorrectOperationException("should not call")
}
override def getContainingFile = SyntheticClasses.get(manager.getProject).file
def nameId: PsiElement = null
override def getNameIdentifier: PsiIdentifier = null
}
class ScSyntheticTypeParameter(manager: PsiManager, override val name: String, val owner: ScFun)
extends SyntheticNamedElement(manager, name) with ScTypeParam with PsiClassFake {
def typeParameterText: String = name
override def getPresentation: ItemPresentation = super[ScTypeParam].getPresentation
def getOffsetInFile: Int = 0
def getContainingFileName: String = "NoFile"
override def toString = "Synthetic type parameter: " + name
def isCovariant = false
def isContravariant = false
def lowerBound = Success(Nothing, Some(this))
def upperBound = Success(Any, Some(this))
def getIndex = -1
def getOwner = null
protected def findChildrenByClassScala[T >: Null <: ScalaPsiElement](clazz: Class[T]): Array[T] =
findChildrenByClass[T](clazz)
protected def findChildByClassScala[T >: Null <: ScalaPsiElement](clazz: Class[T]): T = findChildByClass[T](clazz)
}
// we could try and implement all type system related stuff
// with class types, but it is simpler to indicate types corresponding to synthetic classes explicitly
class ScSyntheticClass(manager: PsiManager, val className: String, val t: StdType)
extends SyntheticNamedElement(manager, className) with PsiClass with PsiClassFake {
override def getPresentation: ItemPresentation = {
new ItemPresentation {
val This = ScSyntheticClass.this
def getLocationString: String = "(scala)"
def getTextAttributesKey: TextAttributesKey = null
def getPresentableText: String = This.className
def getIcon(open: Boolean): Icon = This.getIcon(0)
}
}
override def toString = "Synthetic class"
def syntheticMethods(scope: GlobalSearchScope) = methods.values.flatMap(s => s).toList ++
specialMethods.values.flatMap(s => s.map(_(scope))).toList
protected object methods extends mutable.HashMap[String, mutable.Set[ScSyntheticFunction]] with mutable.MultiMap[String, ScSyntheticFunction]
protected object specialMethods extends mutable.HashMap[String, mutable.Set[GlobalSearchScope => ScSyntheticFunction]] with
mutable.MultiMap[String, GlobalSearchScope => ScSyntheticFunction]
def addMethod(method: ScSyntheticFunction) = methods.addBinding(method.name, method)
def addMethod(method: GlobalSearchScope => ScSyntheticFunction, methodName: String) = specialMethods.addBinding(methodName, method)
import com.intellij.psi.scope.PsiScopeProcessor
override def processDeclarations(processor: PsiScopeProcessor,
state: ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean = {
processor match {
case p : ResolveProcessor =>
val nameSet = state.get(ResolverEnv.nameKey)
val name = if (nameSet == null) p.name else nameSet
methods.get(name) match {
case Some(ms) => for (method <- ms) {
if (!processor.execute(method, state)) return false
}
case None =>
}
case _: ImplicitProcessor => //do nothing, there is no implicit synthetic methods
case _: BaseProcessor =>
        // methods toString and hashCode exist in java.lang.Object
for (p <- methods; method <- p._2) {
if (!processor.execute(method, state)) return false
}
      case _ => // do not execute synthetic methods for non-Scala processors.
}
true
}
override def getSuperTypes: Array[PsiClassType] = {
val project = manager.getProject
t.tSuper match {
case None => PsiClassType.EMPTY_ARRAY
case Some(ts) => Array[PsiClassType] (JavaPsiFacade.getInstance(project).getElementFactory.
createType(ts.asClass(project).getOrElse(return PsiClassType.EMPTY_ARRAY), PsiSubstitutor.EMPTY))
}
}
}
class ScSyntheticFunction(manager: PsiManager, val name: String,
val retType: ScType, val paramClauses: Seq[Seq[Parameter]],
typeParameterNames : Seq[String])
extends SyntheticNamedElement(manager, name) with ScFun {
def isStringPlusMethod: Boolean = {
if (name != "+") return false
ScType.extractClass(retType, Some(manager.getProject)) match {
case Some(clazz) => clazz.qualifiedName == "java.lang.String"
case _ => false
}
}
def this(manager: PsiManager, name: String, retType: ScType, paramTypes: Seq[Seq[ScType]]) =
this(manager, name, retType, paramTypes.mapWithIndex {
case (p, index) => p.map(new Parameter("", None, _, false, false, false, index))
}, Seq.empty)
val typeParams: Seq[ScSyntheticTypeParameter] =
typeParameterNames.map {name => new ScSyntheticTypeParameter(manager, name, this)}
override def typeParameters = typeParams
override def getIcon(flags: Int) = icons.Icons.FUNCTION
override def toString = "Synthetic method"
protected def findChildrenByClassScala[T >: Null <: ScalaPsiElement](clazz: Class[T]): Array[T] = {
findChildrenByClass[T](clazz)
}
protected def findChildByClassScala[T >: Null <: ScalaPsiElement](clazz: Class[T]): T = {
var cur: PsiElement = getFirstChild
while (cur != null) {
if (clazz.isInstance(cur)) return cur.asInstanceOf[T]
cur = cur.getNextSibling
}
null
}
}
class ScSyntheticValue(manager: PsiManager, val name: String, val tp: ScType) extends SyntheticNamedElement(manager, name) {
override def getIcon(flags: Int): Icon = icons.Icons.VAL
override def toString = "Synthetic value"
}
import com.intellij.openapi.components.ProjectComponent
import com.intellij.openapi.project.Project
class SyntheticClasses(project: Project) extends PsiElementFinder with ProjectComponent {
def projectOpened() {}
def projectClosed() {}
def getComponentName = "SyntheticClasses"
def disposeComponent() {}
def initComponent() {
StartupManager.getInstance(project).registerPostStartupActivity(new Runnable {
def run() {
registerClasses()
}
})
}
private var classesInitialized: Boolean = false
def isClassesRegistered: Boolean = classesInitialized
def registerClasses() {
all = new mutable.HashMap[String, ScSyntheticClass]
file = PsiFileFactory.getInstance(project).createFileFromText(
"dummy." + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension, ScalaFileType.SCALA_FILE_TYPE, "")
val any = registerClass(Any, "Any")
val manager = any.manager
any.addMethod(new ScSyntheticFunction(manager, "==", Boolean, Seq(Seq(Any))))
any.addMethod(new ScSyntheticFunction(manager, "!=", Boolean, Seq(Seq(Any))))
any.addMethod(new ScSyntheticFunction(manager, "##", Int, Seq.empty))
any.addMethod(new ScSyntheticFunction(manager, "isInstanceOf", Boolean, Seq.empty, Seq(ScalaUtils.typeParameter)))
any.addMethod(new ScSyntheticFunction(manager, "asInstanceOf", Any, Seq.empty, Seq(ScalaUtils.typeParameter)) {
override val retType = ScalaPsiManager.typeVariable(typeParams(0))
})
val anyRef = registerClass(AnyRef, "AnyRef")
anyRef.addMethod(new ScSyntheticFunction(manager, "eq", Boolean, Seq(Seq(AnyRef))))
anyRef.addMethod(new ScSyntheticFunction(manager, "ne", Boolean, Seq(Seq(AnyRef))))
anyRef.addMethod(new ScSyntheticFunction(manager, "synchronized", Any, Seq.empty, Seq(ScalaUtils.typeParameter)) {
override val paramClauses: Seq[Seq[Parameter]] = Seq(Seq(new Parameter("", None,
ScalaPsiManager.typeVariable(typeParams(0)), false, false, false, 0)))
override val retType: ScType = ScalaPsiManager.typeVariable(typeParams(0))
})
registerClass(AnyVal, "AnyVal")
registerClass(Nothing, "Nothing")
registerClass(Null, "Null")
registerClass(Singleton, "Singleton")
registerClass(Unit, "Unit")
val boolc = registerClass(Boolean, "Boolean")
for (op <- bool_bin_ops)
boolc.addMethod(new ScSyntheticFunction(manager, op, Boolean, Seq(Seq(Boolean))))
boolc.addMethod(new ScSyntheticFunction(manager, "unary_!", Boolean, Seq.empty))
registerIntegerClass(registerNumericClass(registerClass(Char, "Char")))
registerIntegerClass(registerNumericClass(registerClass(Int, "Int")))
registerIntegerClass(registerNumericClass(registerClass(Long, "Long")))
registerIntegerClass(registerNumericClass(registerClass(Byte, "Byte")))
registerIntegerClass(registerNumericClass(registerClass(Short, "Short")))
registerNumericClass(registerClass(Float, "Float"))
registerNumericClass(registerClass(Double, "Double"))
for (nc <- numeric) {
for (nc1 <- numeric; op <- numeric_comp_ops)
nc.addMethod(new ScSyntheticFunction(manager, op, Boolean, Seq(Seq(nc1.t))))
for (nc1 <- numeric; op <- numeric_arith_ops)
nc.addMethod(new ScSyntheticFunction(manager, op, op_type(nc, nc1), Seq(Seq(nc1.t))))
for (nc1 <- numeric)
nc.addMethod(new ScSyntheticFunction(manager, "to" + nc1.className, nc1.t, Seq.empty))
for (un_op <- numeric_arith_unary_ops)
nc.addMethod(new ScSyntheticFunction(manager, "unary_" + un_op, nc.t match {
case Long | Double | Float => nc.t
case _ => Int
}, Seq.empty))
}
for (ic <- integer) {
for (ic1 <- integer; op <- bitwise_bin_ops)
ic.addMethod(new ScSyntheticFunction(manager, op, op_type(ic, ic1), Seq(Seq(ic1.t))))
ic.addMethod(new ScSyntheticFunction(manager, "unary_~", ic.t, Seq.empty))
val ret = ic.t match {
case Long => Long
case _ => Int
}
for (op <- bitwise_shift_ops) {
ic.addMethod(new ScSyntheticFunction(manager, op, ret, Seq(Seq(Int))))
ic.addMethod(new ScSyntheticFunction(manager, op, ret, Seq(Seq(Long))))
}
}
scriptSyntheticValues = new mutable.HashSet[ScSyntheticValue]
//todo: remove all scope => method value
//todo: handle process cancelled exception
try {
val stringClass = ScalaPsiManager.instance(project).getCachedClass(GlobalSearchScope.allScope(project), "java.lang.String")
stringClass.map { stringClass =>
scriptSyntheticValues += new ScSyntheticValue(manager, "args", JavaArrayType(ScDesignatorType(stringClass)))
}
}
catch {
case _: ProcessCanceledException =>
}
stringPlusMethod = new ScSyntheticFunction(manager, "+", _, Seq(Seq(Any)))
//register synthetic objects
syntheticObjects = new mutable.HashSet[ScObject]
def registerObject(fileText: String) {
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText("dummy." + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, fileText).asInstanceOf[ScalaFile]
val obj = dummyFile.typeDefinitions(0).asInstanceOf[ScObject]
syntheticObjects += obj
}
registerObject(
"""
package scala
object Boolean {
def box(x: Boolean): java.lang.Boolean = throw new Error()
def unbox(x: Object): Boolean = throw new Error()
}
"""
)
registerObject(
"""
package scala
object Byte {
def box(x: Byte): java.lang.Byte = throw new Error()
def unbox(x: Object): Byte = throw new Error()
def MinValue = java.lang.Byte.MIN_VALUE
def MaxValue = java.lang.Byte.MAX_VALUE
}
"""
)
registerObject(
"""
package scala
object Char {
def box(x: Char): java.lang.Character = throw new Error()
def unbox(x: Object): Char = throw new Error()
def MinValue = java.lang.Character.MIN_VALUE
def MaxValue = java.lang.Character.MAX_VALUE
}
"""
)
registerObject(
"""
package scala
object Double {
def box(x: Double): java.lang.Double = throw new Error()
def unbox(x: Object): Double = throw new Error()
@deprecated("use Double.MinNegativeValue instead")
def MinValue = -java.lang.Double.MAX_VALUE
def MinNegativeValue = -java.lang.Double.MAX_VALUE
def MaxValue = java.lang.Double.MAX_VALUE
@deprecated("use Double.MinPositiveValue instead")
def Epsilon = java.lang.Double.MIN_VALUE
def MinPositiveValue = java.lang.Double.MIN_VALUE
def NaN = java.lang.Double.NaN
def PositiveInfinity = java.lang.Double.POSITIVE_INFINITY
def NegativeInfinity = java.lang.Double.NEGATIVE_INFINITY
}
"""
)
registerObject(
"""
package scala
object Float {
def box(x: Float): java.lang.Float = throw new Error()
def unbox(x: Object): Float = throw new Error()
@deprecated("use Float.MinNegativeValue instead")
def MinValue = -java.lang.Float.MAX_VALUE
def MinNegativeValue = -java.lang.Float.MAX_VALUE
def MaxValue = java.lang.Float.MAX_VALUE
@deprecated("use Float.MinPositiveValue instead")
def Epsilon = java.lang.Float.MIN_VALUE
def MinPositiveValue = java.lang.Float.MIN_VALUE
def NaN = java.lang.Float.NaN
def PositiveInfinity = java.lang.Float.POSITIVE_INFINITY
def NegativeInfinity = java.lang.Float.NEGATIVE_INFINITY
}
"""
)
registerObject(
"""
package scala
object Int {
def box(x: Int): java.lang.Integer = throw new Error()
def unbox(x: Object): Int = throw new Error()
def MinValue = java.lang.Integer.MIN_VALUE
def MaxValue = java.lang.Integer.MAX_VALUE
}
"""
)
registerObject(
"""
package scala
object Long {
def box(x: Long): java.lang.Long = throw new Error()
def unbox(x: Object): Long = throw new Error()
def MinValue = java.lang.Long.MIN_VALUE
def MaxValue = java.lang.Long.MAX_VALUE
}
"""
)
registerObject(
"""
package scala
object Short {
def box(x: Short): java.lang.Short = throw new Error()
def unbox(x: Object): Short = throw new Error()
def MinValue = java.lang.Short.MIN_VALUE
def MaxValue = java.lang.Short.MAX_VALUE
}
"""
)
registerObject(
"""
package scala
object Unit
"""
)
classesInitialized = true
}
var stringPlusMethod: ScType => ScSyntheticFunction = null
var scriptSyntheticValues: mutable.Set[ScSyntheticValue] = new mutable.HashSet[ScSyntheticValue]
var all: mutable.Map[String, ScSyntheticClass] = new mutable.HashMap[String, ScSyntheticClass]
var numeric: mutable.Set[ScSyntheticClass] = new mutable.HashSet[ScSyntheticClass]
var integer : mutable.Set[ScSyntheticClass] = new mutable.HashSet[ScSyntheticClass]
var syntheticObjects: mutable.Set[ScObject] = new mutable.HashSet[ScObject]
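  // Result-type promotion for synthetic numeric operators, mirroring Scala's widening rules:
  // Double wins over Float, Float over Long, Long over Int; anything narrower promotes to Int.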
def op_type (ic1 : ScSyntheticClass, ic2 : ScSyntheticClass) = (ic1.t, ic2.t) match {
case (_, Double) | (Double, _) => Double
case (Float, _) | (_, Float) => Float
case (_, Long) | (Long, _)=> Long
case _ => Int
}
var file : PsiFile = _
def registerClass(t: StdType, name: String) = {
val manager = PsiManager.getInstance(project)
val clazz = new ScSyntheticClass(manager, name, t) {
override def getQualifiedName = "scala." + name
}
all += ((name, clazz)); clazz
}
def registerIntegerClass(clazz : ScSyntheticClass) = {integer += clazz; clazz}
def registerNumericClass(clazz : ScSyntheticClass) = {numeric += clazz; clazz}
def getAll: Iterable[ScSyntheticClass] = all.values
def byName(name: String) = all.get(name)
val numeric_comp_ops = "==" :: "!=" :: "<" :: ">" :: "<=" :: ">=" :: Nil
val numeric_arith_ops = "+" :: "-" :: "*" :: "/" :: "%" :: Nil
val numeric_arith_unary_ops = "+" :: "-" :: Nil
val bool_bin_ops = "&&" :: "||" :: "&" :: "|" :: "==" :: "!=" :: "^" :: Nil
val bitwise_bin_ops = "&" :: "|" :: "^" :: Nil
val bitwise_shift_ops = "<<" :: ">>" :: ">>>" :: Nil
val prefix = "scala."
def findClass(qName: String, scope: GlobalSearchScope): PsiClass = {
if (qName.startsWith(prefix)) {
byName(qName.substring(prefix.length)) match {
case Some(c) => return c
case _ =>
}
}
for (obj <- syntheticObjects) {
if (obj.qualifiedName == qName) return obj
}
null
}
def findClasses(qName: String, scope: GlobalSearchScope): Array[PsiClass] = {
val res: ArrayBuffer[PsiClass] = new ArrayBuffer[PsiClass]
val c = findClass(qName, scope)
if (c != null) res += c
for (obj <- syntheticObjects) {
if (obj.qualifiedName == qName) res += obj
}
res.toArray
}
override def getClasses(p : PsiPackage, scope : GlobalSearchScope) = findClasses(p.getQualifiedName, scope)
def getScriptSyntheticValues: Seq[ScSyntheticValue] = scriptSyntheticValues.toSeq
}
object SyntheticClasses {
def get(project: Project): SyntheticClasses = project.getComponent(classOf[SyntheticClasses])
}
| advancedxy/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/toplevel/synthetic/ScSyntheticClass.scala | Scala | apache-2.0 | 18,342 |
/*
* ******************************************************************************
* * Copyright (C) 2013 Christopher Harris (Itszuvalex)
* * [email protected]
* *
* * This program is free software; you can redistribute it and/or
* * modify it under the terms of the GNU General Public License
* * as published by the Free Software Foundation; either version 2
* * of the License, or (at your option) any later version.
* *
* * This program is distributed in the hope that it will be useful,
* * but WITHOUT ANY WARRANTY; without even the implied warranty of
* * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* * GNU General Public License for more details.
* *
* * You should have received a copy of the GNU General Public License
* * along with this program; if not, write to the Free Software
* * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* *****************************************************************************
*/
package com.itszuvalex.femtocraft.power.tiles
import com.itszuvalex.femtocraft.FemtocraftGuiConstants
import com.itszuvalex.femtocraft.core.tiles.TileEntityBase
import com.itszuvalex.femtocraft.core.traits.tile.MultiBlockComponent
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.tileentity.TileEntity
class TileEntityFemtoCubeFrame extends TileEntityBase with MultiBlockComponent {
override def canUpdate = false
override def onSideActivate(par5EntityPlayer: EntityPlayer, side: Int): Boolean = {
if (isValidMultiBlock) {
val te: TileEntity = worldObj.getTileEntity(info.x, info.y, info.z)
if (te == null) {
return false
}
par5EntityPlayer.openGui(getMod, getGuiID, worldObj, info.x, info.y, info.z)
return true
}
false
}
override def getGuiID = FemtocraftGuiConstants.FemtoCubeGuiID
override def hasGUI = info.isValidMultiBlock
}
| Itszuvalex/Femtocraft-alpha-1 | src/main/java/com/itszuvalex/femtocraft/power/tiles/TileEntityFemtoCubeFrame.scala | Scala | gpl-2.0 | 1,937 |
package defw.http
import org.scalatest._
import java.net.{URI => JavaNetURI}
class URITest extends FlatSpec {
"URI.apply" should "create uri object from http://hc.apache.org/?a=b#c" in {
val uri = URI("http://hc.apache.org/?a=b#c")
assert(uri.isInstanceOf[JavaNetURI] === true)
}
}
| takahish0306/scala-defw | util/src/test/scala/defw/http/URITest.scala | Scala | apache-2.0 | 298 |
import org.junit.Assert._
import org.junit.Test
import org.junit.Ignore
import Chisel._
import chiselutils.xilinx._
class Fifo36E1TestSuite extends TestSuite {
@Test def fifo36E1() {
class UserTop extends Module {
val io = new QueueIO( UInt( width = 72 ), 500 )
val enqClk = Clock()
val deqClk = Clock()
val testFifo = Module( new Fifo36E1( 72, 100, 500, enqClk, deqClk ) )
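      // Split the 72-bit word as the primitive expects (assuming the usual FIFO36E1 layout):
      // the upper 64 bits go to din, the low 8 "parity" bits to dip.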
testFifo.io.din := io.enq.bits( 71, 8 )
testFifo.io.dip := io.enq.bits( 7, 0 )
testFifo.io.wren := io.enq.valid
io.enq.ready := !testFifo.io.full
io.deq.valid := !testFifo.io.empty
testFifo.io.rden := io.deq.ready
io.deq.bits := testFifo.io.dout ## testFifo.io.dop
}
chiselMain( Array("--backend", "v", "--targetDir", dir.getPath.toString()), () => Module( new UserTop ) )
assertFile( "Fifo36E1TestSuite_UserTop_1.v" )
}
}
| da-steve101/chisel-utils | src/test/scala/Fifo36E1Suite.scala | Scala | lgpl-3.0 | 889 |
package BIDMach.models
import BIDMat.{Mat,SBMat,CMat,DMat,FMat,IMat,HMat,GMat,GIMat,GSMat,SMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import BIDMach.datasources._
import BIDMach.datasinks._
import BIDMach.updaters._
import BIDMach._
/**
* LDA model using online Variational Bayes (Hoffman, Blei and Bach, 2010)
*
* '''Parameters'''
- dim(256): Model dimension
- uiter(5): Number of iterations on one block of data
- alpha(0.001f): Dirichlet document-topic prior
- beta(0.0001f): Dirichlet word-topic prior
- exppsi(true): Apply exp(psi(X)) if true, otherwise just use X
- LDAeps(1e-9): A safety floor constant
*
* Other key parameters inherited from the learner, datasource and updater:
- blockSize: the number of samples processed in a block
 - power(0.3f): the exponent of the moving average: model' = a*dmodel + (1-a)*model, where a = 1/nblocks^power
- npasses(10): number of complete passes over the dataset
*
* '''Example:'''
*
* a is a sparse word x document matrix
* {{{
* val (nn, opts) = LDA.learner(a)
* opts.what // prints the available options
* opts.uiter=2 // customize options
* nn.train // train the model
* nn.modelmat // get the final model
* nn.datamat // get the other factor (requires opts.putBack=1)
*
* val (nn, opts) = LDA.learnPar(a) // Build a parallel learner
* opts.nthreads=2 // number of threads (defaults to number of GPUs)
* nn.train // train the model
* nn.modelmat // get the final model
* nn.datamat // get the other factor
* }}}
*/
class LDA(override val opts:LDA.Opts = new LDA.Options) extends FactorModel(opts) {
var mm:Mat = null
var traceMem = false
/** Sets up the modelmats and updatemats arrays and initializes modelmats(0) randomly unless stated otherwise. */
override def init() = {
super.init();
mm = modelmats(0);
if (refresh) {
setmodelmats(Array(mm, mm.ones(mm.nrows, 1)));
}
updatemats = new Array[Mat](2);
updatemats(0) = mm.zeros(mm.nrows, mm.ncols);
updatemats(1) = mm.zeros(mm.nrows, 1);
}
/**
* Updates '''user''' according to the variational EM update process in the original (2003) LDA Paper.
*
* This can be a bit tricky to understand. See Equation 2.2 in Huasha Zhao's PhD from UC Berkeley
* for details on the math and cross-reference it with the 2003 LDA journal paper.
*
* @param sdata The word x document input data. Has dimension (# words x opts.batchSize), where batchSize is
* typically much smaller than the total number of documents, so sdata is usually a portion of the full input.
* @param user An (opts.dim x opts.batchSize) matrix that stores some intermediate/temporary data and gets left-
* multiplied by modelmats(0) to form sdata.
* @param ipass Index of the pass over the data (0 = first pass, 1 = second pass, etc.).
*/
def uupdate(sdata:Mat, user:Mat, ipass:Int, pos:Long):Unit = {
if (putBack < 0 || ipass == 0) user.set(1f)
for (i <- 0 until opts.uiter) {
val preds = DDS(mm, user, sdata)
val dc = sdata.contents
val pc = preds.contents
max(opts.weps, pc, pc)
pc ~ dc / pc
val unew = user ∘ (mm * preds) + opts.alpha
if (opts.exppsi) exppsi(unew, unew)
user <-- unew
}
}
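  /* Hedged restatement of the uupdate loop above in equation form. Indices: w = word,
   * d = document, k = topic; mm is read here as the (topic x word) model matrix that
   * mupdate's scaladoc describes, which is an interpretation, not new behaviour:
   *   preds(w,d) = sum_k mm(k,w) * user(k,d)              (only at the nonzeros of sdata)
   *   ratio(w,d) = sdata(w,d) / max(weps, preds(w,d))
   *   user(k,d) <- user(k,d) * sum_w mm(k,w) * ratio(w,d) + alpha,  then exp(psi(.)) if opts.exppsi
   */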
/**
* Updates '''modelmats(0)''', the topic x word matrix that is ultimately returned as output for the model.
*
* @param sdata The word x document input data. Has dimension (# words x opts.batchSize), where batchSize is
* typically much smaller than the total number of documents, so sdata is usually a portion of the full input.
* @param user An (opts.dim x opts.batchSize) matrix that stores some intermediate/temporary data and gets left-
* multiplied by modelmats(0) to form sdata.
* @param ipass Index of the pass over the data (0 = first pass, 1 = second pass, etc.).
*/
def mupdate(sdata:Mat, user:Mat, ipass:Int, pos:Long):Unit = {
val preds = DDS(mm, user, sdata)
val dc = sdata.contents
val pc = preds.contents
max(opts.weps, pc, pc)
pc ~ dc / pc
val ud = user *^ preds
ud ~ ud ∘ mm
ud ~ ud + opts.beta
updatemats(0) <-- ud
sum(ud, 2, updatemats(1))
}
/**
* Evaluates model log-likelihood on a held-out batch of the input data.
*
* @param sdata The word x document input data. Has dimension (# words x opts.batchSize), where batchSize is
* typically much smaller than the total number of documents, so sdata is usually a portion of the full input.
* @param user An (opts.dim x opts.batchSize) matrix that stores some intermediate/temporary data and gets left-
* multiplied by modelmats(0) to form sdata.
* @param ipass Index of the pass over the data (0 = first pass, 1 = second pass, etc.).
*/
def evalfun(sdata:Mat, user:Mat, ipass:Int, pos:Long):FMat = {
if (ogmats != null) ogmats(0) = user;
val preds = DDS(mm, user, sdata);
val dc = sdata.contents;
val pc = preds.contents;
max(opts.weps, pc, pc);
ln(pc, pc);
val sdat = sum(sdata,1);
val mms = sum(mm,2);
val suu = ln(mms ^* user);
val vv = ((pc ddot dc) - (sdat ddot suu))/sum(sdat,2).dv;
row(vv, math.exp(-vv))
}
}
object LDA {
trait Opts extends FactorModel.Opts {
var LDAeps = 1e-9
var exppsi = true
var alpha = 0.001f
var beta = 0.0001f
}
class Options extends Opts {}
/** Creates a new LDA model. */
def mkLDAmodel(fopts:Model.Opts) = {
new LDA(fopts.asInstanceOf[LDA.Opts])
}
/** Creates a new IncNorm updater. */
def mkUpdater(nopts:Updater.Opts) = {
new IncNorm(nopts.asInstanceOf[IncNorm.Opts])
}
class MatOpts extends Learner.Options with LDA.Opts with MatSource.Opts with IncNorm.Opts
/** Online Variational Bayes LDA algorithm with a matrix datasource. */
def learner(mat0:Mat):(Learner, MatOpts) = learner(mat0, 256);
def learner(mat0:Mat, d:Int):(Learner, MatOpts) = {
val opts = new MatOpts
opts.dim = d
opts.batchSize = math.min(100000, mat0.ncols/30 + 1)
val nn = new Learner(
new MatSource(Array(mat0:Mat), opts),
new LDA(opts),
null,
new IncNorm(opts),
null,
opts)
(nn, opts)
}
class FileOpts extends Learner.Options with LDA.Opts with SFileSource.Opts with IncNorm.Opts
def learner(fpattern:String):(Learner, FileOpts) = learner(fpattern, 256)
def learner(fpattern:String, d:Int):(Learner, FileOpts) = learner(List(FileSource.simpleEnum(fpattern, 1, 0)), d)
/** Online Variational Bayes LDA algorithm with a files dataSource. */
def learner(fnames:List[(Int)=>String], d:Int):(Learner, FileOpts) = {
val opts = new FileOpts
opts.dim = d
opts.fnames = fnames
opts.batchSize = 100000;
opts.eltsPerSample = 500;
implicit val threads = threadPool(4)
val nn = new Learner(
new SFileSource(opts),
new LDA(opts),
null,
new IncNorm(opts),
null,
opts)
(nn, opts)
}
class PredOptions extends Learner.Options with LDA.Opts with MatSource.Opts with MatSink.Opts;
// This function constructs a predictor from an existing model
def predictor(model:Model, mat1:Mat):(Learner, PredOptions) = {
val nopts = new PredOptions;
nopts.batchSize = math.min(10000, mat1.ncols/30 + 1)
nopts.dim = model.opts.dim;
val newmod = new LDA(nopts);
newmod.refresh = false
model.copyTo(newmod)
val nn = new Learner(
new MatSource(Array(mat1), nopts),
newmod,
null,
null,
new MatSink(nopts),
nopts)
(nn, nopts)
}
class MatBatchOpts extends Learner.Options with LDA.Opts with MatSource.Opts with BatchNorm.Opts;
/** Batch Variational Bayes LDA algorithm with a matrix datasource. */
def learnBatch(mat0:Mat):(Learner, MatBatchOpts) = learnBatch(mat0, 256);
def learnBatch(mat0:Mat, d:Int):(Learner, MatBatchOpts) = {
val opts = new MatBatchOpts;
opts.dim = d
opts.batchSize = math.min(100000, mat0.ncols/30 + 1)
val nn = new Learner(
new MatSource(Array(mat0:Mat), opts),
new LDA(opts),
null,
new BatchNorm(opts),
null,
opts)
(nn, opts)
}
class MatParOpts extends ParLearner.Options with LDA.Opts with MatSource.Opts with IncNorm.Opts;
/** Parallel online LDA algorithm with a matrix datasource. */
def learnPar(mat0:Mat):(ParLearnerF, MatParOpts) = learnPar(mat0, 256);
def learnPar(mat0:Mat, d:Int):(ParLearnerF, MatParOpts) = {
val opts = new MatParOpts;
opts.dim = d
opts.batchSize = math.min(100000, mat0.ncols/30/opts.nthreads + 1)
    opts.coolit = 0 // Assume we don't need cooling on a matrix input
val nn = new ParLearnerF(
new MatSource(Array(mat0:Mat), opts),
opts, mkLDAmodel _,
null, null,
opts, mkUpdater _,
null, null,
opts)
(nn, opts)
}
class SFDSopts extends ParLearner.Options with LDA.Opts with SFileSource.Opts with IncNorm.Opts
def learnPar(fnames:String, d:Int):(ParLearnerF, SFDSopts) = learnPar(List(FileSource.simpleEnum(fnames, 1, 0)), d);
/** Parallel online LDA algorithm with one file datasource. */
def learnPar(fnames:List[(Int) => String], d:Int):(ParLearnerF, SFDSopts) = {
val opts = new SFDSopts;
opts.dim = d;
opts.npasses = 4;
opts.fnames = fnames;
opts.batchSize = 100000;
opts.eltsPerSample = 500;
opts.resFile = "../results.mat"
implicit val threads = threadPool(12)
val nn = new ParLearnerF(
new SFileSource(opts),
opts, mkLDAmodel _,
null, null,
opts, mkUpdater _,
null, null,
opts
)
(nn, opts)
}
}
| jamesjia94/BIDMach | src/main/scala/BIDMach/models/LDA.scala | Scala | bsd-3-clause | 10,119 |
/*
* ____ ____ _____ ____ ___ ____
 * | _ \ | _ \ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
 * |_| |_| \_\ |_____| \____| /__/ \____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog.common
package jobs
import blueeyes.core.http.{ MimeType, MimeTypes }
import blueeyes.json._
import blueeyes.json.serialization.{ Decomposer, Extractor }
import blueeyes.json.serialization.DefaultSerialization._
import org.apache.commons.codec.binary.Base64
import scalaz._
case class JobResult(mimeTypes: List[MimeType], content: Array[Byte]) {
override def hashCode: Int = mimeTypes.## * 23 + content.toList.##
  // Array equality is reference-based, so compare lengths and elements explicitly
  override def equals(that: Any): Boolean = that match {
case JobResult(thoseMimeTypes, thatContent) =>
val len = content.length
(mimeTypes.toSet == thoseMimeTypes.toSet) && (len == thatContent.length) && {
var i = 0
var result = true
while (result && i < len) {
result = content(i) == thatContent(i)
i += 1
}
result
}
case _ =>
false
}
}
object JobResult extends JobResultSerialization
trait JobResultSerialization {
import scalaz.syntax.apply._
import scalaz.syntax.monad._
import Validation._
implicit object JobResultDecomposer extends Decomposer[JobResult] {
override def decompose(result: JobResult): JValue = JObject(List(
JField("content", JString(Base64.encodeBase64String(result.content))),
JField("mimeTypes", JArray(result.mimeTypes map { mimeType =>
JString(mimeType.value)
}))
))
}
implicit object JobResultExtractor extends Extractor[JobResult] {
import Extractor._
override def validated(obj: JValue): Validation[Error, JobResult] = {
      val mimeTypes = (obj \ "mimeTypes").validated[List[String]] flatMap { rawTypes =>
success[Error, List[MimeType]]((rawTypes flatMap (MimeTypes.parseMimeTypes(_))).toList)
}
      (mimeTypes |@| (obj \ "content").validated[String]) { (mimeTypes, content) =>
JobResult(mimeTypes, Base64.decodeBase64(content))
}
}
}
}
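// Hedged round-trip sketch of the Base64 step the serializers above rely on, using only the
// commons-codec API already imported in this file; the content bytes are made up for illustration.
object JobResultContentRoundTrip {
  def demo(): Unit = {
    val content: Array[Byte] = "some job output".getBytes("UTF-8")
    val encoded: String = Base64.encodeBase64String(content) // value stored in the "content" JSON field
    val decoded: Array[Byte] = Base64.decodeBase64(encoded) // what the extractor hands back to JobResult
    assert(decoded.sameElements(content))
  }
}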
| precog/platform | common/src/main/scala/com/precog/common/jobs/JobResult.scala | Scala | agpl-3.0 | 2,975 |
package com.datastax.spark.connector.rdd.partitioner.dht
import java.net.InetAddress
case class TokenRange[V, T <: Token[V]] (
start: T, end: T, replicas: Set[InetAddress], dataSize: Long) {
def isWrappedAround(implicit tf: TokenFactory[V, T]): Boolean =
start >= end && end != tf.minToken
def isFull(implicit tf: TokenFactory[V, T]): Boolean =
start == end && end == tf.minToken
def isEmpty(implicit tf: TokenFactory[V, T]): Boolean =
start == end && end != tf.minToken
def unwrap(implicit tf: TokenFactory[V, T]): Seq[TokenRange[V, T]] = {
val minToken = tf.minToken
if (isWrappedAround)
Seq(
TokenRange(start, minToken, replicas, dataSize / 2),
TokenRange(minToken, end, replicas, dataSize / 2))
else
Seq(this)
}
def contains(token: T)(implicit tf: TokenFactory[V, T]): Boolean = {
(end == tf.minToken && token > start
|| start == tf.minToken && token <= end
|| !isWrappedAround && token > start && token <= end
|| isWrappedAround && (token > start || token <= end))
}
}
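// Hedged, self-contained restatement of the containment rule above on plain Longs, with
// Long.MinValue standing in for the ring's minToken. The real check goes through TokenFactory
// and the Token ordering; this object exists purely for illustration.
object WrapAroundContainsSketch {
  private val minToken = Long.MinValue

  def contains(start: Long, end: Long, token: Long): Boolean = {
    val wrapped = start >= end && end != minToken // mirrors isWrappedAround
    (end == minToken && token > start) ||
      (start == minToken && token <= end) ||
      (!wrapped && token > start && token <= end) ||
      (wrapped && (token > start || token <= end))
  }

  def demo(): Unit = {
    println(contains(100L, -100L, Long.MaxValue)) // true: token falls in the wrapped segment after start
    println(contains(100L, -100L, 0L)) // false: token lies outside the (start, end] ring segment
  }
}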
| maasg/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/rdd/partitioner/dht/TokenRange.scala | Scala | apache-2.0 | 1,074 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.analysis
import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.types.IntegerType
class FilterPushdownSuite extends PlanTest {
object Optimize extends RuleExecutor[LogicalPlan] {
val batches =
Batch("Subqueries", Once,
EliminateSubqueryAliases) ::
Batch("Filter Pushdown", FixedPoint(10),
CombineFilters,
PushDownPredicate,
BooleanSimplification,
PushPredicateThroughJoin,
CollapseProject) :: Nil
}
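  // Hedged usage sketch of the executor defined above, mirroring what each test below does:
  // build a plan with the Catalyst test DSL, analyze it, then run it through the batches.
  //   val plan      = testRelation.select('a).where('a === 1).analyze
  //   val optimized = Optimize.execute(plan) // the filter ends up below the project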
val testRelation = LocalRelation('a.int, 'b.int, 'c.int)
val testRelation1 = LocalRelation('d.int)
// This test already passes.
test("eliminate subqueries") {
val originalQuery =
testRelation
.subquery('y)
.select('a)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.select('a.attr)
.analyze
comparePlans(optimized, correctAnswer)
}
// After this line is unimplemented.
test("simple push down") {
val originalQuery =
testRelation
.select('a)
.where('a === 1)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.where('a === 1)
.select('a)
.analyze
comparePlans(optimized, correctAnswer)
}
test("combine redundant filters") {
val originalQuery =
testRelation
.where('a === 1 && 'b === 1)
.where('a === 1 && 'c === 1)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.where('a === 1 && 'b === 1 && 'c === 1)
.analyze
comparePlans(optimized, correctAnswer)
}
test("SPARK-16164: Filter pushdown should keep the ordering in the logical plan") {
val originalQuery =
testRelation
.where('a === 1)
.select('a, 'b)
.where('b === 1)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.where('a === 1 && 'b === 1)
.select('a, 'b)
.analyze
// We can not use comparePlans here because it normalized the plan.
assert(optimized == correctAnswer)
}
test("SPARK-16994: filter should not be pushed through limit") {
val originalQuery = testRelation.limit(10).where('a === 1).analyze
val optimized = Optimize.execute(originalQuery)
comparePlans(optimized, originalQuery)
}
test("can't push without rewrite") {
val originalQuery =
testRelation
.select('a + 'b as 'e)
.where('e === 1)
.analyze
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.where('a + 'b === 1)
.select('a + 'b as 'e)
.analyze
comparePlans(optimized, correctAnswer)
}
test("nondeterministic: can always push down filter through project with deterministic field") {
val originalQuery = testRelation
.select('a)
.where(Rand(10) > 5 || 'a > 5)
.analyze
val optimized = Optimize.execute(originalQuery)
val correctAnswer = testRelation
.where(Rand(10) > 5 || 'a > 5)
.select('a)
.analyze
comparePlans(optimized, correctAnswer)
}
test("nondeterministic: can't push down filter through project with nondeterministic field") {
val originalQuery = testRelation
.select(Rand(10).as('rand), 'a)
.where('a > 5)
.analyze
val optimized = Optimize.execute(originalQuery)
comparePlans(optimized, originalQuery)
}
test("nondeterministic: can't push down filter through aggregate with nondeterministic field") {
val originalQuery = testRelation
.groupBy('a)('a, Rand(10).as('rand))
.where('a > 5)
.analyze
val optimized = Optimize.execute(originalQuery)
comparePlans(optimized, originalQuery)
}
test("nondeterministic: push down part of filter through aggregate with deterministic field") {
val originalQuery = testRelation
.groupBy('a)('a)
.where('a > 5 && Rand(10) > 5)
.analyze
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.where('a > 5)
.groupBy('a)('a)
.where(Rand(10) > 5)
.analyze
comparePlans(optimized, correctAnswer)
}
test("filters: combines filters") {
val originalQuery = testRelation
.select('a)
.where('a === 1)
.where('a === 2)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer =
testRelation
.where('a === 1 && 'a === 2)
.select('a).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push to either side") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y)
.where("x.b".attr === 1)
.where("y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 1)
val right = testRelation.where('b === 2)
val correctAnswer =
left.join(right).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push to one side") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y)
.where("x.b".attr === 1)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 1)
val right = testRelation
val correctAnswer =
left.join(right).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push to one side after transformCondition") {
val x = testRelation.subquery('x)
val y = testRelation1.subquery('y)
val originalQuery = {
x.join(y)
.where(("x.a".attr === 1 && "y.d".attr === "x.b".attr) ||
("x.a".attr === 1 && "y.d".attr === "x.c".attr))
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('a === 1)
val right = testRelation1
val correctAnswer =
left.join(right, condition = Some("d".attr === "b".attr || "d".attr === "c".attr)).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: rewrite filter to push to either side") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y)
.where("x.b".attr === 1 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 1)
val right = testRelation.where('b === 2)
val correctAnswer =
left.join(right).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left semi join") {
val x = testRelation.subquery('x)
val y = testRelation1.subquery('y)
val originalQuery = {
x.join(y, LeftSemi, Option("x.a".attr === "y.d".attr && "x.b".attr >= 1 && "y.d".attr >= 2))
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b >= 1)
val right = testRelation1.where('d >= 2)
val correctAnswer =
left.join(right, LeftSemi, Option("a".attr === "d".attr)).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left outer join #1") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, LeftOuter)
.where("x.b".attr === 1 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 1)
val correctAnswer =
left.join(y, LeftOuter).where("y.b".attr === 2).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down right outer join #1") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, RightOuter)
.where("x.b".attr === 1 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val right = testRelation.where('b === 2).subquery('d)
val correctAnswer =
x.join(right, RightOuter).where("x.b".attr === 1).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left outer join #2") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, LeftOuter, Some("x.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 2).subquery('d)
val correctAnswer =
left.join(y, LeftOuter, Some("d.b".attr === 1)).where("y.b".attr === 2).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down right outer join #2") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, RightOuter, Some("y.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val right = testRelation.where('b === 2).subquery('d)
val correctAnswer =
x.join(right, RightOuter, Some("d.b".attr === 1)).where("x.b".attr === 2).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left outer join #3") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, LeftOuter, Some("y.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 2).subquery('l)
val right = testRelation.where('b === 1).subquery('r)
val correctAnswer =
left.join(right, LeftOuter).where("r.b".attr === 2).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down right outer join #3") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, RightOuter, Some("y.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2)
}
val optimized = Optimize.execute(originalQuery.analyze)
val right = testRelation.where('b === 2).subquery('r)
val correctAnswer =
x.join(right, RightOuter, Some("r.b".attr === 1)).where("x.b".attr === 2).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left outer join #4") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, LeftOuter, Some("y.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 2).subquery('l)
val right = testRelation.where('b === 1).subquery('r)
val correctAnswer =
left.join(right, LeftOuter).where("r.b".attr === 2 && "l.c".attr === "r.c".attr).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down right outer join #4") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, RightOuter, Some("y.b".attr === 1))
.where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.subquery('l)
val right = testRelation.where('b === 2).subquery('r)
val correctAnswer =
left.join(right, RightOuter, Some("r.b".attr === 1)).
where("l.b".attr === 2 && "l.c".attr === "r.c".attr).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down left outer join #5") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, LeftOuter, Some("y.b".attr === 1 && "x.a".attr === 3))
.where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('b === 2).subquery('l)
val right = testRelation.where('b === 1).subquery('r)
val correctAnswer =
left.join(right, LeftOuter, Some("l.a".attr===3)).
where("r.b".attr === 2 && "l.c".attr === "r.c".attr).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: push down right outer join #5") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, RightOuter, Some("y.b".attr === 1 && "x.a".attr === 3))
.where("x.b".attr === 2 && "y.b".attr === 2 && "x.c".attr === "y.c".attr)
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('a === 3).subquery('l)
val right = testRelation.where('b === 2).subquery('r)
val correctAnswer =
left.join(right, RightOuter, Some("r.b".attr === 1)).
where("l.b".attr === 2 && "l.c".attr === "r.c".attr).analyze
comparePlans(optimized, correctAnswer)
}
test("joins: can't push down") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y, condition = Some("x.b".attr === "y.b".attr))
}
val optimized = Optimize.execute(originalQuery.analyze)
comparePlans(analysis.EliminateSubqueryAliases(originalQuery.analyze), optimized)
}
test("joins: conjunctive predicates") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y)
.where(("x.b".attr === "y.b".attr) && ("x.a".attr === 1) && ("y.a".attr === 1))
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('a === 1).subquery('x)
val right = testRelation.where('a === 1).subquery('y)
val correctAnswer =
left.join(right, condition = Some("x.b".attr === "y.b".attr))
.analyze
comparePlans(optimized, analysis.EliminateSubqueryAliases(correctAnswer))
}
test("joins: conjunctive predicates #2") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery = {
x.join(y)
.where(("x.b".attr === "y.b".attr) && ("x.a".attr === 1))
}
val optimized = Optimize.execute(originalQuery.analyze)
val left = testRelation.where('a === 1).subquery('x)
val right = testRelation.subquery('y)
val correctAnswer =
left.join(right, condition = Some("x.b".attr === "y.b".attr))
.analyze
comparePlans(optimized, analysis.EliminateSubqueryAliases(correctAnswer))
}
test("joins: conjunctive predicates #3") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val z = testRelation.subquery('z)
val originalQuery = {
z.join(x.join(y))
.where(("x.b".attr === "y.b".attr) && ("x.a".attr === 1) &&
("z.a".attr >= 3) && ("z.a".attr === "x.b".attr))
}
val optimized = Optimize.execute(originalQuery.analyze)
val lleft = testRelation.where('a >= 3).subquery('z)
val left = testRelation.where('a === 1).subquery('x)
val right = testRelation.subquery('y)
val correctAnswer =
lleft.join(
left.join(right, condition = Some("x.b".attr === "y.b".attr)),
condition = Some("z.a".attr === "x.b".attr))
.analyze
comparePlans(optimized, analysis.EliminateSubqueryAliases(correctAnswer))
}
test("joins: push down where clause into left anti join") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery =
x.join(y, LeftAnti, Some("x.b".attr === "y.b".attr))
.where("x.a".attr > 10)
.analyze
val optimized = Optimize.execute(originalQuery)
val correctAnswer =
x.where("x.a".attr > 10)
.join(y, LeftAnti, Some("x.b".attr === "y.b".attr))
.analyze
comparePlans(optimized, analysis.EliminateSubqueryAliases(correctAnswer))
}
test("joins: only push down join conditions to the right of a left anti join") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val originalQuery =
x.join(y,
LeftAnti,
Some("x.b".attr === "y.b".attr && "y.a".attr > 10 && "x.a".attr > 10)).analyze
val optimized = Optimize.execute(originalQuery)
val correctAnswer =
x.join(
y.where("y.a".attr > 10),
LeftAnti,
Some("x.b".attr === "y.b".attr && "x.a".attr > 10))
.analyze
comparePlans(optimized, analysis.EliminateSubqueryAliases(correctAnswer))
}
test("joins: only push down join conditions to the right of an existence join") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val fillerVal = 'val.boolean
val originalQuery =
x.join(y,
ExistenceJoin(fillerVal),
Some("x.a".attr > 1 && "y.b".attr > 2)).analyze
val optimized = Optimize.execute(originalQuery)
val correctAnswer =
x.join(
y.where("y.b".attr > 2),
ExistenceJoin(fillerVal),
Some("x.a".attr > 1))
.analyze
comparePlans(optimized, analysis.EliminateSubqueryAliases(correctAnswer))
}
val testRelationWithArrayType = LocalRelation('a.int, 'b.int, 'c_arr.array(IntegerType))
test("generate: predicate referenced no generated column") {
val originalQuery = {
testRelationWithArrayType
.generate(Explode('c_arr), true, false, Some("arr"))
.where(('b >= 5) && ('a > 6))
}
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = {
testRelationWithArrayType
.where(('b >= 5) && ('a > 6))
.generate(Explode('c_arr), true, false, Some("arr")).analyze
}
comparePlans(optimized, correctAnswer)
}
test("generate: non-deterministic predicate referenced no generated column") {
val originalQuery = {
testRelationWithArrayType
.generate(Explode('c_arr), true, false, Some("arr"))
.where(('b >= 5) && ('a + Rand(10).as("rnd") > 6) && ('c > 6))
}
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = {
testRelationWithArrayType
.where('b >= 5)
.generate(Explode('c_arr), true, false, Some("arr"))
.where('a + Rand(10).as("rnd") > 6 && 'c > 6)
.analyze
}
comparePlans(optimized, correctAnswer)
}
test("generate: part of conjuncts referenced generated column") {
val generator = Explode('c_arr)
val originalQuery = {
testRelationWithArrayType
.generate(generator, true, false, Some("arr"))
.where(('b >= 5) && ('c > 6))
}
val optimized = Optimize.execute(originalQuery.analyze)
val referenceResult = {
testRelationWithArrayType
.where('b >= 5)
.generate(generator, true, false, Some("arr"))
.where('c > 6).analyze
}
// Since newly generated columns get different ids every time being analyzed
// e.g. comparePlans(originalQuery.analyze, originalQuery.analyze) fails.
// So we check operators manually here.
// Filter("c" > 6)
assertResult(classOf[Filter])(optimized.getClass)
assertResult(1)(optimized.asInstanceOf[Filter].condition.references.size)
assertResult("c") {
optimized.asInstanceOf[Filter].condition.references.toSeq(0).name
}
// the rest part
comparePlans(optimized.children(0), referenceResult.children(0))
}
test("generate: all conjuncts referenced generated column") {
val originalQuery = {
testRelationWithArrayType
.generate(Explode('c_arr), true, false, Some("arr"))
.where(('c > 6) || ('b > 5)).analyze
}
val optimized = Optimize.execute(originalQuery)
comparePlans(optimized, originalQuery)
}
test("aggregate: push down filter when filter on group by expression") {
val originalQuery = testRelation
.groupBy('a)('a, count('b) as 'c)
.select('a, 'c)
.where('a === 2)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.where('a === 2)
.groupBy('a)('a, count('b) as 'c)
.analyze
comparePlans(optimized, correctAnswer)
}
test("aggregate: don't push down filter when filter not on group by expression") {
val originalQuery = testRelation
.select('a, 'b)
.groupBy('a)('a, count('b) as 'c)
.where('c === 2L)
val optimized = Optimize.execute(originalQuery.analyze)
comparePlans(optimized, originalQuery.analyze)
}
test("aggregate: push down filters partially which are subset of group by expressions") {
val originalQuery = testRelation
.select('a, 'b)
.groupBy('a)('a, count('b) as 'c)
.where('c === 2L && 'a === 3)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.where('a === 3)
.select('a, 'b)
.groupBy('a)('a, count('b) as 'c)
.where('c === 2L)
.analyze
comparePlans(optimized, correctAnswer)
}
test("aggregate: push down filters with alias") {
val originalQuery = testRelation
.select('a, 'b)
.groupBy('a)(('a + 1) as 'aa, count('b) as 'c)
.where(('c === 2L || 'aa > 4) && 'aa < 3)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.where('a + 1 < 3)
.select('a, 'b)
.groupBy('a)(('a + 1) as 'aa, count('b) as 'c)
.where('c === 2L || 'aa > 4)
.analyze
comparePlans(optimized, correctAnswer)
}
test("aggregate: push down filters with literal") {
val originalQuery = testRelation
.select('a, 'b)
.groupBy('a)('a, count('b) as 'c, "s" as 'd)
.where('c === 2L && 'd === "s")
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.where("s" === "s")
.select('a, 'b)
.groupBy('a)('a, count('b) as 'c, "s" as 'd)
.where('c === 2L)
.analyze
comparePlans(optimized, correctAnswer)
}
test("aggregate: don't push down filters that are nondeterministic") {
val originalQuery = testRelation
.select('a, 'b)
.groupBy('a)('a + Rand(10) as 'aa, count('b) as 'c, Rand(11).as("rnd"))
.where('c === 2L && 'aa + Rand(10).as("rnd") === 3 && 'rnd === 5)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.select('a, 'b)
.groupBy('a)('a + Rand(10) as 'aa, count('b) as 'c, Rand(11).as("rnd"))
.where('c === 2L && 'aa + Rand(10).as("rnd") === 3 && 'rnd === 5)
.analyze
comparePlans(optimized, correctAnswer)
}
test("SPARK-17712: aggregate: don't push down filters that are data-independent") {
val originalQuery = LocalRelation.apply(testRelation.output, Seq.empty)
.select('a, 'b)
.groupBy('a)(count('a))
.where(false)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = testRelation
.select('a, 'b)
.groupBy('a)(count('a))
.where(false)
.analyze
comparePlans(optimized, correctAnswer)
}
test("broadcast hint") {
val originalQuery = BroadcastHint(testRelation)
.where('a === 2L && 'b + Rand(10).as("rnd") === 3)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = BroadcastHint(testRelation.where('a === 2L))
.where('b + Rand(10).as("rnd") === 3)
.analyze
comparePlans(optimized, correctAnswer)
}
test("union") {
val testRelation2 = LocalRelation('d.int, 'e.int, 'f.int)
val originalQuery = Union(Seq(testRelation, testRelation2))
.where('a === 2L && 'b + Rand(10).as("rnd") === 3 && 'c > 5L)
val optimized = Optimize.execute(originalQuery.analyze)
val correctAnswer = Union(Seq(
testRelation.where('a === 2L),
testRelation2.where('d === 2L)))
.where('b + Rand(10).as("rnd") === 3 && 'c > 5L)
.analyze
comparePlans(optimized, correctAnswer)
}
test("expand") {
val agg = testRelation
.groupBy(Cube(Seq('a, 'b)))('a, 'b, sum('c))
.analyze
.asInstanceOf[Aggregate]
val a = agg.output(0)
val b = agg.output(1)
val query = agg.where(a > 1 && b > 2)
val optimized = Optimize.execute(query)
val correctedAnswer = agg.copy(child = agg.child.where(a > 1 && b > 2)).analyze
comparePlans(optimized, correctedAnswer)
}
test("predicate subquery: push down simple") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val z = LocalRelation('a.int, 'b.int, 'c.int).subquery('z)
val query = x
.join(y, Inner, Option("x.a".attr === "y.a".attr))
.where(Exists(z.where("x.a".attr === "z.a".attr)))
.analyze
val answer = x
.where(Exists(z.where("x.a".attr === "z.a".attr)))
.join(y, Inner, Option("x.a".attr === "y.a".attr))
.analyze
val optimized = Optimize.execute(Optimize.execute(query))
comparePlans(optimized, answer)
}
test("predicate subquery: push down complex") {
val w = testRelation.subquery('w)
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
val z = LocalRelation('a.int, 'b.int, 'c.int).subquery('z)
val query = w
.join(x, Inner, Option("w.a".attr === "x.a".attr))
.join(y, LeftOuter, Option("x.a".attr === "y.a".attr))
.where(Exists(z.where("w.a".attr === "z.a".attr)))
.analyze
val answer = w
.where(Exists(z.where("w.a".attr === "z.a".attr)))
.join(x, Inner, Option("w.a".attr === "x.a".attr))
.join(y, LeftOuter, Option("x.a".attr === "y.a".attr))
.analyze
val optimized = Optimize.execute(Optimize.execute(query))
comparePlans(optimized, answer)
}
test("Window: predicate push down -- basic") {
val winExpr = windowExpr(count('b), windowSpec('a :: Nil, 'b.asc :: Nil, UnspecifiedFrame))
val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a > 1)
val correctAnswer = testRelation
.where('a > 1).select('a, 'b, 'c)
.window(winExpr.as('window) :: Nil, 'a :: Nil, 'b.asc :: Nil)
.select('a, 'b, 'c, 'window).analyze
comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer)
}
test("Window: predicate push down -- predicates with compound predicate using only one column") {
val winExpr =
windowExpr(count('b), windowSpec('a.attr :: 'b.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame))
val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a * 3 > 15)
val correctAnswer = testRelation
.where('a * 3 > 15).select('a, 'b, 'c)
.window(winExpr.as('window) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil)
.select('a, 'b, 'c, 'window).analyze
comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer)
}
test("Window: predicate push down -- multi window expressions with the same window spec") {
val winSpec = windowSpec('a.attr :: 'b.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame)
val winExpr1 = windowExpr(count('b), winSpec)
val winExpr2 = windowExpr(sum('b), winSpec)
val originalQuery = testRelation
.select('a, 'b, 'c, winExpr1.as('window1), winExpr2.as('window2)).where('a > 1)
val correctAnswer = testRelation
.where('a > 1).select('a, 'b, 'c)
.window(winExpr1.as('window1) :: winExpr2.as('window2) :: Nil,
'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil)
.select('a, 'b, 'c, 'window1, 'window2).analyze
comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer)
}
test("Window: predicate push down -- multi window specification - 1") {
// order by clauses are different between winSpec1 and winSpec2
val winSpec1 = windowSpec('a.attr :: 'b.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame)
val winExpr1 = windowExpr(count('b), winSpec1)
val winSpec2 = windowSpec('a.attr :: 'b.attr :: Nil, 'a.asc :: Nil, UnspecifiedFrame)
val winExpr2 = windowExpr(count('b), winSpec2)
val originalQuery = testRelation
.select('a, 'b, 'c, winExpr1.as('window1), winExpr2.as('window2)).where('a > 1)
val correctAnswer1 = testRelation
.where('a > 1).select('a, 'b, 'c)
.window(winExpr1.as('window1) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil)
.window(winExpr2.as('window2) :: Nil, 'a.attr :: 'b.attr :: Nil, 'a.asc :: Nil)
.select('a, 'b, 'c, 'window1, 'window2).analyze
val correctAnswer2 = testRelation
.where('a > 1).select('a, 'b, 'c)
.window(winExpr2.as('window2) :: Nil, 'a.attr :: 'b.attr :: Nil, 'a.asc :: Nil)
.window(winExpr1.as('window1) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil)
.select('a, 'b, 'c, 'window1, 'window2).analyze
// When Analyzer adding Window operators after grouping the extracted Window Expressions
// based on their Partition and Order Specs, the order of Window operators is
// non-deterministic. Thus, we have two correct plans
val optimizedQuery = Optimize.execute(originalQuery.analyze)
try {
comparePlans(optimizedQuery, correctAnswer1)
} catch {
case ae: Throwable => comparePlans(optimizedQuery, correctAnswer2)
}
}
test("Window: predicate push down -- multi window specification - 2") {
// partitioning clauses are different between winSpec1 and winSpec2
val winSpec1 = windowSpec('a.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame)
val winExpr1 = windowExpr(count('b), winSpec1)
val winSpec2 = windowSpec('b.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame)
val winExpr2 = windowExpr(count('a), winSpec2)
val originalQuery = testRelation
.select('a, winExpr1.as('window1), 'b, 'c, winExpr2.as('window2)).where('b > 1)
val correctAnswer1 = testRelation.select('a, 'b, 'c)
.window(winExpr1.as('window1) :: Nil, 'a.attr :: Nil, 'b.asc :: Nil)
.where('b > 1)
.window(winExpr2.as('window2) :: Nil, 'b.attr :: Nil, 'b.asc :: Nil)
.select('a, 'window1, 'b, 'c, 'window2).analyze
val correctAnswer2 = testRelation.select('a, 'b, 'c)
.window(winExpr2.as('window2) :: Nil, 'b.attr :: Nil, 'b.asc :: Nil)
.window(winExpr1.as('window1) :: Nil, 'a.attr :: Nil, 'b.asc :: Nil)
.where('b > 1)
.select('a, 'window1, 'b, 'c, 'window2).analyze
val optimizedQuery = Optimize.execute(originalQuery.analyze)
// When Analyzer adding Window operators after grouping the extracted Window Expressions
// based on their Partition and Order Specs, the order of Window operators is
// non-deterministic. Thus, we have two correct plans
try {
comparePlans(optimizedQuery, correctAnswer1)
} catch {
case ae: Throwable => comparePlans(optimizedQuery, correctAnswer2)
}
}
test("Window: predicate push down -- predicates with multiple partitioning columns") {
val winExpr =
windowExpr(count('b), windowSpec('a.attr :: 'b.attr :: Nil, 'b.asc :: Nil, UnspecifiedFrame))
val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a + 'b > 1)
val correctAnswer = testRelation
.where('a + 'b > 1).select('a, 'b, 'c)
.window(winExpr.as('window) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil)
.select('a, 'b, 'c, 'window).analyze
comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer)
}
// complex predicates with the same references but the same expressions
// Todo: in Analyzer, to enable it, we need to convert the expression in conditions
// to the alias that is defined as the same expression
ignore("Window: predicate push down -- complex predicate with the same expressions") {
val winSpec = windowSpec(
partitionSpec = 'a.attr + 'b.attr :: Nil,
orderSpec = 'b.asc :: Nil,
UnspecifiedFrame)
val winExpr = windowExpr(count('b), winSpec)
val winSpecAnalyzed = windowSpec(
partitionSpec = '_w0.attr :: Nil,
orderSpec = 'b.asc :: Nil,
UnspecifiedFrame)
val winExprAnalyzed = windowExpr(count('b), winSpecAnalyzed)
val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a + 'b > 1)
val correctAnswer = testRelation
.where('a + 'b > 1).select('a, 'b, 'c, ('a + 'b).as("_w0"))
.window(winExprAnalyzed.as('window) :: Nil, '_w0 :: Nil, 'b.asc :: Nil)
.select('a, 'b, 'c, 'window).analyze
comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer)
}
test("Window: no predicate push down -- predicates are not from partitioning keys") {
val winSpec = windowSpec(
partitionSpec = 'a.attr :: 'b.attr :: Nil,
orderSpec = 'b.asc :: Nil,
UnspecifiedFrame)
val winExpr = windowExpr(count('b), winSpec)
// No push down: the predicate is c > 1, but the partitioning key is (a, b).
val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('c > 1)
val correctAnswer = testRelation.select('a, 'b, 'c)
.window(winExpr.as('window) :: Nil, 'a.attr :: 'b.attr :: Nil, 'b.asc :: Nil)
.where('c > 1).select('a, 'b, 'c, 'window).analyze
comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer)
}
test("Window: no predicate push down -- partial compound partition key") {
val winSpec = windowSpec(
partitionSpec = 'a.attr + 'b.attr :: 'b.attr :: Nil,
orderSpec = 'b.asc :: Nil,
UnspecifiedFrame)
val winExpr = windowExpr(count('b), winSpec)
// No push down: the predicate is a > 1, but the partitioning key is (a + b, b)
val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a > 1)
val winSpecAnalyzed = windowSpec(
partitionSpec = '_w0.attr :: 'b.attr :: Nil,
orderSpec = 'b.asc :: Nil,
UnspecifiedFrame)
val winExprAnalyzed = windowExpr(count('b), winSpecAnalyzed)
val correctAnswer = testRelation.select('a, 'b, 'c, ('a + 'b).as("_w0"))
.window(winExprAnalyzed.as('window) :: Nil, '_w0 :: 'b.attr :: Nil, 'b.asc :: Nil)
.where('a > 1).select('a, 'b, 'c, 'window).analyze
comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer)
}
test("Window: no predicate push down -- complex predicates containing non partitioning columns") {
val winSpec =
windowSpec(partitionSpec = 'b.attr :: Nil, orderSpec = 'b.asc :: Nil, UnspecifiedFrame)
val winExpr = windowExpr(count('b), winSpec)
// No push down: the predicate is a + b > 1, but the partitioning key is b.
val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a + 'b > 1)
val correctAnswer = testRelation
.select('a, 'b, 'c)
.window(winExpr.as('window) :: Nil, 'b.attr :: Nil, 'b.asc :: Nil)
.where('a + 'b > 1).select('a, 'b, 'c, 'window).analyze
comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer)
}
// complex predicates with the same references but different expressions
test("Window: no predicate push down -- complex predicate with different expressions") {
val winSpec = windowSpec(
partitionSpec = 'a.attr + 'b.attr :: Nil,
orderSpec = 'b.asc :: Nil,
UnspecifiedFrame)
val winExpr = windowExpr(count('b), winSpec)
val winSpecAnalyzed = windowSpec(
partitionSpec = '_w0.attr :: Nil,
orderSpec = 'b.asc :: Nil,
UnspecifiedFrame)
val winExprAnalyzed = windowExpr(count('b), winSpecAnalyzed)
    // No push down: the predicate is a - b > 1, but the partitioning key is a + b.
val originalQuery = testRelation.select('a, 'b, 'c, winExpr.as('window)).where('a - 'b > 1)
val correctAnswer = testRelation.select('a, 'b, 'c, ('a + 'b).as("_w0"))
.window(winExprAnalyzed.as('window) :: Nil, '_w0 :: Nil, 'b.asc :: Nil)
.where('a - 'b > 1).select('a, 'b, 'c, 'window).analyze
comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer)
}
test("join condition pushdown: deterministic and non-deterministic") {
val x = testRelation.subquery('x)
val y = testRelation.subquery('y)
// Verify that all conditions preceding the first non-deterministic condition are pushed down
// by the optimizer and others are not.
val originalQuery = x.join(y, condition = Some("x.a".attr === 5 && "y.a".attr === 5 &&
"x.a".attr === Rand(10) && "y.b".attr === 5))
val correctAnswer = x.where("x.a".attr === 5).join(y.where("y.a".attr === 5),
condition = Some("x.a".attr === Rand(10) && "y.b".attr === 5))
comparePlans(Optimize.execute(originalQuery.analyze), correctAnswer.analyze)
}
}
| u2009cf/spark-radar | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/FilterPushdownSuite.scala | Scala | apache-2.0 | 38,080 |
import sbt._
object Dependencies {
lazy val appDependencies = Seq(
"com.typesafe.slick" %% "slick" % "3.2.1",
"com.typesafe.akka" %% "akka-http" % "10.0.9",
"joda-time" % "joda-time" % "2.9.9"
)
lazy val testDependencies = Seq(
"org.scalatest" %% "scalatest" % "3.0.1" % Test
)
}
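// Hedged usage sketch: how these groups are typically wired into build.sbt. The module name
// "traderbot" is inferred from this file's location and is an assumption, not taken from the build:
//
//   lazy val traderbot = (project in file("traderbot"))
//     .settings(libraryDependencies ++= Dependencies.appDependencies ++ Dependencies.testDependencies)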
| avalon12/crypto-trader | traderbot/project/Dependencies.scala | Scala | gpl-3.0 | 375 |
/*
* Copyright (C) 2016 Academic Medical Center of the University of Amsterdam (AMC)
*
* This program is semi-free software: you can redistribute it and/or modify it
* under the terms of the Rosemary license. You may obtain a copy of this
* license at:
*
* https://github.com/AMCeScience/Rosemary-Vanilla/blob/master/LICENSE.md
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* You should have received a copy of the Rosemary license
* along with this program. If not,
* see https://github.com/AMCeScience/Rosemary-Vanilla/blob/master/LICENSE.md.
*
* Project: https://github.com/AMCeScience/Rosemary-Vanilla
* AMC eScience Website: http://www.ebioscience.amc.nl/
*/
package nl.amc.ebioscience.rosemary.controllers.api
import javax.inject._
import play.api.inject.{ QualifierInstance, BindingKey }
import play.api.{ Application => PlayApplication, _ }
import play.api.mvc._
import play.api.libs.json._
import nl.amc.ebioscience.rosemary.models._
import nl.amc.ebioscience.rosemary.models.core._
import nl.amc.ebioscience.rosemary.models.core.ModelBase._
import nl.amc.ebioscience.rosemary.models.core.Implicits._
import nl.amc.ebioscience.rosemary.controllers.JsonHelpers
import nl.amc.ebioscience.rosemary.core.{ WebSockets, HelperTools }
import nl.amc.ebioscience.rosemary.services.{ SecurityService, CryptoService }
import nl.amc.ebioscience.rosemary.services.processing._
import nl.amc.ebioscience.rosemary.services.search._
import nl.amc.ebioscience.processingmanager.types.messaging.{ ProcessingMessage, PortMessagePart }
import nl.amc.ebioscience.processingmanager.types.{ ProcessingLifeCycle, PortType, Credentials }
import java.util.Date
import com.google.inject.name.Names
@Singleton
class ProcessingsController @Inject() (
securityService: SecurityService,
cryptoService: CryptoService,
processingManagerClient: ProcessingManagerClient,
processingHelper: ProcessingHelper,
searchWriter: SearchWriter,
playApplication: Provider[PlayApplication]) extends Controller with JsonHelpers {
case class SubmitProcessingRequest(
workspace: Tag.Id,
application: Recipe.Id,
description: String,
dataPorts: Set[SubmitProcessingRequest.DataPort],
paramPorts: Set[SubmitProcessingRequest.ParamPort]) {
def validate: Either[String, Map[DefaultModelBase.Id, BaseEntity]] = {
// check existence of workspace tag id
val eitherWorkspace = Tag.findOneById(workspace).map {
w => Right(Map(w.id -> w))
} getOrElse Left(s"Could not find tag_id $workspace")
// check existence of application id
val eitherApplication = Recipe.findOneById(application).map {
r =>
r match {
case a: Application => Right(Map(a.id -> a))
case _ => Left(s"Recipe with ID $application is not an Application.")
}
} getOrElse Left(s"Could not find Recipe with ID: $application")
// check existence of datum ids
val dataMapList = for {
dataPort <- dataPorts.toList
datum <- Datum.findOneById(dataPort.datum)
} yield Map(datum.id -> datum)
val dataMap = dataMapList reduce (_ ++ _)
val eitherData = for {
dataPort <- dataPorts.toList
} yield dataMap get dataPort.datum map {
d => Right(Map(d.id -> d))
} getOrElse Left(s"Could not find datum_id ${dataPort.datum}")
// check existence of port names
val eitherPorts = eitherApplication.right.flatMap { appIdMap =>
val appPorts = appIdMap(application).iPorts.map(_.name)
val reqPorts = (dataPorts.map(_.port) ++ paramPorts.map(_.port)).toSet
val diffs = appPorts diff reqPorts
if (diffs.isEmpty) Right(appIdMap) else Left(s"Could not find port numbers: $diffs")
}
// concatenate all "Eithers"
val eithersList = eitherData ::: List(eitherWorkspace, eitherApplication, eitherPorts)
val eitherIterables = HelperTools.evertEitherList(eithersList)
// reduce iterables
eitherIterables match {
case Right(ml) => Right(ml reduce (_ ++ _))
case Left(el) => Left(el mkString (" , "))
}
}
}
object SubmitProcessingRequest {
case class DataPort(port: String, datum: Datum.Id)
case class ParamPort(port: String, param: String)
implicit val dataPortFmt = Json.format[SubmitProcessingRequest.DataPort]
implicit val paramPortFmt = Json.format[SubmitProcessingRequest.ParamPort]
implicit val requestFormatter = Json.format[SubmitProcessingRequest]
}
def create = securityService.HasToken(parse.json) { implicit request =>
val json = request.body
Logger.trace(s"Request: $json")
json.validate[SubmitProcessingRequest].fold(
valid = { submitReq =>
submitReq.validate match {
case Right(objectMap) => // every id exists
val workspace = objectMap(submitReq.workspace).asInstanceOf[WorkspaceTag]
val dataProcessingTag = Tag.getProcessingCategory(Tag.ProcessingCategories.DataProcessing.toString)
val inPreparationStatusTag = Tag.getProcessingStatusTag(ProcessingLifeCycle.InPreparation.toString)
val abortedStatusTag = Tag.getProcessingStatusTag(ProcessingLifeCycle.Aborted.toString)
val application = objectMap(submitReq.application).asInstanceOf[Application]
// run-time binding using the dependency injection API
val qualifier = Some(QualifierInstance(Names.named(application.transformer)))
val bindingKey = BindingKey[Transformer](classOf[Transformer], qualifier)
val transformer = playApplication.get.injector.instanceOf[Transformer](bindingKey)
// to avoid multiple queries to the DB, wrap it in the Cybertronian
val cybertronian = Cybertronian(
application = application,
dataPorts = submitReq.dataPorts.map { dp => (dp.port, Seq(objectMap(dp.datum).asInstanceOf[Datum])) }.toMap,
paramPorts = submitReq.paramPorts.map { pp => (pp.port, Seq(pp.param)) }.toMap)
transformer.revealDecepticons(cybertronian) match {
case Some(map) => Conflict(map mkString (" ; ")) // report domain-specific problems related to the type of inputs
case None => { // there is no domain-specific problem
// Raw param and data ports for ProcessingGroup
val params = submitReq.paramPorts.toSeq.map { pp =>
ParamOrDatum(name = pp.port, param = Some(pp.param))
}
val data = submitReq.dataPorts.toSeq.map { dp =>
ParamOrDatum(name = dp.port, datum = Some(DatumAndReplica(datum = dp.datum)))
}
// Create a new ProcessingGroup
val processingGroup = ProcessingGroup(
name = submitReq.description,
initiator = User.current.id,
inputs = data ++ params,
recipes = Set(application.id),
tags = Set(workspace.id, dataProcessingTag.id))
// Define credentials
val creds = User.credentialFor(transformer.planet.id).map { cred =>
cred.copy(password = cryptoService.decrypt(cred.password))
} orElse {
Logger.debug(s"${User.current.email} has no credential for ${transformer.planet.name}, trying community credentials...")
for (user <- transformer.planet.username; pass <- transformer.planet.password) yield Credential(
resource = transformer.planet.id,
username = user,
password = cryptoService.decrypt(pass))
}
val pmcreds = creds.map { c =>
Credentials(
username = c.username,
password = c.password,
server = "None", // TODO
authType = "userpass")
}
// Transform the user request to one or more Processings according to the domain-specific definition
val seqIOICP = transformer.transform(cybertronian)
// Create a sequence of Processings and ProcessingMessages for each Set of IOInflatedConcretePorts
val psAndpms = seqIOICP.map { ioicp =>
// Create a Processing for each Set of IOInflatedConcretePorts
val p = Processing(
parentId = Some(processingGroup.id),
name = submitReq.description,
initiator = User.current.id,
inputs = ioicp.inputs.map { icp =>
ParamOrDatum(
name = icp.name,
param = icp.data match {
case Param(constant) => Some(constant)
case _ => None
},
datum = icp.data match {
case ConcreteDatum(data, replica) => Some(DatumAndReplica(data.id, Some(replica.id)))
case _ => None
})
},
recipes = Set(application.id),
tags = Set(workspace.id, dataProcessingTag.id))
// Create a ProcessingMessage for each set of IOInflatedConcretePorts and its respective Processing
val pm = ProcessingMessage(
id = p.id.toString,
appName = application.name,
appVersion = application.version,
appPlatform = application.platform,
desc = submitReq.description,
inputs = ioicp.inputs.map { icp =>
PortMessagePart(
portName = icp.name,
valueType = icp.data match {
case Param(_) => PortType.Constant.toString
case ConcreteDatum(_, _) => PortType.File.toString
case _ => "error: unrecognized type"
},
value = icp.data match {
case Param(constant) => constant
case ConcreteDatum(_, replica) => transformer.planet.uri + "/" + replica.location
case _ => "error: unrecognized value"
},
resourceName = transformer.planet.name,
readCreds = pmcreds,
writeCreds = None) // TODO
}.toList,
outputs = ioicp.outputs.map { icp =>
PortMessagePart(
portName = icp.name,
valueType = icp.data match {
case FutureDatum(_) => PortType.File.toString
case _ => "error: unrecognized type"
},
value = icp.data match {
case FutureDatum(url) => url
case _ => "error: unrecognized value"
},
resourceName = transformer.planet.name,
readCreds = None, // TODO
writeCreds = pmcreds)
}.toList,
groupId = Some(processingGroup.id.toString),
platformCreds = None) // TODO
(p, pm)
}
// Submit ProcessingMessages one by one and update their status accordingly, and save them
val insertedPs = psAndpms map { pAndpm =>
// Submit Processing
processingManagerClient.submitProcessing(pAndpm._2) match {
case Right(r) => pAndpm._1.copy(progress = 10,
statuses = Seq(nl.amc.ebioscience.rosemary.models.Status(ProcessingLifeCycle.InPreparation)),
tags = pAndpm._1.tags + inPreparationStatusTag.id).insert
case Left(e) => pAndpm._1.copy(progress = 0,
statuses = Seq(nl.amc.ebioscience.rosemary.models.Status(ProcessingLifeCycle.Aborted)),
tags = pAndpm._1.tags + abortedStatusTag.id).insert
}
}
// Update status of the ProcessingGroup and save it
val avgProgress = insertedPs.map(_.progress).sum / insertedPs.length
val insertedPG = if (avgProgress > 0)
processingGroup.copy(progress = avgProgress,
statuses = Seq(nl.amc.ebioscience.rosemary.models.Status(ProcessingLifeCycle.InPreparation)),
tags = processingGroup.tags + inPreparationStatusTag.id).insert
else processingGroup.copy(progress = avgProgress,
statuses = Seq(nl.amc.ebioscience.rosemary.models.Status(ProcessingLifeCycle.Aborted)),
tags = processingGroup.tags + abortedStatusTag.id).insert // all failed
// Index Processings and their ProcessingGroup
searchWriter.add(processingGroup)
insertedPs.foreach(searchWriter.add(_))
searchWriter.commit
// Create and save user action notification
val upNotification = UserProcessingNotification(
actor = User.current.id,
action = "submitted",
processing = processingGroup.id,
tags = Set(workspace.id),
info = Info(dict = Map("countData" -> Valunit(cybertronian.dataPorts.size.toString, Some("Integer"))))).insert
// Create and save processing status notification
val pNotification = ProcessingNotification(
processing = processingGroup.id,
status = insertedPG.lastStatus.get,
tags = Set(workspace.id),
info = Info(dict = Map("countSubmission" -> Valunit(insertedPs.length.toString, Some("Integer"))))).insert
// Send notification via WebSocket
val socket = WebSockets.getSocket
socket.map(_.send("notification", upNotification.toJson))
socket.map(_.send("notification", pNotification.toJson))
// Finally send the new processing to the front-end
Ok(processingGroup.toJson)
}
}
case Left(errors) => Conflict(errors) // report invalid ids
}
},
invalid = {
errors => BadRequest(Json.toJson(errors))
})
}
private def extractInputs(inflatedConcretePorts: Set[InflatedConcretePort]): Set[ParamOrDatum] = {
???
    // TODO implement, e.g. filter inflatedConcretePorts on _.data.isInstanceOf[ConcreteDatum] and map to ParamOrDatum
}
case class ProcessingIORequest(input: Option[Datum.Id], output: Option[Datum.Id])
object ProcessingIORequest {
implicit val processingIORequestFmt = Json.format[ProcessingIORequest]
}
def findByIO = securityService.HasToken(parse.json) { implicit request =>
val json = request.body
json.validate[ProcessingIORequest].fold(
valid = { req =>
(req.input, req.output) match {
        case (None, None) => Conflict(s"At least one of input or output must be provided.")
case (Some(i), None) => Ok(Processing.findByI(i).toJson)
case (None, Some(o)) => Ok(Processing.findByO(o).toJson)
case (Some(i), Some(o)) => Ok(Processing.findByIorO(i, o).toJson)
}
},
invalid = {
errors => BadRequest(Json.toJson(errors))
})
}
def queryId(id: Processing.Id) = securityService.HasToken(parse.empty) { implicit request =>
Processing.findOneById(id).map { processing =>
Ok(processing.toJson)
} getOrElse Conflict(s"Could not find Processing with ID: $id")
}
def abort(id: Processing.Id) = securityService.HasToken(parse.json) { implicit request =>
Logger.trace("Request: " + request.body)
Processing.findOneById(id).map { processing =>
      val reason = (request.body \ "reason").asOpt[String].getOrElse("Yes, We Can!")
// TODO Send abort request to the Processing Manager
processingManagerClient.abortProcessing(processing.id, reason).fold(
{ error => Conflict(error) }, // Report Processing Manager service connection problems
{ optMsg => // Call to the Processing Manager service was successful
optMsg match {
case None => Logger.warn(s"Invalid Json response received when aborting the Processing ${processing.id}")
case Some(msg) => msg match {
case "OK" =>
// Update ProcessingGroup status and send notification about its status change
processingHelper.updateStatusAndSendNotification(processing)
// TODO Send user action notification
case m @ _ => Logger.warn(s"Processing Manager says that aborting the Processing ${processing.id} was not OK: $m")
}
}
})
// Redirect(s"/api/v1/processings/${id}")
Ok("OK!")
} getOrElse Conflict(s"Could not find processing with ID: $id")
}
def resume(id: Processing.Id) = securityService.HasToken(parse.empty) { implicit request =>
Processing.findOneById(id).map { processing =>
// Send resume request to the Processing Manager
processingManagerClient.resumeProcessing(processing.id).fold(
{ error => Conflict(error) }, // Report Processing Manager service connection problems
{ optMsg => // Call to the Processing Manager service was successful
optMsg match {
case None => Logger.warn(s"Invalid Json response received when resuming the Processing ${processing.id}")
case Some(msg) => msg match {
case "OK" =>
// Update Processing status and send notification about its status change
processingHelper.updateStatusAndSendNotification(processing)
// TODO Send user action notification
case m @ _ => Logger.warn(s"Processing Manager says that resuming the Processing ${processing.id} was not OK: $m")
}
}
})
// Redirect(s"/api/v1/processings/${id}")
Ok("OK!")
} getOrElse Conflict(s"Could not find processing with ID: $id")
}
}
| AMCeScience/Rosemary-Vanilla | app/nl/amc/ebioscience/rosemary/controllers/api/ProcessingsController.scala | Scala | agpl-3.0 | 18,879 |
package org.remus32.NanoHttpdLayer
import fi.iki.elonen.NanoHTTPD
import fi.iki.elonen.NanoHTTPD.IHTTPSession
import fi.iki.elonen.NanoHTTPD.Response.IStatus
import scala.language.implicitConversions
/**
* Created by remus32 on 17/04/16.
*/
trait Implicits {
implicit def ihttpsessionToRequest(that: IHTTPSession): Request = new Request(that)
implicit def responseToResponse(that: Response): NanoHTTPD.Response = that.toResponse
implicit def iStatusToStatus(that: IStatus): Status = new Status {
def description: String = that.getDescription
def code: Int = that.getRequestStatus
}
implicit def statusToIStatus(that: Status): IStatus = new IStatus {
def getRequestStatus: Int = that.code
def getDescription: String = that.description
}
}
object Implicits extends Implicits
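// Typical usage (illustrative sketch): `import org.remus32.NanoHttpdLayer.Implicits._`
// brings these conversions between the NanoHTTPD types and this layer's
// Request/Response/Status wrappers into implicit scope.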
| remus32/BetterServer | serverLayer/src/main/scala/org/remus32/NanoHttpdLayer/Implicits.scala | Scala | gpl-3.0 | 814 |
/*
* Created on 2010/04/01
* Copyright (c) 2010-2014, Wei-ju Wu.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Wei-ju Wu nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.zmpp.glulx
import org.scalatest._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.BeforeAndAfterEach
import java.io._
import org.zmpp.base._
@RunWith(classOf[JUnitRunner])
class StackSpec extends FlatSpec with Matchers with BeforeAndAfterEach {
val DummyMem = Array[Byte](0x47, 0x6c, 0x75, 0x6c, 0x00, 0x03, 0x01, 0x01,
0x00, 0x00, 0x00, 0x00, // RAMSTART
0x00, 0x00, 0x00, 0x00, // EXTSTART
0x00, 0x00, 0x00, 0x00, // ENDMEM
0x00, 0x00, 0x00, 0xff.asInstanceOf[Byte], // STACKSIZE
0x00, 0x00, 0x00, 0x00, // STARTFUNC
0x00, 0x00, 0x00, 0x00, // Decoding table
0x00, 0x00, 0x00, 0x00 // Checksum
)
var vmstate = new GlulxVMState
override def beforeEach {
vmstate.init(DummyMem)
}
"GlulxVM stack" should "be initialized" in {
vmstate.stackEmpty should be (true)
}
it should "push and pop a byte" in {
vmstate.pushInt(0) // artificial frame len
vmstate.pushByte(1)
vmstate.sp should be (5)
vmstate.topByte should be (1)
vmstate.popByte should be (1)
vmstate.sp should be (4)
vmstate.pushByte(255)
vmstate.topByte should be (255)
vmstate.popByte should be (255)
}
it should "push and pop short" in {
vmstate.pushInt(0) // artificial frame len
vmstate.pushShort(32767)
vmstate.topShort should be (32767)
vmstate.pushShort(65535)
vmstate.topShort should be (65535)
vmstate.popShort should be (65535)
vmstate.popShort should be (32767)
vmstate.sp should be (4)
}
it should "push and pop int" in {
vmstate.pushInt(0) // artificial frame len
vmstate.pushInt(32767)
vmstate.topInt should equal (32767)
vmstate.pushInt(-42)
vmstate.topInt should equal (-42)
vmstate.popInt should equal (-42)
vmstate.popInt should equal (32767)
vmstate.sp should be (4)
}
it should "set and get a byte" in {
vmstate.setByteInStack(3, 0xba)
vmstate.getByteInStack(3) should be (0xba)
vmstate.sp should be (0)
}
it should "set and get a short" in {
vmstate.setShortInStack(4, 0xcafe)
vmstate.getShortInStack(4) should be (0xcafe)
vmstate.sp should be (0)
}
it should "set and get a int" in {
vmstate.setIntInStack(4, 0xdeadbeef)
vmstate.getIntInStack(4) should be (0xdeadbeef)
vmstate.sp should be (0)
}
}
| weiju/zmpp2 | zmpp-glulx/src/test/scala/org/zmpp/glulx/StackTest.scala | Scala | bsd-3-clause | 4,125 |
package db.scalikejdbc
import org.intracer.wmua.Image
import org.specs2.mutable.Specification
class ImageSpec extends Specification with TestDb {
sequential
def image(id: Long) =
Image(id, s"File:Image$id.jpg", None, None, 640, 480, Some(s"12-345-$id"))
def addToContest(contestId: Long, images: Seq[Image]) =
CategoryLinkJdbc.addToCategory(ContestJuryJdbc.findById(contestId).flatMap(_.categoryId).get, images)
"fresh database" should {
"be empty" in {
withDb {
val images = imageDao.findAll()
images.size === 0
}
}
"insert image" in {
withDb {
val id = 10
val contestId = 20
createContests(contestId)
val image = Image(id, "File:Image.jpg", None, None, 640, 480, Some("12-345-6789"))
imageDao.batchInsert(Seq(image))
addToContest(contestId, Seq(image))
val dbi = imageDao.findById(id)
dbi === Some(image)
val images = imageDao.findAll()
images === Seq(image)
}
}
"find by contest " in {
withDb {
val (contest1, contest2) = (10, 20)
createContests(contest1, contest2)
val images1 = (11 to 19).map(id => image(id))
val images2 = (21 to 29).map(id => image(id))
imageDao.batchInsert(images1 ++ images2)
addToContest(contest1, images1)
addToContest(contest2, images2)
imageDao.findByContestId(10) === images1
imageDao.findByContestId(20) === images2
}
}
"contests can share images" in {
withDb {
val (contest1, contest2) = (10, 20)
createContests(contest1, contest2)
val images1 = (11 to 19).map(id => image(id))
val images2 = (21 to 29).map(id => image(id))
val commonImages = (31 to 39).map(id => image(id))
imageDao.batchInsert(images1 ++ images2 ++ commonImages)
addToContest(contest1, images1)
addToContest(contest2, images2)
addToContest(contest1, commonImages)
addToContest(contest2, commonImages)
imageDao.findByContestId(10) === images1 ++ commonImages
imageDao.findByContestId(20) === images2 ++ commonImages
}
}
}
}
| intracer/wlxjury | test/db/scalikejdbc/ImageSpec.scala | Scala | apache-2.0 | 2,206 |
package com.azavea.server
import com.azavea.server.mapalgebra.focal._
import geotrellis.raster._
import geotrellis.raster.io._
import geotrellis.raster.histogram.{Histogram, StreamingHistogram}
import geotrellis.raster.render._
import geotrellis.spark._
import geotrellis.spark.buffer.{BufferedTile, Direction}
import geotrellis.pointcloud.spark._
import geotrellis.pointcloud.spark.io._
import geotrellis.pointcloud.spark.io.hadoop._
import geotrellis.spark.io._
import geotrellis.spark.io.hadoop._
import geotrellis.spark.summary._
import geotrellis.spark.tiling.FloatingLayoutScheme
import geotrellis.spark.mapalgebra._
import geotrellis.spark.mapalgebra.focal._
import geotrellis.spark.mapalgebra.focal.hillshade._
import geotrellis.proj4._
import geotrellis.raster.io.geotiff.GeoTiff
import geotrellis.raster.io.geotiff.writer.GeoTiffWriter
import geotrellis.raster.mapalgebra.focal.{Square, TargetCell}
import geotrellis.raster.mapalgebra.focal.hillshade.Hillshade
import geotrellis.raster.rasterize._
import geotrellis.raster.rasterize.polygon._
import geotrellis.raster.summary.polygonal._
import geotrellis.spark.io.AttributeStore
import geotrellis.spark.io.avro.AvroRecordCodec
import geotrellis.util._
import geotrellis.vector._
import geotrellis.vector.io._
import org.apache.hadoop.fs.Path
//import org.apache.spark.{SparkConf, SparkContext}
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.server._
import akka.actor.ActorRef
import akka.http.scaladsl.model.{ContentType, HttpEntity, HttpResponse, MediaTypes}
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import ch.megard.akka.http.cors.CorsDirectives._
import spray.json._
import spray.json.DefaultJsonProtocol._
import spire.syntax.cfor._
import scala.concurrent._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.reflect.ClassTag
trait Router extends Directives with CacheSupport with AkkaSystem.LoggerExecutor {
// val conf: SparkConf
val tileReader: ValueReader[LayerId]
// val layerReader: FilteringLayerReader[LayerId]
val collectionReader: CollectionLayerReader[LayerId]
val attributeStore: AttributeStore
val staticPath: String
// implicit val sc: SparkContext
import AkkaSystem.materializer
implicit def rejectionHandler =
RejectionHandler.newBuilder().handleAll[MethodRejection] { rejections =>
val methods = rejections map (_.supported)
lazy val names = methods map (_.name) mkString ", "
respondWithHeader(Allow(methods)) {
options {
complete(s"Supported methods : $names.")
} ~
complete(MethodNotAllowed, s"HTTP method not allowed, supported methods: $names!")
}
}
.result()
  def printAnyException[T](f: => T): T = {
try {
f
} catch {
case e: Throwable =>
import java.io._
val sw = new StringWriter
e.printStackTrace(new PrintWriter(sw))
println(sw.toString)
throw e
}
}
def colorWithHillshade(elevation: Tile, hillshade: Tile, cm: ColorMap): Tile =
elevation.color(cm).combine(hillshade) { (rgba, z) =>
if(rgba == 0) { 0 }
else {
// Convert to HSB, replace the brightness with the hillshade value, convert back to RGBA
val (r, g, b, a) = rgba.unzipRGBA
val hsbArr = java.awt.Color.RGBtoHSB(r, g, b, null)
val (newR, newG, newB) = (java.awt.Color.HSBtoRGB(hsbArr(0), hsbArr(1), math.min(z, 160).toFloat / 160.0f) << 8).unzipRGB
RGBA(newR, newG, newB, a)
}
}
def getTile(layerId: LayerId, key: SpatialKey): Tile =
getCachedTile(layerId, key) { tileReader.reader[SpatialKey, Tile](layerId).read(key) }
def getBufferedTile[K: SpatialComponent: AvroRecordCodec: JsonFormat: ClassTag](layerId: LayerId, key: K, layerBounds: GridBounds, tileDimensions: (Int, Int)): Future[BufferedTile[Tile]] = {
val futures: Vector[Future[Option[(Direction, Tile)]]] =
getNeighboringKeys(key)
.map { case (direction, key) =>
val sk = key.getComponent[SpatialKey]
if(!layerBounds.contains(sk.col, sk.row)) Future { None }
else {
(Future {
try {
val tile =
getCachedTile(layerId, sk) { tileReader.reader[SpatialKey, Tile](layerId).read(sk) }
Some(direction -> tile)
} catch {
case e: ValueNotFoundError => None
}
})
}
}
Future.sequence(futures)
.map { tileOpts =>
import Direction._
val flattened = tileOpts.flatten
val tileSeq =
flattened
.map(_._2)
            // TODO: Handle the case where there is a corner but no side,
// e.g. TopLeft but no Left
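          // The fold below works out where the center tile sits inside the composite
          // (centerCol/centerRow) and how many tiles wide/tall the composite is
          // (layoutCols/layoutRows), based on which neighboring directions returned a tile.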
val ((centerCol, centerRow),(layoutCols, layoutRows)) =
flattened
.map(_._1)
.foldLeft(((0, 0), (1, 1))) { case (acc @ ((centerCol, centerRow), (totalCol, totalRow)), direction) =>
direction match {
case Left =>
val newTotalCol =
if(totalCol == 1) { 2 }
else if(totalCol == 2) { 3 }
else { totalCol }
((1, centerRow), (newTotalCol, totalRow))
case Right =>
val newTotalCol =
if(totalCol == 1) { 2 }
else if(totalCol == 2) { 3 }
else { totalCol }
((centerCol, centerRow), (newTotalCol, totalRow))
case Top =>
val newTotalRow =
if(totalRow == 1) { 2 }
else if(totalRow == 2) { 3 }
else { totalRow }
((centerCol, 1), (totalCol, newTotalRow))
case Bottom =>
val newTotalRow =
if(totalRow == 1) { 2 }
else if(totalRow == 2) { 3 }
else { totalRow }
((centerCol, centerRow), (totalCol, newTotalRow))
case _ => acc
}
}
val tileLayout =
TileLayout(layoutCols, layoutRows, tileDimensions._1, tileDimensions._2)
val gridBounds = {
val (colMin, colMax) =
if(centerCol == 0) {
(0, tileDimensions._1 - 1)
} else {
(tileDimensions._1, tileDimensions._1 * 2 - 1)
}
val (rowMin, rowMax) =
if(centerRow == 0) {
(0, tileDimensions._2 - 1)
} else {
(tileDimensions._2, tileDimensions._2 * 2 - 1)
}
GridBounds(colMin, rowMin, colMax, rowMax)
}
val tile =
CompositeTile(tiles = tileSeq, tileLayout)
println(tileLayout)
println(gridBounds)
println(s"DIRECTIONS: ${flattened.map(_._1).toSeq}")
println(s"TILE DIMS: ${tile.cols} ${tile.rows}")
println(s"TILES DIMS: ${flattened.map(_._2.dimensions).toSeq}")
BufferedTile(tile, gridBounds)
}
}
def seqFutures[T, U](items: TraversableOnce[T])(func: T => Future[U]): Future[List[U]] = {
items.foldLeft(Future.successful[List[U]](Nil)) {
(f, item) => f.flatMap {
x => func(item).map(_ :: x)
}
} map (_.reverse)
}
def populateKeys[K: SpatialComponent](key: K): Vector[K] =
getNeighboringKeys(key).map(_._2)
def getNeighboringKeys[K: SpatialComponent](key: K): Vector[(Direction, K)] = {
import Direction._
val SpatialKey(c, r) = key.getComponent[SpatialKey]
Vector(
(TopLeft, key.setComponent(SpatialKey(c - 1, r - 1))),
(Top, key.setComponent(SpatialKey(c, r - 1))),
(TopRight, key.setComponent(SpatialKey(c + 1, r - 1))),
(Left, key.setComponent(SpatialKey(c - 1, r))),
(Center, key.setComponent(SpatialKey(c, r))),
(Right, key.setComponent(SpatialKey(c + 1, r))),
(BottomLeft, key.setComponent(SpatialKey(c - 1, r + 1))),
(Bottom, key.setComponent(SpatialKey(c, r + 1))),
(BottomRight, key.setComponent(SpatialKey(c + 1, r + 1)))
)
}
def keyToBounds[K: SpatialComponent](key: K): KeyBounds[K] = {
val SpatialKey(c, r) = key.getComponent[SpatialKey]
KeyBounds(
key.setComponent(SpatialKey(c - 1, r - 1)),
key.setComponent(SpatialKey(c + 1, r + 1))
)
}
def readTileNeighbours[K: SpatialComponent: AvroRecordCodec: JsonFormat: ClassTag](layerId: LayerId, key: K): Future[Seq[(K, Tile)]] = {
Future.sequence(populateKeys(key).map { k => Future {
try {
Some(k -> getCachedTile(layerId, k.getComponent[SpatialKey]) { tileReader.reader[K, Tile](layerId).read(k) })
} catch {
case e: ValueNotFoundError => None
}
} }) map (_.flatten)
}
def focalCompositeTileApply[
K: SpatialComponent: AvroRecordCodec: JsonFormat: ClassTag
](layerId: LayerId, key: K, colorRamp: String)(f: Seq[(K, Tile)] => Tile) =
readTileNeighbours(layerId, key) map { tileSeq => f(tileSeq) }
def DIMRender(tile: Tile, layerId: LayerId, colorRamp: String): HttpResponse = {
val breaks =
attributeStore
.read[Histogram[Double]](LayerId(layerId.name, 0), "histogram")
.asInstanceOf[StreamingHistogram]
.quantileBreaks(50)
val ramp =
ColorRampMap
.getOrElse(colorRamp, ColorRamps.BlueToRed)
val colorMap =
ramp
.toColorMap(breaks, ColorMap.Options(fallbackColor = ramp.colors.last))
val bytes = tile.renderPng(colorMap)
HttpResponse(entity = HttpEntity(ContentType(MediaTypes.`image/png`), bytes))
}
def index(i: Option[Int] = None) = i match {
case Some(n) if n > 1 && n < 6 => s"/index${n}.html"
case _ => "/index.html"
}
case class VolumeStats(mean: Double, min: Double, max: Double, volume: Double) {
def toJson: JsObject =
JsObject(
"mean" -> mean.toInt.toJson,
"min" -> min.toInt.toJson,
"max" -> max.toInt.toJson,
"volume" -> volume.toInt.toJson
)
}
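  /**
   * Computes the statistics by visiting every raster cell the polygon covers: full tiles are
   * traversed directly, partially covered tiles go through fractional rasterization, and the
   * per-tile (zTotal, count, min, max, volume) results are folded together, with volume
   * accumulated as z * cellArea * coverage fraction and mean taken as zTotal / count.
   */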
object VolumeStats {
def apply(layer: Seq[(SpatialKey, Tile)] with Metadata[TileLayerMetadata[SpatialKey]], geom: MultiPolygon, cellArea: Double): VolumeStats = {
val (zTotal, count, min, max, volume) =
layer
.polygonalSummary(geom, (0.0, 0L, Double.MaxValue, Double.MinValue, 0.0), new TilePolygonalSummaryHandler[(Double, Long, Double, Double, Double)] {
def handlePartialTile(raster: Raster[Tile], intersection: Polygon): (Double, Long, Double, Double, Double) = {
var volume = 0.0
var min = Double.MaxValue
var max = Double.MinValue
var zTotal = 0.0
var count = 0L
val tile = raster.tile
FractionalRasterizer.foreachCellByPolygon(intersection, raster.rasterExtent)(
new FractionCallback {
def callback(col: Int, row: Int, fraction: Double): Unit = {
val z = tile.getDouble(col, row)
if(isData(z)) {
volume += z * cellArea * fraction
zTotal += z
count += 1
if(z < min) { min = z }
if(max < z) { max = z }
}
}
}
)
(zTotal, count, min, max, volume)
}
def handleFullTile(tile: Tile): (Double, Long, Double, Double, Double) = {
var volume = 0.0
var min = Double.MaxValue
var max = Double.MinValue
var zTotal = 0.0
var count = 0L
tile.foreachDouble { z =>
if(isData(z)) {
volume += z * cellArea
zTotal += z
count += 1
if(z < min) { min = z }
if(max < z) { max = z }
}
}
(zTotal, count, min, max, volume)
}
def combineResults(values: Seq[(Double, Long, Double, Double, Double)]): (Double, Long, Double, Double, Double) =
values
.foldLeft((0.0, 0L, Double.MaxValue, Double.MinValue, 0.0)) { case ((accZTotal, accCount, accMin, accMax, accVolume), (zTotal, count, min, max, volume)) =>
(
zTotal + accZTotal,
count + accCount,
math.min(min, accMin),
math.max(max, accMax),
volume + accVolume
)
}
})
VolumeStats(zTotal / count, min, max, volume)
}
}
def routes =
pathPrefix("ping") {
get {
complete { "pong" }
}
} ~
pathPrefix("api") {
pathPrefix("stats") {
import spray.json._
import DefaultJsonProtocol._
pathPrefix("poly") {
pathPrefix("single") {
pathPrefix(Segment / IntNumber) { (layerName, zoom) =>
// post {
// entity(as[String]) { poly =>
parameters('poly) { (poly) =>
val layerId = LayerId(layerName, zoom)
val md = attributeStore.readMetadata[TileLayerMetadata[SpatialKey]](layerId)
val cellArea = md.cellSize.width * md.cellSize.height
val rawGeometry = try {
poly.parseJson.convertTo[Geometry]
} catch {
case e: Exception => sys.error("THAT PROBABLY WASN'T GEOMETRY")
}
val geometry = rawGeometry match {
case p: Polygon => MultiPolygon(p.reproject(LatLng, md.crs))
case mp: MultiPolygon => mp.reproject(LatLng, md.crs)
case _ => sys.error(s"BAD GEOMETRY")
}
val result =
collectionReader
.query[SpatialKey, Tile, TileLayerMetadata[SpatialKey]](layerId)
.where(Intersects(geometry))
.result
val stats = VolumeStats(result, geometry, cellArea)
// Mean, Min, Max, Volume
cors() {
complete {
Future {
JsObject(
"mean" -> stats.mean.toInt.toJson,
"min" -> stats.min.toInt.toJson,
"max" -> stats.max.toInt.toJson
)
}
}
}
// }
}
}
} ~
pathPrefix("diff") {
pathPrefix(Segment / Segment / IntNumber) { (layer1Name, layer2Name, zoom) =>
// post {
// entity(as[String]) { poly =>
parameters('poly) { (poly) =>
val layer1Id = LayerId(layer1Name, zoom)
val layer2Id = LayerId(layer2Name, zoom)
val md = attributeStore.readMetadata[TileLayerMetadata[SpatialKey]](layer1Id)
val cellArea = md.cellSize.width * md.cellSize.height
val rawGeometry = try {
poly.parseJson.convertTo[Geometry]
} catch {
case e: Exception => sys.error("THAT PROBABLY WASN'T GEOMETRY")
}
val geometry = rawGeometry match {
case p: Polygon => MultiPolygon(p.reproject(LatLng, md.crs))
case mp: MultiPolygon => mp.reproject(LatLng, md.crs)
case _ => sys.error(s"BAD GEOMETRY")
}
val result1 =
collectionReader
.query[SpatialKey, Tile, TileLayerMetadata[SpatialKey]](layer1Id)
.where(Intersects(geometry))
.result
val result2 =
collectionReader
.query[SpatialKey, Tile, TileLayerMetadata[SpatialKey]](layer2Id)
.where(Intersects(geometry))
.result
val result = result1.withContext(_ - result2)
val stats = VolumeStats(result, geometry, cellArea)
cors() {
complete {
Future {
stats.toJson
}
}
}
}
}
// }
}
} ~
pathPrefix("point") {
pathPrefix("single") {
pathPrefix(Segment / IntNumber) { (layerName, zoom) =>
get {
parameters('lat.as[Double], 'lng.as[Double]) { (lat, lng) =>
val layerId = LayerId(layerName, zoom)
val md = attributeStore.readMetadata[TileLayerMetadata[SpatialKey]](layerId)
val point = Point(lng, lat).reproject(LatLng, md.crs)
val key = md.layout.mapTransform(point)
val extent = md.layout.mapTransform(key)
val elevationTile =
Future {
getTile(layerId, key)
}
val value =
elevationTile.map { tile =>
Raster(tile, extent).getDoubleValueAtPoint(point.x, point.y)
}
cors() {
complete {
value.map { v =>
JsObject(
"value" -> v.toInt.toJson
)
}
}
}
}
}
}
} ~
pathPrefix("diff") {
pathPrefix(Segment / Segment / IntNumber) { (layer1Name, layer2Name, zoom) =>
get {
parameters('lat.as[Double], 'lng.as[Double]) { (lat, lng) =>
val layer1Id = LayerId(layer1Name, zoom)
val layer2Id = LayerId(layer2Name, zoom)
val md = attributeStore.readMetadata[TileLayerMetadata[SpatialKey]](layer1Id)
val point = Point(lng, lat).reproject(LatLng, md.crs)
val key = md.layout.mapTransform(point)
val extent = md.layout.mapTransform(key)
val layer1Tile =
Future {
getTile(layer1Id, key)
}
val layer2Tile =
Future {
getTile(layer2Id, key)
}
val values =
for(
t1 <- layer1Tile;
t2 <- layer2Tile
) yield {
(
Raster(t1, extent).getDoubleValueAtPoint(point.x, point.y),
Raster(t2, extent).getDoubleValueAtPoint(point.x, point.y)
)
}
cors() {
complete {
Future {
values.map { case (v1, v2) =>
JsObject(
"value1" -> v1.toInt.toJson,
"value2" -> v2.toInt.toJson
)
}
}
}
}
}
}
}
}
}
}
} ~
pathPrefix("tms") {
pathPrefix("hillshade") {
pathPrefix(Segment / IntNumber / IntNumber / IntNumber) { (layerName, zoom, x, y) =>
parameters(
'colorRamp ? "blue-to-red",
'azimuth.as[Double] ? 315,
'altitude.as[Double] ? 45,
'zFactor.as[Double] ? 1,
'targetCell ? "all",
'poly ? ""
) { (colorRamp, azimuth, altitude, zFactor, targetCell, poly) =>
val target = targetCell match {
case "nodata" => TargetCell.NoData
case "data" => TargetCell.Data
case _ => TargetCell.All
}
val layerId = LayerId(layerName, zoom)
val key = SpatialKey(x, y)
val keys = populateKeys(key)
val kb = keyToBounds(key)
val md = attributeStore.readMetadata[TileLayerMetadata[SpatialKey]](layerId)
val extent = md.mapTransform(key)
val polygon =
if(poly.isEmpty) None
else Some(poly.parseGeoJson[Polygon].reproject(LatLng, md.crs))
complete {
val layerGridBounds =
md.bounds match {
case k: KeyBounds[SpatialKey] => k.toGridBounds
case _ => sys.error("Layer does not contain valid keybounds")
}
val tileDimensions =
md.layout.tileLayout.tileDimensions
val result: Future[Option[HttpResponse]] =
if(!layerGridBounds.contains(key.col, key.row)) { Future { None } }
else {
val elevationTile =
Future {
getTile(layerId, key)
}
val hillshadeTile =
getBufferedTile(layerId, key, layerGridBounds, tileDimensions)
.map { case BufferedTile(tile, bounds) =>
printAnyException {
Hillshade(
tile,
Square(1),
Some(bounds),
md.cellSize,
azimuth,
altitude,
zFactor,
target
)
}
}
val breaks =
attributeStore
.read[Histogram[Double]](LayerId(layerId.name, 0), "histogram")
.asInstanceOf[StreamingHistogram]
.quantileBreaks(50)
val ramp =
ColorRampMap
.getOrElse(colorRamp, ColorRamps.BlueToRed)
val colorMap =
ramp
.toColorMap(breaks, ColorMap.Options(fallbackColor = ramp.colors.last))
for(
e <- elevationTile;
h <- hillshadeTile
) yield {
val bytes =
colorWithHillshade(e, h, colorMap)
.renderPng
.bytes
Some(HttpResponse(entity = HttpEntity(ContentType(MediaTypes.`image/png`), bytes)))
}
}
result
}
}
}
} ~
pathPrefix("png") {
pathPrefix(Segment / IntNumber / IntNumber / IntNumber) { (layerName, zoom, x, y) =>
parameters('colorRamp ? "blue-to-red", 'poly ? "") { (colorRamp, poly) =>
val layerId = LayerId(layerName, zoom)
val key = SpatialKey(x, y)
val md = attributeStore.readMetadata[TileLayerMetadata[SpatialKey]](layerId)
val extent = md.mapTransform(key)
val polygon =
if(poly.isEmpty) None
else Some(poly.parseGeoJson[Polygon].reproject(LatLng, md.crs))
complete {
Future {
val tileOpt =
try {
Some(getTile(layerId, key))
} catch {
case e: ValueNotFoundError =>
None
}
tileOpt.map { tile =>
DIMRender(polygon.fold(tile) { p => tile.mask(extent, p.geom) }, layerId, colorRamp)
}
}
}
}
}
} ~
pathPrefix("diff-tms") {
pathPrefix("png") {
pathPrefix(Segment / Segment / IntNumber / IntNumber / IntNumber) { (layerName1, layerName2, zoom, x, y) =>
parameters(
'colorRamp ? "green-to-red",
'breaks ? "-11,-10,-3,-4,-5,-6,-2,-1,-0.1,-0.06,-0.041,-0.035,-0.03,-0.025,-0.02,-0.019,-0.017,-0.015,-0.01,-0.008,-0.002,0.002,0.004,0.006,0.009,0.01,0.013,0.015,0.027,0.04,0.054,0.067,0.1,0.12,0.15,0.23,0.29,0.44,0.66,0.7,1,1.2,1.4,1.6,1.7,2,3,4,5,50,60,70,80,90,150,200",
'poly ? ""
) { (colorRamp, pbreaks, poly) =>
val (layerId1, layerId2) = LayerId(layerName1, zoom) -> LayerId(layerName2, zoom)
val key = SpatialKey(x, y)
val (md1, md2) = attributeStore.readMetadata[TileLayerMetadata[SpatialKey]](layerId1) -> attributeStore.readMetadata[TileLayerMetadata[SpatialKey]](layerId2)
val extent = md1.mapTransform(key)
val polygon =
if(poly.isEmpty) None
else Some(poly.parseGeoJson[Polygon].reproject(LatLng, md1.crs))
complete {
Future {
val tileOpt =
try {
val tile1 = getTile(layerId1, key)
val tile2 = getTile(layerId2, key)
val diff = tile1 - tile2
Some(diff)
} catch {
case e: ValueNotFoundError =>
None
}
tileOpt.map { t =>
val tile = polygon.fold(t) { p => t.mask(extent, p.geom) }
println(s"tile.findMinMaxDouble: ${tile.findMinMaxDouble}")
println(s"pbreaks: ${pbreaks}")
val breaks = pbreaks match {
case "none" => {
tile
.histogramDouble
.asInstanceOf[StreamingHistogram]
.quantileBreaks(50)
}
case s => s.split(",").map(_.toDouble)
}
println(s"breaks: ${breaks.toList}")
val ramp =
ColorRampMap
.getOrElse(colorRamp, ColorRamps.BlueToRed)
val colorMap =
ramp
.toColorMap(breaks, ColorMap.Options(fallbackColor = ramp.colors.last))
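                  // NOTE: the computed colorMap is currently bypassed below in favor of the fixed ColorRampMap.gr ramp.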
//val bytes = tile.renderPng(colorMap)
val bytes = tile.renderPng(ColorRampMap.gr)
HttpResponse(entity = HttpEntity(ContentType(MediaTypes.`image/png`), bytes))
}
}
}
}
}
}
}
}
def time[T](msg: String)(f: => T): (T, JsObject) = {
val s = System.currentTimeMillis
val result = f
val e = System.currentTimeMillis
val t = "%,d".format(e - s)
val obj = JsObject(
"TIMING RESULT" -> JsObject(
"msg" -> msg.toJson,
"time (in ms)" -> t.toJson
)
)
println(obj.toString)
result -> obj
}
}
| geotrellis/geotrellis-pointcloud-demo | src/app-backend/server/src/main/scala/com/azavea/server/Router.scala | Scala | apache-2.0 | 27,497 |
package net.sansa_stack.inference.data
import org.apache.jena.graph.{Node, Triple}
/**
* @author Lorenz Buehmann
*/
trait JenaRDFTripleLike extends TripleOps[Jena] {
// self: Triple =>
//
// override def s: Node = self.getSubject
// override def p: Node = self.getPredicate
// override def o: Node = self.getObject
}
| SANSA-Stack/SANSA-RDF | sansa-inference/sansa-inference-common/src/main/scala/net/sansa_stack/inference/data/JenaRDFTripleLike.scala | Scala | apache-2.0 | 327 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.cassandra
import java.nio.ByteBuffer
import com.twitter.conversions.time._
import com.twitter.finagle.stats.{DefaultStatsReceiver, StatsReceiver}
import com.twitter.util.{Duration, Future}
import com.twitter.zipkin.adjuster.{ApplyTimestampAndDuration, CorrectForClockSkew, MergeById}
import com.twitter.zipkin.common.Span
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.storage.{CollectAnnotationQueries, IndexedTraceId, SpanStore}
import com.twitter.zipkin.thriftscala.{Span => ThriftSpan}
import com.twitter.zipkin.util.{FutureUtil, Util}
import org.twitter.zipkin.storage.cassandra.Repository
import scala.collection.JavaConverters._
object CassandraSpanStoreDefaults {
val KeyspaceName = Repository.KEYSPACE
val SpanTtl = 7.days
val IndexTtl = 3.days
val MaxTraceCols = 100000
val SpanCodec = new ScroogeThriftCodec[ThriftSpan](ThriftSpan)
}
abstract class CassandraSpanStore(
stats: StatsReceiver = DefaultStatsReceiver.scope("CassandraSpanStore"),
spanTtl: Duration = CassandraSpanStoreDefaults.SpanTtl,
indexTtl: Duration = CassandraSpanStoreDefaults.IndexTtl,
maxTraceCols: Int = CassandraSpanStoreDefaults.MaxTraceCols
) extends SpanStore with CollectAnnotationQueries {
/** Deferred as repository creates network connections */
protected def repository: Repository
private[this] val IndexDelimiter = ":"
private[this] val IndexDelimiterBytes = IndexDelimiter.getBytes
private[this] val spanCodec = CassandraSpanStoreDefaults.SpanCodec
/**
* Internal helper methods
*/
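  // Key formats (illustrative only): createSpanColumnName yields "<id>_<annotationsHash>_<binaryAnnotationsHash>",
  // and annotationKey("web", "http.uri", None) encodes the bytes of "web:http.uri"
  // (an optional value appends ":<value bytes>").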
private[this] def createSpanColumnName(span: Span): String =
"%d_%d_%d".format(span.id, span.annotations.hashCode, span.binaryAnnotations.hashCode)
private[this] def annotationKey(serviceName: String, annotation: String, value: Option[ByteBuffer]): ByteBuffer = {
ByteBuffer.wrap(
serviceName.getBytes ++ IndexDelimiterBytes ++ annotation.getBytes ++
value.map { v => IndexDelimiterBytes ++ Util.getArrayFromBuffer(v) }.getOrElse(Array()))
}
/**
* Stats
*/
private[this] val SpansStats = stats.scope("spans")
private[this] val SpansStoredCounter = SpansStats.counter("stored")
private[this] val SpansIndexedCounter = SpansStats.counter("indexed")
private[this] val IndexStats = stats.scope("index")
private[this] val IndexServiceNameCounter = IndexStats.counter("serviceName")
private[this] val IndexServiceNameNoNameCounter = IndexStats.scope("serviceName").counter("noName")
private[this] val IndexSpanNameCounter = IndexStats.scope("serviceName").counter("spanName")
private[this] val IndexSpanNameNoNameCounter = IndexStats.scope("serviceName").scope("spanName").counter("noName")
private[this] val IndexTraceStats = IndexStats.scope("trace")
private[this] val IndexTraceNoTimestampCounter = IndexTraceStats.counter("noTimestamp")
private[this] val IndexTraceByServiceNameCounter = IndexTraceStats.counter("serviceName")
private[this] val IndexTraceBySpanNameCounter = IndexTraceStats.counter("spanName")
private[this] val IndexTraceByDurationCounter = IndexTraceStats.counter("duration")
private[this] val IndexAnnotationCounter = IndexStats.scope("annotation").counter("standard")
private[this] val IndexBinaryAnnotationCounter = IndexStats.scope("annotation").counter("binary")
private[this] val IndexSpanNoTimestampCounter = IndexStats.scope("span").counter("noTimestamp")
private[this] val IndexSpanNoDurationCounter = IndexStats.scope("span").counter("noDuration")
private[this] val QueryStats = stats.scope("query")
private[this] val QueryGetSpansByTraceIdsStat = QueryStats.stat("getSpansByTraceIds")
private[this] val QueryGetServiceNamesCounter = QueryStats.counter("getServiceNames")
private[this] val QueryGetSpanNamesCounter = QueryStats.counter("getSpanNames")
private[this] val QueryGetTraceIdsByNameCounter = QueryStats.counter("getTraceIdsByName")
private[this] val QueryGetTraceIdsByAnnotationCounter = QueryStats.counter("getTraceIdsByAnnotation")
private[this] val QueryGetTraceIdsByDurationCounter = QueryStats.counter("getTraceIdsByDuration")
/**
* Internal indexing helpers
*/
private[this] def indexServiceName(span: Span): Future[Unit] = {
IndexServiceNameCounter.incr()
Future.join(span.serviceNames.toList map {
case "" =>
IndexServiceNameNoNameCounter.incr()
Future.value(())
case s =>
FutureUtil.toFuture(repository.storeServiceName(s, indexTtl.inSeconds))
})
}
private[this] def indexSpanNameByService(span: Span): Future[Unit] = {
if (span.name == "") {
IndexSpanNameNoNameCounter.incr()
Future.value(())
} else {
IndexSpanNameCounter.incr()
Future.join(
span.serviceNames.toSeq map { serviceName =>
FutureUtil.toFuture(repository.storeSpanName(serviceName, span.name, indexTtl.inSeconds))
})
}
}
private[this] def indexTraceIdByName(span: Span): Future[Unit] = {
if (span.timestamp.isEmpty)
IndexTraceNoTimestampCounter.incr()
span.timestamp map { timestamp =>
val serviceNames = span.serviceNames
Future.join(
serviceNames.toList map { serviceName =>
IndexTraceByServiceNameCounter.incr()
val storeFuture =
FutureUtil.toFuture(repository.storeTraceIdByServiceName(serviceName, timestamp, span.traceId, indexTtl.inSeconds))
if (span.name != "") {
IndexTraceBySpanNameCounter.incr()
Future.join(
storeFuture,
FutureUtil.toFuture(repository.storeTraceIdBySpanName(serviceName, span.name, timestamp, span.traceId, indexTtl.inSeconds)))
} else storeFuture
})
} getOrElse Future.value(())
}
private[this] def indexByAnnotations(span: Span): Future[Unit] = {
if (span.timestamp.isEmpty)
IndexSpanNoTimestampCounter.incr()
span.timestamp map { timestamp =>
val annotationsFuture = Future.join(
span.annotations
.groupBy(_.value)
.flatMap { case (_, as) =>
val a = as.min
a.host map { endpoint =>
IndexAnnotationCounter.incr()
FutureUtil.toFuture(
repository.storeTraceIdByAnnotation(
annotationKey(endpoint.serviceName, a.value, None), timestamp, span.traceId, indexTtl.inSeconds))
}
}.toList)
val binaryFuture = Future.join(span.binaryAnnotations flatMap { ba =>
ba.host map { endpoint =>
IndexBinaryAnnotationCounter.incr()
Future.join(
FutureUtil.toFuture(
repository.storeTraceIdByAnnotation(
annotationKey(endpoint.serviceName, ba.key, Some(ba.value)), timestamp, span.traceId, indexTtl.inSeconds)),
FutureUtil.toFuture(
repository.storeTraceIdByAnnotation(
annotationKey(endpoint.serviceName, ba.key, None), timestamp, span.traceId, indexTtl.inSeconds)))
}
})
Future.join(annotationsFuture, binaryFuture).map(_ => ())
} getOrElse Future.value(())
}
private[this] def indexByDuration(span: Span): Future[Unit] = {
(span.timestamp, span.duration) match {
case (Some(timestamp), Some(duration)) =>
Future.join(
span.serviceNames.toSeq.flatMap { serviceName =>
IndexTraceByDurationCounter.incr()
Seq(
repository.storeTraceIdByDuration(
serviceName, span.name, timestamp, duration, span.traceId, indexTtl.inSeconds),
repository.storeTraceIdByDuration(
serviceName, "", timestamp, duration, span.traceId, indexTtl.inSeconds)
)
}.map(FutureUtil.toFuture)
)
case (_, None) =>
IndexSpanNoDurationCounter.incr()
Future.value((): Unit)
case _ => Future.value((): Unit)
}
}
private[this] def getSpansByTraceIds(traceIds: Seq[Long], count: Int): Future[Seq[List[Span]]] = {
FutureUtil.toFuture(repository.getSpansByTraceIds(traceIds.toArray.map(Long.box), count))
.map { spansByTraceId =>
val spans =
spansByTraceId.asScala.mapValues { spans => spans.asScala.map(spanCodec.decode(_).toSpan) }
traceIds.flatMap(traceId => spans.get(traceId))
.map(MergeById)
.map(CorrectForClockSkew)
.map(ApplyTimestampAndDuration)
.sortBy(_.head)(Ordering[Span].reverse) // sort descending by the first span
}
}
/**
* API Implementation
*/
override def close() = repository.close()
override def apply(spans: Seq[Span]): Future[Unit] = {
SpansStoredCounter.incr(spans.size)
Future.join(
spans.map(s => s.copy(annotations = s.annotations.sorted))
.map(ApplyTimestampAndDuration.apply).map { span =>
SpansIndexedCounter.incr()
Future.join(
FutureUtil.toFuture(
repository.storeSpan(
span.traceId,
span.timestamp.getOrElse(0L),
createSpanColumnName(span),
spanCodec.encode(span.toThrift),
spanTtl.inSeconds)),
indexServiceName(span),
indexSpanNameByService(span),
indexTraceIdByName(span),
indexByAnnotations(span),
indexByDuration(span))
})
}
override def getTracesByIds(traceIds: Seq[Long]): Future[Seq[List[Span]]] = {
QueryGetSpansByTraceIdsStat.add(traceIds.size)
getSpansByTraceIds(traceIds, maxTraceCols)
}
override def getAllServiceNames(): Future[Seq[String]] = {
QueryGetServiceNamesCounter.incr()
FutureUtil.toFuture(repository.getServiceNames).map(_.asScala.toList.sorted)
}
override def getSpanNames(service: String): Future[Seq[String]] = {
QueryGetSpanNamesCounter.incr()
FutureUtil.toFuture(repository.getSpanNames(service)).map(_.asScala.toList.sorted)
}
override def getTraceIdsByName(
serviceName: String,
spanName: Option[String],
endTs: Long,
lookback: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = {
QueryGetTraceIdsByNameCounter.incr()
val traceIdsFuture = FutureUtil.toFuture(spanName match {
// if we have a span name, look up in the service + span name index
// if not, look up by service name only
      case Some(x: String) => repository.getTraceIdsBySpanName(serviceName, x, endTs * 1000, lookback * 1000, limit)
case None => repository.getTraceIdsByServiceName(serviceName, endTs * 1000, lookback * 1000, limit)
})
traceIdsFuture.map { traceIds =>
traceIds.asScala
.map { case (traceId, ts) => IndexedTraceId(traceId, timestamp = ts) }
.toSeq
}
}
override def getTraceIdsByAnnotation(
serviceName: String,
annotation: String,
value: Option[ByteBuffer],
endTs: Long,
lookback: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = {
QueryGetTraceIdsByAnnotationCounter.incr()
FutureUtil.toFuture(
repository
.getTraceIdsByAnnotation(annotationKey(serviceName, annotation, value), endTs * 1000, lookback * 1000, limit))
.map { traceIds =>
traceIds.asScala
.map { case (traceId, ts) => IndexedTraceId(traceId, timestamp = ts) }
.toSeq
}
}
override protected def getTraceIdsByDuration(
serviceName: String,
spanName: Option[String],
minDuration: Long,
maxDuration: Option[Long],
endTs: Long,
lookback: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = {
QueryGetTraceIdsByDurationCounter.incr()
FutureUtil.toFuture(
repository
.getTraceIdsByDuration(serviceName, spanName getOrElse "", minDuration, maxDuration getOrElse Long.MaxValue,
endTs * 1000, (endTs - lookback) * 1000, limit))
.map { traceIds =>
traceIds.asScala
.map { case (traceId, ts) => IndexedTraceId(traceId, timestamp = ts) }
.toSeq
}
}
}
| rocwzp/zipkin | zipkin-cassandra/src/main/scala/com/twitter/zipkin/storage/cassandra/CassandraSpanStore.scala | Scala | apache-2.0 | 12,646 |
package jp.ne.opt.redshiftfake
import org.scalatest.fixture
class FakeConnectionTest extends fixture.FlatSpec with H2Sandbox {
it should "convert and execute alter table add column with default and null to postgres equivalents" in { conn =>
val stmt = conn.createStatement()
stmt.execute("CREATE TABLE testDb(id int)")
stmt.execute("ALTER TABLE testDb ADD COLUMN loadTimestamp TIMESTAMP DEFAULT GETDATE() NOT NULL")
stmt.execute("INSERT INTO testDb (id) VALUES (55301)")
stmt.execute("SELECT id, loadTimestamp FROM testDb")
val rs = stmt.getResultSet
rs.next()
assert(rs.getInt("id") == 55301)
assert(rs.getTimestamp("loadTimestamp") != null)
}
}
| opt-tech/redshift-fake-driver | src/test/scala/jp/ne/opt/redshiftfake/FakeConnectionTest.scala | Scala | apache-2.0 | 692 |
import org.scalatest.{Matchers, FunSuite}
/** @version 1.2.0 */
class RomanNumeralsTest extends FunSuite with Matchers {
test("1 is a single I") {
RomanNumerals.roman(1) should be ("I")
}
test("2 is two I's") {
pending
RomanNumerals.roman(2) should be ("II")
}
test("3 is three I's") {
pending
RomanNumerals.roman(3) should be ("III")
}
test("4, being 5 - 1, is IV") {
pending
RomanNumerals.roman(4) should be ("IV")
}
test("5 is a single V") {
pending
RomanNumerals.roman(5) should be ("V")
}
test("6, being 5 + 1, is VI") {
pending
RomanNumerals.roman(6) should be ("VI")
}
test("9, being 10 - 1, is IX") {
pending
RomanNumerals.roman(9) should be ("IX")
}
test("20 is two X's") {
pending
RomanNumerals.roman(27) should be ("XXVII")
}
test("48 is not 50 - 2 but rather 40 + 8") {
pending
RomanNumerals.roman(48) should be ("XLVIII")
}
test("49 is not 40 + 5 + 4 but rather 50 - 10 + 10 - 1") {
pending
RomanNumerals.roman(49) should be ("XLIX")
}
test("50 is a single L") {
pending
RomanNumerals.roman(59) should be ("LIX")
}
test("90, being 100 - 10, is XC") {
pending
RomanNumerals.roman(93) should be ("XCIII")
}
test("100 is a single C") {
pending
RomanNumerals.roman(141) should be ("CXLI")
}
test("60, being 50 + 10, is LX") {
pending
RomanNumerals.roman(163) should be ("CLXIII")
}
test("400, being 500 - 100, is CD") {
pending
RomanNumerals.roman(402) should be ("CDII")
}
test("500 is a single D") {
pending
RomanNumerals.roman(575) should be ("DLXXV")
}
test("900, being 1000 - 100, is CM") {
pending
RomanNumerals.roman(911) should be ("CMXI")
}
test("1000 is a single M") {
pending
RomanNumerals.roman(1024) should be ("MXXIV")
}
test("3000 is three M's") {
pending
RomanNumerals.roman(3000) should be ("MMM")
}
} | ricemery/xscala | exercises/roman-numerals/src/test/scala/RomanNumeralsTest.scala | Scala | mit | 1,971 |
/*
* Model.scala
* Book example unit test support file.
*
* Created By: Michael Reposa ([email protected]), Avi Pfeffer ([email protected])
* Creation Date: Feb 26, 2016
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test.book.chap03
import com.cra.figaro.language.{Element, Constant, Flip, Universe}
import com.cra.figaro.library.compound.If
import com.cra.figaro.library.atomic.continuous.{Beta, AtomicBeta}
import com.cra.figaro.library.atomic.discrete.Binomial
import com.cra.figaro.algorithm.ProbQueryAlgorithm
import scala.collection.Map
class PriorParameters(dictionary: Dictionary) {
val spamProbability = Beta(2,3)
val wordGivenSpamProbabilities = dictionary.featureWords.map(word => (word, Beta(2,2)))
val wordGivenNormalProbabilities = dictionary.featureWords.map(word => (word, Beta(2,2)))
val hasManyUnusualWordsGivenSpamProbability = Beta(2,2)
val hasManyUnusualWordsGivenNormalProbability = Beta(2, 21)
val unusualWordGivenManyProbability = Beta(2,2)
val unusualWordGivenFewProbability = Beta(2,7)
val fullParameterList =
spamProbability ::
hasManyUnusualWordsGivenSpamProbability ::
hasManyUnusualWordsGivenNormalProbability ::
unusualWordGivenManyProbability ::
unusualWordGivenFewProbability ::
wordGivenSpamProbabilities.map(pair => pair._2) :::
wordGivenNormalProbabilities.map(pair => pair._2)
}
class LearnedParameters(
val spamProbability: Double,
val hasManyUnusualWordsGivenSpamProbability: Double,
val hasManyUnusualWordsGivenNormalProbability: Double,
val unusualWordGivenManyProbability: Double,
val unusualWordGivenFewProbability: Double,
val wordGivenSpamProbabilities: Map[String, Double],
val wordGivenNormalProbabilities: Map[String, Double]
)
abstract class Model(val dictionary: Dictionary) {
val isSpam: Element[Boolean]
val hasWordElements: List[(String, Element[Boolean])]
val hasManyUnusualWords: Element[Boolean]
val numUnusualWords: Element[Int]
}
class LearningModel(dictionary: Dictionary, parameters: PriorParameters) extends Model(dictionary) {
val isSpam = Flip(parameters.spamProbability)
val hasWordElements = {
val wordGivenSpamMap = Map(parameters.wordGivenSpamProbabilities:_*)
val wordGivenNormalMap = Map(parameters.wordGivenNormalProbabilities:_*)
for { word <- dictionary.featureWords } yield {
val givenSpamProbability = wordGivenSpamMap(word)
val givenNormalProbability = wordGivenNormalMap(word)
val hasWordIfSpam = Flip(givenSpamProbability)
val hasWordIfNormal = Flip(givenNormalProbability)
(word, If(isSpam, hasWordIfSpam, hasWordIfNormal))
}
}
val hasManyUnusualIfSpam = Flip(parameters.hasManyUnusualWordsGivenSpamProbability)
val hasManyUnusualIfNormal = Flip(parameters.hasManyUnusualWordsGivenNormalProbability)
val hasManyUnusualWords = If(isSpam, hasManyUnusualIfSpam, hasManyUnusualIfNormal)
val numUnusualIfHasMany = Binomial(Model.binomialNumTrials, parameters.unusualWordGivenManyProbability)
val numUnusualIfHasFew = Binomial(Model.binomialNumTrials, parameters.unusualWordGivenFewProbability)
val numUnusualWords = If(hasManyUnusualWords, numUnusualIfHasMany, numUnusualIfHasFew)
}
class ReasoningModel(dictionary: Dictionary, parameters: LearnedParameters) extends Model(dictionary) {
val isSpam = Flip(parameters.spamProbability)
val hasWordElements = {
for { word <- dictionary.featureWords } yield {
val givenSpamProbability = parameters.wordGivenSpamProbabilities(word)
val givenNormalProbability = parameters.wordGivenNormalProbabilities(word)
val hasWordIfSpam = Flip(givenSpamProbability)
val hasWordIfNormal = Flip(givenNormalProbability)
(word, If(isSpam, hasWordIfSpam, hasWordIfNormal))
}
}
val hasManyUnusualIfSpam = Flip(parameters.hasManyUnusualWordsGivenSpamProbability)
val hasManyUnusualIfNormal = Flip(parameters.hasManyUnusualWordsGivenNormalProbability)
val hasManyUnusualWords = If(isSpam, hasManyUnusualIfSpam, hasManyUnusualIfNormal)
val numUnusualIfHasMany = Binomial(Model.binomialNumTrials, parameters.unusualWordGivenManyProbability)
val numUnusualIfHasFew = Binomial(Model.binomialNumTrials, parameters.unusualWordGivenFewProbability)
val numUnusualWords = If(hasManyUnusualWords, numUnusualIfHasMany, numUnusualIfHasFew)
}
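/** Shared model constants; `binomialNumTrials` caps the modeled count of unusual words at 0 to 20. */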
object Model {
val binomialNumTrials = 20
}
| scottcb/figaro | Figaro/src/test/scala/com/cra/figaro/test/book/chap03/Model.scala | Scala | bsd-3-clause | 4,621 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.maestro.core.codec
import scalaz._, Scalaz._
import au.com.cba.omnia.maestro.core.data.Field
/** Tags a row of cells with the column names for each cell. */
case class Tag[A](run: List[String] => String \/ List[(String, Field[A, _])])
/** [[Tag]] Companion object. */
object Tag {
/** Creates a [[Tag]] from a list of [[Field]]. */
def fromFields[A](fields: => List[Field[A, _]]): Tag[A] = {
// Keep the field length to avoid having to calculate this for each row.
val fieldsLength = fields.length
Tag(row => {
if (row.length < fieldsLength)
s"Not enough cells in the row. Got ${row.length} expected ${fields.length}.".left
else if (row.length > fieldsLength)
s"Too many cells in the row. Got ${row.length} expected ${fields.length}.".left
else
(row zip fields).right
})
}
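  // Illustrative sketch only (assumes `fields: List[Field[A, _]]` for some struct A with two columns):
  //   val tagger = Tag.fromFields[A](fields)
  //   tagger.run(List("1", "bob"))  // right: each cell paired with its Field
  //   tagger.run(List("1"))         // left: "Not enough cells in the row. ..."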
/** Tag the cells in the specified row with its column names. */
  def tag[A : Tag](row: List[String]): String \/ List[(String, Field[A, _])] =
Tag.of[A].run(row)
/** Gets the [[Tag]] type class instance for `A`. */
def of[A : Tag]: Tag[A] =
implicitly[Tag[A]]
}
| toddmowen/maestro | maestro-core/src/main/scala/au/com/cba/omnia/maestro/core/codec/Tag.scala | Scala | apache-2.0 | 1,773 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.bucketing
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight, BuildSide}
import org.apache.spark.sql.catalyst.plans.Inner
import org.apache.spark.sql.execution.{BinaryExecNode, FileSourceScanExec, SparkPlan}
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, InMemoryFileIndex, PartitionSpec}
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, ShuffledHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils}
import org.apache.spark.sql.types.{IntegerType, StructType}
class CoalesceBucketsInJoinSuite extends SQLTestUtils with SharedSparkSession {
private val SORT_MERGE_JOIN = "sortMergeJoin"
private val SHUFFLED_HASH_JOIN = "shuffledHashJoin"
private val BROADCAST_HASH_JOIN = "broadcastHashJoin"
case class RelationSetting(
cols: Seq[Attribute],
numBuckets: Int,
expectedCoalescedNumBuckets: Option[Int])
object RelationSetting {
def apply(numBuckets: Int, expectedCoalescedNumBuckets: Option[Int]): RelationSetting = {
val cols = Seq(AttributeReference("i", IntegerType)())
RelationSetting(cols, numBuckets, expectedCoalescedNumBuckets)
}
}
case class JoinSetting(
leftKeys: Seq[Attribute],
rightKeys: Seq[Attribute],
leftRelation: RelationSetting,
rightRelation: RelationSetting,
joinOperator: String,
shjBuildSide: Option[BuildSide])
object JoinSetting {
def apply(
l: RelationSetting,
r: RelationSetting,
joinOperator: String = SORT_MERGE_JOIN,
shjBuildSide: Option[BuildSide] = None): JoinSetting = {
JoinSetting(l.cols, r.cols, l, r, joinOperator, shjBuildSide)
}
}
private def newFileSourceScanExec(setting: RelationSetting): FileSourceScanExec = {
val relation = HadoopFsRelation(
location = new InMemoryFileIndex(spark, Nil, Map.empty, None),
partitionSchema = PartitionSpec.emptySpec.partitionColumns,
dataSchema = StructType.fromAttributes(setting.cols),
bucketSpec = Some(BucketSpec(setting.numBuckets, setting.cols.map(_.name), Nil)),
fileFormat = new ParquetFileFormat(),
options = Map.empty)(spark)
FileSourceScanExec(relation, setting.cols, relation.dataSchema, Nil, None, None, Nil, None)
}
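  // Builds a join of the given operator over the two relations, applies CoalesceBucketsInJoin,
  // and checks which side (if any) ends up with optionalNumCoalescedBuckets set. For join types
  // other than shuffled hash join the sides are also swapped and re-verified.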
private def run(setting: JoinSetting): Unit = {
val swappedSetting = setting.copy(
leftKeys = setting.rightKeys,
rightKeys = setting.leftKeys,
leftRelation = setting.rightRelation,
rightRelation = setting.leftRelation)
val settings = if (setting.joinOperator != SHUFFLED_HASH_JOIN) {
Seq(setting, swappedSetting)
} else {
Seq(setting)
}
settings.foreach { s =>
val lScan = newFileSourceScanExec(s.leftRelation)
val rScan = newFileSourceScanExec(s.rightRelation)
val join = if (s.joinOperator == SORT_MERGE_JOIN) {
SortMergeJoinExec(s.leftKeys, s.rightKeys, Inner, None, lScan, rScan)
} else if (s.joinOperator == SHUFFLED_HASH_JOIN) {
ShuffledHashJoinExec(s.leftKeys, s.rightKeys, Inner, s.shjBuildSide.get, None, lScan, rScan)
} else {
BroadcastHashJoinExec(
s.leftKeys, s.rightKeys, Inner, BuildLeft, None, lScan, rScan)
}
val plan = CoalesceBucketsInJoin(join)
def verify(expected: Option[Int], subPlan: SparkPlan): Unit = {
val coalesced = subPlan.collect {
case f: FileSourceScanExec if f.optionalNumCoalescedBuckets.nonEmpty =>
f.optionalNumCoalescedBuckets.get
}
if (expected.isDefined) {
assert(coalesced.size == 1 && coalesced.head == expected.get)
} else {
assert(coalesced.isEmpty)
}
}
verify(s.leftRelation.expectedCoalescedNumBuckets, plan.asInstanceOf[BinaryExecNode].left)
verify(s.rightRelation.expectedCoalescedNumBuckets, plan.asInstanceOf[BinaryExecNode].right)
}
}
test("bucket coalescing - basic") {
withSQLConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED.key -> "true") {
run(JoinSetting(
RelationSetting(4, None), RelationSetting(8, Some(4)), joinOperator = SORT_MERGE_JOIN))
run(JoinSetting(
RelationSetting(4, None), RelationSetting(8, Some(4)), joinOperator = SHUFFLED_HASH_JOIN,
shjBuildSide = Some(BuildLeft)))
}
withSQLConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED.key -> "false") {
run(JoinSetting(
RelationSetting(4, None), RelationSetting(8, None), joinOperator = SORT_MERGE_JOIN))
run(JoinSetting(
RelationSetting(4, None), RelationSetting(8, None), joinOperator = SHUFFLED_HASH_JOIN,
shjBuildSide = Some(BuildLeft)))
}
}
test("bucket coalescing should work only for sort merge join and shuffled hash join") {
Seq(true, false).foreach { enabled =>
withSQLConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED.key -> enabled.toString) {
run(JoinSetting(
RelationSetting(4, None), RelationSetting(8, None), joinOperator = BROADCAST_HASH_JOIN))
}
}
}
test("bucket coalescing shouldn't be applied to shuffled hash join build side") {
withSQLConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED.key -> "true") {
run(JoinSetting(
RelationSetting(4, None), RelationSetting(8, None), joinOperator = SHUFFLED_HASH_JOIN,
shjBuildSide = Some(BuildRight)))
}
}
  test("bucket coalescing shouldn't be applied when the number of buckets is the same") {
withSQLConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED.key -> "true") {
run(JoinSetting(
RelationSetting(8, None), RelationSetting(8, None), joinOperator = SORT_MERGE_JOIN))
run(JoinSetting(
RelationSetting(8, None), RelationSetting(8, None), joinOperator = SHUFFLED_HASH_JOIN,
shjBuildSide = Some(BuildLeft)))
}
}
test("number of bucket is not divisible by other number of bucket") {
withSQLConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED.key -> "true") {
run(JoinSetting(
RelationSetting(3, None), RelationSetting(8, None), joinOperator = SORT_MERGE_JOIN))
run(JoinSetting(
RelationSetting(3, None), RelationSetting(8, None), joinOperator = SHUFFLED_HASH_JOIN,
shjBuildSide = Some(BuildLeft)))
}
}
test("the ratio of the number of buckets is greater than max allowed") {
withSQLConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED.key -> "true",
SQLConf.COALESCE_BUCKETS_IN_JOIN_MAX_BUCKET_RATIO.key -> "2") {
run(JoinSetting(
RelationSetting(4, None), RelationSetting(16, None), joinOperator = SORT_MERGE_JOIN))
run(JoinSetting(
RelationSetting(4, None), RelationSetting(16, None), joinOperator = SHUFFLED_HASH_JOIN,
shjBuildSide = Some(BuildLeft)))
}
}
test("join keys should match with output partitioning") {
withSQLConf(SQLConf.COALESCE_BUCKETS_IN_JOIN_ENABLED.key -> "true") {
val lCols = Seq(
AttributeReference("l1", IntegerType)(),
AttributeReference("l2", IntegerType)())
val rCols = Seq(
AttributeReference("r1", IntegerType)(),
AttributeReference("r2", IntegerType)())
val lRel = RelationSetting(lCols, 4, None)
val rRel = RelationSetting(rCols, 8, None)
// The following should not be coalesced because join keys do not match with output
// partitioning (missing one expression).
run(JoinSetting(
leftKeys = Seq(lCols.head),
rightKeys = Seq(rCols.head),
leftRelation = lRel,
rightRelation = rRel,
joinOperator = SORT_MERGE_JOIN,
shjBuildSide = None))
run(JoinSetting(
leftKeys = Seq(lCols.head),
rightKeys = Seq(rCols.head),
leftRelation = lRel,
rightRelation = rRel,
joinOperator = SHUFFLED_HASH_JOIN,
shjBuildSide = Some(BuildLeft)))
// The following should not be coalesced because join keys do not match with output
// partitioning (more expressions).
run(JoinSetting(
leftKeys = lCols :+ AttributeReference("l3", IntegerType)(),
rightKeys = rCols :+ AttributeReference("r3", IntegerType)(),
leftRelation = lRel,
rightRelation = rRel,
joinOperator = SORT_MERGE_JOIN,
shjBuildSide = None))
run(JoinSetting(
leftKeys = lCols :+ AttributeReference("l3", IntegerType)(),
rightKeys = rCols :+ AttributeReference("r3", IntegerType)(),
leftRelation = lRel,
rightRelation = rRel,
joinOperator = SHUFFLED_HASH_JOIN,
shjBuildSide = Some(BuildLeft)))
// The following will be coalesced since ordering should not matter because it will be
// adjusted in `EnsureRequirements`.
run(JoinSetting(
leftKeys = lCols.reverse,
rightKeys = rCols.reverse,
leftRelation = lRel,
rightRelation = RelationSetting(rCols, 8, Some(4)),
joinOperator = SORT_MERGE_JOIN,
shjBuildSide = None))
run(JoinSetting(
leftKeys = lCols.reverse,
rightKeys = rCols.reverse,
leftRelation = lRel,
rightRelation = RelationSetting(rCols, 8, Some(4)),
joinOperator = SHUFFLED_HASH_JOIN,
shjBuildSide = Some(BuildLeft)))
run(JoinSetting(
leftKeys = rCols.reverse,
rightKeys = lCols.reverse,
leftRelation = RelationSetting(rCols, 8, Some(4)),
rightRelation = lRel,
joinOperator = SHUFFLED_HASH_JOIN,
shjBuildSide = Some(BuildRight)))
}
}
test("FileSourceScanExec's metadata should be updated with coalesced info") {
val scan = newFileSourceScanExec(RelationSetting(8, None))
val value = scan.metadata("SelectedBucketsCount")
assert(value === "8 out of 8")
val scanWithCoalescing = scan.copy(optionalNumCoalescedBuckets = Some(4))
val valueWithCoalescing = scanWithCoalescing.metadata("SelectedBucketsCount")
assert(valueWithCoalescing == "8 out of 8 (Coalesced to 4)")
}
}
| maropu/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/bucketing/CoalesceBucketsInJoinSuite.scala | Scala | apache-2.0 | 11,084 |
package xsbti
import java.io.File
import scala.collection.mutable.ArrayBuffer
import xsbti.api.SourceAPI
import xsbti.DependencyContext._
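/** Test-only `AnalysisCallback` that simply records the source/binary dependencies, generated
  * products, used names and source APIs reported by the incremental compiler so tests can
  * assert on them. */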
class TestCallback(override val nameHashing: Boolean, override val includeSynthToNameHashing: Boolean) extends AnalysisCallback
{
val sourceDependencies = new ArrayBuffer[(File, File, DependencyContext)]
val binaryDependencies = new ArrayBuffer[(File, String, File, DependencyContext)]
val products = new ArrayBuffer[(File, File, String)]
val usedNames = scala.collection.mutable.Map.empty[File, Set[String]].withDefaultValue(Set.empty)
val apis: scala.collection.mutable.Map[File, SourceAPI] = scala.collection.mutable.Map.empty
def sourceDependency(dependsOn: File, source: File, inherited: Boolean): Unit = {
val context = if(inherited) DependencyByInheritance else DependencyByMemberRef
sourceDependency(dependsOn, source, context)
}
def sourceDependency(dependsOn: File, source: File, context: DependencyContext): Unit = { sourceDependencies += ((dependsOn, source, context)) }
def binaryDependency(binary: File, name: String, source: File, inherited: Boolean): Unit = {
val context = if(inherited) DependencyByInheritance else DependencyByMemberRef
binaryDependency(binary, name, source, context)
}
def binaryDependency(binary: File, name: String, source: File, context: DependencyContext): Unit = { binaryDependencies += ((binary, name, source, context)) }
def generatedClass(source: File, module: File, name: String): Unit = { products += ((source, module, name)) }
def usedName(source: File, name: String): Unit = { usedNames(source) += name }
def api(source: File, sourceAPI: SourceAPI): Unit = {
assert(!apis.contains(source), s"The `api` method should be called once per source file: $source")
apis(source) = sourceAPI
}
def problem(category: String, pos: xsbti.Position, message: String, severity: xsbti.Severity, reported: Boolean): Unit = ()
}
| som-snytt/xsbt | interface/src/test/scala/xsbti/TestCallback.scala | Scala | bsd-3-clause | 1,929 |
trait Foo[T <: Foo[T, Enum], Enum <: Enumeration] {
type StV = Enum#Value
type Meta = MegaFoo[T, Enum]
type Slog <: Enumeration
def getSingleton: Meta
}
trait MegaFoo[T <: Foo[T, Enum], Enum <: Enumeration] extends Foo[T, Enum] {
def doSomething(what: T, misc: StV, dog: Meta#Event) = None
abstract class Event
object Event
def stateEnumeration: Slog
def se2: Enum
}
object E extends Enumeration {
val A = Value
val B = Value
}
class RFoo extends Foo[RFoo, E.type] {
def getSingleton = MegaRFoo
type Slog = E.type
}
object MegaRFoo extends RFoo with MegaFoo[RFoo, E.type] {
def stateEnumeration = E
def se2 = E
}
| loskutov/intellij-scala | testdata/scalacTests/pos/t1292.scala | Scala | apache-2.0 | 651 |
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Wed Feb 20 17:39:57 EST 2013
* @see LICENSE (MIT style license file).
*/
package scalation.analytics
import scalation.linalgebra.{MatrixD, VectoD, VectorD}
import scalation.math.double_exp
import scalation.plot.Plot
import scalation.util.{Error, time}
import RegTechnique._
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `PolyRegression` class supports polynomial regression. In this case,
* 't' is expanded to [1, t, t^2 ... t^k]. Fit the parameter vector 'b' in the
* regression equation
* <p>
* y = b dot x + e = b_0 + b_1 * t + b_2 * t^2 ... b_k * t^k + e
* <p>
* where 'e' represents the residuals (the part not explained by the model).
* Use Least-Squares (minimizing the residuals) to fit the parameter vector
* <p>
* b = x_pinv * y
* <p>
* where 'x_pinv' is the pseudo-inverse.
* @see www.ams.sunysb.edu/~zhu/ams57213/Team3.pptx
* @param t the input vector: t_i expands to x_i = [1, t_i, t_i^2, ... t_i^k]
* @param y the response vector
* @param k the order of the polynomial
* @param technique the technique used to solve for b in x.t*x*b = x.t*y
*/
class PolyRegression (t: VectorD, y: VectorD, k: Int, technique: RegTechnique = QR)
extends Predictor with Error
{
if (t.dim != y.dim) flaw ("constructor", "dimensions of t and y are incompatible")
if (t.dim <= k) flaw ("constructor", "not enough data points for the given order (k)")
val x = new MatrixD (t.dim, 1 + k) // design matrix built from t
for (i <- 0 until t.dim) x(i) = expand (t(i))
val rg = new Regression (x, y, technique) // regular multiple linear regression
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Expand the scalar 't' into a vector of powers of 't': [1, t, t^2 ... t^k].
* @param t the scalar to expand into the vector
*/
def expand (t: Double): VectorD =
{
val v = new VectorD (1 + k)
for (j <- 0 to k) v(j) = t~^j
v
} // expand
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Train the predictor by fitting the parameter vector (b-vector) in the
* regression equation
* y = b dot x + e = [b_0, ... b_k] dot [1, t, t^2 ... t^k] + e
* using the least squares method.
*/
def train () { rg.train () }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Retrain the predictor by fitting the parameter vector (b-vector) in the
* multiple regression equation
* yy = b dot x + e = [b_0, ... b_k] dot [1, t, t^2 ... t^k] + e
* using the least squares method.
* @param yy the new response vector
*/
def train (yy: VectorD) { rg.train (yy) }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the quality of fit including 'rSquared'.
*/
def fit: VectorD = rg.fit
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the vector of residuals/errors.
*/
override def residual: VectoD = rg.residual
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Predict the value of y = f(z) by evaluating the formula y = b dot expand (z),
* e.g., (b_0, b_1, b_2) dot (1, z, z^2).
* @param z the new scalar to predict
*/
def predict (z: Double): Double = rg.predict (expand (z))
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Predict the value of y = f(z) by evaluating the formula y = b dot z,
* e.g., (b_0, b_1, b_2) dot (1, z_1, z_2).
* @param z the new vector to predict
*/
def predict (z: VectoD): Double = rg.predict (z)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Perform backward elimination to remove the least predictive variable
* from the model, returning the variable to eliminate, the new parameter
* vector, the new R-squared value and the new F statistic.
*/
def backElim (): Tuple3 [Int, VectoD, VectorD] = rg.backElim ()
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the Variance Inflation Factor (VIF) for each variable to test
* for multi-collinearity by regressing 'xj' against the rest of the variables.
* A VIF over 10 indicates that over 90% of the variance of 'xj' can be predicted
* from the other variables, so 'xj' is a candidate for removal from the model.
*/
def vif: VectorD = rg.vif
} // PolyRegression class
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `PolyRegressionTest` object tests `PolyRegression` class using the following
* regression equation.
* <p>
* y = b dot x = b_0 + b_1*t + b_2*t^2.
* <p>
*/
object PolyRegressionTest extends App
{
import scalation.random.Normal
val noise = Normal (0.0, 500.0)
val t = VectorD.range (0, 100)
val y = new VectorD (t.dim)
for (i <- 0 until 100) y(i) = 10.0 - 10.0 * i + i~^2 + noise.gen
println ("t = " + t)
println ("y = " + y)
val order = 8
val prg = new PolyRegression (t, y, order)
prg.train ()
println ("fit = " + prg.fit)
val z = 10.5 // predict y for one point
val yp = prg.predict (z)
println ("predict (" + z + ") = " + yp)
} // PolyRegressionTest object
| NBKlepp/fda | scalation_1.2/src/main/scala/scalation/analytics/PolyRegression.scala | Scala | mit | 5,725 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.experiments.distributeservice
import scala.concurrent.Await
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestProbe}
import org.scalatest.{BeforeAndAfter, Matchers, WordSpec}
import org.apache.gearpump.cluster.AppMasterToMaster.{GetAllWorkers, RegisterAppMaster, RequestResource}
import org.apache.gearpump.cluster.AppMasterToWorker.LaunchExecutor
import org.apache.gearpump.cluster.MasterToAppMaster.{AppMasterRegistered, ResourceAllocated, WorkerList}
import org.apache.gearpump.cluster.appmaster.AppMasterRuntimeEnvironment
import org.apache.gearpump.cluster.scheduler.{Relaxation, Resource, ResourceAllocation, ResourceRequest}
import org.apache.gearpump.cluster.worker.WorkerId
import org.apache.gearpump.cluster.{AppDescription, AppMasterContext, TestUtil, UserConfig}
import org.apache.gearpump.experiments.distributeservice.DistServiceAppMaster.{FileContainer, GetFileContainer}
import org.apache.gearpump.util.ActorSystemBooter.RegisterActorSystem
import org.apache.gearpump.util.ActorUtil
class DistServiceAppMasterSpec extends WordSpec with Matchers with BeforeAndAfter {
implicit val system = ActorSystem("AppMasterSpec", TestUtil.DEFAULT_CONFIG)
val mockMaster = TestProbe()(system)
val mockWorker1 = TestProbe()(system)
val client = TestProbe()(system)
val masterProxy = mockMaster.ref
val appId = 0
val userName = "test"
val masterExecutorId = 0
val workerList = List(WorkerId(1, 0L), WorkerId(2, 0L), WorkerId(3, 0L))
val resource = Resource(1)
val appJar = None
val appDescription = AppDescription("app0", classOf[DistServiceAppMaster].getName,
UserConfig.empty)
"DistService AppMaster" should {
"responsable for service distributing" in {
val appMasterContext = AppMasterContext(appId, userName, resource, null, appJar, masterProxy)
TestActorRef[DistServiceAppMaster](
AppMasterRuntimeEnvironment.props(List(masterProxy.path), appDescription,
appMasterContext))
val registerAppMaster = mockMaster.receiveOne(15.seconds)
assert(registerAppMaster.isInstanceOf[RegisterAppMaster])
val appMaster = registerAppMaster.asInstanceOf[RegisterAppMaster].appMaster
mockMaster.reply(AppMasterRegistered(appId))
      // The DistService AppMaster will ask for the worker list
mockMaster.expectMsg(GetAllWorkers)
mockMaster.reply(WorkerList(workerList))
      // After the worker list is ready, the DistService AppMaster will request resources on each worker
workerList.foreach { workerId =>
mockMaster.expectMsg(RequestResource(appId, ResourceRequest(Resource(1), workerId,
relaxation = Relaxation.SPECIFICWORKER)))
}
mockMaster.reply(ResourceAllocated(Array(ResourceAllocation(resource, mockWorker1.ref,
WorkerId(1, 0L)))))
mockWorker1.expectMsgClass(classOf[LaunchExecutor])
mockWorker1.reply(RegisterActorSystem(ActorUtil.getSystemAddress(system).toString))
appMaster.tell(GetFileContainer, client.ref)
client.expectMsgClass(15.seconds, classOf[FileContainer])
}
}
after {
system.terminate()
Await.result(system.whenTerminated, Duration.Inf)
}
}
| manuzhang/incubator-gearpump | examples/distributeservice/src/test/scala/org/apache/gearpump/experiments/distributeservice/DistServiceAppMasterSpec.scala | Scala | apache-2.0 | 4,033 |
package us.illyohs.civilmagiks.common.entity
import net.minecraftforge.fml.common.registry.EntityRegistry
import us.illyohs.civilmagiks.common.core.CivilMagicks
import us.illyohs.civilmagiks.common.entity.mics.EntitySigil
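/** Registers this mod's entities with Forge's `EntityRegistry`; currently only the sigil entity. */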
object ModEnitites {
  def init(): Unit = {
EntityRegistry.registerModEntity(classOf[EntitySigil], "sigil", 1, CivilMagicks.instance, 64, 10, true)
}
}
| Illyohs/CivilMagicks | src/main/scala/us/illyohs/civilmagiks/common/entity/ModEnitites.scala | Scala | bsd-2-clause | 384 |
package org.etl.dsl.singlethread
import org.etl.server.ProcessAST
import org.junit.Test
import org.etl.server.ProcessExecutor
import java.nio.charset.StandardCharsets._
import java.nio.file.{Files, Paths}
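/** Loads the `sample.json` process definition from the test classpath and runs it through the
  * single-threaded process executor, printing (rather than rethrowing) any raised exception. */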
class DoozleTest {
  @Test def runProcess(): Unit = {
val instanceName = "test.doozle.process#1"
val filePath = this.getClass.getResource("sample.json").getPath.toString
val osAppropriatePath = if (System.getProperty( "os.name" ).contains( "indow" )) filePath.substring(1) else filePath
val json = new String(Files.readAllBytes(Paths.get(osAppropriatePath)), UTF_8)
val runtimeContext = ProcessAST.loadProcessAST(instanceName,json)
try {
val runtime = ProcessExecutor.execute("org.etl.process.onethread", runtimeContext)
} catch {
case ex: Throwable => {
ex.printStackTrace()
}
} finally {
}
}
/* @Test def relativePath {
System.currentTimeMillis()
val url = this.getClass.getClassLoader.getResource("auth/Dhuruva-BMS-fa750e3a8102.p12")
println(url)
}*/
} | jpvelsamy/sparrow | sparrow-server/src/test/scala/org/etl/dsl/singlethread/DoozleAction.scala | Scala | apache-2.0 | 1,068 |
package gg.uhc.hosts
import gg.uhc.hosts.database.{AlertRuleRow, MatchRow}
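/**
 * Matches alert rules against match rows: a rule targets a single field ("ip", "address",
 * "hosting name", "content" or "tags") and is checked case-insensitively, either exactly or as a
 * substring, returning the violating text when one is found.
 */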
object Alerts {
val allAlertFields: List[String] = "ip" :: "address" :: "hosting name" :: "content" :: "tags" :: Nil
implicit class MatchRowAlertExtensions(m: MatchRow) {
def matchesRule(rule: AlertRuleRow): Boolean = getViolatingText(rule).isDefined
def getViolatingText(rule: AlertRuleRow): Option[String] = findViolationText(rule, m)
}
implicit class AlertRuleRowAlertExtensions(rule: AlertRuleRow) {
def matchesRow(m: MatchRow): Boolean = getViolationText(m).isDefined
def getViolationText(m: MatchRow): Option[String] = findViolationText(rule, m)
}
def findViolationText(rule: AlertRuleRow, m: MatchRow): Option[String] = {
val listToSearch: List[String] = rule.field match {
case "tags" => m.tags
case "ip" => m.ip.toList
case "address" => m.address.toList
case "hosting name" => m.hostingName.toList
case "content" => List(m.content)
}
val alertOn = rule.alertOn.toLowerCase
if (rule.exact) {
listToSearch.find(item => item.toLowerCase == alertOn)
} else {
listToSearch.find(item => item.toLowerCase.contains(alertOn))
}
}
}
| Eluinhost/hosts.uhc.gg | src/main/scala/gg/uhc/hosts/Alerts.scala | Scala | mit | 1,263 |
package slinky.core
import slinky.core.facade.{ErrorBoundaryInfo, PrivateComponentClass, React, ReactElement}
import slinky.readwrite.{Reader, Writer}
import scala.scalajs.js
import scala.scalajs.js.annotation.JSName
abstract class DefinitionBase[Props, State, Snapshot](jsProps: js.Object) extends React.Component(jsProps) {
import DefinitionBase._
  // we extract the props/state reader/writer from the _base value defined on the constructor;
// see componentConstructor in BaseComponentWrapper
@inline final private[this] def stateReader: Reader[State] =
this.asInstanceOf[js.Dynamic]._base._stateReader.asInstanceOf[Reader[State]]
@inline final private[slinky] def stateWriter: Writer[State] =
this.asInstanceOf[js.Dynamic]._base._stateWriter.asInstanceOf[Writer[State]]
@inline final private[this] def propsReader: Reader[Props] =
this.asInstanceOf[js.Dynamic]._base._propsReader.asInstanceOf[Reader[Props]]
def initialState: State
if (!js.isUndefined(this.asInstanceOf[js.Dynamic]._base)) {
this.asInstanceOf[js.Dynamic].state = {
val initialStateValue = this.asInstanceOf[DefinitionBase[_, _, _]].initialState
val stateWithExtraApplyFix = (if (this.asInstanceOf[js.Dynamic]._base.needsExtraApply.asInstanceOf[Boolean]) {
initialStateValue.asInstanceOf[js.Function0[State]].apply()
} else initialStateValue).asInstanceOf[State]
if (BaseComponentWrapper.scalaComponentWritingEnabled) {
DefinitionBase.writeWithWrappingAdjustment(this.asInstanceOf[DefinitionBase[_, State, _]].stateWriter)(
stateWithExtraApplyFix
)
} else js.Dynamic.literal(__ = stateWithExtraApplyFix.asInstanceOf[js.Any])
}
}
@inline final private[slinky] def readPropsValue(value: js.Object): Props = readValue(value, propsReader)
@inline final private[slinky] def readStateValue(value: js.Object): State = readValue(value, stateReader)
@JSName("props_scala")
@inline final def props: Props =
readPropsValue(this.asInstanceOf[PrivateComponentClass].propsR)
@JSName("state_scala")
@inline final def state: State =
readStateValue(this.asInstanceOf[PrivateComponentClass].stateR)
@JSName("setState_scala_1")
@inline final def setState(s: State): Unit = {
val stateObject = if (BaseComponentWrapper.scalaComponentWritingEnabled) {
writeWithWrappingAdjustment(stateWriter)(s)
} else js.Dynamic.literal(__ = s.asInstanceOf[js.Any])
this.asInstanceOf[PrivateComponentClass].setStateR(stateObject)
}
@JSName("setState_scala_2")
@inline final def setState(fn: State => State): Unit =
this.asInstanceOf[PrivateComponentClass].setStateR { (ps: js.Object) =>
val s = fn(readStateValue(ps))
if (BaseComponentWrapper.scalaComponentWritingEnabled) {
writeWithWrappingAdjustment(stateWriter)(s)
} else js.Dynamic.literal(__ = s.asInstanceOf[js.Any])
}
@JSName("setState_scala_3")
@inline final def setState(fn: (State, Props) => State): Unit =
this.asInstanceOf[PrivateComponentClass].setStateR { (ps: js.Object, p: js.Object) =>
val s = fn(readStateValue(ps), readPropsValue(p))
if (BaseComponentWrapper.scalaComponentWritingEnabled) {
writeWithWrappingAdjustment(stateWriter)(s)
} else js.Dynamic.literal(__ = s.asInstanceOf[js.Any])
}
@JSName("setState_scala_4")
@inline final def setState(s: State, callback: () => Unit): Unit = {
val stateObject = if (BaseComponentWrapper.scalaComponentWritingEnabled) {
writeWithWrappingAdjustment(stateWriter)(s)
} else js.Dynamic.literal(__ = s.asInstanceOf[js.Any])
this.asInstanceOf[PrivateComponentClass].setStateR(stateObject, callback)
}
@JSName("setState_scala_5")
@inline final def setState(fn: State => State, callback: () => Unit): Unit =
this
.asInstanceOf[PrivateComponentClass]
.setStateR(
(ps: js.Object) => {
val s = fn(readStateValue(ps))
if (BaseComponentWrapper.scalaComponentWritingEnabled) {
writeWithWrappingAdjustment(stateWriter)(s)
} else js.Dynamic.literal(__ = s.asInstanceOf[js.Any])
},
callback
)
@JSName("setState_scala_6")
@inline final def setState(fn: (State, Props) => State, callback: () => Unit): Unit =
this
.asInstanceOf[PrivateComponentClass]
.setStateR(
(ps: js.Object, p: js.Object) => {
val s = fn(readStateValue(ps), readPropsValue(p))
if (BaseComponentWrapper.scalaComponentWritingEnabled) {
writeWithWrappingAdjustment(stateWriter)(s)
} else js.Dynamic.literal(__ = s.asInstanceOf[js.Any])
},
callback
)
def componentWillMount(): Unit = {}
def componentDidMount(): Unit = {}
def componentWillReceiveProps(nextProps: Props): Unit = {}
def shouldComponentUpdate(nextProps: Props, nextState: State): Boolean = true
def componentWillUpdate(nextProps: Props, nextState: State): Unit = {}
def getSnapshotBeforeUpdate(prevProps: Props, prevState: State): Snapshot = null.asInstanceOf[Snapshot]
def componentDidUpdate(prevProps: Props, prevState: State): Unit = {}
def componentDidUpdate(prevProps: Props, prevState: State, snapshot: Snapshot): Unit =
this
.asInstanceOf[js.Dynamic]
.componentDidUpdateScala(prevProps.asInstanceOf[js.Any], prevState.asInstanceOf[js.Any])
.asInstanceOf[Unit]
def componentWillUnmount(): Unit = {}
def componentDidCatch(error: js.Error, info: ErrorBoundaryInfo): Unit = {}
def render(): ReactElement
}
object DefinitionBase {
private[slinky] val defaultBase = new DefinitionBase[Unit, Unit, Unit](null) {
override def initialState: Unit = ()
override def render(): ReactElement = null
}.asInstanceOf[js.Dynamic]
@inline final private[slinky] def readValue[P](value: js.Object, propsReader: => Reader[P]): P =
if (js.typeOf(value) == "object" && value.hasOwnProperty("__")) {
value.asInstanceOf[js.Dynamic].__.asInstanceOf[P]
} else {
readWithWrappingAdjustment(propsReader)(value)
}
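  // Values whose written form is not a plain JS object are wrapped as `{ __value: ... }`; these
  // helpers transparently unwrap/wrap so readers and writers always see the original value.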
final private[slinky] def readWithWrappingAdjustment[T](reader: Reader[T])(value: js.Object): T = {
val __value = value.asInstanceOf[js.Dynamic].__value
if (value.hasOwnProperty("__value")) {
reader.read(__value.asInstanceOf[js.Object])
} else {
reader.read(value)
}
}
final private[slinky] def writeWithWrappingAdjustment[T](writer: Writer[T])(value: T): js.Object = {
val __value = writer.write(value)
if (js.typeOf(__value) == "object") {
__value
} else {
js.Dynamic.literal(__value = __value)
}
}
}
| shadaj/slinky | core/src/main/scala/slinky/core/DefinitionBase.scala | Scala | mit | 6,728 |
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette
import java.net.URI
/**
* HTTP related interfaces and implementations.
*/
package object http {
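  /**
   * Test helpers that build fake request/response pipelines backed by the default Silhouette
   * implementations.
   */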
protected[silhouette] object Fake {
type Request = RequestPipeline[SilhouetteRequest]
type Response = ResponsePipeline[SilhouetteResponse]
def request(
uri: URI = new URI("http://localhost/"),
method: Method = Method.GET,
headers: Seq[Header] = Seq(),
cookies: Seq[Cookie] = Seq(),
queryParams: Map[String, Seq[String]] = Map()
): Request = SilhouetteRequestPipeline(SilhouetteRequest(
uri,
method,
headers,
cookies,
queryParams
))
def request: Request = request()
val response: Response = SilhouetteResponsePipeline(SilhouetteResponse(Status.OK))
}
}
| mohiva/silhouette | modules/http/src/main/scala/silhouette/http/package.scala | Scala | apache-2.0 | 1,530 |
/*
* Copyright 2013 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.index
import com.typesafe.scalalogging.slf4j.Logging
import com.vividsolutions.jts.geom.Geometry
import org.apache.accumulo.core.client.IteratorSetting
import org.apache.hadoop.io.Text
import org.joda.time.format.DateTimeFormatter
import org.joda.time.{DateTime, DateTimeZone}
import org.locationtech.geomesa.core.index.KeyUtils._
import org.locationtech.geomesa.utils.CartesianProductIterable
import org.locationtech.geomesa.utils.geohash.{GeoHash, GeohashUtils}
case class QueryPlan(iterators: Seq[IteratorSetting], ranges: Seq[org.apache.accumulo.core.data.Range], cf: Seq[Text])
trait KeyPlanningFilter
case object AcceptEverythingFilter extends KeyPlanningFilter
case class SpatialFilter(geom: Geometry) extends KeyPlanningFilter
case class DateFilter(dt:DateTime) extends KeyPlanningFilter
case class DateRangeFilter(start:DateTime, end:DateTime) extends KeyPlanningFilter
case class SpatialDateFilter(geom: Geometry, dt:DateTime) extends KeyPlanningFilter
case class SpatialDateRangeFilter(geom: Geometry, start:DateTime, end:DateTime)
extends KeyPlanningFilter
sealed trait KeyPlan {
def join(right: KeyPlan, sep: String): KeyPlan
def toRange : KeyPlan = KeyInvalid
def hasRange : Boolean = toRange != KeyInvalid
def toRegex : KeyPlan = KeyInvalid
def hasRegex : Boolean = toRegex != KeyInvalid
def toList : KeyPlan = KeyInvalid
def hasList : Boolean = toList != KeyInvalid
}
case object KeyInvalid extends KeyPlan {
def join(right: KeyPlan, sep: String): KeyPlan = this
}
// this key-plan accepts all inputs
case object KeyAccept extends KeyPlan {
// a word on Accumulo characters: Even though the string-parser says that it
// can handle bytes represented from \\x00 to \\xFF, it turns out that anything
// above \\x7F throws a "unsupported non-ascii character" error, which is why
// our maximum character here is \\0x7E, "~"
val MIN_START = "\\u0000"
val MAX_END = "~"
def join(right: KeyPlan, sep: String): KeyPlan = right match {
case KeyRange(rstart, rend) => KeyRange(MIN_START + sep + rstart, MAX_END + sep + rend)
case KeyRegex(rregex) => KeyRegex(".*?" + sep + rregex)
case KeyList(rkeys) => {
val sorted = rkeys.sorted
KeyRange(MIN_START+sep+sorted.head, MAX_END+sep+sorted.last)
}
case KeyAccept => KeyRange(MIN_START + sep + MIN_START, MAX_END + sep + MAX_END)
case _ => KeyInvalid
}
}
case class KeyRange(start: String, end: String) extends KeyPlan {
def join(right: KeyPlan, sep: String): KeyPlan = right match {
case KeyRange(rstart, rend) => KeyRange(start + sep + rstart, end + sep + rend)
case KeyRegex(rregex) => KeyRegex(estimateRangeRegex(start, end) + sep + rregex)
case KeyList(rkeys) => {
val sorted = rkeys.sorted
KeyRange(start+sep+sorted.head, end+sep+sorted.last)
}
case KeyAccept => KeyRange(start + sep + KeyAccept.MIN_START, end + sep + KeyAccept.MAX_END)
case KeyInvalid => KeyInvalid
case _ => throw new Exception("Invalid KeyPlan match")
}
override def toRange = this
override def toRegex = KeyRegex(estimateRangeRegex(start, end))
}
case class KeyRanges(ranges:Seq[KeyRange]) extends KeyPlan {
// required of the KeyPlan contract, but never used
def join(right: KeyPlan, sep: String): KeyPlan = KeyInvalid
}
case class KeyRegex(regex: String) extends KeyPlan {
def join(right: KeyPlan, sep: String) = right match {
case KeyRange(rstart,rend) => KeyRegex(regex+sep+estimateRangeRegex(rstart,rend))
case KeyRegex(rregex) => KeyRegex(regex + sep + rregex)
case KeyList(rkeys) => KeyRegex(regex + sep +
(if (rkeys.size > MAX_KEYS_IN_REGEX) {
generalizeStringsToRegex(rkeys)
} else rkeys.mkString("(","|",")"))
)
case KeyAccept => KeyRegex(regex + sep + ".*?")
case KeyInvalid => KeyInvalid
case _ => throw new Exception("Invalid KeyPlan match")
}
override def toRegex = this
}
case class KeyList(keys:Seq[String]) extends KeyPlan {
lazy val sorted = keys.sorted
def join(right: KeyPlan, sep: String) = right match {
case KeyRange(rstart,rend) =>
KeyRange(sorted.head+sep+rstart, sorted.last+sep+rend)
case KeyRegex(rregex) => KeyRegex(
(if (keys.size > MAX_KEYS_IN_REGEX) {
generalizeStringsToRegex(keys)
} else keys.mkString("(","|",")")) + sep + rregex)
case KeyList(rkeys) => {
val combiner = CartesianProductIterable(Seq(keys, rkeys))
if (combiner.expectedSize > MAX_KEYS_IN_LIST) {
// there are too many keys; consolidate them
// if there are too many (total) entries, use a range (losing some data);
// otherwise, use a regular expression joining the lists
if ((keys.size+rkeys.size) > MAX_KEYS_IN_REGEX) {
val thatSorted = rkeys.sorted
KeyRange(
sorted.head + sep + thatSorted.head,
sorted.last + sep + thatSorted.last
)
}
else KeyRegex("((" + keys.mkString("|") + ")" + sep + "(" + rkeys.mkString("|") + "))")
} else {
// there are few enough combinations that we can preserve the explicit list
KeyList(combiner.iterator.toList.map(_.mkString(sep)))
}
}
case KeyAccept => KeyRegex("((" + keys.mkString("|") + ")" + sep + "(.*?))")
case KeyInvalid => KeyInvalid
case _ => throw new Exception("Invalid KeyPlan match")
}
override def toRange = {
val sortedKeys = keys.sorted
KeyRange(sortedKeys.head, sortedKeys.last)
}
override def toRegex = if (keys.size > MAX_KEYS_IN_REGEX)
KeyRegex(generalizeStringsToRegex(keys))
else KeyRegex("(" + keys.mkString("|") + ")")
override def toList = this
}
/**
* Accumulates tiers of discrete keys that can be reduced to one or more key-
* plans once the information is complete.
*
* The problem with assuming a single KeyRange is that it sets up a conflict
* between our partitioning scheme -- designed to spread queries across the
* entire cluster -- and the ordering that makes per-tablet-server querying
* efficient. Consider the following KeyRange end-points for a query:
*
* 01~randomdatapoint~d~201111
* 99~randomdatapoint~d~201201
*
* As a single range, this is largely ineffective, because it will include
* ALL of the entries for partitions 02 to 98 (97/99), removing very few of
* the entries from subsequent decoding and/or filtering. A better plan would
* be a sequence of ranges, one per partition:
*
* 01~randomdatapoint~d~201111, 01~randomdatapoint~d~201201
* 02~randomdatapoint~d~201111, 02~randomdatapoint~d~201201
* ...
* 99~randomdatapoint~d~201111, 99~randomdatapoint~d~201201
*
* Accumulo allows us to set multiple ranges per query, presumably for
* exactly this type of sharded-range planning.
*/
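// Illustrative sketch (added for clarity; values are hypothetical and mirror the example above):
// a per-partition key list joined with a date-range tier expands into one range per partition.
//
//   val partitions = KeyListTiered((1 to 99).map(i => f"$i%02d"))
//   val plan       = partitions.join(KeyRangeTiered("201111", "201201"), "~")
//   plan.asInstanceOf[KeyTiered].toRanges("~")
//   // => Seq(KeyRange("01~201111", "01~201201"), ..., KeyRange("99~201111", "99~201201"))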
sealed trait KeyTiered extends KeyPlan {
val parent: Option[KeyTiered]
val optList: Option[KeyList] = None
val optRange: Option[KeyRange] = None
def toRanges: Seq[KeyRange] = if (optRange.isDefined) Seq(optRange.get)
else optList.get.keys.map(key => KeyRange(key, key))
def toRanges(parentRange: KeyRange, sep: String): Seq[KeyRange] =
toRanges.map(range =>
KeyRange(parentRange.start + sep + range.start,
parentRange.end + sep + range.end))
def toRanges(sep: String): Seq[KeyRange] = parent match {
case Some(kt:KeyTiered) => kt.toRanges(sep).flatMap { range => toRanges(range, sep) }
case None => toRanges
case _ => throw new Exception("Invalid parent for KeyTiered")
}
def join(right: KeyPlan, sep: String): KeyPlan = right match {
case KeyRangeTiered(rstart, rend, None) => KeyRangeTiered(rstart, rend, Some(this))
case KeyRange(rstart, rend) => KeyRangeTiered(rstart, rend, Some(this))
case KeyListTiered(rkeys, None) => KeyListTiered(rkeys, Some(this))
case KeyList(rkeys) => KeyListTiered(rkeys, Some(this))
case KeyAccept => KeyRangeTiered(KeyAccept.MIN_START, KeyAccept.MAX_END, Some(this))
case _ => KeyInvalid // degenerate case
}
}
case class KeyRangeTiered(start: String, end: String, parent:Option[KeyTiered]=None) extends KeyTiered {
override val optRange = Some(KeyRange(start, end))
}
case class KeyListTiered(keys:Seq[String], parent:Option[KeyTiered]=None) extends KeyTiered {
override val optList = Some(KeyList(keys))
}
object KeyUtils {
val MAX_KEYS_IN_LIST = 65536
val MAX_KEYS_IN_REGEX = 1024
// assume that strings are all of the same size
// (this will necessarily throw away information you will wish you had kept)
def generalizeStringsToRegex(seq:Seq[String]) : String = {
val pairs = seq.map(s => s.zipWithIndex.map(_ match { case (c,i) => (i,c)})).flatten
val mapChars : Map[Int,List[Char]] = pairs.foldLeft(Map[Int,List[Char]]())((mapSoFar,pair) => pair match { case (i,c) => {
if (mapSoFar.contains(i)) {
val oldList : List[Char] = mapSoFar(i)
(mapSoFar - i) + (i -> (c :: oldList).distinct)
}
else mapSoFar + (i -> List(c))
}})
// count how many expressions are allowable under this per-character regex
val ranges = mapChars.keys.toList.sorted.map(i => {mapChars(i).sorted.mkString})
val numCombinations = ranges.map(s => s.length.toLong).product
// depending on 1) how selective the per-character regex is; and
// 2) how many explicit strings there are to list;
// determine which form is more useful
if ((numCombinations > 10L*seq.size.toLong) && (seq.size < 1024)) seq.mkString("(","|",")")
else ranges.map(s => if (s.length==1) s else "["+s+"]").mkString
}
// assumes an ASCII (not unicode) encoding
def encode(c:Char) : String = c match {
case a if (a>='a' && a<='z') => a.toString
    case a if (a>='A' && a<='Z') => a.toString
case d if (d>='0' && d<='9') => d.toString
case _ => """\\x""" + c.toInt.toHexString.reverse.padTo(2,"0").reverse.mkString
}
// encode as choices, so as to avoid syntax issues with ranges and hex-encoding
def encodeRange(cMin:Char, cMax:Char) : String =
(cMin to cMax).map(c => encode(c)).mkString("(","|",")")
// assume that the two strings are the same size, and ASCII-encoded
// (this will necessarily throw away information you will wish you had kept)
def estimateRangeRegex(start:String, end:String) : String = {
start.zip(end).foldLeft((true,""))((t1,t2) =>
t1 match { case (inFrontMatch,regexSoFar) => {
t2 match { case (cA,cB) => {
if (inFrontMatch) {
if (cA==cB) (true,regexSoFar+encode(cA))
else {
val cMin = if (cA < cB) cA else cB
val cMax = if (cA > cB) cA else cB
(false, regexSoFar+encodeRange(cMin,cMax))
}
} else (false,regexSoFar+".")
}
}}})._2
}
}
trait KeyPlanner {
def getKeyPlan(filter:KeyPlanningFilter, output: ExplainerOutputType): KeyPlan
}
trait ColumnFamilyPlanner {
def getColumnFamiliesToFetch(filter: KeyPlanningFilter): KeyPlan
}
trait GeoHashPlanner extends Logging {
def geomToGeoHashes(geom: Geometry, offset: Int, bits: Int): Seq[String] =
GeohashUtils.getUniqueGeohashSubstringsInPolygon(geom, offset, bits, MAX_KEYS_IN_LIST)
// takes care of the case where overflow forces a return value
// that is an empty list
def polyToPlan(geom: Geometry, offset: Int, bits: Int): KeyPlan = {
val subHashes = geomToGeoHashes(geom, offset, bits).sorted
logger.debug(s"Geom to GeoHashes has returned: ${subHashes.size} subhashes to cover $geom $offset $bits.")
subHashes match {
case subs if subs.size == 0 =>
// if the list is empty, then there are probably too many 35-bit GeoHashes
// that fall inside the given polygon; in this case, return the LL, UR
// GeoHash endpoints of the entire range (which could encompass many
// more GeoHashes than we wish, but can only be better than (or equal
// to) a full-table scan)
val env = geom.getEnvelopeInternal
val ghLL = GeoHash(env.getMinX, env.getMinY)
val ghUR = GeoHash(env.getMaxX, env.getMaxY)
KeyRange(ghLL.hash, ghUR.hash)
case subs => KeyList(subs.sorted)
}
}
def getKeyPlan(filter: KeyPlanningFilter, offset: Int, bits: Int) = filter match {
case SpatialFilter(geom) =>
polyToPlan(geom, offset, bits)
case SpatialDateFilter(geom, _) =>
polyToPlan(geom, offset, bits)
case SpatialDateRangeFilter(geom, _, _) =>
polyToPlan(geom, offset, bits)
case AcceptEverythingFilter => KeyAccept
case _ => KeyInvalid // degenerate outcome
}
}
case class GeoHashKeyPlanner(offset: Int, bits: Int) extends KeyPlanner with GeoHashPlanner {
def getKeyPlan(filter: KeyPlanningFilter, output: ExplainerOutputType) = getKeyPlan(filter, offset, bits) match {
case KeyList(keys) => {
output(s"GeoHashKeyPlanner is setting ${keys.size}: $keys")
KeyListTiered(keys)
}
case KeyAccept => KeyAccept
case _ => KeyInvalid
}
}
case class GeoHashColumnFamilyPlanner(offset: Int, bits: Int) extends ColumnFamilyPlanner with GeoHashPlanner {
def getColumnFamiliesToFetch(filter: KeyPlanningFilter): KeyPlan = getKeyPlan(filter, offset, bits)
}
case class RandomPartitionPlanner(numPartitions: Int) extends KeyPlanner {
val numBits: Int = numPartitions.toString.length
def getKeyPlan(filter: KeyPlanningFilter, output: ExplainerOutputType) = {
val keys = (0 to numPartitions).map(_.toString.reverse.padTo(numBits,"0").reverse.mkString)
output(s"Random Partition Planner $keys")
KeyListTiered(keys)
}
}
case class ConstStringPlanner(cstr: String) extends KeyPlanner {
def getKeyPlan(filter:KeyPlanningFilter, output: ExplainerOutputType) = KeyListTiered(List(cstr))
}
case class DatePlanner(formatter: DateTimeFormatter) extends KeyPlanner {
val endDates = List(9999,12,31,23,59,59,999)
val startDates = List(0,1,1,0,0,0,0)
def getKeyPlan(filter:KeyPlanningFilter, output: ExplainerOutputType) = {
val plan = filter match {
case DateFilter(dt) => KeyRange(formatter.print(dt), formatter.print(dt))
case SpatialDateFilter(_, dt) => KeyRange(formatter.print(dt), formatter.print(dt))
case DateRangeFilter(start,end) => {// @todo - add better ranges for wrap-around case
if (formatter.print(start).take(4).toInt == start.getYear && formatter.print(end).take(4).toInt == end.getYear) {
// ***ASSUME*** that time components have been provided in order (if they start with the year)!
KeyRangeTiered(formatter.print(start), formatter.print(end))
} else {
val matchedTime = getTimeComponents(start).zip(getTimeComponents(end)).takeWhile(tup => tup._1 == tup._2).map(_._1)
val zeroTimeHead: List[Int] = bufferMatchedTime(matchedTime, startDates, start)
val zeroTime = extractRelevantTimeUnits(zeroTimeHead, startDates)
val endTimeHead = bufferMatchedTime(matchedTime, endDates, end)
val endTime = extractRelevantTimeUnits(endTimeHead, endDates)
KeyRangeTiered(formatter.print(createDate(zeroTime)),formatter.print(createDate(endTime)))
}
}
case SpatialDateRangeFilter(_, start, end) => {// @todo - add better ranges for wrap-around case
if (formatter.print(start).take(4).toInt == start.getYear && formatter.print(end).take(4).toInt == end.getYear) {
// ***ASSUME*** that time components have been provided in order (if they start with the year)!
KeyRangeTiered(formatter.print(start), formatter.print(end))
} else {
val matchedTime = getTimeComponents(start).zip(getTimeComponents(end)).takeWhile(tup => tup._1 == tup._2).map(_._1)
val zeroTimeHead: List[Int] = bufferMatchedTime(matchedTime, startDates, start)
val zeroTime = extractRelevantTimeUnits(zeroTimeHead, startDates)
val endTimeHead = bufferMatchedTime(matchedTime, endDates, end)
val endTime = extractRelevantTimeUnits(endTimeHead, endDates)
KeyRangeTiered(formatter.print(createDate(zeroTime)),formatter.print(createDate(endTime)))
}
}
case _ => defaultKeyRange
}
plan match {
case KeyRange(start, end) => output(s"DatePlanner: start: $start end: $end")
case KeyRangeTiered(start, end, _) => output(s"DatePlanner: start: $start end: $end")
}
plan
}
def bufferMatchedTime(matchedTime: List[Int], dates: List[Int], time: DateTime): List[Int] =
if (matchedTime.length < dates.length)
matchedTime ++ List(getTimeComponents(time)(matchedTime.length))
else
matchedTime
private def extractRelevantTimeUnits(timeList: List[Int], base: List[Int]) =
timeList.zipAll(base, -1, -1).map { case (l,r) => if(l>=0) l else r }
private def getTimeComponents(dt: DateTime) =
List(math.min(9999,math.max(0,dt.getYear)), // constrain to 4-digit years
dt.getMonthOfYear,dt.getDayOfMonth,dt.getHourOfDay,dt.getMinuteOfDay)
private def createDate(list:List[Int]) = list match {
case year::month::date::hour::minute::second::ms::Nil => {
val day = if (date >= 28)
new DateTime(year, month, 1, 0, 0).dayOfMonth().withMaximumValue().getDayOfMonth
else
date
new DateTime(year, month, day, hour, minute, second, ms, DateTimeZone.forID("UTC"))
}
case _ => throw new Exception("invalid date list.")
}
private val defaultKeyRange =
KeyRange(
formatter.print(createDate(startDates)),// seems to print in local time, so force 0's
formatter.print(createDate(endDates)))
}
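/** Joins the key plans produced by each component planner with the separator; tiered plans are
  * flattened into explicit per-shard ranges, and a plain regex plan gets a trailing wildcard. */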
case class CompositePlanner(seq: Seq[KeyPlanner], sep: String) extends KeyPlanner {
def getKeyPlan(filter: KeyPlanningFilter, output: ExplainerOutputType): KeyPlan = {
val joined = seq.map(_.getKeyPlan(filter, output)).reduce(_.join(_, sep))
joined match {
case kt:KeyTiered => KeyRanges(kt.toRanges(sep))
case KeyRegex(regex) => joined.join(KeyRegex(".*"), "")
case _ => joined
}
}
}
| nhambletCCRI/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/index/QueryPlanners.scala | Scala | apache-2.0 | 18,724 |
package org.automanlang.core.scheduler
import org.automanlang.core.logging.LogLevelDebug
import org.automanlang.core.policy.aggregation.UserDefinableSpawnPolicy
import org.scalatest._
import java.util.UUID
import org.automanlang.test._
import org.automanlang.adapters.mturk.DSL._
import org.automanlang.adapters.mturk.mock.MockSetup
class ComplicatedTimeoutTest extends FlatSpec with Matchers {
"A radio button program" should "timeout and a bunch of stuff should happen" in {
implicit val mt = mturk (
access_key_id = UUID.randomUUID().toString,
secret_access_key = UUID.randomUUID().toString,
use_mock = MockSetup(balance = 8.00),
logging = LogConfig.NO_LOGGING,
log_verbosity = LogLevelDebug()
)
def which_one() = radio (
budget = 10.00,
text = "ComplicatedTimeoutTest?",
// make sure that this task times out after exactly 30s
initial_worker_timeout_in_s = 30,
question_timeout_multiplier = 1,
options = List(
choice('z, "Z"),
choice('zz, "ZZ"),
choice('a, "A"),
choice('zzz, "ZZZ"),
choice('zzzz, "ZZZZ")
),
mock_answers = makeTimedMocks(
List(
('a, 0),
('z, 1),
('zz, 3),
('a, 45),
('z, 45),
('zzz, 45),
('zzzz, 45),
('a, 65),
('a, 65),
('a, 65),
('a, 65),
('a, 65),
('a, 65),
('a, 65),
('a, 65),
('a, 65),
('a, 65),
('a, 65)
)
),
minimum_spawn_policy = UserDefinableSpawnPolicy(0)
)
automan(mt, test_mode = true) {
which_one().answer match {
case Answer(ans, cost, conf, qid, _) =>
println("question_id = " + qid + ", Answer: '" + ans + "', cost: '" + cost + "', confidence: " + conf)
(ans == 'a) should be (true)
(cost >= BigDecimal(2.34)) should be (true)
(conf > 0.95) should be (true)
case _ =>
fail()
}
}
}
}
| dbarowy/AutoMan | libautoman/src/test/scala/org/automanlang/core/scheduler/ComplicatedTimeoutTest.scala | Scala | gpl-2.0 | 2,108 |
package com.datawizards.sparklocal.examples.dataset
import com.datawizards.dmg.annotations.{column, table}
import com.datawizards.dmg.dialects
import com.datawizards.sparklocal.dataset.io.ModelDialects
object Model {
case class Person(id: Int, name: String, gender: String)
case class WorkExperience(personId: Int, year: Int, title: String)
case class HRReport(year: Int, title: String, gender: String, count: Int)
@table("PEOPLE", dialect = dialects.Hive)
case class PersonWithMapping(
@column("PERSON_NAME", dialect = dialects.Hive)
@column("PERSON_NAME", dialect = ModelDialects.CSV)
@column("personName", dialect = ModelDialects.JSON)
@column("personName", dialect = ModelDialects.Avro)
@column("personName", dialect = ModelDialects.Parquet)
name: String,
@column("PERSON_AGE", dialect = dialects.Hive)
@column("PERSON_AGE", dialect = ModelDialects.CSV)
@column("personAge", dialect = ModelDialects.JSON)
@column("personAge", dialect = ModelDialects.Avro)
@column("personAge", dialect = ModelDialects.Parquet)
age: Int
)
} | piotr-kalanski/spark-local | src/main/scala/com/datawizards/sparklocal/examples/dataset/Model.scala | Scala | apache-2.0 | 1,452 |
package com.twitter.jvm
import java.util.concurrent.locks.ReentrantLock
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.concurrent.Eventually
import org.scalatest.junit.JUnitRunner
import org.scalatest.time.{Millis, Seconds, Span}
import com.twitter.util.{Await, Promise}
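/** Each philosopher grabs its own lock, signals readiness, then dines with its neighbour (which
  * needs the neighbour's lock), so two philosophers dining with each other deadlock
  * deterministically for the test below. */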
class Philosopher {
val ready = new Promise[Unit]
private val lock = new ReentrantLock()
def dine(neighbor: Philosopher): Unit = {
lock.lockInterruptibly()
    ready.setValue(())
Await.ready(neighbor.ready)
neighbor.dine(this)
lock.unlock()
}
}
@RunWith(classOf[JUnitRunner])
class ContentionTest extends FunSuite with Eventually {
implicit override val patienceConfig =
PatienceConfig(timeout = scaled(Span(15, Seconds)), interval = scaled(Span(5, Millis)))
test("Deadlocks") {
val c = new ContentionSnapshot()
val descartes = new Philosopher()
val plato = new Philosopher()
val d = new Thread(new Runnable() {
def run() { descartes.dine(plato) }
})
d.start()
val p = new Thread(new Runnable() {
def run() { plato.dine(descartes) }
})
p.start()
Await.all(descartes.ready, plato.ready)
eventually { assert(c.snap().deadlocks.size === 2) }
d.interrupt()
p.interrupt()
p.join()
d.join()
assert(c.snap().deadlocks.size == 0)
}
}
| travisbrown/util | util-jvm/src/test/scala/com/twitter/jvm/ContentionTest.scala | Scala | apache-2.0 | 1,356 |
package io.getquill.context.cassandra
import io.getquill._
import io.getquill.context.mirror.Row
class CassandraContextMacroSpec extends Spec {
val context = mirrorContext
import mirrorContext._
"runs queries" - {
"static" in {
val q = quote {
qr1.filter(t => t.i == lift(1))
}
val mirror = mirrorContext.run(q)
mirror.string mustEqual "SELECT s, i, l, o FROM TestEntity WHERE i = ?"
mirror.prepareRow mustEqual Row(1)
}
"dynamic" in {
val q: Quoted[Query[TestEntity]] = quote {
qr1.filter(t => t.i == lift(1))
}
val mirror = mirrorContext.run(q)
mirror.string mustEqual "SELECT s, i, l, o FROM TestEntity WHERE i = ?"
mirror.prepareRow mustEqual Row(1)
}
}
"probes queries" in {
val ctx = new CassandraMirrorContextWithQueryProbing
import ctx._
val q = quote {
query[TestEntity].filter(_.s == "fail")
}
"ctx.run(q)" mustNot compile
}
"binds inputs according to the cql terms order" - {
"filter.update" in {
val q = quote {
qr1.filter(t => t.i == lift(1)).update(t => t.l -> lift(2L))
}
val mirror = mirrorContext.run(q)
mirror.string mustEqual "UPDATE TestEntity SET l = ? WHERE i = ?"
mirror.prepareRow mustEqual Row(2l, 1)
}
"filter.map" in {
val q = quote {
qr1.filter(t => t.i == lift(1)).map(t => lift(2L))
}
val mirror = mirrorContext.run(q)
mirror.string mustEqual "SELECT ? FROM TestEntity WHERE i = ?"
mirror.prepareRow mustEqual Row(2l, 1)
}
}
}
| mentegy/quill | quill-cassandra/src/test/scala/io/getquill/context/cassandra/CassandraContextMacroSpec.scala | Scala | apache-2.0 | 1,589 |
package sds.classfile
import sds.classfile.{ClassfileStream => Stream}
import sds.classfile.attribute.{AttributeInfo => Attribute}
import sds.classfile.constant_pool.{ConstantInfo => Cons}
import sds.util.AccessFlag.get
import sds.util.DescriptorParser.parse
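/** A field or method entry of a classfile: reads the access-flag, name and descriptor indices
  * from the stream, resolves them against the constant pool, and then reads the member's
  * attributes. */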
class MemberInfo(data: Stream, pool: Array[Cons], f: (Stream, Array[Cons]) => Attribute) extends ClassfileInformation {
private val declaration: Array[String] = (() => {
val accIndex: Int = data.short
val nameIndex: Int = data.short
val descIndex: Int = data.short
val name: String = extract(nameIndex, pool)
val desc: String = parse(extract(descIndex, pool))
Array(get(accIndex, if(desc.contains("(")) "method" else "field"), name, desc)
})()
val attributes: Array[Attribute] = (0 until data.short).map((_: Int) => f(data, pool)).toArray
def access: String = declaration(0)
def name: String = declaration(1)
def desc: String = declaration(2)
def _type: String = if(declaration(2).contains("(")) "method" else "field"
override def toString(): String =
if(_type.equals("field")) declaration(0) + declaration(2) + " " + declaration(1)
else declaration(0) + declaration(1) + declaration(2)
} | g1144146/sds_for_scala | src/main/scala/sds/classfile/MemberInfo.scala | Scala | apache-2.0 | 1,275 |
object infpaths3 {
object a {
trait T { t =>
type M <: t.g.N // error
type T <: a.T
val f: t.T
trait U { u =>
type N <: t.f.M
type U <: a.x.g.U // error
val f: u.U
}
val g: t.U
}
val x: a.T = ???
}
}
| som-snytt/dotty | tests/neg/i280.scala | Scala | apache-2.0 | 279 |
/**
* (c) Copyright 2013 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.schema.shell.spi
import org.kiji.annotations.ApiAudience
import org.kiji.annotations.ApiStability
import org.kiji.annotations.Inheritance
import org.kiji.delegation.NamedProvider
import org.kiji.schema.shell.Environment
/**
* Plugin SPI that specifies an extension to the data tracked by the Environment
* object in the Kiji shell.
*
* This SPI is a "decorator" SPI that should be attached to a {@link ParserPluginFactory}.
* It declares that the ParserPluginFactory also creates an object to be placed in the
* {@link Environment}. This object can hold any state required to facilitate your plugin's
* operation over a session consisting of multiple commands. Within the Environment,
* this is keyed by the same "name" returned by {@link #getName} as in the
* ParserPluginFactory itself.
*
* The type argument to this class represents the type of object that holds your state.
*
* ===Best Practices Regarding Mutability===
*
* Changes to the Environment can only happen in the `exec()` method of a
* `DDLCommand`. Your `ParserPlugin` will emit a `DDLCommand` that
* represents the custom command to run. The DDLCommand's `exec()` method then
* returns the Environment to feed forward into the next command.
*
* The main `Environment` is an immutable object; "updates" to the Environment
* are reflected by creating a new `Environment` object that differs from the previous
* Environment only by a single field.
*
* <em>It is recommended that you make your own environment extension immutable as well.</em>
* When updating the state to propagate forward to subsequent commands, you should create a
* new instance of your environment state class, holding the new information you need. You
* can then call the {@link DDLCommand#setExtensionState} method to insert this new object in
* the Environment (actually, this returns a new Environment that contains the updated extension
* mapping). Your {@link DDLCommand#exec} method should then return this new Environment.
*/
@ApiAudience.Framework
@ApiStability.Experimental
@Inheritance.Extensible
trait EnvironmentPlugin[EnvState] extends NamedProvider {
/**
* Create the initial state held by your extension to the Environment. This will
* be called once when your module loads to initialize the Environment with your
* default data.
*
* @return a new instance of your environment state container, for use when your
* module is first initialized.
*/
def createExtensionState(): EnvState
}
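// Illustrative sketch (added; names are hypothetical and the ParserPluginFactory members are
// elided): an immutable state holder plus the factory hook defined above.
//
//   case class CounterState(commandsRun: Int)
//
//   class CounterPlugin extends ParserPluginFactory with EnvironmentPlugin[CounterState] {
//     override def getName(): String = "counter"
//     override def createExtensionState(): CounterState = CounterState(0)
//     // ... ParserPlugin creation elided ...
//   }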
| kijiproject/kiji-schema-shell | src/main/scala/org/kiji/schema/shell/spi/EnvironmentPlugin.scala | Scala | apache-2.0 | 3,232 |
package edu.berkeley.nlp.coref
import edu.berkeley.nlp.futile.util.Logger
import edu.berkeley.nlp.futile.fig.basic.Indexer
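/** Abstracts a coreference inference strategy: computing document likelihoods, accumulating
  * stochastic gradients during training, and Viterbi-decoding antecedent backpointers which are
  * then turned into ordered clusterings. */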
trait DocumentInferencer {
def getInitialWeightVector(featureIndexer: Indexer[String]): Array[Double];
def computeLikelihood(docGraph: DocumentGraph,
pairwiseScorer: PairwiseScorer,
lossFcn: (CorefDoc, Int, Int) => Double): Double;
def addUnregularizedStochasticGradient(docGraph: DocumentGraph,
pairwiseScorer: PairwiseScorer,
lossFcn: (CorefDoc, Int, Int) => Double,
gradient: Array[Double]);
def viterbiDecode(docGraph: DocumentGraph,
pairwiseScorer: PairwiseScorer): Array[Int];
def finishPrintStats();
def viterbiDecodeAll(docGraphs: Seq[DocumentGraph], pairwiseScorer: PairwiseScorer): Array[Array[Int]] = {
val allPredBackptrs = new Array[Array[Int]](docGraphs.size);
for (i <- 0 until docGraphs.size) {
val docGraph = docGraphs(i);
Logger.logs("Decoding " + i);
val predBackptrs = viterbiDecode(docGraph, pairwiseScorer);
allPredBackptrs(i) = predBackptrs;
}
allPredBackptrs;
}
def viterbiDecodeAllFormClusterings(docGraphs: Seq[DocumentGraph], pairwiseScorer: PairwiseScorer): (Array[Array[Int]], Array[OrderedClustering]) = {
val allPredBackptrs = viterbiDecodeAll(docGraphs, pairwiseScorer);
val allPredClusteringsSeq = (0 until docGraphs.size).map(i => OrderedClustering.createFromBackpointers(allPredBackptrs(i)));
(allPredBackptrs, allPredClusteringsSeq.toArray)
}
} | nate331/jbt-berkeley-coref-resolution | src/main/java/edu/berkeley/nlp/coref/DocumentInferencer.scala | Scala | gpl-3.0 | 1,686 |
package com.moon.pattern
/**
* Created by Paul on 2017/3/6.
*/
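/** Demonstrates closures: each purchase captures the shared cash register and is only applied
  * when the stored function is invoked later from `main`. */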
object ScalaPattern3 {
var purchases: Vector[() => Unit] = Vector()
def makePurchase(register: ScalaCashRegister, amount: Int): () => Unit = {
() => {
println("purchase in amount: " + amount)
      register.total += amount
}
}
def executePurchase(purchase: () => Unit): Unit = {
purchases = purchases :+ purchase
// purchase()
}
def main(args: Array[String]) {
val register: ScalaCashRegister = new ScalaCashRegister(0)
val purchaseOne = makePurchase(register, 100)
val purchaseTwo = makePurchase(register, 50)
executePurchase(purchaseOne)
executePurchase(purchaseTwo)
    for (purchase <- purchases)
      purchase()
}
}
class ScalaCashRegister(var total: Int) {
def addCash(toAdd: Int): Unit = {
total += toAdd
}
}
| linpingchuan/misc | scala/pattern/src/test/scala-2.12/com/moon/pattern/ScalaPattern3.scala | Scala | gpl-2.0 | 855 |
package colang.utils
import java.util.Locale
object InternalErrors {
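  // Fatal, localized (be/ru/en) error reports: each prints to stderr and exits with status 2.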
def missingPrelude: Nothing = {
val message = Locale.getDefault.getLanguage match {
case "be" => "Памылка: 'prelude.co' ня знойдзены. Калі ласка, усталюйце стандартную бібліятэку CO."
case "ru" => "Ошибка: 'prelude.co' не найден. Пожалуйста, установите стандартную библиотеку CO."
case "en" | _ => "Error: 'prelude.co' not found. Please install CO standard library."
}
System.err.println(message)
sys.exit(2)
}
def primitiveTypeIsNotAType(typeName: String): Nothing = {
val message = Locale.getDefault.getLanguage match {
case "be" => s"Памылка: '$typeName' не з'яўляецца тыпам у стандартнай бібліятэцы. Калі ласка, праверце, ці CO " +
s"усталяваны карэктна і няма новых абнаўленьняў."
case "ru" => s"Ошибка: '$typeName' - не тип в стандартной библиотеке. Пожалуйста, проверьте, правильно ли " +
s"установлен CO, и нет ли новых обновлений."
case "en" | _ => s"Error: '$typeName' is not a type in the standard library. Please check if your " +
s"CO installation is correct and up-to-date."
}
System.err.println(message)
sys.exit(2)
}
def missingPrimitiveType(typeName: String): Nothing = {
val message = Locale.getDefault.getLanguage match {
case "be" => s"Памылка: ў стандартнай бібліятэцы няма акрэсьленьня тыпу '$typeName'. Калі ласка, праверце, ці CO " +
s"усталяваны карэктна і няма новых абнаўленьняў."
case "ru" => s"Ошибка: в стандартной библиотеке отсутствует определение типа '$typeName'. Пожалуйста, проверьте, " +
s"правильно ли установлен CO, и нет ли новых обновлений."
case "en" | _ => s"Error: '$typeName' type definition not found in the standard library. Please check if your " +
s"CO installation is correct and up-to-date."
}
System.err.println(message)
sys.exit(2)
}
def noNativeSymbol(name: String): Nothing = {
val message = Locale.getDefault.getLanguage match {
case "be" => s"Унутраная памылка ў кампілятары: адсутнічае нізкаўзроўневае прадстаўленьне для сімвала '$name'"
case "ru" => s"Внутренняя ошибка в компиляторе: отсутствует низкоуровневое представление для символа '$name'"
case "en" | _ => s"Internal compiler error: no native representation for symbol '$name'."
}
System.err.println(message)
sys.exit(2)
}
}
| merkispavel/colang | src/main/scala/colang/utils/InternalErrors.scala | Scala | mit | 3,110 |
package spatial.codegen.pirgen
import spatial.aliases._
import argon.codegen.FileGen
import argon.core._
import scala.language.postfixOps
import scala.sys.process._
trait PIRFileGen extends FileGen {
override protected def emitMain[S:Type](b: Block[S]): Unit = emitBlock(b)
override protected def emitFileHeader() {
emit("import pir._")
emit("import pir.node._")
emit("import arch._")
emit("import pirc.enums._")
emit("")
open(s"""object ${config.name} extends PIRApp {""")
//emit(s"""override val arch = SN_4x4""")
open(s"""def main(top:Top) = {""")
super.emitFileHeader()
}
override protected def emitFileFooter() {
emit(s"")
close("}")
close("}")
super.emitFileFooter()
}
override protected def process[S:Type](b: Block[S]): Block[S] = {
super.process(b)
//TODO: Cannot treat this as a dependency because postprocess is called before stream is closed
if (sys.env.get("PIR_HOME").isDefined && sys.env("PIR_HOME") != "") {
// what should be the cleaner way of doing this?
val PIR_HOME = sys.env("PIR_HOME")
val dir = spatialConfig.pirsrc.getOrElse(s"$PIR_HOME/pir/apps/src")
var cmd = s"mkdir -p $dir"
info(cmd)
cmd.!
cmd = s"cp ${config.genDir}/pir/main.scala $dir/${config.name}.scala"
println(cmd)
cmd.!
}
else {
warn("Set PIR_HOME environment variable to automatically copy app")
}
b
}
}
| stanford-ppl/spatial-lang | spatial/core/src/spatial/codegen/pirgen/PIRFileGen.scala | Scala | mit | 1,459 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.utils
/**
  * Contains the Declaration reasons.
  */
object DeclarationReason {
// Declarations
val ValueLessThanNilRateBand = "valueLessThanNilRateBand"
val ValueLessThanNilRateBandAfterExemption = "valueLessThanNilRateBandAfterExemption"
val ValueLessThanTransferredNilRateBand = "valueLessThanTransferredNilRateBand"
val ValueLessThanTransferredNilRateBandAfterExemption = "valueLessThanTransferredNilRateBandAfterExemption"
val ExemptionEstateLessThanThreshold = "Estate value is less than Threshold value"
val TnrbEstateLessThanThreshold = "Estate value is more than Threshold value"
val TnrbEstateLessThanThresholdWithNilExemption = "Estate value is less than threshold with nil Exemption"
}
| hmrc/iht-frontend | app/iht/utils/DeclarationReason.scala | Scala | apache-2.0 | 1,338 |
package com.twitter.logging
import com.twitter.app.{App, Flaggable}
object Logging {
implicit object LevelFlaggable extends Flaggable[Level] {
def parse(s: String) =
if (Logger.levelNames contains s)
Logger.levelNames(s)
else
throw new Exception("Invalid log level: "+s)
}
implicit object PolicyFlaggable extends Flaggable[Policy] {
def parse(s: String) = Policy.parse(s)
}
}
/**
* A [[com.twitter.app.App]] mixin to use for logging. Defines flags
* to configure the (default) logger setup. When adding logging to a
* twitter-server, mix in a trait that extends Logging but overrides factories.
*/
trait Logging { self: App =>
import Logging._
lazy val log = Logger(name)
def defaultFormatter: Formatter = new Formatter()
def defaultOutput: String = "/dev/stderr"
def defaultLogLevel: Level = Level.INFO
def defaultRollPolicy: Policy = Policy.Never
def defaultAppend: Boolean = true
def defaultRotateCount: Int = -1
protected[this] val inferClassNamesFlag = flag("log.async.inferClassNames", false,
"Infer class and method names synchronously. See com.twitter.logging.QueueingHandler")
protected[this] val outputFlag = flag("log.output", defaultOutput, "Output file")
protected[this] val levelFlag = flag("log.level", defaultLogLevel, "Log level")
protected[this] val asyncFlag = flag("log.async", true, "Log asynchronously")
protected[this] val asyncMaxSizeFlag =
flag("log.async.maxsize", 4096, "Max queue size for async logging")
// FileHandler-related flags are ignored if outputFlag is not overridden.
protected[this] val rollPolicyFlag = flag("log.rollPolicy", defaultRollPolicy,
"When or how frequently to roll the logfile. " +
"See com.twitter.logging.Policy#parse documentation for DSL details."
)
protected[this] val appendFlag =
flag("log.append", defaultAppend, "If true, appends to existing logfile. Otherwise, file is truncated.")
protected[this] val rotateCountFlag =
flag("log.rotateCount", defaultRotateCount, "How many rotated logfiles to keep around")
/**
* By default, the root [[com.twitter.logging.LoggerFactory]] only has a single
* [[com.twitter.logging.Handler]] which is configured via command line flags.
* You can override this method to add additional handlers.
*/
def handlers: List[() => Handler] = {
val output = outputFlag()
val level = Some(levelFlag())
val handler =
if (output == "/dev/stderr")
ConsoleHandler(defaultFormatter, level)
else
FileHandler(
output,
rollPolicyFlag(),
appendFlag(),
rotateCountFlag(),
defaultFormatter,
level
)
List(
if (asyncFlag())
QueueingHandler(handler, asyncMaxSizeFlag(), inferClassNamesFlag())
else
handler
)
}
def loggerFactories: List[LoggerFactory] = {
LoggerFactory(
node = "",
level = Some(levelFlag()),
handlers = handlers
) :: Nil
}
/**
* Configuration of the Logger Factories removes any other configured
* loggers. Override if you do not want the default LoggerFactories
* to be configured. This is generally when you want to ensure that
* any handlers already installed are not removed or replaced.
*/
protected def configureLoggerFactories(): Unit = {
Logger.configure(loggerFactories)
}
premain {
configureLoggerFactories()
}
}
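
// A minimal usage sketch (object name is illustrative; assumes the usual com.twitter.app.App
// contract where application code lives in main()):
//
//   object MyServer extends com.twitter.app.App with Logging {
//     override def defaultLogLevel: Level = Level.DEBUG
//     def main(): Unit = log.info("server starting")
//   }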
| BuoyantIO/twitter-util | util-logging/src/main/scala/com/twitter/logging/App.scala | Scala | apache-2.0 | 3,464 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
class State
object DONE extends State
object READY extends State
object NOT_READY extends State
object FAILED extends State
/**
 * Transliteration of the iterator template in Google Collections. To implement an iterator,
 * override makeNext and call allDone() when there are no more items.
*/
abstract class IteratorTemplate[T] extends Iterator[T] with java.util.Iterator[T] {
private var state: State = NOT_READY
private var nextItem: Option[T] = None
def next(): T = {
if(!hasNext())
throw new NoSuchElementException()
state = NOT_READY
nextItem match {
case Some(item) => item
case None => throw new IllegalStateException("Expected item but none found.")
}
}
def hasNext(): Boolean = {
if(state == FAILED)
throw new IllegalStateException("Iterator is in failed state")
state match {
case DONE => false
case READY => true
case _ => maybeComputeNext()
}
}
protected def makeNext(): T
def maybeComputeNext(): Boolean = {
state = FAILED
nextItem = Some(makeNext())
if(state == DONE) {
false
} else {
state = READY
true
}
}
protected def allDone(): T = {
state = DONE
null.asInstanceOf[T]
}
def remove =
throw new UnsupportedOperationException("Removal not supported")
}
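
/**
 * A minimal illustration of the contract described above (the class name is illustrative and
 * not part of this file's API): wrap a Scala iterator and signal exhaustion through allDone().
 */
class IteratorTemplateExample[T](underlying: Iterator[T]) extends IteratorTemplate[T] {
  protected def makeNext(): T =
    if(underlying.hasNext) underlying.next()
    else allDone()
}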
| jinfei21/kafka | src/kafka/utils/IteratorTemplate.scala | Scala | apache-2.0 | 1,953 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.domain
import scala.util.Random
class SaUtrGenerator(random: Random = new Random) extends Modulus11Check {
def this(seed: Int) = this(new scala.util.Random(seed))
def nextSaUtr: SaUtr = {
val suffix = f"${random.nextInt(100000)}%09d"
val checkCharacter = calculateCheckCharacter(suffix)
SaUtr(s"$checkCharacter$suffix")
}
}
| hmrc/domain | src/main/scala/uk/gov/hmrc/domain/SaUtrGenerator.scala | Scala | apache-2.0 | 970 |
/* ___ _ ___ _ _ *\\
** / __| |/ (_) | | The SKilL Generator **
** \\__ \\ ' <| | | |__ (c) 2013-16 University of Stuttgart **
** |___/_|\\_\\_|_|____| see LICENSE **
\\* */
package de.ust.skill.generator.jforeign.internal
import de.ust.skill.generator.jforeign.GeneralOutputMaker
trait StateMaker extends GeneralOutputMaker {
abstract override def make {
super.make
val out = files.open("internal/SkillState.java")
out.write(s"""package ${packagePrefix}internal;
import java.io.File;
import java.io.IOException;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import de.ust.skill.common.jforeign.api.Access;
import de.ust.skill.common.jforeign.api.FieldDeclaration;
import de.ust.skill.common.jforeign.api.SkillException;
import de.ust.skill.common.jforeign.internal.BasePool;
import de.ust.skill.common.jforeign.internal.SkillObject;
import de.ust.skill.common.jforeign.internal.StoragePool;
import de.ust.skill.common.jforeign.internal.StringPool;
import de.ust.skill.common.jforeign.internal.fieldTypes.Annotation;
import de.ust.skill.common.jforeign.internal.fieldTypes.StringType;
import de.ust.skill.common.jvm.streams.FileInputStream;
import ${packagePrefix}api.SkillFile;
/**
* Internal implementation of SkillFile.
*
* @author Timm Felden
* @note type access fields start with a capital letter to avoid collisions
*/
${
suppressWarnings
}public final class SkillState extends de.ust.skill.common.jforeign.internal.SkillState implements SkillFile {
// types by skill name
private final HashMap<String, StoragePool<?, ?>> poolByName;
@Override
public HashMap<String, StoragePool<?, ?>> poolByName() {
return poolByName;
}
/**
* Create a new skill file based on argument path and mode.
*
* @throws IOException
* on IO and mode related errors
* @throws SkillException
* on file or specification consistency errors
*/
public static SkillState open(String path, Mode... mode) throws IOException, SkillException {
File f = new File(path);
        assert f.exists() : "can only open files that already exist in general, because of java.nio restrictions";
return open(f, mode);
}
/**
* Create a new skill file based on argument path and mode.
*
* @throws IOException
* on IO and mode related errors
* @throws SkillException
* on file or specification consistency errors
*/
public static SkillState open(File path, Mode... mode) throws IOException, SkillException {
        assert path.exists() : "can only open files that already exist in general, because of java.nio restrictions";
return open(path.toPath(), mode);
}
/**
* Create a new skill file based on argument path and mode.
*
* @throws IOException
* on IO and mode related errors
* @throws SkillException
* on file or specification consistency errors
* @note suppress unused warnings, because sometimes type declarations are
* created, although nobody is using them
*/
@SuppressWarnings("unused")
public static SkillState open(Path path, Mode... mode) throws IOException, SkillException {
ActualMode actualMode = new ActualMode(mode);
switch (actualMode.open) {
case Create:
// initialization order of type information has to match file parser
// and can not be done in place
StringPool strings = new StringPool(null);
ArrayList<StoragePool<?, ?>> types = new ArrayList<>(1);
StringType stringType = new StringType(strings);
Annotation annotation = new Annotation(types);
// create type information${
var i = -1
(for (t ← IR)
yield s"""
${name(t)}Access ${name(t)} = new ${name(t)}Access(${i += 1; i}${
if (null == t.getSuperType) ""
else { ", "+name(t.getSuperType) }
});
types.add(${name(t)});"""
).mkString("")
}
return new SkillState(strings, types, stringType, annotation, path, actualMode.close);
case Read:
return FileParser.read(FileInputStream.open(path, actualMode.close == Mode.ReadOnly), actualMode.close);
default:
throw new IllegalStateException("should never happen");
}
}
public SkillState(StringPool strings, ArrayList<StoragePool<?, ?>> types, StringType stringType,
Annotation annotationType, Path path, Mode mode) {
super(strings, path, mode, types, stringType, annotationType);
poolByName = new HashMap<>();
for (StoragePool<?, ?> p : types)
poolByName.put(p.name(), p);
${
var i = -1
(for (t ← IR)
yield s"""
${name(t)}s = (${name(t)}Access) poolByName.get("${t.getName.getInternalName}");
"""
).mkString("")
}
finalizePools();
}
public SkillState(HashMap<String, StoragePool<?, ?>> poolByName, StringPool strings, StringType stringType,
Annotation annotationType,
ArrayList<StoragePool<?, ?>> types, Path path, Mode mode) {
super(strings, path, mode, types, stringType, annotationType);
this.poolByName = poolByName;
${
var i = -1
(for (t ← IR)
yield s"""
${name(t)}s = (${name(t)}Access) poolByName.get("${t.getName.getInternalName}");"""
).mkString("")
}
finalizePools();
}
${
var i = -1
(for (t ← IR)
yield s"""
private final ${name(t)}Access ${name(t)}s;
@Override
public ${name(t)}Access ${name(t)}s() {
return ${name(t)}s;
}
"""
).mkString("")
}
}
""")
out.close()
}
}
| skill-lang/skill | src/main/scala/de/ust/skill/generator/jforeign/internal/StateMaker.scala | Scala | bsd-3-clause | 6,082 |
/**
* This file is part of warg.
*
* warg is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* warg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with warg. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright (C) 2014 Stanislavs Rubens
*/
package org.warg.modules.dnd2ed.dices
import scala.util.Random
trait Dice {
def sides = 0 to 0
final def roll(): Int = {
sides(Random.nextInt(sides length))
}
} | starub/warg | src/main/scala/org/warg/modules/dnd2ed/dices/Dice.scala | Scala | gpl-3.0 | 881 |
package com.sksamuel.avro4s.schema
import com.sksamuel.avro4s.{AvroSchema, BigDecimals, ScalePrecision, SchemaFor}
import org.apache.avro.Schema
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
case class BigDecimalSeqOption(biggies: Seq[Option[BigDecimal]])
case class BigDecimalSeq(biggies: Seq[BigDecimal])
case class BigDecimalDefault(decimal: BigDecimal = 964.55)
class BigDecimalSchemaTest extends AnyWordSpec with Matchers {
"SchemaEncoder" should {
"encode big decimal" in {
case class Test(decimal: BigDecimal)
val schema = AvroSchema[Test]
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/bigdecimal.json"))
schema shouldBe expected
}
"accept big decimal as logical type on bytes with custom scale and precision" in {
implicit val sp = ScalePrecision(8, 20)
case class Test(decimal: BigDecimal)
val schema = AvroSchema[Test]
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/bigdecimal-scale-and-precision.json"))
schema shouldBe expected
}
// todo once magnolia has scala 3 default support
// "support big decimal with default" in {
// val schema = AvroSchema[BigDecimalDefault]
// val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/bigdecimal_default.json"))
// schema shouldBe expected
// }
"suport Option[BigDecimal] as a union" in {
case class BigDecimalOption(decimal: Option[BigDecimal])
val schema = AvroSchema[BigDecimalOption]
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/bigdecimal_option.json"))
schema shouldBe expected
}
"Seq[BigDecimal] be represented as an array of logical types" in {
val schema = AvroSchema[BigDecimalSeq]
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/bigdecimal_seq.json"))
schema shouldBe expected
}
"Seq[Option[BigDecimal]] be represented as an array of unions of nulls/bigdecimals" in {
val schema = AvroSchema[BigDecimalSeqOption]
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/bigdecimal_seq_option.json"))
schema shouldBe expected
}
"allow big decimals to be encoded as STRING when custom typeclasses are provided" in {
given SchemaFor[BigDecimal] = BigDecimals.AsString
case class BigDecimalAsStringTest(decimal: BigDecimal)
val schema = AvroSchema[BigDecimalAsStringTest]
val expected = new org.apache.avro.Schema.Parser().parse(this.getClass.getResourceAsStream("/bigdecimal_as_string.json"))
schema shouldBe expected
}
"allow big decimals to be encoded as FIXED when custom typeclasses are provided" in {
given SchemaFor[BigDecimal] = SchemaFor[BigDecimal](Schema.createFixed("bigdecimal", null, null, 55))
case class BigDecimalAsFixedTest(decimal: BigDecimal)
val schema = AvroSchema[BigDecimalAsFixedTest]
val expected = new org.apache.avro.Schema.Parser().parse(this.getClass.getResourceAsStream("/bigdecimal_as_fixed.json"))
schema shouldBe expected
}
// "fail when trying to convert a BigDecimal into ByteBuffer without specifying the scale and precision and rounding mode and rounding is required" in {
// val n = BigDecimal(7.851)
// the[java.lang.ArithmeticException] thrownBy {
// BigDecimalFromValue.apply(BigDecimalToValue.apply(n))
// } should have message "Rounding necessary"
// }
// "convert a BigDecimal into ByteBuffer with specifying the scale and precision and rounding mode and rounding is not required" in {
// val sp = ScaleAndPrecisionAndRoundingMode(3, 8, HALF_EVEN)
// val n = BigDecimal(7.85)
// BigDecimalFromValue(sp)(BigDecimalToValue(sp)(n)) shouldBe BigDecimal(7.850)
// }
// "convert a BigDecimal into ByteBuffer with specifying the scale and precision and rounding mode and rounding is required" in {
// val sp = ScaleAndPrecisionAndRoundingMode(3, 8, HALF_EVEN)
// val n = BigDecimal(7.8516)
// BigDecimalFromValue(sp)(BigDecimalToValue(sp)(n)) shouldBe BigDecimal(7.852)
// }
}
} | sksamuel/avro4s | avro4s-core/src/test/scala/com/sksamuel/avro4s/schema/BigDecimalSchemaTest.scala | Scala | apache-2.0 | 4,350 |
/*
* Copyright © 2015 Reactific Software LLC. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package rxmongo.messages
import akka.util.ByteString
import rxmongo.bson.{ BSONBuilder, BSONObject, BSONProvider }
/** Storage Engine Configuration */
sealed trait StorageEngineConfig extends BSONProvider {
def name : String
def doc : BSONObject
def wrapAndTerminate : ByteString = {
BSONBuilder().obj(name, doc).wrapAndTerminate
}
}
/** WiredTiger Engine Configuration
*
* @param doc The WiredTiger configuration options as a document.
*/
case class WiredTigerConfig(doc : BSONObject) extends { val name : String = "WiredTiger" } with StorageEngineConfig
/** MMapV1 Engine Configuration
*
* @param doc The MMapV1 configuration options as a document.
*/
case class MMapV1Config(doc : BSONObject) extends { val name : String = "mmapv1" } with StorageEngineConfig
/** Options For Index Creation
* @see [[http://docs.mongodb.org/v3.0/reference/method/db.collection.createIndex/#ensureindex-options]]
* @param unique Creates a unique index so that the collection will not accept insertion of documents where the
* index key or keys match an existing value in the index. Specify true to create a unique index.
* The default value is false. The option is unavailable for hashed indexes.
* @param sparse If true, the index only references documents with the specified field. These indexes use less
* space but behave differently in some situations (particularly sorts). The default value is false.
* 2dsphere indexes are sparse by default and ignore this option. For a compound index that includes
* 2dsphere index key(s) along with keys of other types, only the 2dsphere index fields determine
* whether the index references a document. 2d, geoHaystack, and text indexes behave similarly to
* the 2dsphere indexes.
* @param background Builds the index in the background so that building an index does not block other database
* activities. Specify true to build in the background. The default value is false.
* @param name The name of the index. If unspecified, MongoDB generates an index name by concatenating the names of
* the indexed fields and the sort order. Whether user specified or MongoDB generated, index names
* including their full namespace (i.e. database.collection) cannot be longer than the Index Name Limit.
* @param expireAfterSeconds Specifies a value, in seconds, as a TTL to control how long MongoDB retains documents in
* this collection. This applies only to TTL indexes.
* @param storageEngine Allows users to specify configuration to the storage engine on a per-index basis when
* creating an index. Storage engine configuration specified when creating indexes are validated
* and logged to the oplog during replication to support replica sets with members that use
* different storage engines.
* @param weights For Text indices, specifies the weights to assign fields in the document
* @param default_language For Text indices, specifies the default language for the text
* @param language_override For Text indices, specifies the field name that provides the language of the text
* @param bits For 2d indices, the number of precision of the stored geohash value of the location data. The bits
* value ranges from 1 to 32 inclusive. The default value is 26.
* @param min For 2d indices, the lower inclusive boundary for the longitude and latitude values. The default
* value is -180.0.
* @param max For 2d indices, the upper inclusive boundary for the longitude and latitude values. The default
* value is 180.0.
* @param bucketSize For geoHaystack indexes, specify the number of units within which to group the location
* values; i.e. group in the same bucket those location values that are within the specified
* number of units to each other. The value must be greater than 0.
*/
case class IndexOptions(
unique : Option[Boolean] = None,
sparse : Option[Boolean] = None,
background : Option[Boolean] = None,
name : Option[String] = None,
expireAfterSeconds : Option[Int] = None,
storageEngine : Option[StorageEngineConfig] = None,
weights : Seq[(String, Integer)] = Seq.empty[(String, Integer)],
default_language : Option[String] = None,
language_override : Option[String] = None,
bits : Option[Int] = None,
min : Option[Double] = None,
max : Option[Double] = None,
bucketSize : Option[Double] = None) extends BSONProvider {
def wrapAndTerminate : ByteString = {
val b = BSONBuilder()
unique.map { unique ⇒ b.boolean("unique", unique) }
sparse.map { sparse ⇒ b.boolean("sparse", sparse) }
background.map { background ⇒ b.boolean("background", background) }
name.map { name ⇒ b.string("name", name) }
expireAfterSeconds.map { expire ⇒ b.integer("expireAfterSeconds", expire) }
storageEngine.map { se ⇒ b.obj("storageEngine", se.result) }
if (weights.nonEmpty) {
val w = BSONBuilder()
for ((field, weight) ← weights) { w.integer(field, weight) }
b.obj("weights", w.result)
}
default_language.map { default_language ⇒ b.string("default_language", default_language) }
language_override.map { language_override ⇒ b.string("language_override", language_override) }
bits.map { bits ⇒ b.integer("bits", bits) }
min.map { min ⇒ b.double("min", min) }
max.map { max ⇒ b.double("max", max) }
bucketSize.map { bucketSize ⇒ b.double("bucketSize", bucketSize) }
b.wrapAndTerminate
}
}
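
// A brief usage sketch (field values are illustrative): options for a unique TTL index that is
// built in the background.
//
//   IndexOptions(unique = Some(true), background = Some(true),
//     name = Some("session_ttl"), expireAfterSeconds = Some(3600))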
/** Abstract base class of Index case classes
* This just implements the toByteString method in terms of the construct method making it easier for
* subclasses to deliver their result.
*/
sealed trait IndexTrait extends BSONProvider {
def construct : BSONBuilder = {
BSONBuilder()
}
def name : String
final def wrapAndTerminate : ByteString = construct.wrapAndTerminate
}
/** A Simple Single-Field Index
* @see [[http://docs.mongodb.org/v3.0/core/index-single/]]
* @see [[http://docs.mongodb.org/v3.0/reference/method/db.collection.createIndex/#db.collection.createIndex]]
* @param fieldName The name of the field to index
* @param ascending True for ascending values, false for descending values
*/
case class Index(
fieldName : String,
ascending : Boolean) extends IndexTrait {
override def construct : BSONBuilder = {
super.construct.integer(fieldName, if (ascending) 1 else -1)
}
def name = fieldName
}
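
// For example, Index("username", ascending = true) builds the BSON document { "username" : 1 }
// (the field name is illustrative).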
/** A Compound Index
* @see [[http://docs.mongodb.org/v3.0/core/index-compound/]]
* @see [[http://docs.mongodb.org/v3.0/reference/method/db.collection.createIndex/#db.collection.createIndex]]
* @param fields A mapping of field names to whether that field is ascending (true) or descending (false)
*/
case class CompoundIndex(
fields : (String, Boolean)*) extends IndexTrait {
require(fields.size <= 31, "Only up to 31 fields may comprise a compound index")
override def construct : BSONBuilder = {
val b = super.construct
for ((field, ascending) ← fields) { b.integer(field, if (ascending) 1 else -1) }
b
}
def name = fields.map { case (name, ascending) ⇒ name } mkString (".")
}
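
// For example, CompoundIndex("userId" -> true, "created" -> false) builds the BSON document
// { "userId" : 1, "created" : -1 } (field names are illustrative).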
/** A Text Index
* @see [[http://docs.mongodb.org/v3.0/core/index-text/]]
* @param fields The string (or array of string) fields for the text index
*/
case class TextIndex(
fields : String*) extends IndexTrait {
override def construct : BSONBuilder = {
val b = super.construct
for (field ← fields) { b.string(field, "text") }
b
}
def name = fields.mkString(".") + ".text"
}
/** A Hashed Index
* @see [[http://docs.mongodb.org/v3.0/core/index-hashed/]]
* @param field The name of the field for the hashed index
*/
case class HashedIndex(
field : String) extends IndexTrait {
override def construct : BSONBuilder = {
val b = super.construct
b.string(field, "hashed")
}
def name = field + ".hashed"
}
/** A 2d Index
* @see [[http://docs.mongodb.org/v3.0/core/2d/]]
* @param locationField The name of the field that contains the GeoJSON location information
* @param otherFields Other fields to include in a compound index (optional)
*/
case class TwoDIndex(
locationField : String,
otherFields : (String, Boolean)*) extends IndexTrait {
require(otherFields.size < 31, "Only up to 31 fields may comprise a compound index")
override def construct : BSONBuilder = {
val b = super.construct
b.string(locationField, "2d")
for ((field, ascending) ← otherFields) { b.integer(field, if (ascending) 1 else -1) }
b
}
def name = locationField + ".2d"
}
/** A 2dsphere Index
* @see [[http://docs.mongodb.org/v3.0/core/2dsphere/]]
* @param prefixFields Non-2d fields that come before the location field
* @param locationField The name of the field that contains the location information
 * @param suffixFields Non-2d fields that come after the location field
*/
case class TwoDSphereIndex(
prefixFields : Seq[(String, Boolean)] = Seq.empty[(String, Boolean)],
locationField : String,
suffixFields : Seq[(String, Boolean)] = Seq.empty[(String, Boolean)]) extends IndexTrait {
require(prefixFields.size + suffixFields.size < 31, "Only up to 31 fields may comprise a compound index")
override def construct : BSONBuilder = {
val b = super.construct
for ((field, ascending) ← prefixFields) { b.integer(field, if (ascending) 1 else -1) }
b.string(locationField, "2dsphere")
for ((field, ascending) ← suffixFields) { b.integer(field, if (ascending) 1 else -1) }
b
}
def name = {
(prefixFields.map { case (name, asc) ⇒ name } mkString ".") +
s".$locationField." +
(suffixFields.map { case (name, asc) ⇒ name } mkString ".") +
".2dsphere"
}
}
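
// For example, TwoDSphereIndex(Seq("category" -> true), "loc") builds the BSON document
// { "category" : 1, "loc" : "2dsphere" } (field names are illustrative).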
/** A geoHaystack Index
* @see [[http://docs.mongodb.org/v3.0/core/geohaystack/]]
 * @param locationField The name of the field that contains the location information
 * @param otherFields Other fields to include in a compound index (optional)
*/
case class GeoHaystackIndex(
locationField : String,
otherFields : (String, Boolean)*) extends IndexTrait {
require(otherFields.size < 31, "Only up to 31 fields may comprise a compound index")
override def construct : BSONBuilder = {
val b = super.construct
b.string(locationField, "geoHaystack")
for ((field, ascending) ← otherFields) { b.integer(field, if (ascending) 1 else -1) }
b
}
  def name = locationField + "." + otherFields.map { case (name, asc) ⇒ name }.mkString(".") + ".geoHaystack"
}
| reactific/RxMongo | messages/src/main/scala/rxmongo/messages/Indices.scala | Scala | mit | 11,510 |
package test
import sbt._
object Global {
val x = 3
}
object GlobalAutoPlugin extends AutoPlugin {
object autoImport {
lazy val globalAutoPluginSetting = settingKey[String]("A top level setting declared in a plugin.")
}
}
| dansanduleac/sbt | sbt/src/sbt-test/project/global-plugin/global/plugins/A.scala | Scala | bsd-3-clause | 237 |
package org.apache.spot.netflow
object FlowColumnIndex extends Enumeration {
val HOUR = 4
val MINUTE = 5
val SECOND = 6
val SOURCEIP = 8
val DESTIP = 9
val SOURCEPORT = 10
val DESTPORT = 11
val IPKT = 16
val IBYT = 17
val NUMTIME = 27
val IBYTBIN = 28
val IPKTYBIN = 29
val TIMEBIN = 30
val PORTWORD = 31
val IPPAIR = 32
val SOURCEWORD = 33
val DESTWORD = 34
}
| kpeiruza/incubator-spot | spot-ml/src/main/scala/org/apache/spot/netflow/FlowColumnIndex.scala | Scala | apache-2.0 | 433 |
import sbt._
import Keys._
import play.Project._
import de.johoop.jacoco4sbt._
import JacocoPlugin._
object ApplicationBuild extends Build {
val appName = "ProtoPoll"
val appVersion = "0.2.0-SNAPSHOT"
val appDependencies = Seq(
javaCore,
javaJdbc,
javaEbean,
"org.webjars" % "html5shiv" % "3.6.1",
"org.webjars" % "webjars-play" % "2.1.0-1",
"org.webjars" % "bootstrap" % "2.3.1",
"org.webjars" % "font-awesome" % "3.0.2",
"org.webjars" % "bootstrap-datepicker" % "1.0.1",
"mysql" % "mysql-connector-java" % "5.1.22",
"commons-codec" % "commons-codec" % "1.7",
"org.markdownj" % "markdownj" % "0.3.0-1.0.2b4",
"org.mockito" % "mockito-all" % "1.9.0" % "test"
)
lazy val s = Defaults.defaultSettings ++ Seq(jacoco.settings:_*)
val main = play.Project(appName, appVersion, appDependencies, settings = s).settings(
// Code coverage.
parallelExecution in jacoco.Config := false,
jacoco.reportFormats in jacoco.Config := Seq(XMLReport("utf-8"), HTMLReport("utf-8")),
jacoco.excludes in jacoco.Config := Seq("views.*", "controllers.Reverse*", "controllers.javascript.*", "controllers.ref.*", "Routes*"),
// Custom binders for routes
routesImport += "util.binders.UuidBinder._"
)
}
| adericbourg/proto-poll | project/Build.scala | Scala | gpl-3.0 | 1,288 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package viper.silver.testing
import java.nio.file._
import collection.mutable
import org.scalatest.{ConfigMap, BeforeAndAfterAll}
import viper.silver.verifier._
import viper.silver.ast.{TranslatedPosition, SourcePosition}
import viper.silver.frontend.Frontend
/** A test suite for verification toolchains that use SIL. */
abstract class SilSuite extends AnnotationBasedTestSuite with BeforeAndAfterAll {
/** The list of verifiers to be used. Should be overridden by a lazy val
* if the verifiers need to access the config map provided by ScalaTest.
*/
def verifiers: Seq[Verifier]
/** The frontend to be used. */
def frontend(verifier: Verifier, files: Seq[Path]): Frontend
  /** The list of projects under test.
   *
   * Silver is always included - this means that e.g. IgnoreFile annotations
   * for the Silver project will cause the file to be ignored by all verifiers.
*/
def projectInfo: ProjectInfo = new ProjectInfo(List("Silver"))
/** Populated by splitting the (key, values) in `configMap` (which is
* expected to be non-null) into (prefix, actual key, value) triples such
* that each prefix maps to a map from actual keys to values. A colon (':')
* is used as the split point for splitting a key into (prefix, actual key)
   * pairs. If no prefix (colon) is given, `defaultKeyPrefix` is used as the
   * prefix. Each key in `configMap` may contain at most one colon.
*/
lazy val prefixSpecificConfigMap: Map[String, Map[String, Any]] =
splitConfigMap(configMap)
/** Invoked by ScalaTest before any test of the current suite is run.
* Starts all verifiers specified by `verifiers`.
*
* @param configMap The config map provided by ScalaTest.
*/
override def beforeAll(configMap: ConfigMap) {
this.configMap = configMap
verifiers foreach (_.start())
}
/** Invoked by ScalaTest after all tests of the current suite have been run.
* Stops all verifiers specified by `verifiers`.
*/
override def afterAll() {
verifiers foreach (_.stop())
}
def systemsUnderTest: Seq[SystemUnderTest] =
verifiers.map(VerifierUnderTest)
val defaultKeyPrefix = ""
/** See description of `prefixSpecificConfigMap`.
*
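   * For example, `Map("silicon:timeout" -> "10", "logLevel" -> "ALL")` is split into
   * `Map("silicon" -> Map("timeout" -> "10"), defaultKeyPrefix -> Map("logLevel" -> "ALL"))`
   * (the key names are illustrative).
   *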
* @param configMap The config map provided by ScalaTest.
* @return A map mapping key prefixes to (key, value) pairs.
*/
protected def splitConfigMap(configMap: Map[String, Any]): Map[String, Map[String, Any]] = {
val prefixSpecificConfigMap = mutable.HashMap[String, mutable.HashMap[String, Any]]()
configMap foreach {
case (potentialKey, value) =>
val (prefix, key) =
potentialKey.split(':') match {
case Array(_key) => (defaultKeyPrefix, _key)
case Array(_prefix, _key) => (_prefix, _key)
case _ => sys.error(s"Unexpected key $potentialKey in config map $configMap. Keys are expected to contain at most one colon (':').")
}
prefixSpecificConfigMap.getOrElseUpdate(prefix, mutable.HashMap()).update(key, value)
}
prefixSpecificConfigMap.mapValues(_.toMap).toMap
}
private case class VerifierUnderTest(verifier: Verifier)
extends SystemUnderTest with TimingUtils {
val projectInfo: ProjectInfo = SilSuite.this.projectInfo.update(verifier.name)
def run(input: AnnotatedTestInput): Seq[AbstractOutput] = {
val fe = frontend(verifier, input.files)
val tPhases = fe.phases.map { p =>
time(p.action)._2 + " (" + p.name + ")"
}.mkString(", ")
info(s"Verifier used: ${verifier.name} ${verifier.version}.")
info(s"Time required: $tPhases.")
val actualErrors = fe.result match {
case Success => Nil
case Failure(es) => es
}
actualErrors.map(SilOutput)
}
}
}
/**
* Simple adapter for outputs produced by the SIL toolchain, i.e.,
* [[viper.silver.verifier.AbstractError]]s.
*
* The advantage is that it allows [[viper.silver.testing.AbstractOutput]]
* to be independent from the SIL AST.
*
* @param error the error produced by the SIL toolchain.
*/
case class SilOutput(error: AbstractError) extends AbstractOutput {
def isSameLine(file: Path, lineNr: Int): Boolean = error.pos match {
case p: SourcePosition => lineNr == p.line
case p: TranslatedPosition => file == p.file && lineNr == p.line
case _ => false
}
def fullId: String = error.fullId
override def toString: String = error.toString
}
trait TimingUtils {
/** Formats a time in milliseconds. */
def formatTime(millis: Long): String = {
if (millis > 1000) "%.2f sec".format(millis * 1.0 / 1000)
else "%s msec".format(millis.toString)
}
/**
* Measures the time it takes to execute `f` and returns the result of `f`
* as well as the required time.
*/
def time[T](f: () => T): (T, String) = {
val start = System.currentTimeMillis()
val r = f.apply()
(r, formatTime(System.currentTimeMillis() - start))
}
}
| sccblom/vercors | viper/silver/src/main/scala/viper/silver/testing/SilSuite.scala | Scala | mpl-2.0 | 5,167 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.ql.metadata.{Partition => HivePartition}
import org.apache.hadoop.hive.ql.plan.TableDesc
import org.apache.hadoop.hive.serde.serdeConstants
import org.apache.hadoop.hive.serde2.objectinspector._
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.CastSupport
import org.apache.spark.sql.catalyst.catalog.HiveTableRelation
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.hive._
import org.apache.spark.sql.hive.client.HiveClientImpl
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{BooleanType, DataType}
import org.apache.spark.util.Utils
/**
* The Hive table scan operator. Column and partition pruning are both handled.
*
* @param requestedAttributes Attributes to be fetched from the Hive table.
* @param relation The Hive table be scanned.
* @param partitionPruningPred An optional partition pruning predicate for partitioned table.
*/
private[hive]
case class HiveTableScanExec(
requestedAttributes: Seq[Attribute],
relation: HiveTableRelation,
partitionPruningPred: Seq[Expression])(
@transient private val sparkSession: SparkSession)
extends LeafExecNode with CastSupport {
require(partitionPruningPred.isEmpty || relation.isPartitioned,
"Partition pruning predicates only supported for partitioned tables.")
override def conf: SQLConf = sparkSession.sessionState.conf
override def nodeName: String = s"Scan hive ${relation.tableMeta.qualifiedName}"
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
override def producedAttributes: AttributeSet = outputSet ++
AttributeSet(partitionPruningPred.flatMap(_.references))
private val originalAttributes = AttributeMap(relation.output.map(a => a -> a))
override val output: Seq[Attribute] = {
// Retrieve the original attributes based on expression ID so that capitalization matches.
requestedAttributes.map(originalAttributes)
}
// Bind all partition key attribute references in the partition pruning predicate for later
// evaluation.
private lazy val boundPruningPred = partitionPruningPred.reduceLeftOption(And).map { pred =>
require(pred.dataType == BooleanType,
s"Data type of predicate $pred must be ${BooleanType.catalogString} rather than " +
s"${pred.dataType.catalogString}.")
BindReferences.bindReference(pred, relation.partitionCols)
}
@transient private lazy val hiveQlTable = HiveClientImpl.toHiveTable(relation.tableMeta)
@transient private lazy val tableDesc = new TableDesc(
hiveQlTable.getInputFormatClass,
hiveQlTable.getOutputFormatClass,
hiveQlTable.getMetadata)
  // Create a local copy of hadoopConf, so that scan-specific modifications do not impact
  // other queries.
@transient private lazy val hadoopConf = {
val c = sparkSession.sessionState.newHadoopConf()
    // append column ids and names before broadcast
addColumnMetadataToConf(c)
c
}
@transient private lazy val hadoopReader = new HadoopTableReader(
output,
relation.partitionCols,
tableDesc,
sparkSession,
hadoopConf)
private def castFromString(value: String, dataType: DataType) = {
cast(Literal(value), dataType).eval(null)
}
private def addColumnMetadataToConf(hiveConf: Configuration): Unit = {
// Specifies needed column IDs for those non-partitioning columns.
val columnOrdinals = AttributeMap(relation.dataCols.zipWithIndex)
val neededColumnIDs = output.flatMap(columnOrdinals.get).map(o => o: Integer)
HiveShim.appendReadColumns(hiveConf, neededColumnIDs, output.map(_.name))
val deserializer = tableDesc.getDeserializerClass.getConstructor().newInstance()
deserializer.initialize(hiveConf, tableDesc.getProperties)
// Specifies types and object inspectors of columns to be scanned.
val structOI = ObjectInspectorUtils
.getStandardObjectInspector(
deserializer.getObjectInspector,
ObjectInspectorCopyOption.JAVA)
.asInstanceOf[StructObjectInspector]
val columnTypeNames = structOI
.getAllStructFieldRefs.asScala
.map(_.getFieldObjectInspector)
.map(TypeInfoUtils.getTypeInfoFromObjectInspector(_).getTypeName)
.mkString(",")
hiveConf.set(serdeConstants.LIST_COLUMN_TYPES, columnTypeNames)
hiveConf.set(serdeConstants.LIST_COLUMNS, relation.dataCols.map(_.name).mkString(","))
}
/**
   * Prunes partitions not involved in the query plan.
*
* @param partitions All partitions of the relation.
* @return Partitions that are involved in the query plan.
*/
private[hive] def prunePartitions(partitions: Seq[HivePartition]): Seq[HivePartition] = {
boundPruningPred match {
case None => partitions
case Some(shouldKeep) => partitions.filter { part =>
val dataTypes = relation.partitionCols.map(_.dataType)
val castedValues = part.getValues.asScala.zip(dataTypes)
.map { case (value, dataType) => castFromString(value, dataType) }
// Only partitioned values are needed here, since the predicate has already been bound to
// partition key attribute references.
val row = InternalRow.fromSeq(castedValues.toSeq)
shouldKeep.eval(row).asInstanceOf[Boolean]
}
}
}
@transient lazy val prunedPartitions: Seq[HivePartition] = {
if (relation.prunedPartitions.nonEmpty) {
val hivePartitions =
relation.prunedPartitions.get.map(HiveClientImpl.toHivePartition(_, hiveQlTable))
if (partitionPruningPred.forall(!ExecSubqueryExpression.hasSubquery(_))) {
hivePartitions
} else {
prunePartitions(hivePartitions)
}
} else {
if (sparkSession.sessionState.conf.metastorePartitionPruning &&
partitionPruningPred.nonEmpty) {
rawPartitions
} else {
prunePartitions(rawPartitions)
}
}
}
// exposed for tests
@transient lazy val rawPartitions: Seq[HivePartition] = {
val prunedPartitions =
if (sparkSession.sessionState.conf.metastorePartitionPruning &&
partitionPruningPred.nonEmpty) {
// Retrieve the original attributes based on expression ID so that capitalization matches.
val normalizedFilters = partitionPruningPred.map(_.transform {
case a: AttributeReference => originalAttributes(a)
})
sparkSession.sessionState.catalog
.listPartitionsByFilter(relation.tableMeta.identifier, normalizedFilters)
} else {
sparkSession.sessionState.catalog.listPartitions(relation.tableMeta.identifier)
}
prunedPartitions.map(HiveClientImpl.toHivePartition(_, hiveQlTable))
}
protected override def doExecute(): RDD[InternalRow] = {
// Using dummyCallSite, as getCallSite can turn out to be expensive with
// multiple partitions.
val rdd = if (!relation.isPartitioned) {
Utils.withDummyCallSite(sqlContext.sparkContext) {
hadoopReader.makeRDDForTable(hiveQlTable)
}
} else {
Utils.withDummyCallSite(sqlContext.sparkContext) {
hadoopReader.makeRDDForPartitionedTable(prunedPartitions)
}
}
val numOutputRows = longMetric("numOutputRows")
    // Avoid serializing MetastoreRelation because schema is lazy. (see SPARK-15649)
val outputSchema = schema
rdd.mapPartitionsWithIndexInternal { (index, iter) =>
val proj = UnsafeProjection.create(outputSchema)
proj.initialize(index)
iter.map { r =>
numOutputRows += 1
proj(r)
}
}
}
override def doCanonicalize(): HiveTableScanExec = {
val input: AttributeSeq = relation.output
HiveTableScanExec(
requestedAttributes.map(QueryPlan.normalizeExpressions(_, input)),
relation.canonicalized.asInstanceOf[HiveTableRelation],
QueryPlan.normalizePredicates(partitionPruningPred, input))(sparkSession)
}
override def otherCopyArgs: Seq[AnyRef] = Seq(sparkSession)
}
| witgo/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveTableScanExec.scala | Scala | apache-2.0 | 9,360 |
package time
import jeqa.types.Sentence
/**
* @author K.Sakamoto
*/
object TimeSorter {
def sort(input: Seq[Sentence]): Seq[Sentence] = {
for (d <- {
for (data <- input) yield {
val time: TimeTmp = new TimeTmp(
if (Option(data.getBeginTime).nonEmpty) {Option(data.getBeginTime.getYear)} else None,
if (Option(data.getEndTime).nonEmpty) {Option(data.getEndTime.getYear)} else None,
Nil,
Nil
)
val m: Int = time.beginTime match {
case Some(b) =>
time.endTime match {
case Some(e) => (b + e) / 2
case None => b
}
case None =>
time.endTime match {
case Some(e) => e
case None => 0
}
}
(m, data)
}}.sortWith((s1, s2) => s1._1 < s2._1)) yield {
d._2
}
}
}
| ktr-skmt/FelisCatusZero | src/main/scala/time/TimeSorter.scala | Scala | apache-2.0 | 889 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import monix.tail.batches._
import org.scalacheck.Arbitrary
import scala.reflect.ClassTag
abstract class BatchCursorSuite[A : ClassTag](implicit
arbA: Arbitrary[A],
arbAtoA: Arbitrary[A => A],
arbAtoBoolean: Arbitrary[A => Boolean])
extends BaseTestSuite {
type Cursor <: BatchCursor[A]
def fromList(list: List[A]): Cursor
test("cursor.toList") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.toList == list
}
}
test("cursor.toArray") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.toArray.toList == list
}
}
test("cursor.drop(2).toArray") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.drop(2).toArray.toList == list.drop(2)
}
}
test("cursor.take(2).toArray") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.take(2).toArray.toList == list.take(2)
}
}
test("cursor.toBatch") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.toBatch.cursor().toList == list
}
}
test("cursor.drop(2).toBatch") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.drop(2).toBatch.cursor().toList == list.drop(2)
}
}
test("cursor.take(2).toBatch") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.take(2).toBatch.cursor().toList == list.take(2)
}
}
test("cursor.drop(5).toList") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.drop(5).toList == list.drop(5)
}
}
test("cursor.drop(1000).toList") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.drop(1000).toList == list.drop(1000)
}
}
test("cursor.take(5).toList") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.drop(5).toList == list.drop(5)
}
}
test("cursor.take(1000).toList") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.drop(1000).toList == list.drop(1000)
}
}
test("cursor.take(5).drop(5).toList") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.take(5).drop(5).toList == Nil
}
}
test("cursor.drop(5).take(5).toList") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.drop(5).take(5).toList == list.slice(5, 10)
}
}
test("cursor.slice(5,5).toList") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.slice(5,5).toList == list.slice(5,5)
}
}
test("cursor.slice(5,10).toList") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.slice(5,10).toList == list.slice(5, 10)
}
}
test("cursor.map") { _ =>
check2 { (list: List[A], f: A => A) =>
val cursor = fromList(list)
cursor.map(f).toList == list.map(f)
}
}
test("cursor.filter") { _ =>
check2 { (list: List[A], f: A => Boolean) =>
val cursor = fromList(list)
cursor.filter(f).toList == list.filter(f)
}
}
test("cursor.collect") { _ =>
check3 { (list: List[A], p: A => Boolean, f: A => A) =>
val pf: PartialFunction[A, A] = { case x if p(x) => f(x) }
val cursor = fromList(list)
cursor.collect(pf).toList == list.collect(pf)
}
}
test("cursor.toIterator") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.toIterator.toList == list
}
}
test("cursor.hasNext") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
var seen = 0
while (cursor.hasNext()) {
cursor.next()
seen += 1
}
seen == list.length
}
}
test("cursor.hasNext <=> !cursor.isEmpty") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.hasNext() == !cursor.isEmpty
}
}
test("cursor.hasNext <=> cursor.nonEmpty") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.hasNext() == cursor.nonEmpty
}
}
test("cursor.hasNext == list.nonEmpty") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
list.nonEmpty == cursor.hasNext()
}
}
test("recommendedBatchSize is positive") { _ =>
check1 { (list: List[A]) =>
val cursor = fromList(list)
cursor.recommendedBatchSize > 0
}
}
test("BatchCursor.fromArray") { _ =>
check1 { (array: Array[A]) =>
BatchCursor.fromArray(array).toArray.toSeq == array.toSeq
}
}
}
object GenericCursorSuite extends BatchCursorSuite[Int] {
type Cursor = GenericCursor[Int]
override def fromList(list: List[Int]): Cursor =
new GenericCursor[Int] {
private[this] val iter = list.iterator
def hasNext(): Boolean = iter.hasNext
def next(): Int = iter.next()
def recommendedBatchSize: Int = 10
}
}
object ArrayCursorSuite extends BatchCursorSuite[Int] {
type Cursor = ArrayCursor[Int]
override def fromList(list: List[Int]): Cursor =
BatchCursor.fromArray(list.toArray)
}
object ArraySliceCursorSuite extends BatchCursorSuite[Int] {
type Cursor = ArrayCursor[Int]
override def fromList(list: List[Int]): Cursor = {
val listOf5 = (0 until 5).toList
val fullList = listOf5 ::: list ::: listOf5
BatchCursor.fromArray(fullList.toArray, 5, list.length)
}
}
object IteratorCursorSuite extends BatchCursorSuite[Int] {
type Cursor = BatchCursor[Int]
override def fromList(list: List[Int]): Cursor =
BatchCursor.fromIterator(list.iterator)
}
object BooleansCursorSuite extends BatchCursorSuite[Boolean] {
type Cursor = BooleansCursor
override def fromList(list: List[Boolean]): BooleansCursor =
BatchCursor.booleans(list.toArray)
}
object BytesCursorSuite extends BatchCursorSuite[Byte] {
type Cursor = BytesCursor
override def fromList(list: List[Byte]): BytesCursor =
BatchCursor.bytes(list.toArray)
}
object CharsCursorSuite extends BatchCursorSuite[Char] {
type Cursor = CharsCursor
override def fromList(list: List[Char]): CharsCursor =
BatchCursor.chars(list.toArray)
}
object IntegersCursorSuite extends BatchCursorSuite[Int] {
type Cursor = IntegersCursor
override def fromList(list: List[Int]): IntegersCursor =
BatchCursor.integers(list.toArray)
}
object LongsCursorSuite extends BatchCursorSuite[Long] {
type Cursor = LongsCursor
override def fromList(list: List[Long]): LongsCursor =
BatchCursor.longs(list.toArray)
}
object DoublesCursorSuite extends BatchCursorSuite[Double] {
type Cursor = DoublesCursor
override def fromList(list: List[Double]): DoublesCursor =
BatchCursor.doubles(list.toArray)
}
| Wogan/monix | monix-tail/shared/src/test/scala/monix/tail/BatchCursorSuite.scala | Scala | apache-2.0 | 7,463 |
package util
import core.Astar
import scala.util.Random
/**
* Created by gabriel on 5/4/16.
*/
trait Movable {
type Position = (Int, Int)
def randomPath(map: Map[Position, Boolean], location: Position): List[Position] = {
Astar.search(map, location, randomPosition(map))
}
  private def randomPosition(map: Map[Position, Boolean]): Position = {
    // pick a random unoccupied position; shuffling the key sequence (rather than the Map itself) keeps the choice random
    Random.shuffle(map.filter(p => !p._2).keys.toSeq).head
  }
}
| riosgabriel/66Taxis | app/util/Movable.scala | Scala | mit | 431 |
package au.id.cxd.text.count
import au.id.cxd.text.model.LatentSemanticIndex
import breeze.linalg.{DenseMatrix, DenseVector}
import scala.collection.mutable
/**
* Created by cd on 12/1/17.
*/
trait DocumentTermVectoriser {
/**
* count the single query array and create a vector
* that assigns the tfidf term weights for the query into the indices of the term matrix
* that was constructed form the lsi model.
* @param query
* @param lsi
* @return
*/
def countQuery(query:Array[String], lsi:LatentSemanticIndex):DenseVector[Double]
/**
* Compute the count for the sequence of tokens found in each document.
* Each record in the outer sequence is considered a document.
* Each inner sequence is considered the collection of tokens within the document.
*
* @param data
* @return (termIndexMap, TF-IDF Matrix)
*
* (mutable.Map[Int, (String, Int)], DenseMatrix[Double])
*
* The return is term index map that indicates which column each term is mapped to.
* The map key contains the index of the column and the value corresponds to the term and its hashcode
*
* (columnIndex x (Term x Hashcode))
*
* The second item in the tuple is the TF-IDF matrix. Each row represents a document, each column contains the TF-IDF for the
* corresponding term within the document.
*
*/
def count(data: Seq[Array[String]]): (mutable.Map[Int, (String, Int, Int)], DenseMatrix[Double])
}
| cxd/scala-au.id.cxd.math | math/src/main/scala/au/id/cxd/text/count/DocumentTermVectoriser.scala | Scala | mit | 1,480 |
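// ---------------------------------------------------------------------------
// Hedged usage sketch for DocumentTermVectoriser above (not part of the
// original repo). `counter` stands for any concrete implementation of the
// trait; only the contract documented above is relied upon here. The sketch
// shows how the (termIndexMap, TF-IDF matrix) pair is typically consumed.
object DocumentTermVectoriserSketch {
  import au.id.cxd.text.count.DocumentTermVectoriser

  def describe(counter: DocumentTermVectoriser, docs: Seq[Array[String]]): Unit = {
    val (termIndexMap, tfidf) = counter.count(docs)
    // rows correspond to documents, columns to terms
    println(s"documents: ${tfidf.rows}, terms: ${tfidf.cols}")
    // list every term with its weight in the first document
    termIndexMap.toSeq.sortBy(_._1).foreach { case (col, (term, _, _)) =>
      println(f"$term%-20s ${tfidf(0, col)}%.4f")
    }
  }
}
// ---------------------------------------------------------------------------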
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.csv
import java.net.URI
import java.nio.charset.{Charset, StandardCharsets}
import com.univocity.parsers.csv.CsvParser
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.hadoop.mapreduce.Job
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.spark.TaskContext
import org.apache.spark.input.{PortableDataStream, StreamInputFormat}
import org.apache.spark.rdd.{BinaryFileRDD, RDD}
import org.apache.spark.sql.{Dataset, Encoders, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.text.TextFileFormat
import org.apache.spark.sql.types.StructType
/**
* Common functions for parsing CSV files
*/
abstract class CSVDataSource extends Serializable {
def isSplitable: Boolean
/**
* Parse a [[PartitionedFile]] into [[InternalRow]] instances.
*/
def readFile(
conf: Configuration,
file: PartitionedFile,
parser: UnivocityParser,
schema: StructType): Iterator[InternalRow]
/**
* Infers the schema from `inputPaths` files.
*/
final def inferSchema(
sparkSession: SparkSession,
inputPaths: Seq[FileStatus],
parsedOptions: CSVOptions): Option[StructType] = {
if (inputPaths.nonEmpty) {
Some(infer(sparkSession, inputPaths, parsedOptions))
} else {
None
}
}
protected def infer(
sparkSession: SparkSession,
inputPaths: Seq[FileStatus],
parsedOptions: CSVOptions): StructType
/**
* Generates a header from the given row which is null-safe and duplicate-safe.
*/
protected def makeSafeHeader(
row: Array[String],
caseSensitive: Boolean,
options: CSVOptions): Array[String] = {
if (options.headerFlag) {
val duplicates = {
val headerNames = row.filter(_ != null)
.map(name => if (caseSensitive) name else name.toLowerCase)
headerNames.diff(headerNames.distinct).distinct
}
row.zipWithIndex.map { case (value, index) =>
if (value == null || value.isEmpty || value == options.nullValue) {
// When there are empty strings or the values set in `nullValue`, put the
// index as the suffix.
s"_c$index"
} else if (!caseSensitive && duplicates.contains(value.toLowerCase)) {
// When there are case-insensitive duplicates, put the index as the suffix.
s"$value$index"
} else if (duplicates.contains(value)) {
// When there are duplicates, put the index as the suffix.
s"$value$index"
} else {
value
}
}
} else {
row.zipWithIndex.map { case (_, index) =>
// Uses default column names, "_c#" where # is its position of fields
// when header option is disabled.
s"_c$index"
}
}
}
}
object CSVDataSource {
def apply(options: CSVOptions): CSVDataSource = {
if (options.multiLine) {
MultiLineCSVDataSource
} else {
TextInputCSVDataSource
}
}
}
object TextInputCSVDataSource extends CSVDataSource {
override val isSplitable: Boolean = true
override def readFile(
conf: Configuration,
file: PartitionedFile,
parser: UnivocityParser,
schema: StructType): Iterator[InternalRow] = {
val lines = {
val linesReader = new HadoopFileLinesReader(file, conf)
Option(TaskContext.get()).foreach(_.addTaskCompletionListener(_ => linesReader.close()))
linesReader.map { line =>
new String(line.getBytes, 0, line.getLength, parser.options.charset)
}
}
val shouldDropHeader = parser.options.headerFlag && file.start == 0
UnivocityParser.parseIterator(lines, shouldDropHeader, parser, schema)
}
override def infer(
sparkSession: SparkSession,
inputPaths: Seq[FileStatus],
parsedOptions: CSVOptions): StructType = {
val csv = createBaseDataset(sparkSession, inputPaths, parsedOptions)
val maybeFirstLine = CSVUtils.filterCommentAndEmpty(csv, parsedOptions).take(1).headOption
inferFromDataset(sparkSession, csv, maybeFirstLine, parsedOptions)
}
/**
* Infers the schema from `Dataset` that stores CSV string records.
*/
def inferFromDataset(
sparkSession: SparkSession,
csv: Dataset[String],
maybeFirstLine: Option[String],
parsedOptions: CSVOptions): StructType = maybeFirstLine match {
case Some(firstLine) =>
val firstRow = new CsvParser(parsedOptions.asParserSettings).parseLine(firstLine)
val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
val header = makeSafeHeader(firstRow, caseSensitive, parsedOptions)
val sampled: Dataset[String] = CSVUtils.sample(csv, parsedOptions)
val tokenRDD = sampled.rdd.mapPartitions { iter =>
val filteredLines = CSVUtils.filterCommentAndEmpty(iter, parsedOptions)
val linesWithoutHeader =
CSVUtils.filterHeaderLine(filteredLines, firstLine, parsedOptions)
val parser = new CsvParser(parsedOptions.asParserSettings)
linesWithoutHeader.map(parser.parseLine)
}
CSVInferSchema.infer(tokenRDD, header, parsedOptions)
case None =>
// If the first line could not be read, just return the empty schema.
StructType(Nil)
}
private def createBaseDataset(
sparkSession: SparkSession,
inputPaths: Seq[FileStatus],
options: CSVOptions): Dataset[String] = {
val paths = inputPaths.map(_.getPath.toString)
if (Charset.forName(options.charset) == StandardCharsets.UTF_8) {
sparkSession.baseRelationToDataFrame(
DataSource.apply(
sparkSession,
paths = paths,
className = classOf[TextFileFormat].getName
).resolveRelation(checkFilesExist = false))
.select("value").as[String](Encoders.STRING)
} else {
val charset = options.charset
val rdd = sparkSession.sparkContext
.hadoopFile[LongWritable, Text, TextInputFormat](paths.mkString(","))
.mapPartitions(_.map(pair => new String(pair._2.getBytes, 0, pair._2.getLength, charset)))
sparkSession.createDataset(rdd)(Encoders.STRING)
}
}
}
object MultiLineCSVDataSource extends CSVDataSource {
override val isSplitable: Boolean = false
override def readFile(
conf: Configuration,
file: PartitionedFile,
parser: UnivocityParser,
schema: StructType): Iterator[InternalRow] = {
UnivocityParser.parseStream(
CodecStreams.createInputStreamWithCloseResource(conf, new Path(new URI(file.filePath))),
parser.options.headerFlag,
parser,
schema)
}
override def infer(
sparkSession: SparkSession,
inputPaths: Seq[FileStatus],
parsedOptions: CSVOptions): StructType = {
val csv = createBaseRdd(sparkSession, inputPaths, parsedOptions)
csv.flatMap { lines =>
val path = new Path(lines.getPath())
UnivocityParser.tokenizeStream(
CodecStreams.createInputStreamWithCloseResource(lines.getConfiguration, path),
shouldDropHeader = false,
new CsvParser(parsedOptions.asParserSettings))
}.take(1).headOption match {
case Some(firstRow) =>
val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
val header = makeSafeHeader(firstRow, caseSensitive, parsedOptions)
val tokenRDD = csv.flatMap { lines =>
UnivocityParser.tokenizeStream(
CodecStreams.createInputStreamWithCloseResource(
lines.getConfiguration,
new Path(lines.getPath())),
parsedOptions.headerFlag,
new CsvParser(parsedOptions.asParserSettings))
}
val sampled = CSVUtils.sample(tokenRDD, parsedOptions)
CSVInferSchema.infer(sampled, header, parsedOptions)
case None =>
// If the first row could not be read, just return the empty schema.
StructType(Nil)
}
}
private def createBaseRdd(
sparkSession: SparkSession,
inputPaths: Seq[FileStatus],
options: CSVOptions): RDD[PortableDataStream] = {
val paths = inputPaths.map(_.getPath)
val name = paths.mkString(",")
val job = Job.getInstance(sparkSession.sessionState.newHadoopConf())
FileInputFormat.setInputPaths(job, paths: _*)
val conf = job.getConfiguration
val rdd = new BinaryFileRDD(
sparkSession.sparkContext,
classOf[StreamInputFormat],
classOf[String],
classOf[PortableDataStream],
conf,
sparkSession.sparkContext.defaultMinPartitions)
// Only returns `PortableDataStream`s without paths.
rdd.setName(s"CSVFile: $name").values
}
}
| joseph-torres/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/csv/CSVDataSource.scala | Scala | apache-2.0 | 9,696 |
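// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the Spark sources above). It shows the
// public reader options that drive CSVDataSource.apply: multiLine=true routes
// to MultiLineCSVDataSource (whole files, not splittable), anything else uses
// TextInputCSVDataSource. The file paths are placeholders.
object CsvDataSourceSketch {
  import org.apache.spark.sql.SparkSession

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("csv-sketch").master("local[*]").getOrCreate()

    // line-oriented CSV: handled by TextInputCSVDataSource, files may be split
    val plain = spark.read
      .option("header", "true")
      .option("inferSchema", "true")
      .csv("/tmp/example-plain.csv")

    // records with embedded newlines: handled by MultiLineCSVDataSource
    val multi = spark.read
      .option("header", "true")
      .option("multiLine", "true")
      .csv("/tmp/example-multiline.csv")

    plain.printSchema()
    multi.printSchema()
    spark.stop()
  }
}
// ---------------------------------------------------------------------------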
package com.sfxcode.nosql.mongo
import org.bson.conversions.Bson
import org.mongodb.scala.model.Sorts.{ascending, descending, orderBy}
object Sort extends Sort
trait Sort {
def sortByKey(key: String, sortAscending: Boolean = true): Bson =
if (sortAscending)
orderBy(ascending(key))
else
orderBy(descending(key))
}
| sfxcode/simple-mongo | src/main/scala/com/sfxcode/nosql/mongo/Sort.scala | Scala | apache-2.0 | 341 |
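// ---------------------------------------------------------------------------
// Hedged usage sketch for the Sort trait above (not part of the simple-mongo
// sources). The collection handle and the "createdAt" field are assumptions;
// only sortByKey is taken from the code above, and its Bson result is passed
// straight to the standard mongo-scala-driver sort stage.
object SortSketch {
  import com.sfxcode.nosql.mongo.Sort
  import org.mongodb.scala.MongoCollection
  import org.mongodb.scala.bson.collection.immutable.Document

  def newestFirst(collection: MongoCollection[Document]) =
    collection.find().sort(Sort.sortByKey("createdAt", sortAscending = false))
}
// ---------------------------------------------------------------------------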
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io.{ObjectInputStream, ObjectOutputStream}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler.AccumulableInfo
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.{AccumulatorV2, Utils}
// ==============================================================================================
// NOTE: new task end reasons MUST be accompanied with serialization logic in util.JsonProtocol!
// ==============================================================================================
/**
* :: DeveloperApi ::
* Various possible reasons why a task ended. The low-level TaskScheduler is supposed to retry
* tasks several times for "ephemeral" failures, and only report back failures that require some
* old stages to be resubmitted, such as shuffle map fetch failures.
*/
@DeveloperApi
sealed trait TaskEndReason
/**
* :: DeveloperApi ::
* Task succeeded.
*/
@DeveloperApi
case object Success extends TaskEndReason
/**
* :: DeveloperApi ::
* Various possible reasons why a task failed.
*/
@DeveloperApi
sealed trait TaskFailedReason extends TaskEndReason {
/** Error message displayed in the web UI. */
def toErrorString: String
/**
* Whether this task failure should be counted towards the maximum number of times the task is
* allowed to fail before the stage is aborted. Set to false in cases where the task's failure
* was unrelated to the task; for example, if the task failed because the executor it was running
* on was killed.
*/
def countTowardsTaskFailures: Boolean = true
}
/**
* :: DeveloperApi ::
* A `org.apache.spark.scheduler.ShuffleMapTask` that completed successfully earlier, but we
* lost the executor before the stage completed. This means Spark needs to reschedule the task
* to be re-executed on a different executor.
*/
@DeveloperApi
case object Resubmitted extends TaskFailedReason {
override def toErrorString: String = "Resubmitted (resubmitted due to lost executor)"
}
/**
* :: DeveloperApi ::
* Task failed to fetch shuffle data from a remote node. Probably means we have lost the remote
* executors the task is trying to fetch from, and thus need to rerun the previous stage.
*/
@DeveloperApi
case class FetchFailed(
bmAddress: BlockManagerId, // Note that bmAddress can be null
shuffleId: Int,
mapId: Int,
reduceId: Int,
message: String)
extends TaskFailedReason {
override def toErrorString: String = {
val bmAddressString = if (bmAddress == null) "null" else bmAddress.toString
s"FetchFailed($bmAddressString, shuffleId=$shuffleId, mapId=$mapId, reduceId=$reduceId, " +
s"message=\\n$message\\n)"
}
/**
* Fetch failures lead to a different failure handling path: (1) we don't abort the stage after
* 4 task failures, instead we immediately go back to the stage which generated the map output,
* and regenerate the missing data. (2) we don't count fetch failures for blacklisting, since
   * presumably it's not the fault of the executor where the task ran, but the executor which
* stored the data. This is especially important because we might rack up a bunch of
* fetch-failures in rapid succession, on all nodes of the cluster, due to one bad node.
*/
override def countTowardsTaskFailures: Boolean = false
}
/**
* :: DeveloperApi ::
* Task failed due to a runtime exception. This is the most common failure case and also captures
* user program exceptions.
*
* `stackTrace` contains the stack trace of the exception itself. It still exists for backward
* compatibility. It's better to use `this(e: Throwable, metrics: Option[TaskMetrics])` to
* create `ExceptionFailure` as it will handle the backward compatibility properly.
*
* `fullStackTrace` is a better representation of the stack trace because it contains the whole
* stack trace including the exception and its causes
*
* `exception` is the actual exception that caused the task to fail. It may be `None` in
* the case that the exception is not in fact serializable. If a task fails more than
* once (due to retries), `exception` is that one that caused the last failure.
*/
@DeveloperApi
case class ExceptionFailure(
className: String,
description: String,
stackTrace: Array[StackTraceElement],
fullStackTrace: String,
private val exceptionWrapper: Option[ThrowableSerializationWrapper],
accumUpdates: Seq[AccumulableInfo] = Seq.empty,
private[spark] var accums: Seq[AccumulatorV2[_, _]] = Nil)
extends TaskFailedReason {
/**
* `preserveCause` is used to keep the exception itself so it is available to the
* driver. This may be set to `false` in the event that the exception is not in fact
* serializable.
*/
private[spark] def this(
e: Throwable,
accumUpdates: Seq[AccumulableInfo],
preserveCause: Boolean) {
this(e.getClass.getName, e.getMessage, e.getStackTrace, Utils.exceptionString(e),
if (preserveCause) Some(new ThrowableSerializationWrapper(e)) else None, accumUpdates)
}
private[spark] def this(e: Throwable, accumUpdates: Seq[AccumulableInfo]) {
this(e, accumUpdates, preserveCause = true)
}
private[spark] def withAccums(accums: Seq[AccumulatorV2[_, _]]): ExceptionFailure = {
this.accums = accums
this
}
def exception: Option[Throwable] = exceptionWrapper.flatMap(w => Option(w.exception))
override def toErrorString: String =
if (fullStackTrace == null) {
// fullStackTrace is added in 1.2.0
// If fullStackTrace is null, use the old error string for backward compatibility
exceptionString(className, description, stackTrace)
} else {
fullStackTrace
}
/**
* Return a nice string representation of the exception, including the stack trace.
* Note: It does not include the exception's causes, and is only used for backward compatibility.
*/
private def exceptionString(
className: String,
description: String,
stackTrace: Array[StackTraceElement]): String = {
val desc = if (description == null) "" else description
    val st = if (stackTrace == null) "" else stackTrace.map(" " + _).mkString("\n")
    s"$className: $desc\n$st"
}
}
/**
* A class for recovering from exceptions when deserializing a Throwable that was
* thrown in user task code. If the Throwable cannot be deserialized it will be null,
* but the stacktrace and message will be preserved correctly in SparkException.
*/
private[spark] class ThrowableSerializationWrapper(var exception: Throwable) extends
Serializable with Logging {
private def writeObject(out: ObjectOutputStream): Unit = {
out.writeObject(exception)
}
private def readObject(in: ObjectInputStream): Unit = {
try {
exception = in.readObject().asInstanceOf[Throwable]
} catch {
case e : Exception => log.warn("Task exception could not be deserialized", e)
}
}
}
/**
* :: DeveloperApi ::
* The task finished successfully, but the result was lost from the executor's block manager before
* it was fetched.
*/
@DeveloperApi
case object TaskResultLost extends TaskFailedReason {
override def toErrorString: String = "TaskResultLost (result lost from block manager)"
}
/**
* :: DeveloperApi ::
* Task was killed intentionally and needs to be rescheduled.
*/
@DeveloperApi
case object TaskKilled extends TaskFailedReason {
override def toErrorString: String = "TaskKilled (killed intentionally)"
override def countTowardsTaskFailures: Boolean = false
}
/**
* :: DeveloperApi ::
* Task requested the driver to commit, but was denied.
*/
@DeveloperApi
case class TaskCommitDenied(
jobID: Int,
partitionID: Int,
attemptNumber: Int) extends TaskFailedReason {
override def toErrorString: String = s"TaskCommitDenied (Driver denied task commit)" +
s" for job: $jobID, partition: $partitionID, attemptNumber: $attemptNumber"
/**
* If a task failed because its attempt to commit was denied, do not count this failure
* towards failing the stage. This is intended to prevent spurious stage failures in cases
* where many speculative tasks are launched and denied to commit.
*/
override def countTowardsTaskFailures: Boolean = false
}
/**
* :: DeveloperApi ::
* The task failed because the executor that it was running on was lost. This may happen because
* the task crashed the JVM.
*/
@DeveloperApi
case class ExecutorLostFailure(
execId: String,
exitCausedByApp: Boolean = true,
reason: Option[String]) extends TaskFailedReason {
override def toErrorString: String = {
val exitBehavior = if (exitCausedByApp) {
"caused by one of the running tasks"
} else {
"unrelated to the running tasks"
}
s"ExecutorLostFailure (executor ${execId} exited ${exitBehavior})" +
reason.map { r => s" Reason: $r" }.getOrElse("")
}
override def countTowardsTaskFailures: Boolean = exitCausedByApp
}
/**
* :: DeveloperApi ::
* We don't know why the task ended -- for example, because of a ClassNotFound exception when
* deserializing the task result.
*/
@DeveloperApi
case object UnknownReason extends TaskFailedReason {
override def toErrorString: String = "UnknownReason"
}
| sachintyagi22/spark | core/src/main/scala/org/apache/spark/TaskEndReason.scala | Scala | apache-2.0 | 10,095 |
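// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the Spark sources above). TaskEndReason
// values arrive in SparkListenerTaskEnd events; this listener only pattern
// matches a few of the cases defined above and logs them. Register it with
// sparkContext.addSparkListener; the println reporting is illustrative.
class TaskEndReasonSketchListener extends org.apache.spark.scheduler.SparkListener {
  import org.apache.spark.{ExceptionFailure, FetchFailed, Success, TaskFailedReason}
  import org.apache.spark.scheduler.SparkListenerTaskEnd

  override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = taskEnd.reason match {
    case Success =>
      () // task finished normally, nothing to report
    case f: FetchFailed =>
      println(s"stage ${taskEnd.stageId}: shuffle fetch failed -> ${f.toErrorString}")
    case e: ExceptionFailure =>
      println(s"stage ${taskEnd.stageId}: ${e.className}: ${e.description}")
    case other: TaskFailedReason =>
      println(s"stage ${taskEnd.stageId}: task failed (${other.toErrorString})")
  }
}
// ---------------------------------------------------------------------------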
/**
* sbt-dependency-manager - fetch and merge byte code and source code jars, align broken sources within jars.
 * For example, it allows easy source code lookup from an IDE while developing SBT plugins (and not only those).
*
* Copyright (c) 2012-2013 Alexey Aksenov [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sbt.dependency.manager
import java.io.BufferedOutputStream
import java.io.ByteArrayOutputStream
import java.io.FileInputStream
import java.io.FileOutputStream
import java.io.PrintWriter
import java.util.jar.JarInputStream
import java.util.jar.JarOutputStream
import java.util.zip.ZipEntry
import java.util.zip.ZipException
import scala.Option.option2Iterable
import scala.collection.mutable.HashSet
import sbt._
import sbt.Keys._
import sbt.dependency.manager.Keys._
import xsbti.AppConfiguration
/**
* sbt-dependency-manager plugin entry
*/
object Plugin extends sbt.Plugin {
def logPrefix(name: String) = "[Dep manager:%s] ".format(name)
lazy val defaultSettings = inConfig(Keys.DependencyConf)(Seq(
dependencyEnableCustom := true,
dependencyBundlePath <<= (target, normalizedName) map { (target, name) => target / (name + "-development-bundle.jar") },
dependencyClasspathFilter <<= (dependencyLookupClasspathTask) map (cp =>
cp.flatMap(_.get(moduleID.key)).foldLeft(moduleFilter(NothingFilter, NothingFilter, NothingFilter))((acc, m) => acc |
moduleFilter(GlobFilter(m.organization), GlobFilter(m.name), GlobFilter(m.revision)))),
dependencyFilter <<= dependencyClasspathFilter map (dcf => Some(dcf -
moduleFilter(organization = GlobFilter("org.scala-lang"), name = GlobFilter("scala-library")))),
dependencyIgnoreConfiguration := true,
dependencyLookupClasspath <<= dependencyLookupClasspathTask,
dependencyPath <<= (target in LocalRootProject) { _ / "deps" },
dependencyResourceFilter := resourceFilter,
dependencySkipResolved := true,
// add the empty classifier ""
transitiveClassifiers in Global :== Seq("", Artifact.SourceClassifier, Artifact.DocClassifier))) ++
// global settings
Seq(
dependencyTaskBundle <<= dependencyTaskBundleTask,
dependencyTaskBundleWithArtifact <<= dependencyTaskBundleWithArtifactTask,
dependencyTaskFetch <<= dependencyTaskFetchTask,
dependencyTaskFetchAlign <<= dependencyTaskFetchAlignTask,
dependencyTaskFetchWithSources <<= dependencyTaskFetchWithSourcesTask)
/** Implementation of dependency-bundle */
def dependencyTaskBundleTask =
(classifiersModule in updateSbtClassifiers, dependencyBundlePath in DependencyConf, dependencyFilter in DependencyConf,
dependencyLookupClasspath in DependencyConf, ivySbt, streams, state, thisProjectRef) map { (origClassifiersModule, pathBundle,
dependencyFilter, dependencyClasspath, ivySbt, streams, state, thisProjectRef) =>
val extracted: Extracted = Project.extract(state)
val thisScope = Load.projectScope(thisProjectRef).copy(config = Select(DependencyConf))
if ((dependencyEnable in thisScope get extracted.structure.data) getOrElse true) {
val name = (sbt.Keys.name in thisScope get extracted.structure.data) getOrElse thisProjectRef.project
streams.log.info(logPrefix(name) + "Fetch dependencies and align to bundle")
val result = for {
appConfiguration <- appConfiguration in thisScope get extracted.structure.data
ivyLoggingLevel <- ivyLoggingLevel in thisScope get extracted.structure.data
ivyScala <- ivyScala in thisScope get extracted.structure.data
pathTarget <- target in thisScope get extracted.structure.data
updateConfiguration <- updateConfiguration in thisScope get extracted.structure.data
dependencyEnableCustom <- dependencyEnableCustom in thisScope get extracted.structure.data
dependencyIgnoreConfiguration <- dependencyIgnoreConfiguration in thisScope get extracted.structure.data
dependencyPath <- dependencyPath in thisScope get extracted.structure.data
dependencyResourceFilter <- dependencyResourceFilter in thisScope get extracted.structure.data
dependencySkipResolved <- dependencySkipResolved in thisScope get extracted.structure.data
libraryDependenciesCompile <- libraryDependencies in thisScope in Compile get extracted.structure.data
libraryDependenciesTest <- libraryDependencies in thisScope in Test get extracted.structure.data
} yield {
val libraryDependencies = (libraryDependenciesCompile ++ libraryDependenciesTest).distinct
val argument = TaskArgument(appConfiguration, ivyLoggingLevel, ivySbt, ivyScala, libraryDependencies, name,
origClassifiersModule, new UpdateConfiguration(updateConfiguration.retrieve, true, ivyLoggingLevel),
pathBundle, dependencyPath, pathTarget, streams, dependencyEnableCustom, None, true, dependencyClasspath,
dependencyFilter, dependencyIgnoreConfiguration, dependencyResourceFilter, dependencySkipResolved)
commonFetchTask(argument, doFetchWithSources)
}
result.get
() // Returns Unit. Return type isn't defined explicitly because it is different for different SBT versions.
}
}
/** Implementation of dependency-bundle-with-artifact */
def dependencyTaskBundleWithArtifactTask =
(classifiersModule in updateSbtClassifiers, dependencyBundlePath in DependencyConf,
dependencyFilter in DependencyConf, dependencyLookupClasspath in DependencyConf,
ivySbt, packageBin in Compile, state, streams, thisProjectRef) map { (origClassifiersModule, pathBundle,
dependencyFilter, dependencyClasspath, ivySbt, packageBin, state, streams, thisProjectRef) =>
val extracted: Extracted = Project.extract(state)
val thisScope = Load.projectScope(thisProjectRef).copy(config = Select(DependencyConf))
if ((dependencyEnable in thisScope get extracted.structure.data) getOrElse true) {
val name = (sbt.Keys.name in thisScope get extracted.structure.data) getOrElse thisProjectRef.project
streams.log.info(logPrefix(name) + "Fetch dependencies with artifact and align to bundle")
val result = for {
appConfiguration <- appConfiguration in thisScope get extracted.structure.data
ivyLoggingLevel <- ivyLoggingLevel in thisScope get extracted.structure.data
ivyScala <- ivyScala in thisScope get extracted.structure.data
pathTarget <- target in thisScope get extracted.structure.data
updateConfiguration <- updateConfiguration in thisScope get extracted.structure.data
dependencyEnableCustom <- dependencyEnableCustom in thisScope get extracted.structure.data
dependencyIgnoreConfiguration <- dependencyIgnoreConfiguration in thisScope get extracted.structure.data
dependencyPath <- dependencyPath in thisScope get extracted.structure.data
dependencyResourceFilter <- dependencyResourceFilter in thisScope get extracted.structure.data
dependencySkipResolved <- dependencySkipResolved in thisScope get extracted.structure.data
libraryDependenciesCompile <- libraryDependencies in thisScope in Compile get extracted.structure.data
libraryDependenciesTest <- libraryDependencies in thisScope in Test get extracted.structure.data
} yield {
val libraryDependencies = (libraryDependenciesCompile ++ libraryDependenciesTest).distinct
val argument = TaskArgument(appConfiguration, ivyLoggingLevel, ivySbt, ivyScala, libraryDependencies, name,
origClassifiersModule, new UpdateConfiguration(updateConfiguration.retrieve, true, ivyLoggingLevel),
pathBundle, dependencyPath, pathTarget, streams, dependencyEnableCustom, Some(packageBin), true, dependencyClasspath,
dependencyFilter, dependencyIgnoreConfiguration, dependencyResourceFilter, dependencySkipResolved)
commonFetchTask(argument, doFetchWithSources)
}
result.get
() // Returns Unit. Return type isn't defined explicitly because it is different for different SBT versions.
}
}
/** Implementation of dependency-fetch-align */
def dependencyTaskFetchAlignTask =
(classifiersModule in updateSbtClassifiers, dependencyBundlePath in DependencyConf, dependencyFilter in DependencyConf,
dependencyLookupClasspath in DependencyConf, ivySbt, state, streams, thisProjectRef) map { (origClassifiersModule, pathBundle,
dependencyFilter, dependencyClasspath, ivySbt, state, streams, thisProjectRef) =>
val extracted: Extracted = Project.extract(state)
val thisScope = Load.projectScope(thisProjectRef).copy(config = Select(DependencyConf))
if ((dependencyEnable in thisScope get extracted.structure.data) getOrElse true) {
val name = (sbt.Keys.name in thisScope get extracted.structure.data) getOrElse thisProjectRef.project
streams.log.info(logPrefix(name) + "Fetch dependencies and align")
val result = for {
appConfiguration <- appConfiguration in thisScope get extracted.structure.data
ivyLoggingLevel <- ivyLoggingLevel in thisScope get extracted.structure.data
ivyScala <- ivyScala in thisScope get extracted.structure.data
pathTarget <- target in thisScope get extracted.structure.data
updateConfiguration <- updateConfiguration in thisScope get extracted.structure.data
dependencyEnableCustom <- dependencyEnableCustom in thisScope get extracted.structure.data
dependencyIgnoreConfiguration <- dependencyIgnoreConfiguration in thisScope get extracted.structure.data
dependencyPath <- dependencyPath in thisScope get extracted.structure.data
dependencyResourceFilter <- dependencyResourceFilter in thisScope get extracted.structure.data
dependencySkipResolved <- dependencySkipResolved in thisScope get extracted.structure.data
libraryDependenciesCompile <- libraryDependencies in thisScope in Compile get extracted.structure.data
libraryDependenciesTest <- libraryDependencies in thisScope in Test get extracted.structure.data
} yield {
val libraryDependencies = (libraryDependenciesCompile ++ libraryDependenciesTest).distinct
val argument = TaskArgument(appConfiguration, ivyLoggingLevel, ivySbt, ivyScala, libraryDependencies, name,
origClassifiersModule, new UpdateConfiguration(updateConfiguration.retrieve, true, ivyLoggingLevel),
pathBundle, dependencyPath, pathTarget, streams, dependencyEnableCustom, None, false, dependencyClasspath,
dependencyFilter, dependencyIgnoreConfiguration, dependencyResourceFilter, dependencySkipResolved)
commonFetchTask(argument, doFetchAlign)
}
result.get
() // Returns Unit. Return type isn't defined explicitly because it is different for different SBT versions.
}
}
/** Implementation of dependency-fetch-with-sources */
def dependencyTaskFetchWithSourcesTask =
(classifiersModule in updateSbtClassifiers, dependencyBundlePath in DependencyConf, dependencyFilter in DependencyConf,
dependencyLookupClasspath in DependencyConf, ivySbt, state, streams, thisProjectRef) map { (origClassifiersModule, pathBundle,
dependencyFilter, dependencyClasspath, ivySbt, state, streams, thisProjectRef) =>
val extracted: Extracted = Project.extract(state)
val thisScope = Load.projectScope(thisProjectRef).copy(config = Select(DependencyConf))
if ((dependencyEnable in thisScope get extracted.structure.data) getOrElse true) {
val name = (sbt.Keys.name in thisScope get extracted.structure.data) getOrElse thisProjectRef.project
streams.log.info(logPrefix(name) + "Fetch dependencies with source code")
val result = for {
appConfiguration <- appConfiguration in thisScope get extracted.structure.data
ivyLoggingLevel <- ivyLoggingLevel in thisScope get extracted.structure.data
ivyScala <- ivyScala in thisScope get extracted.structure.data
pathTarget <- target in thisScope get extracted.structure.data
updateConfiguration <- updateConfiguration in thisScope get extracted.structure.data
dependencyEnableCustom <- dependencyEnableCustom in thisScope get extracted.structure.data
dependencyIgnoreConfiguration <- dependencyIgnoreConfiguration in thisScope get extracted.structure.data
dependencyPath <- dependencyPath in thisScope get extracted.structure.data
dependencyResourceFilter <- dependencyResourceFilter in thisScope get extracted.structure.data
dependencySkipResolved <- dependencySkipResolved in thisScope get extracted.structure.data
libraryDependenciesCompile <- libraryDependencies in thisScope in Compile get extracted.structure.data
libraryDependenciesTest <- libraryDependencies in thisScope in Test get extracted.structure.data
} yield {
val libraryDependencies = (libraryDependenciesCompile ++ libraryDependenciesTest).distinct
val argument = TaskArgument(appConfiguration, ivyLoggingLevel, ivySbt, ivyScala, libraryDependencies, name,
origClassifiersModule, new UpdateConfiguration(updateConfiguration.retrieve, true, ivyLoggingLevel),
pathBundle, dependencyPath, pathTarget, streams,
dependencyEnableCustom, None, false, dependencyClasspath,
dependencyFilter, dependencyIgnoreConfiguration, dependencyResourceFilter, dependencySkipResolved)
commonFetchTask(argument, doFetchWithSources)
}
result.get
() // Returns Unit. Return type isn't defined explicitly because it is different for different SBT versions.
}
}
/** Implementation of dependency-fetch */
def dependencyTaskFetchTask =
(classifiersModule in updateSbtClassifiers, dependencyBundlePath in DependencyConf, dependencyFilter in DependencyConf,
dependencyLookupClasspath in DependencyConf, ivySbt, state, streams, thisProjectRef) map {
(origClassifiersModule, pathBundle, dependencyFilter, dependencyClasspath, ivySbt, state, streams, thisProjectRef) =>
val extracted: Extracted = Project.extract(state)
val thisScope = Load.projectScope(thisProjectRef).copy(config = Select(DependencyConf))
if ((dependencyEnable in thisScope get extracted.structure.data) getOrElse true) {
val name = (sbt.Keys.name in thisScope get extracted.structure.data) getOrElse thisProjectRef.project
streams.log.info(logPrefix(name) + "Fetch dependencies")
val result = for {
appConfiguration <- appConfiguration in thisScope get extracted.structure.data
ivyLoggingLevel <- ivyLoggingLevel in thisScope get extracted.structure.data
ivyScala <- ivyScala in thisScope get extracted.structure.data
pathTarget <- target in thisScope get extracted.structure.data
updateConfiguration <- updateConfiguration in thisScope get extracted.structure.data
dependencyEnableCustom <- dependencyEnableCustom in thisScope get extracted.structure.data
dependencyIgnoreConfiguration <- dependencyIgnoreConfiguration in thisScope get extracted.structure.data
dependencyPath <- dependencyPath in thisScope get extracted.structure.data
dependencyResourceFilter <- dependencyResourceFilter in thisScope get extracted.structure.data
dependencySkipResolved <- dependencySkipResolved in thisScope get extracted.structure.data
libraryDependenciesCompile <- libraryDependencies in thisScope in Compile get extracted.structure.data
libraryDependenciesTest <- libraryDependencies in thisScope in Test get extracted.structure.data
} yield {
val libraryDependencies = (libraryDependenciesCompile ++ libraryDependenciesTest).distinct
val argument = TaskArgument(appConfiguration, ivyLoggingLevel, ivySbt, ivyScala, libraryDependencies, name,
origClassifiersModule, new UpdateConfiguration(updateConfiguration.retrieve, true, ivyLoggingLevel),
pathBundle, dependencyPath, pathTarget, streams, dependencyEnableCustom, None, false, dependencyClasspath,
dependencyFilter, dependencyIgnoreConfiguration, dependencyResourceFilter, dependencySkipResolved)
commonFetchTask(argument, doFetch)
}
result.get
() // Returns Unit. Return type isn't defined explicitly because it is different for different SBT versions.
}
}
/**
   * Task that returns the union of externalDependencyClasspath in Compile and Test configurations
*/
def dependencyLookupClasspathTask =
(externalDependencyClasspath in Compile, externalDependencyClasspath in Test) map ((cpA, cpB) => (cpA ++ cpB).distinct)
/**
* Dependency resource filter
* It drops META-INF/ .SF .DSA .RSA files by default
*/
def resourceFilter(entry: ZipEntry): Boolean =
Seq("META-INF/.*\\\\.SF", "META-INF/.*\\\\.DSA", "META-INF/.*\\\\.RSA").find(entry.getName().toUpperCase().matches).nonEmpty
/** Repack sequence of jar artifacts */
protected def align(arg: TaskArgument, moduleTag: String, code: File, sources: File, targetDirectory: File, resourceFilter: ZipEntry => Boolean, s: TaskStreams,
alignEntries: HashSet[String] = HashSet[String](), output: JarOutputStream = null): Unit = {
if (!targetDirectory.exists())
if (!targetDirectory.mkdirs())
return s.log.error(logPrefix(arg.name) + "Unable to create " + targetDirectory)
val target = new File(targetDirectory, code.getName)
if (output == null) {
s.log.info(logPrefix(arg.name) + "Fetch and align " + moduleTag)
s.log.debug(logPrefix(arg.name) + "Save result to " + target.getAbsoluteFile())
} else
s.log.info(logPrefix(arg.name) + "Fetch and align " + moduleTag + ", target: bundle")
// align
var jarCode: JarInputStream = null
var jarSources: JarInputStream = null
var jarTarget: JarOutputStream = Option(output) getOrElse null
try {
jarCode = new JarInputStream(new FileInputStream(code))
jarSources = new JarInputStream(new FileInputStream(sources))
if (jarTarget == null && output == null) {
if (target.exists())
if (!target.delete()) {
try {
jarCode.close
jarSources.close
} catch {
case e: Throwable =>
}
return s.log.error(logPrefix(arg.name) + "Unable to delete " + target)
}
jarTarget = try {
new JarOutputStream(new BufferedOutputStream(new FileOutputStream(target, true)), jarCode.getManifest())
} catch {
case e: NullPointerException =>
s.log.warn(logPrefix(arg.name) + code + " has broken manifest")
new JarOutputStream(new BufferedOutputStream(new FileOutputStream(target, true)))
}
}
// copy across all entries from the original code jar
copy(arg, alignEntries, jarCode, jarTarget, resourceFilter, s)
// copy across all entries from the original sources jar
copy(arg, alignEntries, jarSources, jarTarget, resourceFilter, s)
} catch {
case e: Throwable =>
s.log.error(logPrefix(arg.name) + "Unable to align: " + e.getClass().getName() + " " + e.getMessage())
} finally {
if (jarTarget != null && output == null) {
jarTarget.flush()
jarTarget.close()
}
if (jarCode != null)
jarCode.close()
if (jarSources != null)
jarSources.close()
}
}
/** Common part for all sbt-dependency-manager tasks */
protected def commonFetchTask(arg: TaskArgument, userFunction: (TaskArgument, Seq[(sbt.ModuleID, File)], Seq[(sbt.ModuleID, File)]) => Unit): UpdateReport =
synchronized {
Classpaths.withExcludes(arg.pathTarget, arg.origClassifiersModule.classifiers, Defaults.lock(arg.appConfiguration)) { excludes =>
import arg.origClassifiersModule.{ id => origClassifiersModuleID, modules => origClassifiersModuleDeps }
if (arg.dependencyBundle)
arg.streams.log.info(logPrefix(arg.name) + "Create bundle " + arg.pathBundle)
// do default update-sbt-classifiers with libDeps
val libDeps = arg.dependencyClasspath.flatMap(_.get(moduleID.key))
val extClassifiersModuleDeps = {
val all = arg.dependencyFilter match {
case Some(filter) => (origClassifiersModuleDeps ++ libDeps).filter(filter)
case None => (origClassifiersModuleDeps ++ libDeps)
}
if (arg.dependencyIgnoreConfiguration)
all.map(_.copy(configurations = None))
else
all
}
// skip dependency that already have explicit artifacts which points to local resources
val extClassifiersModuleDepsFiltered = {
if (arg.dependencySkipResolved)
extClassifiersModuleDeps.filterNot(moduleId =>
arg.libraryDependencies.exists(id =>
id.name == moduleId.name && id.organization == moduleId.organization && id.revision == moduleId.revision &&
id.explicitArtifacts.nonEmpty && id.explicitArtifacts.forall(_.url.map(_.getProtocol()) == Some("file"))))
else
extClassifiersModuleDeps
}
val customConfig = GetClassifiersConfiguration(arg.origClassifiersModule, excludes, arg.updateConfiguration, arg.ivyScala)
val customBaseModuleID = restrictedCopy(origClassifiersModuleID, true).copy(name = origClassifiersModuleID.name + "$sbt")
val customIvySbtModule = new arg.ivySbt.Module(InlineConfiguration(customBaseModuleID, ModuleInfo(customBaseModuleID.name), extClassifiersModuleDepsFiltered).copy(ivyScala = arg.ivyScala))
val customUpdateReport = IvyActions.update(customIvySbtModule, arg.updateConfiguration, arg.streams.log)
val newConfig = customConfig.copy(module = arg.origClassifiersModule.copy(modules = customUpdateReport.allModules))
val updateReport = IvyActions.updateClassifiers(arg.ivySbt, newConfig, arg.streams.log)
// process updateReport
// get all sources
val (sources, other) = updateReport.toSeq.partition {
case (_, _, Artifact(_, _, _, Some(Artifact.SourceClassifier), _, _, _), _) => true
case _ => false
}
val sourceObjects = sources.map { case (configuration, moduleId, artifact, file) => (moduleId, file) }
val codeObjects = other.map {
case (configuration, moduleId, artifact, file) if artifact.classifier == None || artifact.classifier == Some("") =>
Some((moduleId, file))
case _ =>
None
}.flatten
// process all jars
other.sortBy(_._2.toString).foreach { module => arg.streams.log.debug("add " + module._2) }
userFunction(arg, sourceObjects, codeObjects)
// add unprocessed modules
if (arg.dependencyEnableCustom) {
// get all unprocessed dependencies with ModuleID
val unprocessedUnfiltered = arg.dependencyFilter match {
case Some(filter) =>
extClassifiersModuleDeps.filterNot(other.map(_._2).contains).distinct.filter(filter)
case None =>
extClassifiersModuleDeps.filterNot(other.map(_._2).contains).distinct
}
unprocessedUnfiltered.sortBy(_.toString).foreach { module => arg.streams.log.debug("add unprocessed " + module) }
// get all unprocessed dependencies or dependencies without ModuleID
val unprocessed = arg.dependencyClasspath.sortBy(_.toString).map(classpath => classpath.get(moduleID.key) match {
case Some(moduleId) =>
if (unprocessedUnfiltered.contains(moduleId)) {
// lookup for original ModuleIDs with explicit artifacts that points to local file system
val originalModuleID = arg.libraryDependencies.find(id =>
id.name == moduleId.name && id.organization == moduleId.organization && id.revision == moduleId.revision &&
id.explicitArtifacts.nonEmpty && id.explicitArtifacts.forall(_.url.map(_.getProtocol()) == Some("file")))
Some(originalModuleID getOrElse moduleId.name % moduleId.organization % moduleId.revision from classpath.data.toURI().toURL().toString)
} else
None // already processed
case None =>
Some("UNKNOWN" % "UNKNOWN" % "UNKNOWN" from classpath.data.toURI().toURL().toString)
}).flatten
if (arg.dependencyBundle)
unprocessed.foreach {
moduleId =>
val codeArtifact = moduleId.explicitArtifacts.find(_.classifier == None)
val sourceCodeArtifact = moduleId.explicitArtifacts.find(_.classifier == Some(Artifact.SourceClassifier))
(codeArtifact, sourceCodeArtifact) match {
case (Some(Artifact(_, _, _, _, _, Some(codeURL), _)), Some(Artifact(_, _, _, _, _, Some(sourceCodeURL), _))) =>
val code = new File(codeURL.toURI)
val source = new File(sourceCodeURL.toURI)
userFunction(arg, Seq((moduleId, source)), Seq((moduleId, code)))
case (Some(Artifact(_, _, _, _, _, Some(codeURL), _)), _) =>
val code = new File(codeURL.toURI)
arg.streams.log.info(logPrefix(arg.name) + "Fetch custom library " + code.getName())
copyToCodeBundle(arg, code)
copyToSourceBundle(arg, code)
case _ =>
arg.streams.log.error(logPrefix(arg.name) + "Unable to aquire artifacts for module " + moduleId)
}
}
else
unprocessed.foreach {
moduleId =>
val codeArtifact = moduleId.explicitArtifacts.find(_.classifier == None)
val sourceCodeArtifact = moduleId.explicitArtifacts.find(_.classifier == Some(Artifact.SourceClassifier))
(codeArtifact, sourceCodeArtifact) match {
case (Some(Artifact(_, _, _, _, _, Some(codeURL), _)), Some(Artifact(_, _, _, _, _, Some(sourceCodeURL), _))) =>
val code = new File(codeURL.toURI)
val source = new File(sourceCodeURL.toURI)
userFunction(arg, Seq((moduleId, source)), Seq((moduleId, code)))
case (Some(Artifact(_, _, _, _, _, Some(codeURL), _)), _) =>
val code = new File(codeURL.toURI)
arg.streams.log.info(logPrefix(arg.name) + "Fetch custom library " + code.getName())
sbt.IO.copyFile(code, new File(arg.pathDependency, code.getName()), false)
case _ =>
arg.streams.log.error(logPrefix(arg.name) + "Unable to aquire artifacts for module " + moduleId)
}
}
}
if (arg.dependencyBundle) {
// add artifact
arg.dependencyArtifact.foreach(copyToCodeBundle(arg, _))
arg.dependencyArtifact.foreach(copyToSourceBundle(arg, _))
arg.bundleJarCode.flush()
arg.bundleJarCode.close()
arg.bundleJarSource.flush()
arg.bundleJarSource.close()
// create bundle description
val directory = arg.pathBundle.getParentFile()
val file = arg.pathBundle.getName() + ".description"
val descriptionFile = new File(directory, file)
Some(new PrintWriter(descriptionFile)).foreach { writer =>
try {
            writer.write(arg.bundleResources.toList.sorted.mkString("\n"))
} catch {
case e: Throwable =>
arg.streams.log.error(logPrefix(arg.name) + "Unable to create bundle description " + descriptionFile.getAbsolutePath() + " " + e)
} finally {
try { writer.close } catch { case e: Throwable => }
}
}
}
updateReport
}
}
/** Specific part for tasks dependency-fetch-align, dependency-bundle, dependency-bundle-with-artifact */
protected def doFetchAlign(arg: TaskArgument, sourceObjects: Seq[(sbt.ModuleID, File)],
codeObjects: Seq[(sbt.ModuleID, File)]): Unit = codeObjects.foreach {
case (module, codeJar) =>
sourceObjects.find(source => source._1 == module) match {
case Some((_, sourceJar)) =>
if (arg.dependencyBundle) {
align(arg, module.toString, codeJar, sourceJar, arg.pathDependency, resourceFilter, arg.streams, arg.bundleEntries, arg.bundleJarCode)
arg.bundleResources += codeJar.getAbsolutePath()
} else
align(arg, module.toString, codeJar, sourceJar, arg.pathDependency, resourceFilter, arg.streams)
case None =>
arg.streams.log.debug(logPrefix(arg.name) + "Skip align for dependency " + module + " - sources not found ")
if (arg.dependencyBundle) {
arg.streams.log.info(logPrefix(arg.name) + "Fetch " + module + " to bundle without source code")
copyToCodeBundle(arg, codeJar)
} else {
arg.streams.log.info(logPrefix(arg.name) + "Fetch " + module + " without source code")
val codeTarget = new File(arg.pathDependency, codeJar.getName())
arg.streams.log.debug(logPrefix(arg.name) + "Save result to " + codeTarget.getAbsolutePath())
sbt.IO.copyFile(codeJar, codeTarget, false)
}
}
}
/** Specific part for task dependency-fetch-with-sources */
protected def doFetchWithSources(arg: TaskArgument, sourceObjects: Seq[(sbt.ModuleID, File)],
codeObjects: Seq[(sbt.ModuleID, File)]): Unit = codeObjects.foreach {
case (module, codeJar) =>
sourceObjects.find(source => source._1 == module) match {
case Some((_, sourceJar)) =>
if (arg.dependencyBundle) {
arg.streams.log.info(logPrefix(arg.name) + "Fetch with source code " + module + ", target: bundle")
copyToCodeBundle(arg, codeJar)
copyToSourceBundle(arg, sourceJar)
arg.bundleResources += codeJar.getAbsolutePath()
} else {
val codeTarget = new File(arg.pathDependency, codeJar.getName())
val sourceTarget = new File(arg.pathDependency, sourceJar.getName())
arg.streams.log.info(logPrefix(arg.name) + "Fetch with source code " + module)
arg.streams.log.debug(logPrefix(arg.name) + "Save results to " + codeTarget.getParentFile.getAbsolutePath())
sbt.IO.copyFile(codeJar, codeTarget, false)
sbt.IO.copyFile(sourceJar, sourceTarget, false)
}
case None =>
if (arg.dependencyBundle) {
arg.streams.log.info(logPrefix(arg.name) + "Fetch with source code " + module + ", target: bundle")
copyToCodeBundle(arg, codeJar)
} else {
arg.streams.log.info(logPrefix(arg.name) + "Fetch with source code " + module)
val codeTarget = new File(arg.pathDependency, codeJar.getName())
arg.streams.log.debug(logPrefix(arg.name) + "Save results to " + codeTarget.getParentFile.getAbsolutePath())
sbt.IO.copyFile(codeJar, codeTarget, false)
}
}
}
/** Specific part for task dependency-fetch */
protected def doFetch(arg: TaskArgument, sourceObjects: Seq[(sbt.ModuleID, File)],
codeObjects: Seq[(sbt.ModuleID, File)]): Unit = codeObjects.foreach {
case (module, codeJar) =>
sourceObjects.find(source => source._1 == module) match {
case Some((_, sourceJar)) =>
arg.streams.log.info(logPrefix(arg.name) + "Fetch " + module)
val codeTarget = new File(arg.pathDependency, codeJar.getName())
arg.streams.log.debug(logPrefix(arg.name) + "Save result to " + codeTarget.getAbsolutePath())
sbt.IO.copyFile(codeJar, codeTarget, false)
case None =>
arg.streams.log.debug(logPrefix(arg.name) + "Skip " + module)
}
}
/** Repack content of jar artifact */
private def alignScalaSource(arg: TaskArgument, alignEntries: HashSet[String], entry: ZipEntry, content: String, s: TaskStreams): Option[ZipEntry] = {
val searchFor = "/" + entry.getName.takeWhile(_ != '.')
val distance = alignEntries.toSeq.map(path => (path.indexOf(searchFor), path)).filter(_._1 > 1).sortBy(_._1).headOption
distance match {
case Some((idx, entryPath)) =>
val newEntry = new ZipEntry(entryPath.substring(0, idx) + searchFor + ".scala")
s.log.debug(logPrefix(arg.name) + "Align " + entry.getName + " to " + newEntry.getName())
newEntry.setComment(entry.getComment())
newEntry.setCompressedSize(entry.getCompressedSize())
newEntry.setCrc(entry.getCrc())
newEntry.setExtra(entry.getExtra())
newEntry.setMethod(entry.getMethod())
newEntry.setSize(entry.getSize())
newEntry.setTime(entry.getTime())
Some(newEntry)
case None =>
var path = Seq[String]()
val pattern = """\\s*package\\s+([a-z\\\\._$-]+).*""".r
content.split("\\n").foreach {
case pattern(packageName) =>
path = path :+ packageName.replaceAll("\\\\.", "/")
case line =>
}
if (path.nonEmpty) {
val prefix = path.mkString("/") + "/"
alignEntries.toSeq.find(_.startsWith(prefix)) match {
case Some(path) =>
val newEntry = new ZipEntry(prefix + entry.getName())
s.log.debug(logPrefix(arg.name) + "Align " + entry.getName + " to " + newEntry.getName())
newEntry.setComment(entry.getComment())
newEntry.setCompressedSize(entry.getCompressedSize())
newEntry.setCrc(entry.getCrc())
newEntry.setExtra(entry.getExtra())
newEntry.setMethod(entry.getMethod())
newEntry.setSize(entry.getSize())
newEntry.setTime(entry.getTime())
Some(newEntry)
case None =>
s.log.warn(logPrefix(arg.name) + "Failed to align source " + entry.getName())
None
}
} else
None
}
}
/** Copy content of jar artifact */
private def copy(arg: TaskArgument, alignEntries: HashSet[String], in: JarInputStream, out: JarOutputStream, resourceFilter: ZipEntry => Boolean, s: TaskStreams) {
var entry: ZipEntry = null
// copy across all entries from the original code jar
var value: Int = 0
try {
val buffer = new Array[Byte](2048)
entry = in.getNextEntry()
while (entry != null) {
if (alignEntries(entry.getName))
s.log.debug(logPrefix(arg.name) + "Skip, entry already in jar: " + entry.getName())
else if (resourceFilter(entry)) {
s.log.debug(logPrefix(arg.name) + "Skip, filtered " + entry)
} else
try {
alignEntries(entry.getName) = true
val bos = new ByteArrayOutputStream()
value = in.read(buffer)
while (value > 0) {
bos.write(buffer, 0, value)
value = in.read(buffer)
}
val destEntry = new ZipEntry(entry.getName)
out.putNextEntry(destEntry)
out.write(bos.toByteArray())
// adjust root scala sources
if (entry.getName.endsWith(".scala") && entry.getName.indexOf("/") == -1)
alignScalaSource(arg, alignEntries, entry, bos.toString, s).foreach {
entry =>
if (alignEntries(entry.getName))
s.log.debug(logPrefix(arg.name) + "Skip, entry already in jar: " + entry.getName())
else {
out.putNextEntry(entry)
out.write(bos.toByteArray())
}
}
} catch {
case e: ZipException =>
s.log.error(logPrefix(arg.name) + "Zip failed: " + e.getMessage())
}
entry = in.getNextEntry()
}
} catch {
case e: Throwable =>
s.log.error(logPrefix(arg.name) + "Copy failed: " + e.getClass().getName() + " " + e.getMessage())
}
}
/** Copy content to code bundle */
private def copyToCodeBundle(arg: TaskArgument, codeJar: File) {
arg.streams.log.debug(logPrefix(arg.name) + "Append %s to code bundle".format(codeJar.getName()))
// copy across all entries from the original code jar
val jarCode = new JarInputStream(new FileInputStream(codeJar))
try {
copy(arg, arg.bundleEntries, jarCode, arg.bundleJarCode, resourceFilter, arg.streams)
arg.bundleResources += codeJar.getAbsolutePath()
} catch {
case e: Throwable =>
arg.streams.log.error(logPrefix(arg.name) + "Unable to merge: " + e.getClass().getName() + " " + e.getMessage())
} finally {
if (jarCode != null)
jarCode.close()
}
}
/** Copy content to source bundle */
private def copyToSourceBundle(arg: TaskArgument, sourceJar: File) {
arg.streams.log.debug("append %s to source bundle".format(sourceJar.getName()))
// copy across all entries from the original code jar
val jarSource = new JarInputStream(new FileInputStream(sourceJar))
try {
copy(arg, arg.bundleEntries, jarSource, arg.bundleJarSource, resourceFilter, arg.streams)
} catch {
case e: Throwable =>
arg.streams.log.error(logPrefix(arg.name) + "Unable to merge: " + e.getClass().getName() + " " + e.getMessage())
} finally {
if (jarSource != null)
jarSource.close()
}
}
private[this] def restrictedCopy(m: ModuleID, confs: Boolean) =
ModuleID(m.organization, m.name, m.revision, crossVersion = m.crossVersion, extraAttributes = m.extraAttributes, configurations = if (confs) m.configurations else None)
/** Consolidated argument with all required information */
case class TaskArgument(
/** Application configuration that provides information about SBT process */
appConfiguration: AppConfiguration,
/** The property representing Ivy process log level */
ivyLogLevel: UpdateLogging.Value,
/** Ivy wrapper that contains org.apache.ivy.Ivy and org.apache.ivy.core.settings.IvySettings */
ivySbt: IvySbt,
/** Ivy scala artifacts description */
ivyScala: Option[IvyScala],
/** Original ModuleIDs from SBT project definition */
libraryDependencies: Seq[ModuleID],
/** Current project name */
name: String,
/** GetClassifiersModule */
origClassifiersModule: GetClassifiersModule,
/** Update configuration */
updateConfiguration: UpdateConfiguration,
/** Bundle path with jar name */
pathBundle: java.io.File,
/** Path to Fetched artifacts */
pathDependency: java.io.File,
/** Target path */
pathTarget: java.io.File,
/** SBT task streams for logging */
streams: TaskStreams,
/** Flag indicating whether custom libraries without ModuleID should be fetched */
dependencyEnableCustom: Boolean,
/** The property representing artifact location */
dependencyArtifact: Option[java.io.File],
/** Flag indicating whether plugin should create bundle */
dependencyBundle: Boolean,
/** Classpath that is used to build dependency sequence */
dependencyClasspath: Classpath,
/** Fetch filter */
dependencyFilter: Option[ModuleFilter],
    /** Flag indicating whether plugin should ignore a dependency configuration during lookup ('test', for example) */
dependencyIgnoreConfiguration: Boolean,
/** Function that filters jar content */
dependencyResourceFilter: ZipEntry => Boolean,
/** Skip resolved dependencies with explicit artifacts which points to local resources */
dependencySkipResolved: Boolean) {
/** Output stream for bundle with compiled code */
val bundleJarCode: JarOutputStream = if (dependencyBundle) {
assert(pathBundle.name endsWith ".jar", "incorrect dependency-bundle-path, must be path to jar file")
pathBundle.delete() // remove old bundle
new JarOutputStream(new BufferedOutputStream(new FileOutputStream(pathBundle, true)))
} else
null
    /** Output stream for bundle with source code */
val bundleJarSource: JarOutputStream = if (dependencyBundle) {
assert(pathBundle.name endsWith ".jar", "incorrect dependency-bundle-path, must be path to jar file")
val directory = pathBundle.getParentFile()
val name = pathBundle.getName
val pathSourceBundle = new File(directory, name.replaceFirst(""".jar$""", """-sources.jar"""))
pathSourceBundle.delete() // remove old bundle
new JarOutputStream(new BufferedOutputStream(new FileOutputStream(pathSourceBundle, true)))
} else
null
val bundleEntries = HashSet[String]()
val bundleResources = HashSet[String]()
}
}
| sbt-android-mill/sbt-dependency-manager | src/main/scala/sbt/dependency/manager/Plugin.scala | Scala | apache-2.0 | 41,927 |
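// ---------------------------------------------------------------------------
// Hedged configuration sketch for the plugin above (not part of the
// sbt-dependency-manager sources). It only touches keys and helpers visible
// in Plugin.scala (DependencyConf, dependencyPath, dependencyResourceFilter,
// Plugin.defaultSettings, Plugin.resourceFilter); the Build-style project
// definition matches the old sbt generation this code targets, so adjust it
// to your sbt version before relying on it.
object DependencyManagerBuildSketch extends sbt.Build {
  import sbt._
  import sbt.dependency.manager.Plugin
  import sbt.dependency.manager.Keys._

  lazy val example = Project("example", file("."))
    .settings(Plugin.defaultSettings: _*)
    .settings(
      // fetch into <project>/lib-deps instead of the default target/deps
      dependencyPath in DependencyConf := file("lib-deps"),
      // extend the default filter: also drop META-INF/LICENSE* entries
      dependencyResourceFilter in DependencyConf := { entry =>
        Plugin.resourceFilter(entry) || entry.getName.toUpperCase.startsWith("META-INF/LICENSE")
      })
}
// ---------------------------------------------------------------------------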
import scala.deriving.*
import scala.quoted.*
object Macro1 {
def mirrorFields[T: Type](using Quotes): List[String] =
Type.of[T] match {
case '[field *: fields] => Type.show[field] :: mirrorFields[fields]
case '[EmptyTuple] => Nil
}
// Demonstrates the use of quoted pattern matching
// over a refined type extracting the tuple type
  // of, for example, MirroredElemLabels
inline def test1[T](value: =>T): List[String] =
${ test1Impl('value) }
def test1Impl[T: Type](value: Expr[T])(using Quotes): Expr[List[String]] = {
import quotes.reflect.*
val mirrorTpe = Type.of[Mirror.Of[T]]
Expr.summon(using mirrorTpe).get match {
case '{ $m: Mirror.ProductOf[T]{ type MirroredElemLabels = elems } } => {
Expr(mirrorFields[elems])
}
}
}
} | dotty-staging/dotty | tests/run-macros/i8007/Macro_1.scala | Scala | apache-2.0 | 804 |
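// ---------------------------------------------------------------------------
// Hedged usage sketch for Macro1 above (not part of the dotty test). The
// macro has to be compiled in a separate run before this code, as in the
// original i8007 test. The User case class is invented here; the printed
// rendering of each label comes from Type.show and may vary across compiler
// versions, but one entry per field label is expected.
object Macro1Sketch {
  case class User(name: String, age: Int)

  @main def runMacro1Sketch(): Unit =
    println(Macro1.test1(User("Ada", 36))) // e.g. a two-element list for "name" and "age"
}
// ---------------------------------------------------------------------------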
object ch11_8 {
import scala.language.higherKinds
import ch11.Functor
trait Monad[F[_]] extends Functor[F] {
def unit[A](a: => A): F[A]
def compose[A,B,C](f: A => F[B], g: B => F[C]): A => F[C]
    def flatMap[A,B](ma: F[A])(f: A => F[B]): F[B] =
      compose((_: Unit) => ma, f)(())
}
}
import ch11_8._
/*
from repl you can test typing:
:load src/main/scala/fpinscala/ch11/Monad.scala
:load src/main/scala/fpinscala/ch11/Exercise8.scala
*/
| rucka/fpinscala | src/main/scala/fpinscala/ch11/Exercise8.scala | Scala | gpl-2.0 | 446 |
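// ---------------------------------------------------------------------------
// Hedged sketch for Exercise 11.8 above (not part of the fpinscala repo): a
// minimal Option instance showing that flatMap-via-compose behaves as usual.
// It assumes the chapter's Functor declares map(fa)(f), as used above.
object ch11_8_check {
  import scala.language.higherKinds
  import ch11_8.Monad

  val optionMonad: Monad[Option] = new Monad[Option] {
    def unit[A](a: => A): Option[A] = Some(a)
    def compose[A, B, C](f: A => Option[B], g: B => Option[C]): A => Option[C] =
      a => f(a).flatMap(g)
    def map[A, B](fa: Option[A])(f: A => B): Option[B] = fa.map(f)
  }

  def demo(): Unit =
    // Some(21) flatMapped with doubling: expected Some(42)
    println(optionMonad.flatMap(Some(21))(x => Some(x * 2)))
}
// ---------------------------------------------------------------------------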
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.util.concurrent.atomic._
import reflect._
import scala.math._
object SegmentList {
val MaxAttempts: Int = 20
}
/**
* A copy-on-write list implementation that provides consistent views. The view() method
* provides an immutable sequence representing a consistent state of the list. The user can do
* iterative operations on this sequence such as binary search without locking all access to the list.
 * Even if the range of the underlying list changes, no change will be made to the view.
*/
class SegmentList[T](seq: Seq[T])(implicit m: ClassManifest[T]) {
val contents: AtomicReference[Array[T]] = new AtomicReference(seq.toArray)
/**
* Append the given items to the end of the list
*/
def append(ts: T*)(implicit m: ClassManifest[T]) {
while(true){
val curr = contents.get()
val updated = new Array[T](curr.length + ts.length)
Array.copy(curr, 0, updated, 0, curr.length)
for(i <- 0 until ts.length)
updated(curr.length + i) = ts(i)
if(contents.compareAndSet(curr, updated))
return
}
}
/**
* Delete the first n items from the list
*/
def trunc(newStart: Int): Seq[T] = {
if(newStart < 0)
      throw new IllegalArgumentException("Starting index must be non-negative.")
var deleted: Array[T] = null
var done = false
while(!done) {
val curr = contents.get()
val newLength = max(curr.length - newStart, 0)
val updated = new Array[T](newLength)
      // clamp the copy offset to curr.length so an empty list or an over-large newStart is handled safely
      Array.copy(curr, min(newStart, curr.length), updated, 0, newLength)
if(contents.compareAndSet(curr, updated)) {
deleted = new Array[T](newStart)
Array.copy(curr, 0, deleted, 0, curr.length - newLength)
done = true
}
}
deleted
}
/**
* Get a consistent view of the sequence
*/
def view: Array[T] = contents.get()
/**
* Nicer toString method
*/
override def toString(): String = view.toString
}
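/*
 * Illustrative usage sketch (added, not in the original source):
 * {{{
 *   val segments = new SegmentList(Seq(1, 2, 3))
 *   segments.append(4, 5)
 *   val snapshot = segments.view     // Array(1, 2, 3, 4, 5); later mutation leaves it untouched
 *   val removed  = segments.trunc(2) // drops 1 and 2 from the list and returns them
 * }}}
 */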
| jinfei21/kafka | src/kafka/log/SegmentList.scala | Scala | apache-2.0 | 2,570 |
package com.twitter.util
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import scala.annotation.tailrec
import scala.collection.generic.CanBuild
import scala.collection.immutable.Queue
/**
* Events are instantaneous values, defined only at particular
* instants in time (cf. [[com.twitter.util.Var Vars]], which are
* defined at all times). It is possible to view Events as the
* discrete counterpart to [[com.twitter.util.Var Var]]'s continuous
* nature.
*
* Events are observed by registering [[com.twitter.util.Witness Witnesses]]
* to which the Event's values are notified.
*
* Note: There is a Java-friendly API for this trait: [[com.twitter.util.AbstractEvent]].
*/
trait Event[+T] { self =>
/**
* Register the given [[com.twitter.util.Witness Witness]] to
* this Event. Witnesses are notified of new values until it is
* deregistered by the returned [[com.twitter.util.Closable Closable]].
*/
def register(s: Witness[T]): Closable
/**
* Observe this event with function `f`. Equivalent to
* `register(Witness(f))`.
*/
final def respond(s: T => Unit): Closable = register(Witness(s))
/**
* Build a new Event by applying the given function to each value
* observed. Event values for which the partial function `f` does
* not apply are dropped; other values are transformed by `f`.
*/
def collect[U](f: PartialFunction[T, U]): Event[U] = new Event[U] {
def register(s: Witness[U]) =
self respond { t =>
f.runWith(s.notify)(t)
}
}
/**
* Build a new Event by keeping only those Event values that match
* the predicate `p`.
*/
def filter(p: T => Boolean): Event[T] =
collect { case t if p(t) => t }
/**
* Build a new Event by transforming each new event value with `f`.
*/
def map[U](f: T => U): Event[U] =
collect { case t => f(t) }
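  // Illustrative sketch (added, not in the original source): deriving filtered and
  // mapped events from a source event created with `Event[Int]()` (defined below).
  // {{{
  //   val ints   = Event[Int]()
  //   val labels = ints.filter(_ % 2 == 0).map(i => s"even: $i")
  //   val c      = labels.respond(println)
  //   ints.notify(1); ints.notify(2)   // prints "even: 2"
  //   c.close()
  // }}}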
/**
* Build a new Event by incrementally accumulating over events,
* starting with value `z`. Each intermediate aggregate is notified
* to the derived event.
*/
def foldLeft[U](z: U)(f: (U, T) => U): Event[U] = new Event[U] {
def register(s: Witness[U]) = {
var a = z
val mu = new{}
self respond Function.synchronizeWith(mu) { t =>
a = f(a, t)
s.notify(a)
}
}
}
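  // For example (added note): a running sum over an `Event[Int]` can be written as
  // `ints.foldLeft(0)(_ + _)`, which notifies 1, 3 and 6 for the inputs 1, 2 and 3.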
/**
   * Build a new Event representing a sliding window of at most `n` values.
   * Each value notified by the parent is added to a queue of size at
   * most `n`. This queue is in turn notified to witnesses registered on
   * the returned event.
*/
def sliding(n: Int): Event[Seq[T]] = new Event[Seq[T]] {
require(n > 0)
def register(s: Witness[Seq[T]]) = {
val mu = new{}
var q = Queue.empty[T]
self respond { t =>
s.notify(mu.synchronized {
q = q enqueue t
while (q.length > n) {
val (_, q1) = q.dequeue
q = q1
}
q
})
}
}
}
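  // For example (added note): `ints.sliding(2)` notifies Seq(1), Seq(1, 2) and
  // Seq(2, 3) for the inputs 1, 2 and 3; windows grow until they reach size `n`.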
/**
* The Event which merges the events resulting from `f` applied
* to each element in this Event.
*/
def mergeMap[U](f: T => Event[U]): Event[U] = new Event[U] {
def register(s: Witness[U]) = {
@volatile var inners = Nil: List[Closable]
val outer = self respond { el =>
inners.synchronized { inners ::= f(el).register(s) }
}
Closable.make { deadline =>
outer.close(deadline) before {
Closable.all(inners:_*).close(deadline)
}
}
}
}
/**
* Merge two Events of different types.
*/
def select[U](other: Event[U]): Event[Either[T, U]] = new Event[Either[T, U]] {
def register(s: Witness[Either[T, U]]) = Closable.all(
self.register(s comap { t => Left(t) }),
other.register(s comap { u => Right(u) })
)
}
/**
* Merge two event streams in lock-step, combining corresponding
* event values.
*
* @note This can be dangerous! Since the implementation needs to
* queue outstanding Event-values from the slower producer, if one
* Event outpaces another, this queue can grow in an unbounded
* fashion.
*/
def zip[U](other: Event[U]): Event[(T, U)] = new Event[(T, U)] {
def register(s: Witness[(T, U)]) = {
val mu = new{}
var state: Option[Either[Queue[T], Queue[U]]] = None
val left = self respond Function.synchronizeWith(mu) { t =>
state match {
case None =>
state = Some(Left(Queue(t)))
case Some(Left(q)) =>
state = Some(Left(q enqueue t))
case Some(Right(Queue(u, rest@_*))) =>
if (rest.isEmpty) state = None
else state = Some(Right(Queue(rest:_*)))
s.notify((t, u))
}
}
val right = other respond Function.synchronizeWith(mu) { u =>
state match {
case None =>
state = Some(Right(Queue(u)))
case Some(Right(q)) =>
state = Some(Right(q enqueue u))
case Some(Left(Queue(t, rest@_*))) =>
if (rest.isEmpty) state = None
else state = Some(Left(Queue(rest:_*)))
s.notify((t, u))
}
}
Closable.all(left, right)
}
}
/**
* Join two events into a new Event which notifies a tuple of the
* last value in each underlying event.
*/
def joinLast[U](other: Event[U]): Event[(T, U)] = new Event[(T, U)] {
def register(s: Witness[(T, U)]) = {
import Event.JoinState
import JoinState._
var state: JoinState[T, U] = Empty
val mu = new{}
val left = self respond Function.synchronizeWith(mu) { t =>
state match {
case Empty | LeftHalf(_) =>
state = LeftHalf(t)
case RightHalf(u) =>
state = Full(t, u)
s.notify((t, u))
case Full(_, u) =>
state = Full(t, u)
s.notify((t, u))
}
}
val right = other respond Function.synchronizeWith(mu) { u =>
state match {
case Empty | RightHalf(_) =>
state = RightHalf(u)
case LeftHalf(t) =>
state = Full(t, u)
s.notify((t, u))
case Full(t, _) =>
state = Full(t, u)
s.notify((t, u))
}
}
Closable.all(left, right)
}
}
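  // Illustrative contrast (added, not in the original source): for the event order
  // left=1, right="a", left=2, `zip` notifies only (1, "a") and queues 2 until the
  // next right value arrives, whereas `joinLast` notifies (1, "a") and then (2, "a"),
  // always combining the most recent value from each side.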
/**
* An event which consists of the first `howmany` values
* in the parent Event.
*/
def take(howmany: Int): Event[T] = new Event[T] {
def register(s: Witness[T]) = {
val n = new AtomicInteger(0)
val c = new AtomicReference(Closable.nop)
c.set(self respond { t =>
if (n.incrementAndGet() <= howmany) s.notify(t)
else c.getAndSet(Closable.nop).close()
})
if (n.get() == howmany)
c.getAndSet(Closable.nop).close()
Closable.ref(c)
}
}
/**
* Merge two events; the resulting event interleaves events
* from this and `other`.
*/
def merge[U >: T](other: Event[U]): Event[U] = new Event[U] {
def register(s: Witness[U]) = {
val c1 = self.register(s)
val c2 = other.register(s)
Closable.all(c1, c2)
}
}
/**
* Progressively build a collection of events using the passed-in
* builder. A value containing the current version of the collection
* is notified for each incoming event.
*/
def build[U >: T, That](implicit cbf: CanBuild[U, That]) = new Event[That] {
def register(s: Witness[That]) = {
val b = cbf()
self respond { t =>
b += t
s.notify(b.result())
}
}
}
/**
* A Future which is satisfied by the first value observed.
*/
def toFuture(): Future[T] = {
val p = new Promise[T]
val c = register(Witness(p))
p setInterruptHandler { case exc =>
p.updateIfEmpty(Throw(exc))
}
p ensure { c.close() }
}
/**
* The [[Event]] that stores the difference between successive
* updates to the parent event. This can be used to perform
* incremental computation on large data structures.
*/
def diff[CC[_]: Diffable, U](implicit toCC: T <:< CC[U]): Event[Diff[CC, U]] = new Event[Diff[CC, U]] {
def register(s: Witness[Diff[CC, U]]) = {
var left: CC[U] = Diffable.empty
self respond { t =>
synchronized {
val right = toCC(t)
val diff = Diffable.diff(left, right)
left = right
s.notify(diff)
}
}
}
}
/**
* Patch up an [[Event]] of differences (like those produced by
* [[Event.diff]]) into an [[Event]] that reflects the current
* version of a data structure. That is: `(event:
* Event[CC[T]]).diff.patch` is equivalent to `event`
*/
def patch[CC[_]: Diffable, U](implicit ev: T <:< Diff[CC, U]): Event[CC[U]] = new Event[CC[U]] {
def register(s: Witness[CC[U]]) = {
var last: CC[U] = Diffable.empty
self respond { diff =>
synchronized {
last = diff.patch(last)
s.notify(last)
}
}
}
}
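  // Illustrative round trip (added, not in the original source), assuming an event
  // `sets: Event[Set[Int]]`: `sets.diff` notifies incremental Diff values, and
  // `sets.diff.patch` rebuilds the stream of sets, so a witness registered on
  // `sets.diff.patch` observes the same Set[Int] values as one registered on `sets`.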
/**
   * Build a new Event by keeping only those values for which the equality
   * predicate `eq`, applied to the previous and the new value, returns
   * false. The first value observed is always kept.
*/
def dedupWith(eq: (T, T) => Boolean): Event[T] =
sliding(2).collect {
case Seq(init) => init
case Seq(current, next) if !eq(current, next) => next
}
/**
   * Build a new Event that drops any update which is equal (`==`) to the
   * previous update, i.e. only changes are notified.
*/
def dedup: Event[T] = dedupWith { (a, b) => a == b }
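  // For example (added note): for the inputs 1, 1, 2, 2, 1, `dedup` notifies 1, 2, 1;
  // only consecutive duplicates are dropped.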
}
/**
* Abstract `Event` class for Java compatibility.
*/
abstract class AbstractEvent[T] extends Event[T]
/**
* Note: There is a Java-friendly API for this object: [[com.twitter.util.Events]].
*/
object Event {
private sealed trait JoinState[+T, +U]
private object JoinState {
object Empty extends JoinState[Nothing, Nothing]
case class LeftHalf[T](t: T) extends JoinState[T, Nothing]
case class RightHalf[U](u: U) extends JoinState[Nothing, U]
case class Full[T, U](t: T, u: U) extends JoinState[T, U]
}
/**
* A new Event of type T which is also a Witness.
*/
def apply[T](): Event[T] with Witness[T] = new Event[T] with Witness[T] {
private[this] val witnesses = new AtomicReference(Set.empty[Witness[T]])
def register(w: Witness[T]) = {
casAdd(w)
Closable.make { _ =>
casRemove(w)
Future.Done
}
}
/**
* Notifies registered witnesses
*
* @note This method is synchronized to ensure that all witnesses
* receive notifications in the same order. Consequently it will block
* until the witnesses are notified.
*/
def notify(t: T) = synchronized {
val current = witnesses.get
for (w <- current)
w.notify(t)
}
@tailrec
private def casAdd(w: Witness[T]): Unit = {
val current = witnesses.get
if (!witnesses.compareAndSet(current, current + w)) {
casAdd(w)
}
}
@tailrec
private def casRemove(w: Witness[T]): Unit = {
val current = witnesses.get
if (!witnesses.compareAndSet(current, current - w)) {
casRemove(w)
}
}
}
}
/**
 * A Witness is the recipient of values from an [[com.twitter.util.Event Event]].
*
* Note: There is a Java-friendly API for this trait: [[com.twitter.util.AbstractWitness]].
*/
trait Witness[-N] { self =>
/**
* Notify this Witness with the given note.
*/
def notify(note: N)
def comap[M](f: M => N): Witness[M] = new Witness[M] {
def notify(m: M) = self.notify(f(m))
}
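  // For example (added note): `Witness(println(_: String)).comap((i: Int) => s"got $i")`
  // is a Witness[Int] that prints "got 1" when notified with 1.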
}
/**
* Abstract `Witness` class for Java compatibility.
*/
abstract class AbstractWitness[T] extends Witness[T]
/**
 * Note: There is a Java-friendly API for this object: [[com.twitter.util.Witnesses]].
*/
object Witness {
/**
* Create a Witness from an atomic reference.
*/
def apply[T](ref: AtomicReference[T]): Witness[T] = new Witness[T] {
def notify(t: T) = ref.set(t)
}
/**
* Create a Witness from a [[com.twitter.util.Promise Promise]].
*/
def apply[T](p: Promise[T]): Witness[T] = new Witness[T] {
def notify(t: T) = p.updateIfEmpty(Return(t))
}
/**
* Create a Witness from a function.
*/
def apply[T](f: T => Unit): Witness[T] = new Witness[T] {
def notify(t: T) = f(t)
}
def apply[T](u: Updatable[T]): Witness[T] = new Witness[T] {
def notify(t: T) = u() = t
}
/**
* A Witness which prints to the console.
*/
val printer: Witness[Any] = Witness(println(_))
}
/**
* A Java analog of `Event[A]()`.
*/
class WitnessedEvent[T] extends Event[T] with Witness[T] {
private[this] val underlying = Event[T]()
def register(s: Witness[T]): Closable = underlying.register(s)
def notify(note: T): Unit = underlying.notify(note)
}
| stremlenye/util | util-core/src/main/scala/com/twitter/util/Event.scala | Scala | apache-2.0 | 12,742 |
package io.opencensus.scala.http4s
import cats.effect.{Effect, Resource}
import cats.implicits._
import io.opencensus.scala.Tracing
import io.opencensus.scala.http.propagation.Propagation
import io.opencensus.scala.http.{HttpAttributes => BaseHttpAttributes}
import io.opencensus.scala.http4s.HttpAttributes._
import io.opencensus.scala.http4s.TracingUtils.recordResponse
import io.opencensus.scala.http4s.propagation.Http4sFormatPropagation
import io.opencensus.trace.{Span, Status}
import org.http4s.client.Client
import org.http4s.{Header, Request, Response}
abstract class TracingClient[F[_]: Effect] {
protected val tracing: Tracing
protected val propagation: Propagation[Header, Request[F]]
/**
   * Enriches the `Client[F]` with tracing and propagates the SpanContext via HTTP headers.
*
* @param parentSpan the current span which will act as parent of the new span if given
*/
def trace(client: Client[F], parentSpan: Option[Span] = None): Client[F] = {
val tracedOpen: Request[F] => Resource[F, Response[F]] =
req =>
for {
span <- Resource.liftF(startSpan(parentSpan, req))
enrichedReq = addTraceHeaders(req, span)
res <- client
.run(enrichedReq)
.onError(traceError(span).andThen(x => Resource.liftF(x)))
} yield recordResponse(span, tracing)(res)
Client(tracedOpen)
}
private def traceError(span: Span): PartialFunction[Throwable, F[Unit]] = {
case _ => recordException(span)
}
private def startSpan(parentSpan: Option[Span], req: Request[F]) =
Effect[F].delay(startAndEnrichSpan(req, parentSpan))
private def startAndEnrichSpan(
req: Request[F],
parentSpan: Option[Span]
): Span = {
val name = req.uri.path.toString
val span = parentSpan.fold(tracing.startSpan(name))(span =>
tracing.startSpanWithParent(name, span)
)
BaseHttpAttributes.setAttributesForRequest(span, req)
span
}
private def addTraceHeaders(request: Request[F], span: Span): Request[F] =
request.withHeaders(
request.headers.put(propagation.headersWithTracingContext(span): _*)
)
private def recordException(span: Span) =
Effect[F].delay(tracing.endSpan(span, Status.INTERNAL))
}
object TracingClient {
def apply[F[_]: Effect]: TracingClient[F] =
new TracingClient[F] {
override protected val tracing: Tracing = Tracing
override protected val propagation: Propagation[Header, Request[F]] =
new Http4sFormatPropagation[F] {}
}
}
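// Illustrative usage sketch (added, not in the original source; `httpClient: Client[IO]`
// and a cats-effect `Effect[IO]` instance are assumed to be in scope):
//
//   val traced: Client[IO] = TracingClient[IO].trace(httpClient)
//   // requests sent through `traced` are wrapped in a span and carry the tracing
//   // context headers added by `addTraceHeaders`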
| census-ecosystem/opencensus-scala | http4s/src/main/scala/io/opencensus/scala/http4s/TracingClient.scala | Scala | apache-2.0 | 2,536 |
package com.jdrews.logstation.service
import akka.actor._
import akka.pattern._
import com.jdrews.logstation.tailer.{LogTailerActor, LogThisFile}
import com.jdrews.logstation.utils.LogStationColorizer
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.matching.Regex
/**
* Created by jdrews on 2/21/2015.
*
 * Holds the log tailer and colorizer actors
*/
class LogStationServiceActor extends Actor with ActorLogging{
private var logTailers = Set.empty[ActorRef]
private var logStationColorizers = Set.empty[ActorRef]
var syntaxList = scala.collection.mutable.Map[String, Regex]()
def receive = {
case logThisFile: LogThisFile =>
log.info(s"About to begin logging ${logThisFile.logFile}")
val logStationColorizer = context.actorOf(Props[LogStationColorizer], name = s"LogStationColorizer-${logThisFile.logFile.replaceAll("[^A-Za-z0-9]", ":")}")
logStationColorizer ! syntaxList
context watch logStationColorizer
logStationColorizers += logStationColorizer
val logTailerActor = context.actorOf(Props[LogTailerActor], name = s"LogTailerActor-${logThisFile.logFile.replaceAll("[^A-Za-z0-9]", ":")}")
logTailerActor ! logThisFile
logTailerActor ! logStationColorizer
context watch logTailerActor
logTailers += logTailerActor
case syntax: scala.collection.mutable.Map[String, Regex] =>
log.info(s"passing the syntax file! $syntax")
// store list to send to any new colorizers
syntaxList = syntax
logStationColorizers.foreach(colorizer => colorizer ! syntax)
case ServiceShutdown =>
// for each logTailers and logStationColorizers, send shutdown call and wait for it to shut down.
log.info("got ServiceShutdown")
logTailers.foreach(actor =>
try {
Await.result(gracefulStop(actor, 20 seconds, ServiceShutdown), 20 seconds)
} catch {
case e: AskTimeoutException ⇒ log.error("The actor didn't stop in time!" + e.toString)
}
)
logStationColorizers.foreach(actor =>
try {
Await.result(gracefulStop(actor, 20 seconds, ServiceShutdown), 20 seconds)
} catch {
case e: AskTimeoutException ⇒ log.error("The actor didn't stop in time!" + e.toString)
}
)
context stop self
case actTerminated: Terminated => log.info(actTerminated.toString)
case something => log.warning(s"huh? $something")
}
}
| jdrews/logstation | src/main/scala/com/jdrews/logstation/service/LogStationServiceActor.scala | Scala | apache-2.0 | 2,704 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.tools.console
import io.prediction.tools.Runner
import java.io.File
case class ExportArgs(
appId: Int = 0,
channel: Option[String] = None,
outputPath: String = "",
format: String = "json")
object Export {
def eventsToFile(ca: ConsoleArgs, core: File): Int = {
val channelArg = ca.export.channel
.map(ch => Seq("--channel", ch)).getOrElse(Seq())
Runner.runOnSpark(
"io.prediction.tools.export.EventsToFile",
Seq(
"--appid",
ca.export.appId.toString,
"--output",
ca.export.outputPath,
"--format",
ca.export.format) ++ channelArg,
ca,
core)
}
}
| ydanilenko/PredictionIO | tools/src/main/scala/io/prediction/tools/console/Export.scala | Scala | apache-2.0 | 1,280 |
package models
import test.ApplicationSpecification
import org.specs2._
import specification._
import AssetSortType._
import SortDirection._
import java.sql.Timestamp
class AssetDistanceSorterSpec extends ApplicationSpecification {
args(sequential = true)
"Create a meta" should {
"for outlets" in {
val ms = new mocksorter {}
ms.assetValues.foreach { case(assetTag, metaList) =>
Asset.findByTag(assetTag.toString.toLowerCase).getOrElse {
val asset =
Asset.create(Asset(assetTag.toString.toLowerCase, Status.Unallocated.get, AssetType.ServerNode.get))
metaList.foreach { case (value, assetMetaTag) =>
AssetMeta.findOrCreateFromName(assetMetaTag)
val meta = AssetMeta.findByName(assetMetaTag).get
val mv = AssetMetaValue(asset, meta.id, value.toString)
try {
AssetMetaValue.create(mv)
} catch {
case e: RuntimeException =>
Thread.sleep(1000)
AssetMetaValue.create(mv)
}
}
}
}
true
}
}
"AssetDistanceSorter" should {
"sparse" in new mocksorter {
val expected = List("e","b","d","c","a")
val sortedAssets = AssetDistanceSorter.distributionSort(
targetAsset,
similarAssets,
SortAsc,
sortConfig)
sortedAssets.map{_.tag} must_== expected
}
"dense" in new mocksorter {
val expected = List("a","b","c","d","e")
val sortedAssets = AssetDistanceSorter.distributionSort(
targetAsset,
similarAssets,
SortDesc,
sortConfig)
sortedAssets.map{_.tag} must_== expected
}
}
trait mocksorter extends Scope {
val sortParams = List("A", "B", "C")
val sortValues = List(
("t",List(0,0,0)),
("a",List(0,0,1)),
("b",List(0,1,0)),
("c",List(0,1,1)),
("d",List(1,0,0)),
("e",List(1,0,1))
)
val sortConfig = sortParams.reverse.toSet
val assetValues = sortValues.map{case (assetTag, values) => (assetTag, values.zip(sortParams))}
def targetAsset = Asset.findByTag(sortValues.head._1).get
def similarAssets = sortValues.tail.map{t => Asset.findByTag(t._1).get}
}
"MockAssetNameEval" should {
"return correct distance" in {
val a1 = new Asset("1", 0, 0, new Timestamp(System.currentTimeMillis), None, None)
val a2 = new Asset("2", 0, 0, new Timestamp(System.currentTimeMillis), None, None)
val nameeval = new MockAssetNameEval
nameeval.distance(a1, a2) must_== 1
}
}
"AssetDistanceSorter" should {
"sort named assets in ascending order" in {
val assets = (1 to 20).map { i =>
new Asset(i.toString, 0, 0, new Timestamp(System.currentTimeMillis), None, None)
}
assets must_== AssetDistanceSorter.sort(
new Asset("0", 0, 0, new Timestamp(System.currentTimeMillis), None, None),
assets,
Name,
SortAsc
)
}
"sort permuted named assets in ascending order" in {
val assets1 = (11 to 20).map { i =>
new Asset(i.toString, 0, 0, new Timestamp(System.currentTimeMillis), None, None)
}
val assets2 = (1 to 10).map { i =>
new Asset(i.toString, 0, 0, new Timestamp(System.currentTimeMillis), None, None)
}
(assets2 ++ assets1) must_== AssetDistanceSorter.sort(
new Asset("0", 0, 0, new Timestamp(System.currentTimeMillis), None, None),
(assets1 ++ assets2),
Name,
SortAsc)
}
"sort named assets in descending order" in {
val assets = (1 to 20).map { i =>
new Asset(i.toString, 0, 0, new Timestamp(System.currentTimeMillis), None, None)
}
assets.reverse must_== AssetDistanceSorter.sort(
new Asset("0", 0, 0, new Timestamp(System.currentTimeMillis), None, None),
assets,
Name,
SortDesc)
}
} // AssetDistanceSorter should
}
| Shopify/collins | test/models/AssetDistanceSorterSpec.scala | Scala | apache-2.0 | 4,272 |