code        string   (lengths 5 to 1M)
repo_name   string   (lengths 5 to 109)
path        string   (lengths 6 to 208)
language    string   (1 distinct value)
license     string   (15 distinct values)
size        int64    (5 to 1M)
package edu.gemini.ags.api import edu.gemini.ags.api.AgsAnalysis.NotReachable import edu.gemini.ags.api.AgsMagnitude.{MagnitudeCalc, MagnitudeTable} import edu.gemini.catalog.api.CatalogQuery import edu.gemini.pot.ModelConverters._ import edu.gemini.spModel.ags.AgsStrategyKey import edu.gemini.spModel.core.{Angle, BandsList, Coordinates, SiderealTarget} import edu.gemini.spModel.guide.{GuideProbe, GuideStarValidation, ValidatableGuideProbe} import edu.gemini.spModel.obs.context.ObsContext import edu.gemini.spModel.rich.shared.immutable._ import edu.gemini.shared.util.immutable.{Option => JOption, Some => JSome} import edu.gemini.spModel.target.SPTarget import edu.gemini.spModel.target.env._ import scala.concurrent.{ExecutionContext, Future} import scalaz._ import Scalaz._ trait AgsStrategy { def key: AgsStrategyKey def magnitudes(ctx: ObsContext, mt: MagnitudeTable): List[(GuideProbe, MagnitudeCalc)] def analyze(ctx: ObsContext, mt: MagnitudeTable): List[AgsAnalysis] def analyze(ctx: ObsContext, mt: MagnitudeTable, guideProbe: ValidatableGuideProbe, guideStar: SiderealTarget): Option[AgsAnalysis] def analyzeForJava(ctx: ObsContext, mt: MagnitudeTable, guideProbe: ValidatableGuideProbe, guideStar: SiderealTarget): JOption[AgsAnalysis] = { val spTarget = new SPTarget(SiderealTarget.empty.copy(coordinates = Coordinates(guideStar.coordinates.ra, guideStar.coordinates.dec))) if (guideProbe.validate(spTarget, ctx) != GuideStarValidation.VALID) new JSome(NotReachable(guideProbe, guideStar, probeBands)) else analyze(ctx, mt, guideProbe, guideStar).asGeminiOpt } def candidates(ctx: ObsContext, mt: MagnitudeTable)(ec: ExecutionContext): Future[List[(GuideProbe, List[SiderealTarget])]] /** * Returns a list of catalog queries that would be used to search for guide stars with the given context */ def catalogQueries(ctx: ObsContext, mt: MagnitudeTable): List[CatalogQuery] def estimate(ctx: ObsContext, mt: MagnitudeTable)(ec: ExecutionContext): Future[AgsStrategy.Estimate] def select(ctx: ObsContext, mt: MagnitudeTable)(ec: ExecutionContext): Future[Option[AgsStrategy.Selection]] def guideProbes: List[GuideProbe] /** * Indicates the bands that will be used for a given probe */ def probeBands: BandsList } object AgsStrategy { object Estimate { val CompleteFailure = Estimate(0.0) val GuaranteedSuccess = Estimate(1.0) def toEstimate(probability: Double): Estimate = Estimate(probability).normalize } /** * Estimation of success of finding a guide star at phase 2 time. */ case class Estimate(probability: Double) extends AnyVal { def normalize: Estimate = if (probability <= 0) Estimate.CompleteFailure else if (probability >= 1) Estimate.GuaranteedSuccess else this } /** * An assignment of a guide star to a particular guide probe. */ case class Assignment(guideProbe: GuideProbe, guideStar: SiderealTarget) /** * Results of running an AGS selection. The position angle for which the * results are valid along with all assignments of guide probes to stars. */ case class Selection(posAngle: Angle, assignments: List[Assignment]) { /** * Creates a new TargetEnvironment with guide stars for each assignment in * the Selection. */ def applyTo(env: TargetEnvironment): TargetEnvironment = { import AutomaticGroup.Active val targetMap = ==>>.fromList(assignments.map { case Assignment(gp,gs) => gp -> new SPTarget(gs) }) val newAuto = Active(targetMap, posAngle): AutomaticGroup val oldAuto = TargetEnv.auto.get(env) // True if the pos angle differs. 
def posAngleUpdated = oldAuto match { case Active(_, oldPa) => oldPa =/= posAngle case _ => true } // SPTargets are compared by references, so we extract the names and compare. def extractNames(auto: AutomaticGroup) = auto.targetMap.map(_.getName) // If this is different from the old automatic GG, then replace. val updated = (extractNames(oldAuto) =/= extractNames(newAuto)) || posAngleUpdated if (updated) TargetEnv.auto.set(env, newAuto) else env } def applyTo(ctx: ObsContext): ObsContext = { // Make a new TargetEnvironment with the guide probe assignments. Update // the position angle as well if the automatic group is primary. applyTo(ctx.getTargets) |> ctx.withTargets |> { ctx0 => val auto = ctx0.getTargets.getGuideEnvironment.guideEnv.primaryGroup.isAutomatic auto ? ctx0.withPositionAngle(posAngle) | ctx0 } } } }
spakzad/ocs
bundle/edu.gemini.ags/src/main/scala/edu/gemini/ags/api/AgsStrategy.scala
Scala
bsd-3-clause
4,653
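The AgsStrategy.Estimate value class in the edu.gemini.ags file above clamps a probability into [0, 1] via normalize (CompleteFailure at 0.0, GuaranteedSuccess at 1.0). A minimal standalone sketch of that clamping behaviour, re-stated outside the Gemini codebase — the object and names below are illustrative, not the real edu.gemini.ags.api types:

// Illustrative re-statement of AgsStrategy.Estimate.normalize; not the Gemini class itself.
object EstimateDemo {
  final case class Estimate(probability: Double) {
    // Out-of-range probabilities are clamped to the 0.0 / 1.0 endpoints.
    def normalize: Estimate =
      if (probability <= 0) Estimate(0.0)
      else if (probability >= 1) Estimate(1.0)
      else this
  }

  def main(args: Array[String]): Unit = {
    println(Estimate(-0.25).normalize) // Estimate(0.0)  -- complete failure
    println(Estimate(0.42).normalize)  // Estimate(0.42) -- unchanged
    println(Estimate(1.5).normalize)   // Estimate(1.0)  -- guaranteed success
  }
}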
/* * Copyright (c) 2015, Nightfall Group * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package moe.nightfall.instrumentality.editor.gui import moe.nightfall.instrumentality.animations.NewPCAAnimation import moe.nightfall.instrumentality.editor.control._ import moe.nightfall.instrumentality.editor.{EditElement, UIUtils} import moe.nightfall.instrumentality.{Loader, ModelCache, PMXInstance} import org.lwjgl.opengl.{GL11, GL14} import org.lwjgl.BufferUtils /** * Created on 25/08/15, ported to Scala on 2015-09-20. Oh, and our date formats are inconsistent. * availableModels must contain at least null. * Also note that I intend for this to perform loading in the background. */ class ModelChooserElement(val availableModels: Seq[String], powerlineContainerElement: PowerlineContainerElement) extends EditElement { private val availableModelUnits = new Array[PMXInstance](availableModels.length) availableModels.zipWithIndex.foreach { case (k, v) => if (k != null) { val mdl = ModelCache.getLocal(k) if (mdl != null) { availableModelUnits(v) = new PMXInstance(mdl) availableModelUnits(v).anim = new NewPCAAnimation(mdl.defaultAnims) } } } // offset for rendering to get animation var renderOffset = 0D // Used to prevent a total failure. 
var slowLoad = 0 // No border, no shadow borderWidth = 0 shadowWidth = 0 private var mainRotary = new View3DElement { override def rotate() = () override def draw3D(): Unit = { def renderModel(index: Int, clipping: Float) { if (index > slowLoad || index < 0 || index >= availableModelUnits.length) return if (availableModels(index) == null) { // Player GL11.glPushMatrix() GL11.glEnable(GL11.GL_CLIP_PLANE0) val buffer = BufferUtils.createDoubleBuffer(4).put(Array(0D, -1D, 0D, clipping)) buffer.rewind() GL11.glClipPlane(GL11.GL_CLIP_PLANE0, buffer) Loader.applicationHost.drawPlayer() GL11.glDisable(GL11.GL_CLIP_PLANE0) GL11.glPopMatrix() } else { val mu = availableModelUnits(index) if (mu != null) { GL11.glPushMatrix() val scale = 1 / mu.theModel.height GL11.glScaled(scale, scale, scale) mu.render(1, 1, 1, clipping, 0.25F) GL11.glPopMatrix() } } } def drawText(index: Int) { if (index < 0 || index >= availableModelUnits.length) return val name = if (availableModels(index) == null) "Default" else { val mu = availableModelUnits(index) if (mu != null) mu.theFile.globalCharname else return } GL11.glPushMatrix() GL11.glTranslated(0, 1.1d, 0) GL11.glScaled(-0.1d, -0.1d, 0.1d) GL11.glScaled(0.125d, 0.125d, 0.125d) val nameSize = UIUtils.sizeText(name) val textScale = if (nameSize.getX > 64) 1 / (((nameSize.getX - 64) / 64) + 1) else 1 GL11.glScaled(textScale, textScale, 1) GL11.glTranslated(-nameSize.getX / 2, -nameSize.getY, 0) UIUtils.drawText(name) GL11.glPopMatrix() } GL11.glPushMatrix() GL11.glRotatef(180, 0, 1, 0) GL11.glTranslatef(0, 0, 1) val current = availableModels.indexOf(Loader.currentFile) // Draw text for (offset <- -2 to 2) { GL11.glPushMatrix() GL11.glTranslated((offset + renderOffset) * 0.75, 0, math.abs(offset + renderOffset) * 0.5 - 1) drawText(current + offset) GL11.glPopMatrix() } def renderModels() { for (offset <- (if (renderOffset > 0) -3 else -2) to (if (renderOffset < 0) 3 else 2)) { GL11.glPushMatrix() GL11.glTranslated((offset + renderOffset) * 0.75, 0, math.abs(offset + renderOffset) * 0.5 - 1) // Selected if (offset == 0) { GL11.glRotated(rotYaw, 0, 1, 0) } val fadeIn = if (math.abs(offset) == 3) { math.abs(renderOffset).toFloat } else if (renderOffset < 0 && offset == -2 || renderOffset > 0 && offset == 2) { 1 - math.abs(renderOffset).toFloat } else 1 renderModel(current + offset, fadeIn) GL11.glPopMatrix() } } renderModels() GL11.glScaled(1, -1, 1) renderModels() GL11.glEnable(GL11.GL_BLEND) GL11.glColor4f(1, 1, 1, 0.5F) GL11.glPopMatrix() GL11.glBegin(GL11.GL_QUADS) GL11.glVertex3d(-20, 0, -20) GL11.glVertex3d(20, 0, -20) GL11.glVertex3d(20, 0, 20) GL11.glVertex3d(-20, 0, 20) GL11.glEnd() GL11.glColor4f(1, 1, 1, 1) GL11.glDisable(GL11.GL_BLEND) slowLoad += 1 if (slowLoad > availableModelUnits.length) slowLoad = availableModelUnits.length } } mainRotary.translateY = -0.6 mainRotary.scale = 3 subElements += mainRotary private var buttonLeft = new ArrowButtonElement(180, updateModelPosition(availableModels(getFB._2), 1)) private var buttonRight = new ArrowButtonElement(0, updateModelPosition(availableModels(getFB._1), -1)) private def updateModelPosition(next: String, offset: Int) { if (next != Loader.currentFile) { Loader.setCurrentFile(next) mainRotary.rotYaw = 0 renderOffset = offset updateButtons() } } private var buttonbar = Array[TextButtonElement]( new TextButtonElement("Downloader", { val l = new DownloaderElement(powerlineContainerElement) powerlineContainerElement.addAndGo("PMX Downloader", l) }), new TextButtonElement("Benchmark", { if (Loader.currentFile != 
null) { val mdl = ModelCache.getLocal(Loader.currentFile) if (mdl != null) { val l = new BenchmarkElement(mdl) powerlineContainerElement.addAndGo("Benchmark", l) } } }), new TextButtonElement("Animations", { if (Loader.currentFile != null) { val mdl = ModelCache.getLocal(Loader.currentFile) if (mdl != null) { val l = new PoseTreeElement(mdl, powerlineContainerElement) powerlineContainerElement.addAndGo("Animations", l) } } }) ) subElements ++= buttonbar += buttonRight += buttonLeft private def updateButtons() { val isDefault = Loader.currentFile == null buttonbar(1).disabled = isDefault buttonbar(2).disabled = isDefault } // TODO This method does not comply with my standards because of the overuse of tuples, divide into two methods pls private def getFB: (Int, Int) = { val point = availableModels.zipWithIndex.filter(_._1 == Loader.currentFile) val index = point.head._2 val next = if (index == (availableModelUnits.length - 1)) index else index + 1 if (index == 0) return (index, next) (index - 1, next) } override def layout() { var pos = width - 35 buttonbar.view.zipWithIndex.foreach { case (button, index) => val buttonWidth = math.ceil(UIUtils.sizeText(button.text).getX).toInt * 5 pos -= buttonWidth button.setSize(buttonWidth, 48) button.posX = pos button.posY = height - button.height - 5 } buttonLeft.setSize(30, height) buttonRight.posY = 0 buttonRight.posX = 0 buttonRight.setSize(30, height) buttonRight.posY = 0 buttonRight.posX = width - 30 mainRotary.posX = 0 mainRotary.posY = 0 mainRotary.setSize(width, height) // Don't know if this is needed, just in case it was changed elsewhere... updateButtons() } override def update(dT: Double) = { // Fancy animation if (renderOffset > 0) { renderOffset -= dT * 2 if (renderOffset < 0) renderOffset = 0 } else if (renderOffset < 0) { renderOffset += dT * 2 if (renderOffset > 0) renderOffset = 0 } availableModels.zipWithIndex.foreach(kv => { if (availableModelUnits(kv._2) != null) availableModelUnits(kv._2).update(dT) }) } override def cleanup() = { availableModels.zipWithIndex.foreach(kv => { if (availableModelUnits(kv._2) != null) availableModelUnits(kv._2).cleanupGL() }) } }
Nightfall/Instrumentality
core/src/main/scala/moe/nightfall/instrumentality/editor/gui/ModelChooserElement.scala
Scala
bsd-2-clause
10,875
package scala.meta.tests
package contrib

import org.scalatest.FunSuite

import scala.meta._
import scala.meta.contrib._

class ModReplacementTest extends FunSuite {

  test("Test replace class mods with no existing mods") {
    val newClass = q"class Foo".withMods(mod"final" :: Nil)
    assert(newClass.hasMod(mod"final"))
  }

  test("Test replace class mods with existing mods") {
    val newClass = q"sealed class Foo".withMods(mod"final" :: Nil)
    assert(newClass.hasMod(mod"final"))
  }

  test("Test replace trait mods") {
    val newTrait = q"trait Foo".withMods(mod"final" :: Nil)
    assert(newTrait.hasMod(mod"final"))
  }

  test("Test replace object mods") {
    val newObject = q"object Foo".withMods(mod"final" :: Nil)
    assert(newObject.hasMod(mod"final"))
  }

  test("Test replace def mods") {
    val newDef = q"def foo = 1".withMods(mod"final" :: Nil)
    assert(newDef.hasMod(mod"final"))
  }

  test("Test replace var mods") {
    val newVar = q"var foo = 1".withMods(mod"final" :: Nil)
    assert(newVar.hasMod(mod"final"))
  }

  test("Test replace val mods") {
    val newVal = q"val foo = 1".withMods(mod"final" :: Nil)
    assert(newVal.hasMod(mod"final"))
  }
}
DavidDudson/scalameta
tests/shared/src/test/scala/scala/meta/tests/contrib/ModReplacementTest.scala
Scala
bsd-3-clause
1,200
class Bar extends JavaFoo with Foo {
  def read(): Int = ???
}

@main def Test =
  val stdout = new Bar
som-snytt/dotty
tests/run/i8101/Test.scala
Scala
apache-2.0
104
/* * Copyright (c) 2014-2020 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.java8 import java.util.concurrent.{CancellationException, CompletableFuture, CompletionException} import java.util.function.BiFunction import monix.execution.{Cancelable, CancelableFuture} import scala.concurrent.{ExecutionContext, Future} import scala.util.{Failure, Success} /** * DEPRECATED — switch to Scala 2.12+ and [[monix.execution.FutureUtils]]. */ package object execution { /** * DEPRECATED — switch to Scala 2.12+ and * [[monix.execution.CancelableFuture.fromJavaCompletable CancelableFuture.fromJavaCompletable]]. */ @deprecated("Switch to Scala 2.12+ and CancelableFuture.fromJavaCompletable", "3.0.0") implicit class JavaCompletableFutureUtils[A](val source: CompletableFuture[A]) extends AnyVal { /** * DEPRECATED — switch to Scala 2.12+ and * [[monix.execution.CancelableFuture.fromJavaCompletable CancelableFuture.fromJavaCompletable]]. */ @deprecated("Switch to Scala 2.12+ and CancelableFuture.fromJavaCompletable", "3.0.0") def asScala(implicit ec: ExecutionContext): CancelableFuture[A] = { // $COVERAGE-OFF$ CancelableFuture.async(cb => { source.handle[Unit](new BiFunction[A, Throwable, Unit] { override def apply(result: A, err: Throwable): Unit = { err match { case null => cb(Success(result)) case _: CancellationException => () case ex: CompletionException if ex.getCause ne null => cb(Failure(ex.getCause)) case ex => cb(Failure(ex)) } } }) Cancelable({ () => source.cancel(true); () }) }) // $COVERAGE-ON$ } } /** * DEPRECATED — switch to Scala 2.12+ and * [[monix.execution.FutureUtils.toJavaCompletable FutureUtils.toJavaCompletable]]. */ @deprecated("Switch to Scala 2.12+ and FutureUtils.toJavaCompletable", "3.0.0") implicit class ScalaFutureUtils[A](val source: Future[A]) extends AnyVal { /** * DEPRECATED — switch to Scala 2.12+ and * [[monix.execution.FutureUtils.toJavaCompletable FutureUtils.toJavaCompletable]]. */ @deprecated("Switch to Scala 2.12+ and FutureUtils.toJavaCompletable", "3.0.0") def asJava(implicit ec: ExecutionContext): CompletableFuture[A] = { // $COVERAGE-OFF$ val cf = new CompletableFuture[A]() source.onComplete { case Success(a) => cf.complete(a) case Failure(ex) => cf.completeExceptionally(ex) } cf // $COVERAGE-ON$ } } }
alexandru/monifu
monix-java/src/main/scala/monix/java8/execution/package.scala
Scala
apache-2.0
3,293
abstract class PsiStackOverflowError {
  case class TGItem(tg: String) extends TreeItem[SportsmanItem] {
    def children = List()
  }

  case class SportsmanItem(user: String) extends TreeItem[SportsmanItem] {
    def children = List()
  }

  val tree = new Tree[TreeItem.Min]() {
    def setContent(trainingGroups: Iterable[String]) {
      val rootNodes = trainingGroups.map(TGItem(_))
      rootNodes.foreach(/* line: 26 */addItemRecursively)
      rootNodes.foreach(expandItemsRecursively)
    }
  }
}

object TreeItem {
  type Min = TreeItem[T forSome {type T <: TreeItem[T]}]
}

trait TreeItem[+T <: TreeItem[T]] {
  def children: Iterable[T]
}

class Tree[TI <: TreeItem[TI]] extends VaadinTree {
  def addItemRecursively(item: TI) = 1
  def addItemRecursively(s: String) { }
}

class VaadinTree {
  def expandItemsRecursively(startItemId: AnyRef): Boolean = false
}
ilinum/intellij-scala
testdata/resolve2/bug3/SOE.scala
Scala
apache-2.0
875
/* * Copyright 2009 Ilja Booij * * This file is part of GarminTrainer. * * GarminTrainer is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * GarminTrainer is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GarminTrainer. If not, see <http://www.gnu.org/licenses/>. */ package nl.iljabooij.garmintrainer.model import nl.iljabooij.garmintrainer.model.Duration._ import org.junit.Assert._ import org.mockito.Mockito._ import org.scalatest.junit.{JUnit3Suite,AssertionsForJUnit} import org.scalatest.mock.MockitoSugar /** * Test for {@link StartTrackPoint}. * @author ilja * */ class StartTrackPointTest extends JUnit3Suite with AssertionsForJUnit with MockitoSugar { private val START_TIME = new DateTime private val TRACK_POINT_TIME = START_TIME + second * 10 private val DISTANCE = new Length.Meter(100.0) private var startTrackPoint:StartTrackPoint = _ private var measuredTrackPoint:MeasuredTrackPoint = _ override def setUp() { measuredTrackPoint = mock[MeasuredTrackPoint] startTrackPoint = new StartTrackPoint(START_TIME, measuredTrackPoint) } def testGetSpeed() { when (measuredTrackPoint.time).thenReturn(TRACK_POINT_TIME) when (measuredTrackPoint.distance).thenReturn(DISTANCE) // speed as calculated: val speed = Speed.speed(DISTANCE, new Duration(START_TIME, TRACK_POINT_TIME)) assertEquals(speed, startTrackPoint.speed) verify(measuredTrackPoint, times(1)).time verify(measuredTrackPoint, times(1)).distance } def testAltitudeDelta() { assertEquals("Start point should not have altitude gain", Length.ZERO, startTrackPoint.altitudeDelta) } }
chmandrade/garmintrainer
src/test/scala/nl/iljabooij/garmintrainer/model/StartTrackPointTest.scala
Scala
gpl-3.0
2,076
/* * Copyright 2014–2017 SlamData Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package quasar.api import slamdata.Predef._ import quasar.csv.CsvParser import org.scalacheck.{Arbitrary, Gen} import quasar.api.MessageFormat.{JsonContentType, Csv} import JsonPrecision.{Precise,Readable} import JsonFormat.{LineDelimited,SingleArray} import scalaz.scalacheck.ScalaCheckBinding._ import scalaz._, Scalaz._ object MessageFormatGen { // The Content-Type spec specifies that control characters are not allowed which is // why we use alphaChar here // See https://github.com/tototoshi/scala-csv/issues/98 for why actually let's avoid alphaChar for now // and go with relatively "standard" csv formats implicit val arbFormat: Arbitrary[CsvParser.Format] = Arbitrary( for { del <- Gen.oneOf(List(',', '\\t', '|', ':', ';')) quote <- Gen.oneOf(List('"', '\\'')) esc <- Gen.oneOf(quote, '\\\\') term <- Gen.oneOf(List("\\r\\n")) // See https://github.com/tototoshi/scala-csv/issues/97 for why `lineTerminator` must be constant for now } yield CsvParser.Format(del,quote,esc,term)) implicit val arbCSV: Arbitrary[Csv] = arbFormat.map(Csv.apply(_,None)) implicit val arbJsonContentType: Arbitrary[JsonContentType] = Arbitrary( Gen.oneOf( JsonContentType(Readable, LineDelimited), JsonContentType(Readable,SingleArray), JsonContentType(Precise,LineDelimited), JsonContentType(Precise,SingleArray))) implicit val arbMessageFormat: Arbitrary[MessageFormat] = Arbitrary(Gen.oneOf(arbCSV.arbitrary, arbJsonContentType.arbitrary)) }
drostron/quasar
web/src/test/scala/quasar/api/MessageFormatGen.scala
Scala
apache-2.0
2,126
package at.fabricate.liftdev.common
package model

import net.liftweb.mapper.KeyedMetaMapper
import net.liftweb.mapper.LongKeyedMapper
import net.liftweb.mapper.By
import net.liftweb.mapper.KeyedMapper

trait MatchByID [T <: (KeyedMapper[Long,T]) ] { self: T =>

  // add an Object for pattern matching
  final object MatchItemByID {
    def unapply(in: String): Option[T] = self.getSingleton.findByKey(in.toLong)
  }
}
Fabricate/OpenthingsImplementation
src/main/scala/at/fabricate/liftdev/common/model/MatchByID.scala
Scala
lgpl-3.0
455
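MatchByID above wires Lift's KeyedMapper lookup into a Scala extractor object: any object whose unapply returns Option[T] can appear in a match. A standalone sketch of the same pattern against an in-memory map — User and the users map are invented stand-ins for the mapper, and toLongOption replaces the original's bare in.toLong:

// Extractor-object sketch mirroring MatchItemByID; User and users are illustrative only.
object ExtractorDemo {
  final case class User(id: Long, name: String)

  private val users = Map(1L -> User(1L, "ada"), 2L -> User(2L, "grace"))

  object MatchUserByID {
    // Parse the id and look it up, yielding Some(user) or None.
    def unapply(in: String): Option[User] =
      in.toLongOption.flatMap(users.get)
  }

  def main(args: Array[String]): Unit = {
    "2" match {
      case MatchUserByID(u) => println(s"found ${u.name}")
      case other            => println(s"no user for '$other'")
    }
  }
}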
import org.scalatest.FlatSpec import org.scalatest.Matchers import Utils.java2js class InfixSpec extends FlatSpec with Matchers { import Utils._ "An InfixExpression" should " with extended ops should translate cleanly" in { val java = """ class Test { int calc() { return 1 + 2 + 3; } } """ val expected = """ export default class Test { get interfaces_() { return []; } calc() { return 1 + 2 + 3; } getClass() { return Test; } } """ java2js(java) should equal (expected) } "An InfixExpression" should " with ints and division should be truncated" in { val java = """ class Test { int calc() { return 1 / 2; } } """ val expected = """ export default class Test { get interfaces_() { return []; } calc() { return Math.trunc(1 / 2); } getClass() { return Test; } } """ java2js(java) should equal (expected) } "An InfixExpression" should " with ints and division and exops should be truncated" in { val java = """ class Test { int calc() { return 1 / 2 / 2; } } """ val expected = """ export default class Test { get interfaces_() { return []; } calc() { return Math.trunc(Math.trunc(1 / 2) / 2); } getClass() { return Test; } } """ java2js(java) should equal (expected) } }
bjornharrtell/java2estree
src/test/scala/InfixSpec.scala
Scala
mit
1,537
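The division cases in InfixSpec exist because Java's integer `/` truncates toward zero while JavaScript's `/` is floating-point, so the expected output wraps the result in Math.trunc. A small JVM demonstration of why truncation, not flooring, matches the Java semantics:

// JVM integer division truncates toward zero; flooring differs for negative operands,
// which is why the expected JS output uses Math.trunc rather than Math.floor.
object DivisionDemo {
  def main(args: Array[String]): Unit = {
    println(1 / 2)                // 0    (int division truncates)
    println(1.0 / 2)              // 0.5  (what an untruncated JS `/` yields)
    println(-3 / 2)               // -1   (truncation toward zero)
    println(math.floor(-3.0 / 2)) // -2.0 (floor disagrees on negatives)
  }
}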
package de.sciss.fscape import de.sciss.kollflitz.Vec import de.sciss.numbers import scala.concurrent.Promise import scala.math.{cos, pow} class DEnvGenSpec extends UGenSpec { "The DEnvGen UGen" should "work as intended" in { val p = Promise[Vec[Double]]() val levels = Seq(0.0, -1.0, 1.0, 0.1) val lengths = Seq( 100, 200, 50) val shapes = Seq( 1, 3, 2) // lin, sine, exp val g = Graph { import graph._ val env = DEnvGen( levels = ValueDoubleSeq(levels : _*), lengths = ValueIntSeq (lengths: _*), shapes = ValueIntSeq (shapes : _*) ) DebugDoublePromise(env, p) } runGraph(g, 128) assert(p.isCompleted) val res = getPromiseVec(p) val linLevelAt: (Double, Double, Double) => Double = { (pos, y1, y2) => pos * (y2 - y1) + y1 } val expLevelAt: (Double, Double, Double) => Double = { (pos, y1, y2) => if (y1 == 0) { if (pos >= 0.5) y2 else y1 } else { y1 * pow(y2 / y1, pos) } } val sinLevelAt: (Double, Double, Double) => Double = { (pos, y1, y2) => y1 + (y2 - y1) * (-cos(Pi * pos) * 0.5 + 0.5) } val exp = (levels.sliding(2).toVector zip lengths zip shapes).flatMap { case ((Seq(start, end), len), shape) => val fun = shape match { case 1 => linLevelAt case 2 => expLevelAt case 3 => sinLevelAt } import numbers.Implicits._ Vector.tabulate(len)(i => fun(i.linLin(0, len, 0.0, 1.0), start, end)) } difOk(res, exp) } }
Sciss/FScape-next
core/jvm/src/test/scala/de/sciss/fscape/DEnvGenSpec.scala
Scala
agpl-3.0
1,592
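DEnvGenSpec builds its expected output from three per-segment curves (shape code 1 = linear, 2 = exponential, 3 = sine in the test's match). The formulas below are copied from the test and evaluated standalone at a few positions on a segment from y1 = 0.1 to y2 = 1.0; the demo values themselves are illustrative:

import scala.math.{cos, pow, Pi}

// The three segment curves DEnvGenSpec uses to compute its expected envelope values.
// pos is the normalized position in [0, 1); y1 and y2 are the segment endpoints.
object EnvCurvesDemo {
  def linLevelAt(pos: Double, y1: Double, y2: Double): Double =
    pos * (y2 - y1) + y1

  def expLevelAt(pos: Double, y1: Double, y2: Double): Double =
    if (y1 == 0) {
      // the test special-cases a zero start level: step at the segment midpoint
      if (pos >= 0.5) y2 else y1
    } else {
      y1 * pow(y2 / y1, pos)
    }

  def sinLevelAt(pos: Double, y1: Double, y2: Double): Double =
    y1 + (y2 - y1) * (-cos(Pi * pos) * 0.5 + 0.5)

  def main(args: Array[String]): Unit = {
    val positions = Seq(0.0, 0.25, 0.5, 0.75)
    println(positions.map(linLevelAt(_, 0.1, 1.0)))
    println(positions.map(expLevelAt(_, 0.1, 1.0)))
    println(positions.map(sinLevelAt(_, 0.1, 1.0)))
  }
}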
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.carbondata.streaming.parser import java.nio.charset.Charset import java.text.SimpleDateFormat import org.apache.carbondata.core.constants.CarbonCommonConstants object FieldConverter { /** * Return a String representation of the input value * @param value input value * @param serializationNullFormat string for null value * @param delimiterLevel1 level 1 delimiter for complex type * @param delimiterLevel2 level 2 delimiter for complex type * @param timeStampFormat timestamp format * @param dateFormat date format * @param isVarcharType whether it is varchar type. A varchar type has no string length limit * @param level level for recursive call */ def objectToString( value: Any, serializationNullFormat: String, delimiterLevel1: String, delimiterLevel2: String, timeStampFormat: SimpleDateFormat, dateFormat: SimpleDateFormat, isVarcharType: Boolean = false, level: Int = 1): String = { if (value == null) { serializationNullFormat } else { value match { case s: String => if (!isVarcharType && s.length > CarbonCommonConstants.MAX_CHARS_PER_COLUMN_DEFAULT) { throw new Exception("Dataload failed, String length cannot exceed " + CarbonCommonConstants.MAX_CHARS_PER_COLUMN_DEFAULT + " characters") } else { s } case d: java.math.BigDecimal => d.toPlainString case i: java.lang.Integer => i.toString case d: java.lang.Double => d.toString case t: java.sql.Timestamp => timeStampFormat format t case d: java.sql.Date => dateFormat format d case b: java.lang.Boolean => b.toString case s: java.lang.Short => s.toString case f: java.lang.Float => f.toString case bs: Array[Byte] => new String(bs, Charset.forName(CarbonCommonConstants.DEFAULT_CHARSET)) case s: scala.collection.Seq[Any] => val delimiter = if (level == 1) { delimiterLevel1 } else { delimiterLevel2 } val builder = new StringBuilder() s.foreach { x => builder.append(objectToString(x, serializationNullFormat, delimiterLevel1, delimiterLevel2, timeStampFormat, dateFormat, isVarcharType, level + 1)) .append(delimiter) } builder.substring(0, builder.length - delimiter.length()) case m: scala.collection.Map[Any, Any] => throw new Exception("Unsupported data type: Map") case r: org.apache.spark.sql.Row => val delimiter = if (level == 1) { delimiterLevel1 } else { delimiterLevel2 } val builder = new StringBuilder() for (i <- 0 until r.length) { builder.append(objectToString(r(i), serializationNullFormat, delimiterLevel1, delimiterLevel2, timeStampFormat, dateFormat, isVarcharType, level + 1)) .append(delimiter) } builder.substring(0, builder.length - delimiter.length()) case other => other.toString } } } }
jatin9896/incubator-carbondata
streaming/src/main/scala/org/apache/carbondata/streaming/parser/FieldConverter.scala
Scala
apache-2.0
4,017
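FieldConverter.objectToString above flattens nested sequences and Spark Rows using a different delimiter at nesting level 1 than at deeper levels. A much-reduced standalone sketch of that level-switching recursion for plain Scala values (no Spark Row, null handling, or date formatting; the "$" and ":" delimiters are arbitrary choices for the demo):

// Reduced sketch of the level-based delimiter choice in FieldConverter.objectToString:
// the top level joins with one delimiter, every deeper level with another.
object FlattenDemo {
  def flatten(value: Any, delim1: String = "$", delim2: String = ":", level: Int = 1): String =
    value match {
      case s: Seq[_] =>
        val delim = if (level == 1) delim1 else delim2
        s.map(flatten(_, delim1, delim2, level + 1)).mkString(delim)
      case other =>
        other.toString
    }

  def main(args: Array[String]): Unit = {
    // Outer elements joined by "$", inner elements by ":".
    println(flatten(Seq(Seq(1, 2), Seq(3, 4)))) // prints 1:2$3:4
  }
}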
/* * Copyright (c) 2015 Steven Soloff * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ package io.github.ssoloff.polyhedra import org.scalatest.{FunSpec, Matchers} final class ExpressionSpec extends FunSpec with Matchers with Dice with EqualsVerifierSugar { val three = new ConstantExpression(3.0) val four = new ConstantExpression(4.0) describe("AdditionExpression") { it("should be equatable") { instancesOf [AdditionExpression] should be (equatable) // scalastyle:ignore no.whitespace.before.left.bracket } describe("#evaluate") { it("should return result with value equal to sum of augend and addend") { val expression = new AdditionExpression(four, three) val expressionResult = expression.evaluate() expressionResult should equal (new AdditionExpressionResult( new ConstantExpressionResult(four.constant), new ConstantExpressionResult(three.constant) )) } it("should evaluate subexpressions") { val expression = new AdditionExpression(new AdditionExpression(four, three), three) val expressionResult = expression.evaluate() expressionResult.value should equal (10.0) } } } describe("ArrayExpression") { it("should be equatable") { ( instancesOf [ArrayExpression[_]] // scalastyle:ignore no.whitespace.before.left.bracket withPrefabValues( List[Expression[_]](three, four), List[Expression[_]](four, three) ) should be (equatable) ) } describe("#evaluate") { it("should return result with value equal to array of expression result values") { val expression = new ArrayExpression(List(three, four)) val expressionResult = expression.evaluate() expressionResult should equal (new ArrayExpressionResult(List( new ConstantExpressionResult(three.constant), new ConstantExpressionResult(four.constant) ))) } } } describe("ConstantExpression") { it("should be equatable") { instancesOf [ConstantExpression] should be (equatable) // scalastyle:ignore no.whitespace.before.left.bracket } describe("#evaluate") { it("should return result with value equal to constant") { val constant = 42.0 val expression = new ConstantExpression(constant) val expressionResult = expression.evaluate() expressionResult should equal (new ConstantExpressionResult(constant)) } } } describe("DieExpression") { it("should be equatable") { ( instancesOf [DieExpression] // scalastyle:ignore no.whitespace.before.left.bracket withPrefabValues(createDie(1), createDie(2)) should be (equatable) ) } describe("#evaluate") { it("should return result with value equal to die") { val die = createDie(2) val expression = new DieExpression(die) val expressionResult = 
expression.evaluate() expressionResult should equal (new DieExpressionResult(die)) } } } describe("DivisionExpression") { it("should be equatable") { instancesOf [DivisionExpression] should be (equatable) // scalastyle:ignore no.whitespace.before.left.bracket } describe("#evaluate") { it("should return result with value equal to quotient of dividend and divisor") { val expression = new DivisionExpression(three, four) val expressionResult = expression.evaluate() expressionResult should equal (new DivisionExpressionResult( new ConstantExpressionResult(three.constant), new ConstantExpressionResult(four.constant) )) } it("should evaluate subexpressions") { val expression = new DivisionExpression(new DivisionExpression(three, four), four) val expressionResult = expression.evaluate() expressionResult.value should equal (0.1875) } } } describe("FunctionCallExpression") { it("should be equatable") { ( instancesOf [FunctionCallExpression[_, _]] // scalastyle:ignore no.whitespace.before.left.bracket withPrefabValues( List(three, four), List(four, three) ) should be (equatable) ) } describe("#evaluate") { describe("when zero arguments specified") { it("should return result with value equal to function return value") { val name = "f0" val returnValue = 0.0 val expression = new FunctionCallExpression(name, (args: Seq[Any]) => returnValue, Nil) val expressionResult = expression.evaluate() expressionResult should equal (new FunctionCallExpressionResult( returnValue, name, Nil )) } } describe("when one argument specified") { it("should return result with value equal to function return value") { val name = "f1" val expression = new FunctionCallExpression(name, (args: Seq[Double]) => args(0), List(three)) val expressionResult = expression.evaluate() expressionResult should equal (new FunctionCallExpressionResult( three.constant, name, List( new ConstantExpressionResult(three.constant) ) )) } } describe("when two arguments specified") { it("should return result with value equal to function return value") { val name = "f2" val expression = new FunctionCallExpression(name, (args: Seq[Double]) => args(0) + args(1), List(three, four)) val expressionResult = expression.evaluate() expressionResult should equal (new FunctionCallExpressionResult( three.constant + four.constant, name, List( new ConstantExpressionResult(three.constant), new ConstantExpressionResult(four.constant) ) )) } } } } describe("GroupExpression") { it("should be equatable") { instancesOf [GroupExpression[_]] should be (equatable) // scalastyle:ignore no.whitespace.before.left.bracket } describe("#evaluate") { it("should return result with value equal to child expression result value") { val expression = new GroupExpression(three) val expressionResult = expression.evaluate() expressionResult should equal (new GroupExpressionResult(new ConstantExpressionResult(three.constant))) } } } describe("ModuloExpression") { it("should be equatable") { instancesOf [ModuloExpression] should be (equatable) // scalastyle:ignore no.whitespace.before.left.bracket } describe("#evaluate") { it("should return result with value equal to remainder of division of dividend and divisor") { val expression = new ModuloExpression(four, three) val expressionResult = expression.evaluate() expressionResult should equal (new ModuloExpressionResult( new ConstantExpressionResult(four.constant), new ConstantExpressionResult(three.constant) )) } it("should evaluate subexpressions") { val expression = new ModuloExpression(new ModuloExpression(three, four), three) val 
expressionResult = expression.evaluate() expressionResult.value should equal (0.0) } } } describe("MultiplicationExpression") { it("should be equatable") { instancesOf [MultiplicationExpression] should be (equatable) // scalastyle:ignore no.whitespace.before.left.bracket } describe("#evaluate") { it("should return result with value equal to product of multiplicand and multiplier") { val expression = new MultiplicationExpression(four, three) val expressionResult = expression.evaluate() expressionResult should equal (new MultiplicationExpressionResult( new ConstantExpressionResult(four.constant), new ConstantExpressionResult(three.constant) )) } it("should evaluate subexpressions") { val expression = new MultiplicationExpression(new MultiplicationExpression(four, three), three) val expressionResult = expression.evaluate() expressionResult.value should equal (36.0) } } } describe("NegativeExpression") { it("should be equatable") { instancesOf [NegativeExpression] should be (equatable) // scalastyle:ignore no.whitespace.before.left.bracket } describe("#evaluate") { it("should return result with value equal to negative of child expression result value") { val expression = new NegativeExpression(three) val expressionResult = expression.evaluate() expressionResult should equal (new NegativeExpressionResult(new ConstantExpressionResult(three.constant))) } } } describe("PositiveExpression") { it("should be equatable") { instancesOf [PositiveExpression] should be (equatable) // scalastyle:ignore no.whitespace.before.left.bracket } describe("#evaluate") { it("should return result with value equal to child expression result value") { val expression = new PositiveExpression(three) val expressionResult = expression.evaluate() expressionResult should equal (new PositiveExpressionResult(new ConstantExpressionResult(three.constant))) } } } describe("SubtractionExpression") { it("should be equatable") { instancesOf [SubtractionExpression] should be (equatable) // scalastyle:ignore no.whitespace.before.left.bracket } describe("#evaluate") { it("should return result with value equal to difference between minuend and subtrahend") { val expression = new SubtractionExpression(four, three) val expressionResult = expression.evaluate() expressionResult should equal (new SubtractionExpressionResult( new ConstantExpressionResult(four.constant), new ConstantExpressionResult(three.constant) )) } it("should evaluate subexpressions") { val expression = new SubtractionExpression(new SubtractionExpression(four, three), three) val expressionResult = expression.evaluate() expressionResult.value should equal (-2.0) } } } }
ssoloff/polyhedra-jvm
src/test/scala/io/github/ssoloff/polyhedra/ExpressionSpec.scala
Scala
mit
11,475
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.streaming.sources import java.net.{InetSocketAddress, SocketException} import java.nio.ByteBuffer import java.nio.channels.ServerSocketChannel import java.nio.charset.StandardCharsets import java.sql.Timestamp import java.util.concurrent.LinkedBlockingQueue import java.util.concurrent.TimeUnit._ import scala.collection.JavaConverters._ import org.apache.spark.internal.Logging import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.connector.read.streaming.{Offset, SparkDataStream} import org.apache.spark.sql.execution.datasources.DataSource import org.apache.spark.sql.execution.datasources.v2.StreamingDataSourceV2Relation import org.apache.spark.sql.execution.streaming._ import org.apache.spark.sql.execution.streaming.continuous._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.streaming.{StreamingQueryException, StreamTest} import org.apache.spark.sql.test.SharedSparkSession import org.apache.spark.sql.types._ import org.apache.spark.sql.util.CaseInsensitiveStringMap class TextSocketStreamSuite extends StreamTest with SharedSparkSession { override def afterEach() { sqlContext.streams.active.foreach(_.stop()) if (serverThread != null) { serverThread.interrupt() serverThread.join() serverThread = null } } private var serverThread: ServerThread = null case class AddSocketData(data: String*) extends AddData { override def addData(query: Option[StreamExecution]): (SparkDataStream, Offset) = { require( query.nonEmpty, "Cannot add data when there is no query for finding the active socket source") val sources = query.get.logicalPlan.collect { case r: StreamingDataSourceV2Relation if r.stream.isInstanceOf[TextSocketMicroBatchStream] => r.stream.asInstanceOf[TextSocketMicroBatchStream] } if (sources.isEmpty) { throw new Exception( "Could not find socket source in the StreamExecution logical plan to add data to") } else if (sources.size > 1) { throw new Exception( "Could not select the socket source in the StreamExecution logical plan as there" + "are multiple socket sources:\\n\\t" + sources.mkString("\\n\\t")) } val socketSource = sources.head assert(serverThread != null && serverThread.port != 0) val currOffset = socketSource.getCurrentOffset() data.foreach(serverThread.enqueue) val newOffset = LongOffset(currOffset.offset + data.size) (socketSource, newOffset) } override def toString: String = s"AddSocketData(data = $data)" } test("backward compatibility with old path") { val ds = DataSource.lookupDataSource( "org.apache.spark.sql.execution.streaming.TextSocketSourceProvider", spark.sqlContext.conf).newInstance() assert(ds.isInstanceOf[TextSocketSourceProvider], "Could not find socket source") } test("basic usage") { serverThread = new 
ServerThread() serverThread.start() withSQLConf(SQLConf.UNSUPPORTED_OPERATION_CHECK_ENABLED.key -> "false") { val ref = spark import ref.implicits._ val socket = spark .readStream .format("socket") .options(Map("host" -> "localhost", "port" -> serverThread.port.toString)) .load() .as[String] assert(socket.schema === StructType(StructField("value", StringType) :: Nil)) testStream(socket)( StartStream(), AddSocketData("hello"), CheckAnswer("hello"), AddSocketData("world"), CheckLastBatch("world"), CheckAnswer("hello", "world"), StopStream ) } } test("timestamped usage") { serverThread = new ServerThread() serverThread.start() withSQLConf(SQLConf.UNSUPPORTED_OPERATION_CHECK_ENABLED.key -> "false") { val socket = spark .readStream .format("socket") .options(Map( "host" -> "localhost", "port" -> serverThread.port.toString, "includeTimestamp" -> "true")) .load() assert(socket.schema === StructType(StructField("value", StringType) :: StructField("timestamp", TimestampType) :: Nil)) var batch1Stamp: Timestamp = null var batch2Stamp: Timestamp = null val curr = System.currentTimeMillis() testStream(socket)( StartStream(), AddSocketData("hello"), CheckAnswerRowsByFunc( rows => { assert(rows.size === 1) assert(rows.head.getAs[String](0) === "hello") batch1Stamp = rows.head.getAs[Timestamp](1) Thread.sleep(10) }, true), AddSocketData("world"), CheckAnswerRowsByFunc( rows => { assert(rows.size === 1) assert(rows.head.getAs[String](0) === "world") batch2Stamp = rows.head.getAs[Timestamp](1) }, true), StopStream ) // Timestamp for rate stream is round to second which leads to milliseconds lost, that will // make batch1stamp smaller than current timestamp if both of them are in the same second. // Comparing by second to make sure the correct behavior. assert(batch1Stamp.getTime >= SECONDS.toMillis(MILLISECONDS.toSeconds(curr))) assert(!batch2Stamp.before(batch1Stamp)) } } test("params not given") { val provider = new TextSocketSourceProvider intercept[AnalysisException] { provider.getTable(CaseInsensitiveStringMap.empty()) } intercept[AnalysisException] { provider.getTable(new CaseInsensitiveStringMap(Map("host" -> "localhost").asJava)) } intercept[AnalysisException] { provider.getTable(new CaseInsensitiveStringMap(Map("port" -> "1234").asJava)) } } test("non-boolean includeTimestamp") { val provider = new TextSocketSourceProvider val params = Map("host" -> "localhost", "port" -> "1234", "includeTimestamp" -> "fasle") intercept[AnalysisException] { provider.getTable(new CaseInsensitiveStringMap(params.asJava)) } } test("user-specified schema given") { val provider = new TextSocketSourceProvider val userSpecifiedSchema = StructType( StructField("name", StringType) :: StructField("area", StringType) :: Nil) val params = Map("host" -> "localhost", "port" -> "1234") val exception = intercept[UnsupportedOperationException] { provider.getTable(new CaseInsensitiveStringMap(params.asJava), userSpecifiedSchema) } assert(exception.getMessage.contains( "TextSocketSourceProvider source does not support user-specified schema")) } test("input row metrics") { serverThread = new ServerThread() serverThread.start() withSQLConf(SQLConf.UNSUPPORTED_OPERATION_CHECK_ENABLED.key -> "false") { val ref = spark import ref.implicits._ val socket = spark .readStream .format("socket") .options(Map("host" -> "localhost", "port" -> serverThread.port.toString)) .load() .as[String] assert(socket.schema === StructType(StructField("value", StringType) :: Nil)) testStream(socket)( StartStream(), AddSocketData("hello"), 
CheckAnswer("hello"), AssertOnQuery { q => val numRowMetric = q.lastExecution.executedPlan.collectLeaves().head.metrics.get("numOutputRows") numRowMetric.nonEmpty && numRowMetric.get.value == 1 }, StopStream ) } } test("verify ServerThread only accepts the first connection") { serverThread = new ServerThread() serverThread.start() withSQLConf(SQLConf.UNSUPPORTED_OPERATION_CHECK_ENABLED.key -> "false") { val ref = spark import ref.implicits._ val socket = spark .readStream .format("socket") .options(Map("host" -> "localhost", "port" -> serverThread.port.toString)) .load() .as[String] assert(socket.schema === StructType(StructField("value", StringType) :: Nil)) testStream(socket)( StartStream(), AddSocketData("hello"), CheckAnswer("hello"), AddSocketData("world"), CheckLastBatch("world"), CheckAnswer("hello", "world"), StopStream ) // we are trying to connect to the server once again which should fail try { val socket2 = spark .readStream .format("socket") .options(Map("host" -> "localhost", "port" -> serverThread.port.toString)) .load() .as[String] testStream(socket2)( StartStream(), AddSocketData("hello"), CheckAnswer("hello"), AddSocketData("world"), CheckLastBatch("world"), CheckAnswer("hello", "world"), StopStream ) fail("StreamingQueryException is expected!") } catch { case e: StreamingQueryException if e.cause.isInstanceOf[SocketException] => // pass } } } test("continuous data") { serverThread = new ServerThread() serverThread.start() val stream = new TextSocketContinuousStream( host = "localhost", port = serverThread.port, numPartitions = 2, options = CaseInsensitiveStringMap.empty()) val partitions = stream.planInputPartitions(stream.initialOffset()) assert(partitions.length == 2) val numRecords = 10 val data = scala.collection.mutable.ListBuffer[Int]() val offsets = scala.collection.mutable.ListBuffer[Int]() val readerFactory = stream.createContinuousReaderFactory() import org.scalatest.time.SpanSugar._ failAfter(5.seconds) { // inject rows, read and check the data and offsets for (i <- 0 until numRecords) { serverThread.enqueue(i.toString) } partitions.foreach { case t: TextSocketContinuousInputPartition => val r = readerFactory.createReader(t).asInstanceOf[TextSocketContinuousPartitionReader] for (i <- 0 until numRecords / 2) { r.next() offsets.append(r.getOffset().asInstanceOf[ContinuousRecordPartitionOffset].offset) data.append(r.get().get(0, DataTypes.StringType).asInstanceOf[String].toInt) // commit the offsets in the middle and validate if processing continues if (i == 2) { commitOffset(t.partitionId, i + 1) } } assert(offsets.toSeq == Range.inclusive(1, 5)) assert(data.toSeq == Range(t.partitionId, 10, 2)) offsets.clear() data.clear() case _ => throw new IllegalStateException("Unexpected task type") } assert(stream.startOffset.offsets == List(3, 3)) stream.commit(TextSocketOffset(List(5, 5))) assert(stream.startOffset.offsets == List(5, 5)) } def commitOffset(partition: Int, offset: Int): Unit = { val offsetsToCommit = stream.startOffset.offsets.updated(partition, offset) stream.commit(TextSocketOffset(offsetsToCommit)) assert(stream.startOffset.offsets == offsetsToCommit) } } test("continuous data - invalid commit") { serverThread = new ServerThread() serverThread.start() val stream = new TextSocketContinuousStream( host = "localhost", port = serverThread.port, numPartitions = 2, options = CaseInsensitiveStringMap.empty()) stream.startOffset = TextSocketOffset(List(5, 5)) assertThrows[IllegalStateException] { stream.commit(TextSocketOffset(List(6, 6))) } } 
test("continuous data with timestamp") { serverThread = new ServerThread() serverThread.start() val stream = new TextSocketContinuousStream( host = "localhost", port = serverThread.port, numPartitions = 2, options = new CaseInsensitiveStringMap(Map("includeTimestamp" -> "true").asJava)) val partitions = stream.planInputPartitions(stream.initialOffset()) assert(partitions.size == 2) val numRecords = 4 // inject rows, read and check the data and offsets for (i <- 0 until numRecords) { serverThread.enqueue(i.toString) } val readerFactory = stream.createContinuousReaderFactory() partitions.foreach { case t: TextSocketContinuousInputPartition => val r = readerFactory.createReader(t).asInstanceOf[TextSocketContinuousPartitionReader] for (_ <- 0 until numRecords / 2) { r.next() assert(r.get().get(0, TextSocketReader.SCHEMA_TIMESTAMP).isInstanceOf[(_, _)]) } case _ => throw new IllegalStateException("Unexpected task type") } } /** * This class tries to mimic the behavior of netcat, so that we can ensure * TextSocketStream supports netcat, which only accepts the first connection * and exits the process when the first connection is closed. * * Please refer SPARK-24466 for more details. */ private class ServerThread extends Thread with Logging { private val serverSocketChannel = ServerSocketChannel.open() serverSocketChannel.bind(new InetSocketAddress(0)) private val messageQueue = new LinkedBlockingQueue[String]() val port = serverSocketChannel.socket().getLocalPort override def run(): Unit = { try { val clientSocketChannel = serverSocketChannel.accept() // Close server socket channel immediately to mimic the behavior that // only first connection will be made and deny any further connections // Note that the first client socket channel will be available serverSocketChannel.close() clientSocketChannel.configureBlocking(false) clientSocketChannel.socket().setTcpNoDelay(true) while (true) { val line = messageQueue.take() + "\\n" clientSocketChannel.write(ByteBuffer.wrap(line.getBytes(StandardCharsets.UTF_8))) } } catch { case e: InterruptedException => } finally { // no harm to call close() again... serverSocketChannel.close() } } def enqueue(line: String): Unit = { messageQueue.put(line) } } }
bdrillard/spark
sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/sources/TextSocketStreamSuite.scala
Scala
apache-2.0
15,024
package es.codemotion.akkaships.common.domain

case class Result(message: String)
miguel0afd/Akkaships
Common/src/main/scala/es/codemotion/akkaships/common/domain/Result.scala
Scala
apache-2.0
83
package mesosphere.marathon package core.appinfo import java.time.{OffsetDateTime, ZoneOffset} import mesosphere.UnitTest import mesosphere.marathon.core.health.Health import mesosphere.marathon.core.instance.Instance.AgentInfo import mesosphere.marathon.core.instance.{Instance, TestInstanceBuilder, TestTaskBuilder} import mesosphere.marathon.state.{PathId, Timestamp, UnreachableStrategy, VersionInfo} import play.api.libs.json.Json import scala.concurrent.duration._ class TaskStatsByVersionTest extends UnitTest { "TaskStatsByVersion" should { "no tasks" in { Given("no tasks") When("calculating stats") val stats = TaskStatsByVersion( now = now, versionInfo = versionInfo, instances = Seq.empty, statuses = Map.empty[Instance.Id, Seq[Health]] ) Then("we get none") stats should be( TaskStatsByVersion( maybeStartedAfterLastScaling = None, maybeWithLatestConfig = None, maybeWithOutdatedConfig = None, maybeTotalSummary = None ) ) } "tasks are correctly split along categories" in { Given("various tasks") val outdatedInstances = Vector( runningInstanceStartedAt(outdatedVersion, 1.seconds), runningInstanceStartedAt(outdatedVersion, 2.seconds) ) val afterLastScalingTasks = Vector( runningInstanceStartedAt(lastScalingAt, 1.seconds), runningInstanceStartedAt(lastScalingAt, 2.seconds) ) val afterLastConfigChangeTasks = Vector( runningInstanceStartedAt(lastConfigChangeAt, 1.seconds), runningInstanceStartedAt(intermediaryScalingAt, 2.seconds) ) ++ afterLastScalingTasks val instances: Seq[Instance] = outdatedInstances ++ afterLastConfigChangeTasks val statuses = Map.empty[Instance.Id, Seq[Health]] When("calculating stats") val stats = TaskStatsByVersion( now = now, versionInfo = versionInfo, instances = instances, statuses = statuses ) Then("we get the correct stats") import mesosphere.marathon.api.v2.json.Formats._ withClue(Json.prettyPrint(Json.obj("stats" -> stats, "tasks" -> instances.map(state.Instance.fromCoreInstance)))) { stats.maybeWithOutdatedConfig should not be empty stats.maybeWithLatestConfig should not be empty stats.maybeStartedAfterLastScaling should not be empty stats.maybeTotalSummary should not be empty stats.maybeWithOutdatedConfig should be(TaskStats.forSomeTasks(now, outdatedInstances, statuses)) stats.maybeWithLatestConfig should be(TaskStats.forSomeTasks(now, afterLastConfigChangeTasks, statuses)) stats.maybeStartedAfterLastScaling should be(TaskStats.forSomeTasks(now, afterLastScalingTasks, statuses)) stats.maybeTotalSummary should be(TaskStats.forSomeTasks(now, instances, statuses)) stats should be( TaskStatsByVersion( maybeStartedAfterLastScaling = TaskStats.forSomeTasks(now, afterLastScalingTasks, statuses), maybeWithLatestConfig = TaskStats.forSomeTasks(now, afterLastConfigChangeTasks, statuses), maybeWithOutdatedConfig = TaskStats.forSomeTasks(now, outdatedInstances, statuses), maybeTotalSummary = TaskStats.forSomeTasks(now, instances, statuses) ) ) } } } private[this] val now: Timestamp = Timestamp(OffsetDateTime.of(2015, 4, 9, 12, 30, 0, 0, ZoneOffset.UTC)) private val lastScalingAt: Timestamp = now - 10.seconds private val intermediaryScalingAt: Timestamp = now - 20.seconds private val lastConfigChangeAt: Timestamp = now - 100.seconds private val outdatedVersion: Timestamp = now - 200.seconds private[this] val versionInfo = VersionInfo.FullVersionInfo( version = lastScalingAt, lastScalingAt = lastScalingAt, lastConfigChangeAt = lastConfigChangeAt ) val appId = PathId("/test") private[this] def newInstanceId(): Instance.Id = Instance.Id.forRunSpec(appId) private[this] def 
runningInstanceStartedAt(version: Timestamp, startingDelay: FiniteDuration): Instance = { val startedAt = (version + startingDelay).millis val agentInfo = AgentInfo(host = "host", agentId = Some("agent"), region = None, zone = None, attributes = Nil) TestInstanceBuilder.fromTask( TestTaskBuilder.Helper.runningTask(newInstanceId(), appVersion = version, startedAt = startedAt), agentInfo, unreachableStrategy = UnreachableStrategy.default() ) } }
gsantovena/marathon
src/test/scala/mesosphere/marathon/core/appinfo/TaskStatsByVersionTest.scala
Scala
apache-2.0
4,554
package edu.jhu.hlt.probe import scala.collection._ import edu.jhu.hlt.probe.util._ /** * @author Tongfei Chen ([email protected]). * @since 0.4.0 */ class FeatureVector[A] { private val g = mutable.HashMap[String, FeatureGroup[A]]() def +=(fg: FeatureGroup[A]) = { if (g contains fg.name) g(fg.name) = group(fg.name) + fg else g += (fg.name → fg) } def group(name: String) = g.getOrElse(name, FeatureGroup.empty(name)) def groups = g.values def features = groups.flatMap(_.features) def +(that: FeatureVector[A]): FeatureVector[A] = { val res = new FeatureVector[A] this.groups foreach res.+= that.groups foreach res.+= res } def unary_- : FeatureVector[A] = { val res = new FeatureVector[A] for (g ← this.groups.map(-_)) res += g res } def -(that: FeatureVector[A]): FeatureVector[A] = { val res = new FeatureVector[A] for (g ← this.groups) res += g for (g ← that.groups.map(-_)) res += g res } def *(k: Double): FeatureVector[A] = { val res = new FeatureVector[A] this.groups.map(_ * k) foreach res.+= res } def dot(that: FeatureVector[A]): Double = { var sum = 0.0 for (ga ← groups) { val gb = that.group(ga.name) for { (ka, va) ← ga.pairs (kb, vb) ← gb.pairs if ka == kb } sum += va * vb } sum } def l2Norm: Double = { var res = 0.0 for (g ← groups) for ((k, v) ← g.pairs) res += v * v math.sqrt(res) } def l2Normalize = this * (1.0 / l2Norm) def l1Norm: Double = { var res = 0.0 for (g ← groups) for ((k, v) ← g.pairs) res += math.abs(v) res } def maxNorm: Double = { var res = 0.0 for (g ← groups) for ((k, v) ← g.pairs) res = math.max(res, v) res } def cosSimilarity(that: FeatureVector[A]) = (this dot that) / this.l2Norm / that.l2Norm def toStringFeatureVector: StringFeatureVector = { val sfv = new StringFeatureVector groups foreach sfv.<<= sfv } /** Returns the LIBSVM style string representation of this feature vector. */ override def toString = groups.mkString(" ") } object FeatureVector { def apply[A](fgs: FeatureGroup[A]*): FeatureVector[A] = from(fgs) def from[A](fgs: Iterable[FeatureGroup[A]]): FeatureVector[A] = { val fv = new FeatureVector[A] fv.g ++= fgs.map(g => g.name → g) fv } def empty[A] = new FeatureVector[A]() /** Reads a LIBSVM style string representation of a feature vector. * @note The type of feature keys will be obliterated: they will be `String` in the returned vector. */ def parse(s: String): FeatureVector[String] = { val groups = s.split(" ").map { case sm"$fn~$k:$v" => (fn, k, v.toDouble) case sm"$fn:$v" => (fn, "", v.toDouble) }.groupBy(_._1) val fv = new FeatureVector[String] for (g ← groups) { val fg = FeatureGroup(g._1) { g._2.map { case (fn, k, v) => (k, v) } } fv += fg } fv } }
ctongfei/probe
core/src/main/scala/edu/jhu/hlt/probe/FeatureVector.scala
Scala
mit
3,042
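The FeatureVector class above is only exercised indirectly through its own parse method, so a usage sketch may help. This is a hypothetical example, not part of the probe repository: the FeatureVectorSketch object is made up, and it assumes FeatureGroup.apply(name)(pairs) has the shape implied by the call inside parse and that pairs exposes the (key, value) entries used by dot.

import edu.jhu.hlt.probe._

object FeatureVectorSketch extends App {
  // two small vectors; group and feature names are made up for illustration
  val a = FeatureVector(
    FeatureGroup("word")(Seq("dog" -> 1.0, "cat" -> 2.0)),
    FeatureGroup("len")(Seq("" -> 3.0))
  )
  val b = FeatureVector(FeatureGroup("word")(Seq("dog" -> 1.0)))

  println(a dot b)           // 1.0: only the shared ("word", "dog") feature matches
  println(a.l2Norm)          // sqrt(1 + 4 + 9)
  println(a cosSimilarity b) // (a dot b) / (|a| * |b|)
  println(a + b)             // LIBSVM-style rendering via toString
}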
package edu.depauw.csc.scala.graphics; import java.awt.Graphics import java.awt.Graphics2D import java.awt.geom.Rectangle2D import java.awt.geom.Area import java.awt.geom.AffineTransform /** Companion object for the square class which handles constructors. @author Cory Boatright, [email protected] @version July 1, 2007 */ object Square { def apply(anX: Double, aY: Double, aL: Double) = new Square(anX, aY, aL) def apply(anX: Double, aY: Double, aL: Double, c: Color) = new Square(anX, aY, aL, c) def apply(anX: Double, aY: Double, aL: Double, c1: Color, c2: Color) = new Square(anX, aY, aL, c1, c2) } /** Candidate square primitive for the graphics component in SCALES. The default constructor will make a black square @author Cory Boatright, [email protected] @version June 15, 2007 @param anX The x-coordinate of the top-left corner of the square @param aY The y-coordinate of the top-left corner of the square @param aL The length of the square's side */ class Square(anX: Double, aY: Double, aL: Double) extends Rectangle2D.Double(anX, aY, aL, aL) with ShapeExt { private var intColor: Color = Color(java.awt.Color.BLACK) private var extColor: Color = Color(java.awt.Color.BLACK) private var orient: AffineTransform = new AffineTransform /** Secondary constructor that allows a different initial color for the square @param anX The x-coordinate of the top-left corner of the square @param aY The y-coordinate of the top-left corner of the square @param aL The length of the square's side @param c The initial color for the square */ def this(anX: Double, aY: Double, aL: Double, c: Color) = { this(anX, aY, aL) intColor = c extColor = c } /** Secondary constructor that allows a different initial fill and outline colors for the square @param anX The x-coordinate of the top-left corner of the square @param aY The y-coordinate of the top-left corner of the square @param aL The length of the square's side @param c1 The initial fill color for the square @param c2 The initial outline color for the square */ def this(anX: Double, aY: Double, aL: Double, c1: Color, c2: Color) = { this(anX, aY, aL) intColor = c1 extColor = c2 } /** Allows for translation along the x- and y-directions @param difx The magnitude and direction to move with respect to the x-axis @param dify The magnitude and direction to move with respect to the y-axis */ def translate(difx: Double, dify: Double): Unit = { x += difx y += dify } /** Allows for expanding or reducing the square by a given ratio @param change The double result of dividing new size by old size */ def scale(change: Double): Unit = { x *= change y *= change width *= change height *= change } /** Allows the changing the square to a different color, fill and outline will be the same @param c The new color to use */ def changeColor(c: Color): Unit = { intColor = c extColor = c } /** Allows the changing of the square to use a different outline and fill color @param newIntColor The new color to use for the fill @param newExtColor The new color to use for the outline */ def changeColors(newIntColor: Color, newExtColor: Color): Unit = { intColor = newIntColor extColor = newExtColor } /** Allows for the changing of the square's outline color only @param c The new color to use for the outline */ def changeExtColor(c: Color): Unit = { extColor = c } /** Allows for the changing of the square's fill color only @param c The new color to use for the fill */ def changeIntColor(c: Color): Unit = { intColor = c } /** Paints the square @param g The graphics context */ def paint(g: Graphics): Unit = 
{ val g2: Graphics2D = g.asInstanceOf[Graphics2D] g2.setTransform(orient) g2.setColor(intColor.dumpColor) g2.fill(this) g2.setColor(extColor.dumpColor) g2.draw(this) } /** Creates a java.awt.geom.Area object for this shape @return The area object constructed from this shape */ def getArea: Area = { if (orient.isIdentity) { //If the affine transformation doesn't rotate the object, don't bother with it new Area(this) } else { var base: Area = new Area(this) base.createTransformedArea(orient) //the area will not be rotated by default } } /** Rotates the shape by a radian amount. @param rad The amount of rotation to do */ def rotate(rad: Double): Unit = { orient.rotate(rad) } /** Rotates the shape when it's in a composite shape @param rad The amount of rotation to do @param anchorX The x-coordinate of the center of the composite @param anchorY The y-coordinate of the center of the composite */ def rotate(rad: Double, anchorX: Double, anchorY: Double): Unit = { orient.rotate(rad, anchorX, anchorY) } /** Creates a composite layering one shape under this one. @param s The shape which will be under this one. @return A composite of this shape as well as the argument shape */ def over(s: ShapeExt): Composite = { new Composite(this, s) } }
bhoward/EscalatorOld
ScalaGraphics/src/edu/depauw/csc/scala/graphics/Square.scala
Scala
apache-2.0
5,411
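A brief usage sketch for the Square primitive above. It is hypothetical rather than taken from SCALES, the SquareSketch name is invented, and it assumes the package's Color wrapper offers the Color(java.awt.Color) factory already used in the class body.

import java.awt.image.BufferedImage
import edu.depauw.csc.scala.graphics.{Color, Square}

object SquareSketch extends App {
  // a 50x50 red square whose top-left corner starts at (10, 10)
  val sq = Square(10.0, 10.0, 50.0, Color(java.awt.Color.RED))
  sq.translate(5.0, 5.0)             // move the corner to (15, 15)
  sq.rotate(math.Pi / 4, 40.0, 40.0) // rotate about the square's centre
  sq.scale(2.0)                      // doubles position and side length

  // paint into an off-screen image instead of a visible component
  val img = new BufferedImage(200, 200, BufferedImage.TYPE_INT_ARGB)
  sq.paint(img.createGraphics())
}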
/******************************************************************************* Copyright (c) 2012-2013, KAIST, S-Core. All rights reserved. Use is subject to license terms. This distribution may include materials developed by third parties. ******************************************************************************/ package kr.ac.kaist.jsaf.tests import _root_.java.util.{List => JList} import _root_.java.io.File import _root_.java.io.FilenameFilter import _root_.java.io.FileNotFoundException import _root_.java.io.PrintStream import _root_.java.util.StringTokenizer import _root_.kr.ac.kaist.jsaf.useful.ArrayBackedList import scala.collection.JavaConversions import junit.framework.Assert._ import junit.framework.Test import junit.framework.TestCase import junit.framework.TestResult import junit.framework.TestSuite import kr.ac.kaist.jsaf.ProjectProperties import kr.ac.kaist.jsaf.Shell import kr.ac.kaist.jsaf.scala_src.useful.Arrays._ import kr.ac.kaist.jsaf.useful.StringMap import kr.ac.kaist.jsaf.useful.Useful import kr.ac.kaist.jsaf.useful.WireTappedPrintStream object FileTests { /* Import Note! * Do not use Scala's println. Use System.out.println. */ def makeTestFileName(name: String) = if (name.endsWith(".js") || name.endsWith(".widl")) name else name + ".js" def join(dir: String, file: String) = if (dir.length == 0) file else dir + "/" + file def directoryAsFile(dirname: String): File = { val dir = new File(dirname) if (!dir.exists) { System.err.println(dirname + " does not exist") throw new FileNotFoundException(dirname) } if (!dir.isDirectory) { System.err.println(dirname + " exists but is not a directory") throw new IllegalArgumentException(dirname) } dir } def compilerSuite(dirname: String, failsOnly: Boolean, expect_failure: Boolean) = { val dir = directoryAsFile(dirname) //System.err.println(dir) //val shuffled = shuffle(new File(dirname).list()) suiteFromListOfFiles(/*shuffled*/ new File(dirname).list(), dirname, dirname, dir.getCanonicalPath, failsOnly, expect_failure) } def suiteFromListOfFiles(shuffledL: JList[String], dir_from_user: String, dir_slashes_normalized: String, dir_canonical: String, failsOnly: Boolean, expect_failure: Boolean): TestSuite = { val shuffled = JavaConversions.asScalaBuffer(new ArrayBackedList(shuffledL)).toArray suiteFromListOfFiles(shuffled, dir_from_user, dir_slashes_normalized, dir_canonical, failsOnly, expect_failure) } def suiteFromListOfFiles(shuffled: Array[String], dir_from_user: String, dir_slashes_normalized: String, dir_canonical: String, failsOnly: Boolean, expect_failure: Boolean): TestSuite = { var dir_name_from_user = dir_from_user var dir_name_slashes_normalized = dir_slashes_normalized var dir_name_canonical = dir_canonical val testCount = Integer.MAX_VALUE var i = testCount val suite = new TestSuite("Runs all tests in " + dir_name_from_user) { override def run(result: TestResult) { super.run(result) } } var commandTests = List[Test]() for (one <- shuffled if i > 0) { var s = one val slashi = s.lastIndexOf('/') if (slashi != -1) { val candidatedir = s.substring(0, slashi) s = s.substring(slashi + 1) dir_name_slashes_normalized = candidatedir dir_name_canonical = directoryAsFile(dir_name_slashes_normalized).getCanonicalPath } var decrement = true val shouldFail = s.startsWith("XXX") if (s.endsWith(".js") || s.endsWith(".widl")) { // do nothing decrement = false } else if (!s.startsWith(".")) { if (s.endsWith(".test")) { // need to define the test of tests. 
val propFileName = join(dir_name_canonical, s) //var props = new StringMap.FromFileProps(propFileName) val props = new StringMap.ComposedMaps(new StringMap.FromFileProps(propFileName), new StringMap.FromPair("JS_HOME", ProjectProperties.JS_HOME), new StringMap.FromEnv) if (props.isEmpty) throw new java.io.FileNotFoundException("File not found or empty: "+propFileName) val testname = s.substring(0, s.lastIndexOf(".test")) var testNames = props.get("tests") if (testNames == null) testNames = "" else testNames = testNames.trim if (testNames.length > 0) { val st = new StringTokenizer(testNames) while (st.hasMoreTokens) commandTests ++= standardCompilerTests(props, dir_name_canonical, dir_name_slashes_normalized, st.nextToken, expect_failure, shouldFail, failsOnly) } else commandTests ++= standardCompilerTests(props, dir_name_canonical, dir_name_slashes_normalized, testname, expect_failure, shouldFail, failsOnly) } else { System.out.println("Not compiling file " + s) decrement = false } } if (decrement) i -= 1 } if (i <= 0) System.out.println("Early testing exit after " + testCount + " tests") // Do all the larger tests else for (test <- commandTests) suite.addTest(test) suite } def standardCompilerTests(props: StringMap, canonicalDirName: String, dirname: String, testname: String, expect_not_passing: Boolean, shouldFail: Boolean, failsOnly: Boolean): List[Test] = { var cTests = List[Test]() val commands = List("strict","compile","disambiguate","interpret","interpret_mozilla","cfg", "concolic","bug-detector","widlparse","widlcheck") var found = false for (c <- commands) if (props.get(c) != null) { cTests ::= (new CommandTest(c, props, canonicalDirName, dirname, testname, failsOnly, expect_not_passing, shouldFail)) found = true } if (!found) throw new IllegalArgumentException("No supported tests found in " + dirname + "/" + testname) cTests } class CommandTest(command: String, props: StringMap, path: String, d: String, s: String, unexpected_only: Boolean, knownFailure: Boolean, shouldFail: Boolean) extends SourceFileTest(path, d, s, unexpected_only, knownFailure, shouldFail) { override def justTheTest = { val tokens = if (command.equals("widlcheck")) Array[String](command, "-js", join(dir, makeTestFileName(name)), "-db", "tests/widlchecker_tests/webapis.db") else Array[String](command, join(dir, makeTestFileName(name))) /* System.out.print("[[ ") for (s <- tokens) System.out.print(s+ " ") System.out.println(" ]]... Calling... ") */ kr.ac.kaist.jsaf.Shell.params.Clear() kr.ac.kaist.jsaf.Shell.subMain(tokens) } override def tag = command override def testFailed(out: String, err: String, exc: String): String = generalTestFailed(command + "_", props, out, err, exc) } abstract class SourceFileTest(path: String, d: String, s: String, unexpected_only: Boolean, knownFailure: Boolean, shouldFail: Boolean) extends BaseTest(path, d, s, unexpected_only, knownFailure, shouldFail) { def tag(): String def testFile(): Unit = { // Useful when a test is running forever // System.out.println(this.name) // System.out.flush() val checkDuration: Boolean = false if(checkDuration) System.out.println(" " + s + " is being executed...") val oldErr = System.err val oldOut = System.out val wt_err = WireTappedPrintStream. 
make(new PrintStream(System.err, true, "UTF-8"), unexpected_only) val wt_out = WireTappedPrintStream.make(new PrintStream(System.out, true, "UTF-8"), unexpected_only) System.setErr(wt_err) System.setOut(wt_out) val start = System.nanoTime //val jsFile = f + ".js" var rc = 0 try { try { /* oldOut.print(" " + tag + " ") oldOut.print(f) oldOut.print("\n") oldOut.flush */ //in = Useful.utf8BufferedFileReader(jsFile) rc = justTheTest } finally { System.setErr(oldErr) System.setOut(oldOut) } } catch { case ex:Throwable => val outs = wt_out.getString val errs = wt_err.getString var exFirstLine = ex.toString val trueFailure = testFailed(outs, errs, exFirstLine) if (f.contains("XXX")) { if (trueFailure != null) { unexpectedExceptionBoilerplate(wt_err, wt_out, ex, " Did not satisfy " + trueFailure) return } else { // "Failed", but correctly // !unexpectedOnly || expectFailure wt_err.flush(printSuccess) wt_out.flush(printSuccess) var crLoc = exFirstLine.indexOf("\n") if (crLoc == -1) crLoc = exFirstLine.length exFirstLine = exFirstLine.substring(0, crLoc) if (printSuccess) System.out.println(exFirstLine) //System.out.println(" OK Saw expected exception") return } } else unexpectedExceptionBoilerplate(wt_err, wt_out, ex, " UNEXPECTED exception ") } /* Come here IFF NO EXCEPTIONS, to analyze output */ val outs = wt_out.getString val errs = wt_err.getString val anyFails = (outs.contains("fail") || outs.contains("FAIL") || errs.contains("fail") || errs.contains("FAIL") || rc != 0) && (!f.contains("string-unpack-code")) var trueFailure = testFailed(outs, errs, "") if (shouldFail) { // NOTE expect to see this on STANDARD OUTPUT, not ERROR. if (anyFails && trueFailure == null) { wt_err.flush(printSuccess) wt_out.flush(printSuccess) // Saw a failure, that is good. //System.out.println(" Saw expected failure") } else { if (printFailure) System.out.println wt_err.flush(printFailure) wt_out.flush(printFailure) if (trueFailure != null) { System.out.println(" Saw failure, but did not satisfy " + trueFailure) // Expected exception, saw none. fail("Saw wrong failure.") } else { System.out.println(" Missing expected failure.") // Expected exception, saw none. fail("Expected failure or exception, saw none.") } } } else { // This logic is a little confusing. // Failure is failure. TrueFailure contains the better message. 
if (anyFails && trueFailure == null) trueFailure = "FAIL or fail should not appear in output" val duration = (System.nanoTime - start) / 1000000 if (trueFailure != null) { if(checkDuration) System.out.println(" " + s + " FAIL (time = " + duration + "ms)") else System.out.println(" FAIL") wt_err.flush(printFailure) wt_out.flush(printFailure) } else { if(checkDuration) System.out.println(" " + s + " OK (time = " + duration + "ms)") wt_err.flush(printSuccess) wt_out.flush(printSuccess) } assertTrue("Must satisfy " + trueFailure, trueFailure == null) } } def unexpectedExceptionBoilerplate(wt_err: WireTappedPrintStream, wt_out: WireTappedPrintStream, ex: Throwable, s: String) = { if (printFailure) System.out.println wt_err.flush(printFailure) wt_out.flush(printFailure) if (printFailure) { System.out.println(s) ex.printStackTrace fail } else { System.out.println(s) fail(ex.getMessage) } } def justTheTest(): Int } class BaseTest(path: String, _dir: String, _name: String, unexpected_only: Boolean, knownFailure: Boolean, shouldFail: Boolean) extends TestCase("testFile") { val dir = _dir val name = _name // Directory-qualified file name val f = join(dir, name) val printSuccess = !unexpected_only || knownFailure val printFailure = !unexpected_only || !knownFailure override def getName = f /** * Returns true if this test should be regarded as a "Failure", * regardless of the XXX test name or not. This can be used to * test that a particular exception was thrown, for example; not only * should (say) XXXbadNumber thrown an exception, it should throw * a NumberFormatException. Thus, the exc string could be tested * to see that it contains "NumberFormatException". */ def testFailed(out: String, err: String, exc: String) = "" /** * Looks for properties of the form * compile/link/run_ * out/err/exceptions_ * contains/matches/WImatches/equals/WCIequals, * returns true if an expected condition fails. 
*/ def generalTestFailed(pfx: String, props: StringMap, out: String, err: String, exc: String): String = { var s = "" if (s == "") s = generalTestFailed(pfx, props, "out", out) if (s == "") s = generalTestFailed(pfx, props, "err", err) if (s == "") s = generalTestFailed(pfx, props, "exception", exc) s } def generalTestFailed(pfx: String, props: StringMap, which: String, contents: String): String = { var any_check = false var what = pfx + which + "_contains" var test = props.get(what) test = ProjectProperties.get(test) if (test != null && test.length > 0) { if (!contents.contains(test)) return what + "; expected\n" + test any_check = true } what = pfx + which + "_does_not_contain" test = props.get(what) test = ProjectProperties.get(test) if (test != null && test.length > 0) { if (contents.contains(test)) return what + "; expected\n" + test any_check = true } what = pfx + which + "_matches" test = props.get(what) test = ProjectProperties.get(test) if (test != null && test.length > 0) { if (!contents.matches(test)) return what + "; expected\n" + test any_check = true } what = pfx + which + "_WImatches" test = props.get(what) test = ProjectProperties.get(test) if (test != null && test.length() > 0) { val wi_contents = contents.replaceAll("\\s+", " ").trim if (!wi_contents.matches(test)) return what + "; expected\n" + test any_check = true } what = pfx + which + "_WCIequals" test = props.get(what) test = props.getCompletely(test) if (test != null && test.length > 0) { val wci_contents = contents.replaceAll("\\s+", " ").trim val wci_test = test.replaceAll("\\s+", " ").trim if (!wci_contents.equals(wci_test)) return what + "; expected\n" + test any_check = true } what = pfx + which + "_equals" test = props.get(what) test = props.getCompletely(test) if (test != null && test.length > 0) { var wi_contents = contents.replaceAll("[ \\\t]+", " ") var wi_test = test.replaceAll("[ \\\t]+", " ") // Convert Windows CRLF to UNIX LF wi_contents = wi_contents.replaceAll("\\\r\\\n", "\n") wi_test = wi_test.replaceAll("\\\r\\\n", "\n") // Convert Mac CR to UNIX LF wi_contents = wi_contents.replaceAll("\\\r", "\n") wi_test = wi_test.replaceAll("\\\r", "\n") if (!wi_contents.equals(wi_test)) { if (wi_contents.trim.equals(wi_test.trim)) { // It is a leading/trailing whitespace problem.... 
val c0 = wi_contents.charAt(0) val t0 = wi_test.charAt(0) val cN = wi_contents.charAt(wi_contents.length - 1) val tN = wi_test.charAt(wi_test.length - 1) var problem = "" if (c0 == ' ' || c0 == '\n') { if (t0 == ' ' || t0 == '\n') { } else problem += "text began with unexpected whitespace" } else { if (t0 == ' ' || t0 == '\n') problem += "text began without expected whitespace" else {} } val problemAnd = if (problem.length > 0) problem + " and " else problem if (cN == ' ' || cN == '\n') { if (tN == ' ' || tN == '\n') { } else problem = problemAnd + "text ended with unexpected whitespace" } else { if (tN == ' ' || tN == '\n') problem = problemAnd + "text ended without expected whitespace" else {} } return what + ": " + problem } else if (wi_contents.replaceAll("\\s+", " ").trim.equals(wi_test.replaceAll("\\s+", " ").trim)) { val cL = wi_contents.replaceAll("\n[ \t]+", "\n").trim val cT = wi_contents.replaceAll("[ \t]+\n", "\n").trim val tL = wi_test.replaceAll("\n[ \t]+", "\n").trim val tT = wi_test.replaceAll("[ \t]+\n", "\n").trim if (cL.equals(tL)) return what + ": different LEADING whitespace on some line(s)" else if (cT.equals(tT)) return what + ": different TRAILING whitespace on some line(s)" else if (wi_contents.replaceAll("[ \\\t]*\\\n[ \\\t]*", "\n").trim.equals(wi_test.replaceAll( "[ \\\t]*\\\n[ \\\t]*", "\n").trim)) return what + ": different LEADING AND TRAILING whitespace on some line(s)" return what + ": some sort of an internal whitespace problem (linebreaks?)" } return what + "; expected\n" + test } any_check = true } if (!any_check && pfx.equals("run_") && which.equals("out")) { // If there is no check specified on run_out, demand that it // contain "pass" or "PASS". if (!(contents.contains("pass") || contents.contains("PASS"))) return "default check run_out_contains=PASS" } return null } } }
daejunpark/jsaf
src/kr/ac/kaist/jsaf/tests/FileTests.scala
Scala
bsd-3-clause
20,705
/* * Copyright (C) 2016 Lightbend Inc. <http://www.lightbend.com> */ package sample.chirper.load.api import akka.NotUsed import com.lightbend.lagom.javadsl.api.Descriptor import com.lightbend.lagom.javadsl.api.ScalaService._ import com.lightbend.lagom.javadsl.api.Service import com.lightbend.lagom.javadsl.api.ServiceCall import akka.stream.javadsl.Source trait LoadTestService extends Service { /** * Example: src/test/resources/websocket-loadtest.html */ def startLoad(): ServiceCall[NotUsed, Source[String, _]] /** * Example: curl http://localhost:21360/loadHeadless -H * "Content-Type: application/json" -X POST -d '{"users":2000, "friends":5, * "chirps":200000, "clients":20, "parallelism":20}' */ def startLoadHeadless(): ServiceCall[TestParams, NotUsed] override def descriptor(): Descriptor = { // @formatter:off named("/loadtestservice").withCalls( namedCall("/load", startLoad _), pathCall("/loadHeadless", startLoadHeadless _) ) // @formatter:on } }
dotta/activator-lagom-scala-chirper
load-test-api/src/main/scala/sample/chirper/load/api/LoadTestService.scala
Scala
apache-2.0
1,035
package cxx

import scala.math._

object Example1 {
  def main(args: Array[String]) {
    // var declares a mutable variable
    var count = 0
    count += 1
    println(count)
    // val declares a constant that cannot be reassigned
    val constant = 0
    // constant = 1 would not compile
    // apply can be thought of as overloading ()
    println("Hello"(4))
    println("Hello".apply(4))
    println(BigInt.apply("1234567890"))
    println("Hello," + constant)
    println("Hello".toUpperCase())
    println("Hello".head) // prints H
    println("tail :" + "Hello".tail) // prints ello
    println("takeRight(2): " + "Hello".takeRight(2)) // prints lo
    println("Hello".intersect("World")) // prints lo
    println("Hello".distinct) // prints Helo
    println("countisLower: " + "Hello".count(_.isLower)) // prints 4
    println(1.to(10))
    println(1.toString())
    println(99.54.toInt)
    println(sqrt(2))
  }
}
cxxspark/TEST
Example1.scala
Scala
apache-2.0
882
object Build extends sbt.Build { lazy val root = (Project(id = "cronos", base = file(".")) aggregate(client, common, commonTest, gui, server, test) settings(ScoverageSbtPlugin.instrumentSettings: _*) ) lazy val common = (Project(id = "common", base = file("cronos-common")) settings(ScoverageSbtPlugin.instrumentSettings: _*) ) }
D4RK0studio/cronos
project/Build.scala
Scala
gpl-3.0
357
package org.openurp.edu.eams.teach.web.action.selector import org.beangle.data.jpa.dao.OqlBuilder import org.beangle.commons.lang.Strings import org.openurp.edu.base.code.StdLabel import org.openurp.edu.base.Course import org.openurp.edu.eams.web.action.BaseAction class CourseSelector extends BaseAction { def search(): String = { val pageNo = get("pageNo") val query = OqlBuilder.from(classOf[Course], "course") populateConditions(query) if (Strings.isEmpty(pageNo)) { put("courseList", entityDao.search(query)) } else { query.limit(getPageLimit) put("courseList", entityDao.search(query)) } put("stdTypeList", baseCodeService.getCodes(classOf[StdLabel])) forward("list") } }
openurp/edu-eams-webapp
web/src/main/scala/org/openurp/edu/eams/teach/web/action/selector/CourseSelector.scala
Scala
gpl-3.0
737
package com.twitter.finagle.stats import com.twitter.finagle.benchmark.StdBenchAnnotations import java.util.Random import org.openjdk.jmh.annotations._ import org.openjdk.jmh.infra.Blackhole // ./sbt 'project finagle-benchmark' 'run .*BucketedHistogramBenchmark.*' class BucketedHistogramBenchmark extends StdBenchAnnotations { import BucketedHistogramBenchmark._ @Benchmark @OperationsPerInvocation(N) def add(data: DataState, add: AddState, bh: Blackhole): Int = { var i = 0 while (i < data.datas.length) { bh.consume(add.histogram.add(data.datas(i))) i += 1 } i } @Benchmark def percentiles(state: PercentileState): Array[Long] = { state.histogram.getQuantiles(BucketedHistogramBenchmark.percentiles) } } object BucketedHistogramBenchmark { final val N = 10000 final val percentiles = Array[Double](0.5, 0.9, 0.95, 0.99, 0.999, 0.9999) @State(Scope.Benchmark) class DataState { val datas: Array[Long] = { val rng = new Random(1010101) Array.fill(N) { (rng.nextDouble() * 100000L).toLong } } } @State(Scope.Thread) class AddState { val histogram = BucketedHistogram() } @State(Scope.Thread) class PercentileState { val histogram = BucketedHistogram() @Setup def setup(state: DataState): Unit = { state.datas.foreach(histogram.add) } } }
cogitate/twitter-finagle-uuid
finagle-benchmark/src/main/scala/com/twitter/finagle/stats/BucketedHistogramBenchmark.scala
Scala
apache-2.0
1,368
/* * Copyright 2015 Simin You * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package spatialspark.partition.stp import spatialspark.partition.{PartitionConf, PartitionMethod} import spatialspark.util.MBR /** * Created by Simin You on 10/22/14. */ class SortTilePartitionConf (val gridDimX:Int, val gridDimY:Int, val extent:MBR, val ratio:Double, val parallel:Boolean) extends PartitionConf(PartitionMethod.STP) with Serializable{ }
giserh/SpatialSpark
src/main/scala/spatialspark/partition/stp/SortTilePartitionConf.scala
Scala
apache-2.0
982
/* * Copyright 2015 Johan Andrén * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package markatta.futiles import org.scalatest.concurrent.ScalaFutures import org.scalatest.{FunSpec, Matchers} import scala.concurrent.duration._ abstract class Spec extends FunSpec with Matchers with ScalaFutures { implicit val ec = scala.concurrent.ExecutionContext.Implicits.global override implicit def patienceConfig: PatienceConfig = PatienceConfig(1.second, 100.millis) }
johanandren/futiles
src/test/scala/markatta/futiles/Spec.scala
Scala
apache-2.0
1,021
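A hypothetical suite showing how a test in this project would extend the Spec base class above: describe and it come from FunSpec, should from Matchers, and futureValue from ScalaFutures with the one-second patience configured there. ExampleSpec is an invented name, not a file in the futiles repository.

package markatta.futiles

import scala.concurrent.Future

class ExampleSpec extends Spec {
  describe("a future") {
    it("eventually yields its value") {
      Future(21 * 2).futureValue should be(42)
    }
  }
}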
package com.twitter.finagle.thrift.transport.netty3 import org.apache.thrift.transport.TTransport import org.jboss.netty.buffer.ChannelBuffer /** * Adapts a single Netty ChannelBuffer to a Thrift TTransport * * @param underlying a netty channelBuffer * */ private[thrift] class ChannelBufferToTransport(underlying: ChannelBuffer) extends TTransport { override def isOpen: Boolean = true override def open(): Unit = {} override def close(): Unit = {} override def read(buffer: Array[Byte], offset: Int, length: Int): Int = { val bytesToRead = math.min(length, underlying.readableBytes) underlying.readBytes(buffer, offset, bytesToRead) bytesToRead } override def write(buffer: Array[Byte], offset: Int, length: Int): Unit = { underlying.writeBytes(buffer, offset, length) } } /** * Adapts input and output Netty ChannelBuffers to a Thrift TTransport * * @param input a netty channelBuffer to be read from * @param output a netty channelBuffer to write to * */ private[thrift] class DuplexChannelBufferTransport(input: ChannelBuffer, output: ChannelBuffer) extends TTransport { override def isOpen: Boolean = true override def open(): Unit = {} override def close(): Unit = {} override def read(buffer: Array[Byte], offset: Int, length: Int): Int = { val readableBytes = input.readableBytes() val bytesToRead = math.min(length, readableBytes) input.readBytes(buffer, offset, bytesToRead) bytesToRead } override def write(buffer: Array[Byte], offset: Int, length: Int): Unit = { output.writeBytes(buffer, offset, length) } }
BuoyantIO/finagle
finagle-thrift/src/main/scala/com/twitter/finagle/thrift/transport/netty3/ChannelBufferToTransport.scala
Scala
apache-2.0
1,612
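A hypothetical round-trip through the adapters above, not taken from finagle's own tests. The sketch sits in the same package because both classes are private[thrift], and it relies only on the Netty 3 ChannelBuffers factory.

package com.twitter.finagle.thrift.transport.netty3

import org.jboss.netty.buffer.ChannelBuffers

object TransportSketch extends App {
  val in  = ChannelBuffers.wrappedBuffer("hello".getBytes("UTF-8"))
  val out = ChannelBuffers.dynamicBuffer()
  val transport = new DuplexChannelBufferTransport(in, out)

  val buf = new Array[Byte](5)
  val n = transport.read(buf, 0, buf.length) // reads at most 5 bytes from `in`
  transport.write(buf, 0, n)                 // echoes them into `out`

  println(new String(buf, 0, n, "UTF-8"))    // hello
  println(out.readableBytes())               // 5
}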
/** * Copyright (C) 2010 Orbeon, Inc. * * This program is free software; you can redistribute it and/or modify it under the terms of the * GNU Lesser General Public License as published by the Free Software Foundation; either version * 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Lesser General Public License for more details. * * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html */ package org.orbeon.oxf.xforms.processor.handlers.xhtml import java.{lang => jl} import org.orbeon.oxf.xforms.analysis.ElementAnalysis import org.orbeon.oxf.xforms.analysis.controls.LHHA import org.orbeon.oxf.xforms.control.{LHHASupport, XFormsSingleNodeControl} import org.orbeon.xforms.XFormsNames import org.orbeon.oxf.xforms.processor.handlers.{HandlerContext, HandlerSupport} import org.xml.sax.Attributes abstract class XFormsGroupHandler( uri : String, localname : String, qName : String, localAtts : Attributes, elementAnalysis : ElementAnalysis, handlerContext : HandlerContext ) extends XFormsControlLifecyleHandler( uri, localname, qName, localAtts, elementAnalysis, handlerContext, repeating = false, forwarding = true ) with HandlerSupport { protected def getLabelClasses(xformsControl: XFormsSingleNodeControl): jl.StringBuilder = { require(LHHASupport.hasLabel(containingDocument, getPrefixedId)) val labelClasses = new jl.StringBuilder("xforms-label") // Handle relevance on label if ((xformsControl eq null) || ((xformsControl ne null) && ! xformsControl.isRelevant)) labelClasses.append(" xforms-disabled") // Copy over existing label classes if any val labelClassAttribute = handlerContext.getPartAnalysis.getLHH(getPrefixedId, LHHA.Label).element.attributeValue(XFormsNames.CLASS_QNAME) if (labelClassAttribute ne null) { labelClasses.append(' ') labelClasses.append(labelClassAttribute) } labelClasses } protected def getLabelValue(xformsControl: XFormsSingleNodeControl): String = if (xformsControl eq null) { // Q: Can this happen? // 2020-11-13: Not 100% sure but haven't seen it yet. Probably safe to remove. null } else xformsControl.getLabel }
orbeon/orbeon-forms
xforms-runtime/shared/src/main/scala/org/orbeon/oxf/xforms/processor/handlers/xhtml/XFormsGroupHandler.scala
Scala
lgpl-2.1
2,522
package parser import java.io._ import tokyocabinet._ import scala.util.Random /** * Generates the results file by pulling data from the Tokyo Cabinet * databases created by other objects */ object ResultsGenerator { import sbinary.DefaultProtocol._ import sbinary.Operations._ import scala.collection.jcl.Conversions._ import helper.FileHelper._ def main(args : Array[String]) : Unit = { val rand = new Random val userDatabase = new HDB userDatabase.open("users.hcb", HDB.OREADER) val repoDatabase = new TDB repoDatabase.open("repos.tcb", TDB.OREADER) val testFile = new File("/Users/andrewg/Downloads/download/test.txt") val resultsFile = new File("/Users/andrewg/Downloads/download/results.txt") val topRepos = getTopRepos(repoDatabase) val br = new BufferedReader(new FileReader(testFile)) val bw = new BufferedWriter(new FileWriter(resultsFile)) testFile.eachLine { line => val repoBytes = userDatabase.get(toByteArray(line)) var alreadyWatchedRepos : List[Int] = Nil if (repoBytes != null) { alreadyWatchedRepos = fromByteArray[List[Int]](repoBytes) } val possibleRepos = topRepos -- alreadyWatchedRepos // the random part at the end was just to create a unique file so GH would // process it and put me on the leaderboard (towards the bottom) val topTen = possibleRepos.slice(0, 10).sort {(a, b) => rand.nextBoolean} bw.write(line + ":" + topTen.mkString(",") + "\\n") println(line + ":" + topTen.mkString(",")) } bw.close userDatabase.close repoDatabase.close } /** * Find the top 50 watched repos in GitHub */ def getTopRepos(repoDatabase : TDB) = { var query = new TDBQRY(repoDatabase) query.setlimit(50, 0) query.setorder("count", TDBQRY.QONUMDESC) val results = query.search.asInstanceOf[java.util.ArrayList[Array[Byte]]] results.map (new String(_).trim.toInt).toList } }
eedrummer/gh-contest
src/parser/ResultsGenerator.scala
Scala
apache-2.0
1,953
package com.github.mdr.mash.utils object Region { def zeroWidth(offset: Int): Region = Region(offset, length = 0) def apply(range: Range): Region = Region(range.start, range.length) def fromStartEnd(start: Int, end: Int) = Region(start, end - start) } case class Region(offset: Int, length: Int) { require(length >= 0, s"Length must not be negative (length = $length, offset = $offset)") require(offset >= 0, s"Offset must not be negative (offset = $offset, length = $length)") def contains(pos: Int): Boolean = offset <= pos && pos < offset + length def contains(that: Region): Boolean = this.contains(that.offset) && (that.isEmpty || this.contains(that.lastPos)) def posAfter = offset + length def lastPos = offset + length - 1 // maybe this should be optional (for 0-length regions) def of(s: String): String = s.substring(offset, offset + length) def of[T](xs: Seq[T]): Seq[T] = xs.slice(offset, offset + length) def translate(n: Int) = copy(offset = offset + n) def replace(s: String, replacement: String) = StringUtils.replace(s, this, replacement) def grow(n: Int) = Region(offset, length + n) def overlaps(that: Region): Boolean = if (this.length == 0) that contains this.offset else if (that.length == 0) this contains that.offset else !(this.lastPos < that.offset || that.lastPos < this.offset) def merge(that: Region): Region = { val offset = this.offset min that.offset val posAfter = this.posAfter max that.posAfter Region(offset, posAfter - offset) } def range = offset until posAfter def isEmpty: Boolean = length == 0 } /** * A region with a highlighted point */ case class PointedRegion(point: Int, region: Region) { def this(point: Int, offset: Int, length: Int) = this(point, Region(offset, length)) def of(s: String): String = region.of(s) def contains(pos: Int) = region contains pos def posAfter = region.posAfter def merge(that: PointedRegion): PointedRegion = copy(region = this.region merge that.region) def movePoint(delta: Int) = copy(point = point + delta) }
mdr/mash
src/main/scala/com/github/mdr/mash/utils/Region.scala
Scala
mit
2,091
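A minimal sketch, not part of mash, exercising the Region helpers defined above; the RegionSketch object is invented for illustration.

import com.github.mdr.mash.utils.Region

object RegionSketch extends App {
  val s = "Hello, world"
  val hello = Region(0, 5)
  val world = Region.fromStartEnd(7, 12)

  println(hello.of(s))               // Hello
  println(world.of(s))               // world
  println(hello overlaps world)      // false: the regions do not touch
  println(hello contains world)      // false
  println((hello merge world).of(s)) // Hello, world
}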
package list import list.P12.decode import org.scalatest.{FlatSpec, ShouldMatchers} class P12Spec extends FlatSpec with ShouldMatchers { "decode" must "return an empty list when given an empty list" in { val list = List() decode(list) should be(Nil) } it must "return a list of one element when given a list of tuple of one element" in { val list = List((1, "a")) decode(list) should be(List("a")) } it must "return a list of multiple elements when given a list of multiple tuples" in { val list = List((2, "a"), (4, "c"), (3, "e")) decode(list) should be(List("a", "a", "c", "c", "c", "c", "e", "e", "e")) } }
zjt1114/scala99
src/test/scala/list/P12Spec.scala
Scala
apache-2.0
653
// Copyright (c) 2013-2020 Rob Norris and Contributors // This software is licensed under the MIT License (MIT). // For more information see LICENSE or https://opensource.org/licenses/MIT package doobie.enumerated import java.sql.Types._ import cats.Show import cats.kernel.Order import cats.kernel.instances.int._ /** @group Types */ sealed abstract class JdbcType(val toInt: Int) extends Product with Serializable /** @group Modules */ object JdbcType { /** @group Values */ case object Array extends JdbcType(ARRAY) /** @group Values */ case object BigInt extends JdbcType(BIGINT) /** @group Values */ case object Binary extends JdbcType(BINARY) /** @group Values */ case object Bit extends JdbcType(BIT) /** @group Values */ case object Blob extends JdbcType(BLOB) /** @group Values */ case object Boolean extends JdbcType(BOOLEAN) /** @group Values */ case object Char extends JdbcType(CHAR) /** @group Values */ case object Clob extends JdbcType(CLOB) /** @group Values */ case object DataLink extends JdbcType(DATALINK) /** @group Values */ case object Date extends JdbcType(DATE) /** @group Values */ case object Decimal extends JdbcType(DECIMAL) /** @group Values */ case object Distinct extends JdbcType(DISTINCT) /** @group Values */ case object Double extends JdbcType(DOUBLE) /** @group Values */ case object Float extends JdbcType(FLOAT) /** @group Values */ case object Integer extends JdbcType(INTEGER) /** @group Values */ case object JavaObject extends JdbcType(JAVA_OBJECT) /** @group Values */ case object LongnVarChar extends JdbcType(LONGNVARCHAR) /** @group Values */ case object LongVarBinary extends JdbcType(LONGVARBINARY) /** @group Values */ case object LongVarChar extends JdbcType(LONGVARCHAR) /** @group Values */ case object NChar extends JdbcType(NCHAR) /** @group Values */ case object NClob extends JdbcType(NCLOB) /** @group Values */ case object Null extends JdbcType(NULL) /** @group Values */ case object Numeric extends JdbcType(NUMERIC) /** @group Values */ case object NVarChar extends JdbcType(NVARCHAR) /** @group Values */ case object Other extends JdbcType(OTHER) /** @group Values */ case object Real extends JdbcType(REAL) /** @group Values */ case object Ref extends JdbcType(REF) /** @group Values */ case object RefCursor extends JdbcType(REF_CURSOR) /** @group Values */ case object RowId extends JdbcType(ROWID) /** @group Values */ case object SmallInt extends JdbcType(SMALLINT) /** @group Values */ case object SqlXml extends JdbcType(SQLXML) /** @group Values */ case object Struct extends JdbcType(STRUCT) /** @group Values */ case object Time extends JdbcType(TIME) /** @group Values */ case object TimeWithTimezone extends JdbcType(TIME_WITH_TIMEZONE) /** @group Values */ case object Timestamp extends JdbcType(TIMESTAMP) /** @group Values */ case object TimestampWithTimezone extends JdbcType(TIMESTAMP_WITH_TIMEZONE) /** @group Values */ case object TinyInt extends JdbcType(TINYINT) /** @group Values */ case object VarBinary extends JdbcType(VARBINARY) /** @group Values */ case object VarChar extends JdbcType(VARCHAR) /** @group Values (MS-SQL Specific) */ case object MsSqlDateTimeOffset extends JdbcType(-155) /** @group Values (MS-SQL Specific) */ case object MsSqlVariant extends JdbcType(-150) /** * A catch-all constructor for JDBC type constants outside the specification and known extensions. 
* @group Values */ final case class Unknown(override val toInt: Int) extends JdbcType(toInt) def fromInt(n:Int): JdbcType = n match { case Array.toInt => Array case BigInt.toInt => BigInt case Binary.toInt => Binary case Bit.toInt => Bit case Blob.toInt => Blob case Boolean.toInt => Boolean case Char.toInt => Char case Clob.toInt => Clob case DataLink.toInt => DataLink case Date.toInt => Date case Decimal.toInt => Decimal case Distinct.toInt => Distinct case Double.toInt => Double case Float.toInt => Float case Integer.toInt => Integer case JavaObject.toInt => JavaObject case LongnVarChar.toInt => LongnVarChar case LongVarBinary.toInt => LongVarBinary case LongVarChar.toInt => LongVarChar case NChar.toInt => NChar case NClob.toInt => NClob case Null.toInt => Null case Numeric.toInt => Numeric case NVarChar.toInt => NVarChar case Other.toInt => Other case Real.toInt => Real case Ref.toInt => Ref case RefCursor.toInt => RefCursor case RowId.toInt => RowId case SmallInt.toInt => SmallInt case SqlXml.toInt => SqlXml case Struct.toInt => Struct case Time.toInt => Time case TimeWithTimezone.toInt => TimeWithTimezone case Timestamp.toInt => Timestamp case TimestampWithTimezone.toInt => TimestampWithTimezone case TinyInt.toInt => TinyInt case VarBinary.toInt => VarBinary case VarChar.toInt => VarChar // MS-SQL Specific values, sigh case MsSqlDateTimeOffset.toInt => MsSqlDateTimeOffset case MsSqlVariant.toInt => MsSqlVariant // Gets a little iffy here. H2 reports NVarChar as -10 rather than -9 ... no idea. It's // definitely not in the spec. So let's just accept it here and call it good. What's the // worst thing that could happen? heh-heh case -10 => NVarChar // In the case of an unknown value we construct a catch-all case n => Unknown(n) } implicit val OrderJdbcType: Order[JdbcType] = Order.by(_.toInt) implicit val ShowJdbcType: Show[JdbcType] = Show.fromToString }
tpolecat/doobie
modules/core/src/main/scala/doobie/enumerated/jdbctype.scala
Scala
mit
6,862
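A short sketch of the fromInt mapping above, covering the H2 quirk and the Unknown catch-all. It is illustrative only and not taken from doobie's own tests; JdbcTypeSketch is an invented name.

import doobie.enumerated.JdbcType
import java.sql.Types

object JdbcTypeSketch extends App {
  println(JdbcType.fromInt(Types.VARCHAR)) // VarChar
  println(JdbcType.fromInt(-10))           // NVarChar (the H2 special case handled above)
  println(JdbcType.fromInt(-155))          // MsSqlDateTimeOffset
  println(JdbcType.fromInt(424242))        // Unknown(424242)
}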
package com.faacets.qalg package algos package impl import scala.{specialized => sp} import spire.algebra._ import spire.math._ import spire.syntax.all._ import algebra._ import syntax.all._ final class DenseMutableRrefImpl[M, @sp(Double, Long) A](implicit val M: MatField[M, A], val MM: MatMut[M, A], pivotA: Pivot[A], eqA: Eq[A]) extends MutableRref[M] { implicit def A: Field[A] = M.scalar def unsafeRref(m: M): RrefDecomposition[M] = { val used = collection.mutable.ArrayBuilder.make[Int] var r = 0 cforRange(0 until m.nCols) { c => if (r < m.nRows) { var pivotPriority = pivotA.priority(m(r, c)) var pivot = r cforRange((r + 1) until m.nRows) { r1 => val r1Priority = pivotA.priority(m(r1, c)) if (r1Priority > pivotPriority) { pivotPriority = r1Priority pivot = r1 } } if (pivotPriority != 0) { // if el is zero, skip the column c used += c // keep track of bound variables // swap current row and pivot row cforRange(c until m.nCols) { c1 => val tmp = m(pivot, c1) m(pivot, c1) = m(r, c1) m(r, c1) = tmp } // normalize pivot row val f = m(r, c) cforRange(c until m.nCols) { c1 => m(r, c1) = m(r, c1) / f } // eliminate current column cforRange(0 until m.nRows) { r1 => if (r1 != r) { val g = m(r1, c) cforRange(c until m.nCols) { c1 => m(r1, c1) = m(r1, c1) - g * m(r, c1) } } } r += 1 } else // set zero terms to exact zero (used for floating point) cforRange(r until m.nRows) { r1 => m(r, c) = A.zero } } } new RrefDecomposition[M] { def reduced = m val basis = used.result } } }
denisrosset/qalg
core/src/main/scala/qalg/algos/impl/DenseMutableRrefImpl.scala
Scala
mit
1,933
package controllers import javax.inject.Inject import play.api.i18n.{I18nSupport, MessagesApi} import play.api.mvc._ /** * Created by akyao on 2015/11/19. */ class MiniC @Inject()(val messagesApi: MessagesApi) extends Controller with I18nSupport { def imageLoad = Action { Ok(views.html.mini.image_load()) } }
akyao/ketsuco
app/controllers/MiniC.scala
Scala
mit
324
package org.dmonix.area51.akka.cluster.extensions import akka.actor.{Actor, ActorLogging, ActorSystem, Props} import akka.cluster.Cluster import akka.cluster.pubsub.DistributedPubSub import akka.cluster.pubsub.DistributedPubSubMediator._ import org.dmonix.area51.akka.cluster.ClusterSettings import org.dmonix.area51.akka.cluster.Messages.{Message, Response} /** * Works as a subscriber in the publish/subscribe pattern provided by the cluster extensions * @author Peter Nerg */ class Subscriber extends Actor with ActorLogging { def receive = { case Message(msg) => log.info(s"Subscriber [$self] Got [$msg] from [$sender]") sender ! Response(s"Response to [$msg]") case SubscribeAck(Subscribe(topic, group, _)) => log.info(s"Subscriber [$self] is now subscribed to topic [$topic] with group [$group]"); case a:Any => log.warning(s"Subscriber [$self] got unexpected message [$a] from [$sender]") } } object SubscriberStarter extends App with ClusterSettings { System.setProperty("config.file", "src/main/resources/akka-cfg/cluster-ext-member-tcp.conf"); val actorSystem = ActorSystem(actorSystemName) val cluster = Cluster(actorSystem) cluster.joinSeedNodes(seedNodes) val mediator = DistributedPubSub(actorSystem).mediator def registerService(serviceName:String): Unit = { val actor = actorSystem.actorOf(Props(new Subscriber), serviceName) //Get different behavior if subscribed with a group or not. // val group = Option("group") val group = None mediator.tell(Subscribe(serviceName, group, actor),actor) //registers the actor to subscribe to the topic with the provided service name } registerService("ServiceA") registerService("ServiceB") }
pnerg/area51-akka
src/main/scala/org/dmonix/area51/akka/cluster/extensions/Subscriber.scala
Scala
apache-2.0
1,738
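A hedged counterpart to the subscriber above, not present in the repository: a publisher that joins the same cluster through the ClusterSettings trait used there and sends a Message to the "ServiceA" topic via the mediator's Publish envelope. PublisherSketch is an invented name; the config path and Messages types are taken from the file above.

package org.dmonix.area51.akka.cluster.extensions

import akka.actor.ActorSystem
import akka.cluster.Cluster
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator.Publish
import org.dmonix.area51.akka.cluster.ClusterSettings
import org.dmonix.area51.akka.cluster.Messages.Message

object PublisherSketch extends App with ClusterSettings {
  System.setProperty("config.file", "src/main/resources/akka-cfg/cluster-ext-member-tcp.conf")
  val actorSystem = ActorSystem(actorSystemName)
  Cluster(actorSystem).joinSeedNodes(seedNodes)

  val mediator = DistributedPubSub(actorSystem).mediator
  // delivered to every subscriber of the topic (or to one member per group)
  mediator ! Publish("ServiceA", Message("hello from the publisher"))
}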
/* sbt -- Simple Build Tool * Copyright 2011 Mark Harrah */ package sbt import Def.{ showRelativeKey, ScopedKey } import Project.showContextKey import Keys.{ sessionSettings, thisProject } import complete.{ DefaultParsers, Parser } import Aggregation.{ KeyValue, Values } import DefaultParsers._ import Types.idFun import java.net.URI import CommandStrings.{ MultiTaskCommand, ShowCommand } final class ParsedKey(val key: ScopedKey[_], val mask: ScopeMask) object Act { val GlobalString = "*" // this does not take aggregation into account def scopedKey(index: KeyIndex, current: ProjectRef, defaultConfigs: Option[ResolvedReference] => Seq[String], keyMap: Map[String, AttributeKey[_]], data: Settings[Scope]): Parser[ScopedKey[_]] = scopedKeySelected(index, current, defaultConfigs, keyMap, data).map(_.key) // the index should be an aggregated index for proper tab completion def scopedKeyAggregated(current: ProjectRef, defaultConfigs: Option[ResolvedReference] => Seq[String], structure: BuildStructure): KeysParser = for (selected <- scopedKeySelected(structure.index.aggregateKeyIndex, current, defaultConfigs, structure.index.keyMap, structure.data)) yield Aggregation.aggregate(selected.key, selected.mask, structure.extra) def scopedKeySelected(index: KeyIndex, current: ProjectRef, defaultConfigs: Option[ResolvedReference] => Seq[String], keyMap: Map[String, AttributeKey[_]], data: Settings[Scope]): Parser[ParsedKey] = scopedKeyFull(index, current, defaultConfigs, keyMap) flatMap { choices => select(choices, data)(showRelativeKey(current, index.buildURIs.size > 1)) } def scopedKeyFull(index: KeyIndex, current: ProjectRef, defaultConfigs: Option[ResolvedReference] => Seq[String], keyMap: Map[String, AttributeKey[_]]): Parser[Seq[Parser[ParsedKey]]] = { def taskKeyExtra(proj: Option[ResolvedReference], confAmb: ParsedAxis[String], baseMask: ScopeMask): Seq[Parser[ParsedKey]] = for { conf <- configs(confAmb, defaultConfigs, proj, index) } yield for { taskAmb <- taskAxis(conf, index.tasks(proj, conf), keyMap) task = resolveTask(taskAmb) key <- key(index, proj, conf, task, keyMap) extra <- extraAxis(keyMap, IMap.empty) } yield { val mask = baseMask.copy(task = taskAmb.isExplicit, extra = true) new ParsedKey(makeScopedKey(proj, conf, task, extra, key), mask) } for { rawProject <- optProjectRef(index, current) proj = resolveProject(rawProject, current) confAmb <- config(index configs proj) partialMask = ScopeMask(rawProject.isExplicit, confAmb.isExplicit, false, false) } yield taskKeyExtra(proj, confAmb, partialMask) } def makeScopedKey(proj: Option[ResolvedReference], conf: Option[String], task: Option[AttributeKey[_]], extra: ScopeAxis[AttributeMap], key: AttributeKey[_]): ScopedKey[_] = ScopedKey(Scope(toAxis(proj, Global), toAxis(conf map ConfigKey.apply, Global), toAxis(task, Global), extra), key) def select(allKeys: Seq[Parser[ParsedKey]], data: Settings[Scope])(implicit show: Show[ScopedKey[_]]): Parser[ParsedKey] = seq(allKeys) flatMap { ss => val default = ss.headOption match { case None => noValidKeys case Some(x) => success(x) } selectFromValid(ss filter isValid(data), default) } def selectFromValid(ss: Seq[ParsedKey], default: Parser[ParsedKey])(implicit show: Show[ScopedKey[_]]): Parser[ParsedKey] = selectByTask(selectByConfig(ss)) match { case Seq() => default case Seq(single) => success(single) case multi => failure("Ambiguous keys: " + showAmbiguous(keys(multi))) } private[this] def keys(ss: Seq[ParsedKey]): Seq[ScopedKey[_]] = ss.map(_.key) def selectByConfig(ss: Seq[ParsedKey]): 
Seq[ParsedKey] = ss match { case Seq() => Nil case Seq(x, tail @ _*) => // select the first configuration containing a valid key tail.takeWhile(_.key.scope.config == x.key.scope.config) match { case Seq() => x :: Nil case xs => x +: xs } } def selectByTask(ss: Seq[ParsedKey]): Seq[ParsedKey] = { val (selects, globals) = ss.partition(_.key.scope.task.isSelect) if (globals.nonEmpty) globals else selects } def noValidKeys = failure("No such key.") def showAmbiguous(keys: Seq[ScopedKey[_]])(implicit show: Show[ScopedKey[_]]): String = keys.take(3).map(x => show(x)).mkString("", ", ", if (keys.size > 3) ", ..." else "") def isValid(data: Settings[Scope])(parsed: ParsedKey): Boolean = { val key = parsed.key data.definingScope(key.scope, key.key) == Some(key.scope) } def examples(p: Parser[String], exs: Set[String], label: String): Parser[String] = p !!! ("Expected " + label) examples exs def examplesStrict(p: Parser[String], exs: Set[String], label: String): Parser[String] = filterStrings(examples(p, exs, label), exs, label) def optionalAxis[T](p: Parser[T], ifNone: ScopeAxis[T]): Parser[ScopeAxis[T]] = p.? map { opt => toAxis(opt, ifNone) } def toAxis[T](opt: Option[T], ifNone: ScopeAxis[T]): ScopeAxis[T] = opt match { case Some(t) => Select(t); case None => ifNone } def config(confs: Set[String]): Parser[ParsedAxis[String]] = { val sep = ':' !!! "Expected ':' (if selecting a configuration)" token((GlobalString ^^^ ParsedGlobal | value(examples(ID, confs, "configuration"))) <~ sep) ?? Omitted } def configs(explicit: ParsedAxis[String], defaultConfigs: Option[ResolvedReference] => Seq[String], proj: Option[ResolvedReference], index: KeyIndex): Seq[Option[String]] = explicit match { case Omitted => None +: defaultConfigurations(proj, index, defaultConfigs).flatMap(nonEmptyConfig(index, proj)) case ParsedGlobal => None :: Nil case pv: ParsedValue[x] => Some(pv.value) :: Nil } def defaultConfigurations(proj: Option[ResolvedReference], index: KeyIndex, defaultConfigs: Option[ResolvedReference] => Seq[String]): Seq[String] = if (index exists proj) defaultConfigs(proj) else Nil def nonEmptyConfig(index: KeyIndex, proj: Option[ResolvedReference]): String => Seq[Option[String]] = config => if (index.isEmpty(proj, Some(config))) Nil else Some(config) :: Nil def key(index: KeyIndex, proj: Option[ResolvedReference], conf: Option[String], task: Option[AttributeKey[_]], keyMap: Map[String, AttributeKey[_]]): Parser[AttributeKey[_]] = { def dropHyphenated(keys: Set[String]): Set[String] = keys.filterNot(Util.hasHyphen) def keyParser(keys: Set[String]): Parser[AttributeKey[_]] = token(ID !!! "Expected key" examples dropHyphenated(keys)) flatMap { keyString => getKey(keyMap, keyString, idFun) } // Fixes sbt/sbt#2460 and sbt/sbt#2851 // The parser already accepts build-level keys. // This queries the key index so tab completion will list the build-level keys. 
val buildKeys: Set[String] = proj match { case Some(ProjectRef(uri, id)) => index.keys(Some(BuildRef(uri)), conf, task) case _ => Set() } val keys: Set[String] = index.keys(proj, conf, task) ++ buildKeys keyParser(keys) } def getKey[T](keyMap: Map[String, AttributeKey[_]], keyString: String, f: AttributeKey[_] => T): Parser[T] = keyMap.get(keyString) match { case Some(k) => success(f(k)) case None => failure(Command.invalidValue("key", keyMap.keys)(keyString)) } val spacedComma = token(OptSpace ~ ',' ~ OptSpace) def extraAxis(knownKeys: Map[String, AttributeKey[_]], knownValues: IMap[AttributeKey, Set]): Parser[ScopeAxis[AttributeMap]] = { val extrasP = extrasParser(knownKeys, knownValues) val extras = token('(', hide = _ == 1 && knownValues.isEmpty) ~> extrasP <~ token(')') optionalAxis(extras, Global) } def taskAxis(d: Option[String], tasks: Set[AttributeKey[_]], allKnown: Map[String, AttributeKey[_]]): Parser[ParsedAxis[AttributeKey[_]]] = { val taskSeq = tasks.toSeq def taskKeys(f: AttributeKey[_] => String): Seq[(String, AttributeKey[_])] = taskSeq.map(key => (f(key), key)) val normKeys = taskKeys(_.label) val valid = allKnown ++ normKeys ++ taskKeys(_.rawLabel) val suggested = normKeys.map(_._1).toSet val keyP = filterStrings(examples(ID, suggested, "key"), valid.keySet, "key") map valid (token(value(keyP) | GlobalString ^^^ ParsedGlobal) <~ token("::".id)) ?? Omitted } def resolveTask(task: ParsedAxis[AttributeKey[_]]): Option[AttributeKey[_]] = task match { case ParsedGlobal | Omitted => None case t: ParsedValue[AttributeKey[_]] @unchecked => Some(t.value) } def filterStrings(base: Parser[String], valid: Set[String], label: String): Parser[String] = base.filter(valid, Command.invalidValue(label, valid)) def extrasParser(knownKeys: Map[String, AttributeKey[_]], knownValues: IMap[AttributeKey, Set]): Parser[AttributeMap] = { val validKeys = knownKeys.filter { case (_, key) => knownValues get key exists (_.nonEmpty) } if (validKeys.isEmpty) failure("No valid extra keys.") else rep1sep(extraParser(validKeys, knownValues), spacedComma) map AttributeMap.apply } def extraParser(knownKeys: Map[String, AttributeKey[_]], knownValues: IMap[AttributeKey, Set]): Parser[AttributeEntry[_]] = { val keyp = knownIDParser(knownKeys, "Not a valid extra key") <~ token(':' ~ OptSpace) keyp flatMap { case key: AttributeKey[t] => val valueMap: Map[String, t] = knownValues(key).map(v => (v.toString, v)).toMap knownIDParser(valueMap, "extra value") map { value => AttributeEntry(key, value) } } } def knownIDParser[T](knownKeys: Map[String, T], label: String): Parser[T] = token(examplesStrict(ID, knownKeys.keys.toSet, label)) map knownKeys def knownPluginParser[T](knownPlugins: Map[String, T], label: String): Parser[T] = { val pluginLabelParser = rep1sep(ID, '.').map(_.mkString(".")) token(examplesStrict(pluginLabelParser, knownPlugins.keys.toSet, label)) map knownPlugins } def projectRef(index: KeyIndex, currentBuild: URI): Parser[ParsedAxis[ResolvedReference]] = { val global = token(GlobalString ~ '/') ^^^ ParsedGlobal val trailing = '/' !!! 
"Expected '/' (if selecting a project)" global | value(resolvedReference(index, currentBuild, trailing)) } def resolvedReference(index: KeyIndex, currentBuild: URI, trailing: Parser[_]): Parser[ResolvedReference] = { def projectID(uri: URI) = token(examplesStrict(ID, index projects uri, "project ID") <~ trailing) def projectRef(uri: URI) = projectID(uri) map { id => ProjectRef(uri, id) } val uris = index.buildURIs val resolvedURI = Uri(uris).map(uri => Scope.resolveBuild(currentBuild, uri)) val buildRef = token('{' ~> resolvedURI <~ '}').? buildRef flatMap { case None => projectRef(currentBuild) case Some(uri) => projectRef(uri) | token(trailing ^^^ BuildRef(uri)) } } def optProjectRef(index: KeyIndex, current: ProjectRef): Parser[ParsedAxis[ResolvedReference]] = projectRef(index, current.build) ?? Omitted def resolveProject(parsed: ParsedAxis[ResolvedReference], current: ProjectRef): Option[ResolvedReference] = parsed match { case Omitted => Some(current) case ParsedGlobal => None case pv: ParsedValue[rr] => Some(pv.value) } def actParser(s: State): Parser[() => State] = requireSession(s, actParser0(s)) private[this] def actParser0(state: State): Parser[() => State] = { val extracted = Project extract state import extracted.{ showKey, structure } import Aggregation.evaluatingParser actionParser.flatMap { action => val akp = aggregatedKeyParser(extracted) def evaluate(kvs: Seq[ScopedKey[_]]): Parser[() => State] = { val preparedPairs = anyKeyValues(structure, kvs) val showConfig = Aggregation.defaultShow(state, showTasks = action == ShowAction) evaluatingParser(state, structure, showConfig)(preparedPairs) map { evaluate => () => { val keyStrings = preparedPairs.map(pp => showKey(pp.key)).mkString(", ") state.log.debug("Evaluating tasks: " + keyStrings) evaluate() } } } action match { case SingleAction => akp flatMap evaluate case ShowAction | MultiAction => rep1sep(akp, token(Space)).flatMap(kvss => evaluate(kvss.flatten)) } } } private[this] final class ActAction private[this] final val ShowAction, MultiAction, SingleAction = new ActAction private[this] def actionParser: Parser[ActAction] = token( ((ShowCommand ^^^ ShowAction) | (MultiTaskCommand ^^^ MultiAction)) <~ Space ) ?? SingleAction @deprecated("No longer used.", "0.13.2") def showParser = token((ShowCommand ~ Space) ^^^ true) ?? 
false def scopedKeyParser(state: State): Parser[ScopedKey[_]] = scopedKeyParser(Project extract state) def scopedKeyParser(extracted: Extracted): Parser[ScopedKey[_]] = scopedKeyParser(extracted.structure, extracted.currentRef) def scopedKeyParser(structure: BuildStructure, currentRef: ProjectRef): Parser[ScopedKey[_]] = scopedKey(structure.index.keyIndex, currentRef, structure.extra.configurationsForAxis, structure.index.keyMap, structure.data) type KeysParser = Parser[Seq[ScopedKey[T]] forSome { type T }] def aggregatedKeyParser(state: State): KeysParser = aggregatedKeyParser(Project extract state) def aggregatedKeyParser(extracted: Extracted): KeysParser = aggregatedKeyParser(extracted.structure, extracted.currentRef) def aggregatedKeyParser(structure: BuildStructure, currentRef: ProjectRef): KeysParser = scopedKeyAggregated(currentRef, structure.extra.configurationsForAxis, structure) def keyValues[T](state: State)(keys: Seq[ScopedKey[T]]): Values[T] = keyValues(Project extract state)(keys) def keyValues[T](extracted: Extracted)(keys: Seq[ScopedKey[T]]): Values[T] = keyValues(extracted.structure)(keys) def keyValues[T](structure: BuildStructure)(keys: Seq[ScopedKey[T]]): Values[T] = keys.flatMap { key => getValue(structure.data, key.scope, key.key) map { value => KeyValue(key, value) } } private[this] def anyKeyValues(structure: BuildStructure, keys: Seq[ScopedKey[_]]): Seq[KeyValue[_]] = keys.flatMap { key => getValue(structure.data, key.scope, key.key) map { value => KeyValue(key, value) } } private[this] def getValue[T](data: Settings[Scope], scope: Scope, key: AttributeKey[T]): Option[T] = if (java.lang.Boolean.getBoolean("sbt.cli.nodelegation")) data.getDirect(scope, key) else data.get(scope, key) def requireSession[T](s: State, p: => Parser[T]): Parser[T] = if (s get sessionSettings isEmpty) failure("No project loaded") else p sealed trait ParsedAxis[+T] { final def isExplicit = this != Omitted } final object ParsedGlobal extends ParsedAxis[Nothing] final object Omitted extends ParsedAxis[Nothing] final class ParsedValue[T](val value: T) extends ParsedAxis[T] def value[T](t: Parser[T]): Parser[ParsedAxis[T]] = t map { v => new ParsedValue(v) } }
som-snytt/xsbt
main/src/main/scala/sbt/Act.scala
Scala
bsd-3-clause
15,503
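For readers skimming the Act.scala row above, here is a minimal standalone sketch of its ParsedAxis resolution pattern. It compiles against the plain Scala standard library only; the object name ParsedAxisDemo and the println demo are illustrative additions, not part of sbt.

sealed trait ParsedAxis[+T] { final def isExplicit: Boolean = this != Omitted }
case object ParsedGlobal extends ParsedAxis[Nothing]
case object Omitted extends ParsedAxis[Nothing]
final class ParsedValue[T](val value: T) extends ParsedAxis[T]

object ParsedAxisDemo {
  // Omitted falls back to the current value, ParsedGlobal clears the axis,
  // and an explicit value wins -- the same shape as resolveProject above.
  def resolve[T](parsed: ParsedAxis[T], current: T): Option[T] = parsed match {
    case Omitted            => Some(current)
    case ParsedGlobal       => None
    case pv: ParsedValue[t] => Some(pv.value)
  }

  def main(args: Array[String]): Unit = {
    println(resolve(Omitted, "currentProject"))                  // Some(currentProject)
    println(resolve(ParsedGlobal, "currentProject"))             // None
    println(resolve(new ParsedValue("otherProject"), "current")) // Some(otherProject)
  }
}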
package org.orbeon.oxf.fr import cats.syntax.option._ import io.circe.generic.auto._ import io.circe.syntax._ import org.orbeon.io.{CharsetNames, IOUtils} import org.orbeon.oxf.externalcontext.URLRewriter import org.orbeon.oxf.fr.Names.{FormInstance, FormResources, MetadataInstance} import org.orbeon.oxf.fr.library.FRComponentParamSupport import org.orbeon.oxf.fr.persistence.relational.Version.OrbeonFormDefinitionVersion import org.orbeon.oxf.http.HttpMethod import org.orbeon.oxf.http.HttpMethod.GET import org.orbeon.oxf.pipeline.api.PipelineContext import org.orbeon.oxf.processor.{ProcessorImpl, ProcessorOutput} import org.orbeon.oxf.util.CollectionUtils._ import org.orbeon.oxf.util.Logging._ import org.orbeon.oxf.util._ import org.orbeon.oxf.util.StringUtils._ import org.orbeon.oxf.xforms.analysis.model.{Instance, Submission} import org.orbeon.oxf.xforms.model.XFormsInstanceSupport import org.orbeon.oxf.xforms.processor.XFormsCompiler import org.orbeon.oxf.xml.XMLReceiver import org.orbeon.scaxon.SimplePath._ import org.orbeon.xforms.ManifestEntry import java.net.URI import java.util.zip.{ZipEntry, ZipOutputStream} import scala.util.{Failure, Success} class FormRunnerCompiler extends ProcessorImpl { private val Logger = LoggerFactory.createLogger(classOf[FormRunnerCompiler]) private implicit val indentedLogger: IndentedLogger = new IndentedLogger(Logger) override def createOutput(outputName: String): ProcessorOutput = addOutput( outputName, new ProcessorOutputImpl(FormRunnerCompiler.this, outputName) { def readImpl(pipelineContext: PipelineContext, xmlReceiver: XMLReceiver): Unit = { implicit val rcv = xmlReceiver implicit val ec = CoreCrossPlatformSupport.externalContext implicit val coreCrossPlatformSupport = CoreCrossPlatformSupport val params = readCacheInputAsOrbeonDom(pipelineContext, "instance") val formDocument = readCacheInputAsOrbeonDom(pipelineContext, "data") val appName = params.getRootElement.element("app").getText val formName = params.getRootElement.element("form").getText val formVersion = params.getRootElement.element("form-version").getText.trimAllToOpt.getOrElse("1") val (jsonString, staticState) = XFormsCompiler.compile(formDocument) val cacheableResourcesToIncludeManifests = { // Find all the languages used and return a comma-separated list of them val usedLangsOpt = staticState.topLevelPart.findControlAnalysis(FormResources) collect { case instance: Instance => instance.inlineRootElemOpt.get.elementIterator() map (_.attributeValue(XMLNames.XMLLangQName)) mkString "," } val urlOptIt = staticState.topLevelPart.iterateControls collect { case instance: Instance if instance.readonly && instance.cache && instance.dependencyURL.isDefined => instance.dependencyURL case submission: Submission if submission.avtXxfReadonlyOpt.contains("true") && submission.avtXxfCacheOpt.contains("true") && submission.avtMethod.exists(s => HttpMethod.withNameInsensitiveOption(s).contains(HttpMethod.GET)) => if (submission.avtActionOrResource == "/fr/service/i18n/fr-resources/{$app}/{$form}") PathUtils.recombineQuery("/fr/service/i18n/fr-resources/orbeon/offline", usedLangsOpt.map("langs" ->)).some else submission.avtActionOrResource.some } val entriesIt = urlOptIt.flatten map { uri => ManifestEntry(new URI(uri), ContentTypes.XmlContentType.some) } entriesIt.toList } val (orbeonLibraryVersionOpt, appLibraryVersionOpt) = { val libraryVersionsOpt = staticState.topLevelPart.findControlAnalysis(MetadataInstance) collect { case instance: Instance => instance.constantContent } collect { case 
Some(metadataRoot) => FRComponentParamSupport.findLibraryVersions(metadataRoot.rootElement) } libraryVersionsOpt match { case Some(versions) => versions case None => (None, None) } } // This also looks for attachments in datasets. Usually, dataset instances are not available // in the form definition. However, it is possible to manually create a form definition with // an embedded dataset. We might even provide facilities to do this in the future. And if that // is the case, we allow datasets referring to attachments, in which case we collect those as // well in addition to attachments referred to by the main form data. val formDataAndDatasetInstanceAttachmentManifests = { val manifestsIt = staticState.topLevelPart.iterateModels flatMap (_.instances.valuesIterator) collect { case i if i.staticId == FormInstance => i case i if i.staticId.startsWith("fr-dataset-") => i } map { instance => val basePaths = List( FormRunner.createFormDataBasePath(AppForm.FormBuilder.app, AppForm.FormBuilder.form, isDraft = false, documentIdOrEmpty = ""), FormRunner.createFormDefinitionBasePath(appName, formName), FormRunner.createFormDefinitionBasePath(appName, Names.LibraryFormName), FormRunner.createFormDefinitionBasePath(Names.GlobalLibraryAppName, Names.LibraryFormName) ) val dataDoc = XFormsInstanceSupport.extractDocument( instance.inlineRootElemOpt.get, // FIXME: `get` instance.excludeResultPrefixes, instance.readonly, instance.exposeXPathTypes, removeInstanceData = false ) FormRunner.collectAttachments( data = dataDoc, fromBasePaths = basePaths, toBasePath = "/dummy", // TODO: `Option`? forceAttachments = true ) map { case FormRunner.AttachmentWithHolder(fromPath, _, holder) => ManifestEntry(new URI(fromPath), holder.attValueOpt("mediatype").flatMap(_.trimAllToOpt)) } } manifestsIt.flatten.toList } // NOTE: Some resources could be the same but with different of missing `contentType`. We must ensure that // the zip paths are unique, so we keep distinct resources based on that criteria. 
val distinctResources = (formDataAndDatasetInstanceAttachmentManifests ::: cacheableResourcesToIncludeManifests).keepDistinctBy(_.zipPath) val useZipFormat = ec.getRequest.getFirstParamAsString("format").contains("zip") if (useZipFormat) { val chos = new ContentHandlerOutputStream(xmlReceiver, true) val zos = new ZipOutputStream(chos) chos.setContentType("application/zip") val jsonFormPath = s"_forms/$appName/$formName/$formVersion/form/form.json" // Generate and write manifest locally { val manifest = ( ManifestEntry(jsonFormPath, jsonFormPath, ContentTypes.XmlContentType.some) :: distinctResources ).asJson.noSpaces val entry = new ZipEntry(ManifestEntry.JsonFilename) zos.putNextEntry(entry) zos.write(manifest.getBytes(CharsetNames.Utf8)) } // Write form JSON locally { val entry = new ZipEntry(jsonFormPath) zos.putNextEntry(entry) zos.write(jsonString.getBytes(CharsetNames.Utf8)) } def connect(path: String): ConnectionResult = { val resolvedUri = new URI( URLRewriterUtils.rewriteServiceURL( ec.getRequest, path, URLRewriter.REWRITE_MODE_ABSOLUTE ) ) import FormRunnerPersistence._ val attachmentVersionOpt = path match { case FormPath(_, Names.GlobalLibraryAppName, Names.LibraryFormName, _) => orbeonLibraryVersionOpt case FormPath(_, _, Names.LibraryFormName, _) => appLibraryVersionOpt case DataPath(_, AppForm.FormBuilder.app, AppForm.FormBuilder.form, "data", _, _) => formVersion.toInt.some case _ => None } val allHeaders = Connection.buildConnectionHeadersCapitalizedIfNeeded( url = resolvedUri, hasCredentials = false, customHeaders = Map(attachmentVersionOpt.toList map (v => OrbeonFormDefinitionVersion -> List(v.toString)): _*), headersToForward = Set.empty, cookiesToForward = Connection.cookiesToForwardFromProperty, getHeader = Connection.getHeaderFromRequest(ec.getRequest) ) Connection.connectNow( method = GET, url = resolvedUri, credentials = None, content = None, headers = allHeaders, loadState = true, saveState = true, logBody = false ) } // Write static attachments and other resources distinctResources.iterator foreach { manifestEntry => val entryResult = ConnectionResult.tryWithSuccessConnection(connect(manifestEntry.uri), closeOnSuccess = true) { is => val entry = new ZipEntry(manifestEntry.zipPath) zos.putNextEntry(entry) IOUtils.copyStreamAndClose(is, zos, doCloseOut = false) } entryResult match { case Success(_) => debug(s"success retrieving attachment when compiling form `${manifestEntry.uri}`") case Failure(_) => warn (s"failure retrieving attachment when compiling form `${manifestEntry.uri}`") } } zos.close() } else { XFormsCompiler.outputJson(jsonString) } } } ) }
orbeon/orbeon-forms
form-runner/jvm/src/main/scala/org/orbeon/oxf/fr/FormRunnerCompiler.scala
Scala
lgpl-2.1
11,098
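The FormRunnerCompiler row above writes a manifest entry first and then the individual resources into a ZipOutputStream. Below is a hedged, self-contained sketch of that "manifest first, then content entries" layout using only java.util.zip; the entry names and the ad-hoc manifest string are assumptions for illustration, not Orbeon's ManifestEntry JSON format.

import java.io.ByteArrayOutputStream
import java.nio.charset.StandardCharsets
import java.util.zip.{ZipEntry, ZipOutputStream}

object ZipLayoutSketch {
  // Build a zip in memory: a manifest entry first, then one entry per resource.
  def buildZip(entries: Seq[(String, String)]): Array[Byte] = {
    val bos = new ByteArrayOutputStream()
    val zos = new ZipOutputStream(bos)
    // A crude manifest: just the list of zip paths that follow.
    val manifest = entries.map(_._1).mkString("[\"", "\",\"", "\"]")
    zos.putNextEntry(new ZipEntry("manifest.json"))
    zos.write(manifest.getBytes(StandardCharsets.UTF_8))
    // The actual content entries.
    entries.foreach { case (path, content) =>
      zos.putNextEntry(new ZipEntry(path))
      zos.write(content.getBytes(StandardCharsets.UTF_8))
    }
    zos.close()
    bos.toByteArray
  }

  def main(args: Array[String]): Unit = {
    val bytes = buildZip(Seq("_forms/app/form/1/form/form.json" -> "{}"))
    println(s"zip size: ${bytes.length} bytes")
  }
}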
/*********************************************************************** * Copyright (c) 2013-2016 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. *************************************************************************/ package org.locationtech.geomesa.accumulo.data.tables import java.nio.charset.StandardCharsets import com.google.common.collect.{ImmutableSet, ImmutableSortedSet} import com.google.common.primitives.{Bytes, Longs} import com.vividsolutions.jts.geom.{Geometry, GeometryCollection, LineString, Point} import org.apache.accumulo.core.client.admin.TableOperations import org.apache.accumulo.core.conf.Property import org.apache.accumulo.core.data.{Mutation, Value} import org.apache.hadoop.io.Text import org.locationtech.geomesa.accumulo.data.AccumuloFeatureWriter.{FeatureToMutations, FeatureToWrite} import org.locationtech.geomesa.accumulo.data._ import org.locationtech.geomesa.curve.Z2SFC import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType import org.locationtech.geomesa.utils.index.VisibilityLevel import org.locationtech.sfcurve.zorder.Z2 import org.locationtech.sfcurve.zorder.Z3.ZPrefix import org.opengis.feature.simple.SimpleFeatureType object Z2Table extends GeoMesaTable { val FULL_CF = new Text("F") val BIN_CF = new Text("B") val EMPTY_BYTES = Array.empty[Byte] val EMPTY_VALUE = new Value(EMPTY_BYTES) val NUM_SPLITS = 4 // can't be more than Byte.MaxValue (127) val SPLIT_ARRAYS = (0 until NUM_SPLITS).map(_.toByte).toArray.map(Array(_)).toSeq // the bytes of z we keep for complex geoms // 3 bytes is 22 bits of geometry (not including the first 2 bits which aren't used) // roughly equivalent to 4 digits of geohash (32^4 == 2^20) and ~20km resolution val GEOM_Z_NUM_BYTES = 3 // mask for zeroing the last (8 - GEOM_Z_NUM_BYTES) bytes val GEOM_Z_MASK: Long = Long.MaxValue << (64 - 8 * GEOM_Z_NUM_BYTES) override def supports(sft: SimpleFeatureType): Boolean = sft.getGeometryDescriptor != null && sft.getSchemaVersion > 7 override val suffix: String = "z2" override def writer(sft: SimpleFeatureType): FeatureToMutations = { val sharing = sharingPrefix(sft) val getRowKeys: (FeatureToWrite) => Seq[Array[Byte]] = if (sft.isPoints) getPointRowKey(sharing) else getGeomRowKeys(sharing) sft.getVisibilityLevel match { case VisibilityLevel.Feature => (fw: FeatureToWrite) => { val rows = getRowKeys(fw) // store the duplication factor in the column qualifier for later use val cq = if (rows.length > 1) new Text(Integer.toHexString(rows.length)) else EMPTY_TEXT rows.map { row => val mutation = new Mutation(row) mutation.put(FULL_CF, cq, fw.columnVisibility, fw.dataValue) fw.binValue.foreach(v => mutation.put(BIN_CF, cq, fw.columnVisibility, v)) mutation } } case VisibilityLevel.Attribute => (fw: FeatureToWrite) => { val rows = getRowKeys(fw) // TODO GEOMESA-1254 duplication factor, bin values rows.map { row => val mutation = new Mutation(row) fw.perAttributeValues.foreach(key => mutation.put(key.cf, key.cq, key.vis, key.value)) mutation } } } } override def remover(sft: SimpleFeatureType): FeatureToMutations = { val sharing = sharingPrefix(sft) val getRowKeys: (FeatureToWrite) => Seq[Array[Byte]] = if (sft.isPoints) getPointRowKey(sharing) else getGeomRowKeys(sharing) sft.getVisibilityLevel match { case 
VisibilityLevel.Feature => (fw: FeatureToWrite) => { val rows = getRowKeys(fw) val cq = if (rows.length > 1) new Text(Integer.toHexString(rows.length)) else EMPTY_TEXT rows.map { row => val mutation = new Mutation(row) mutation.putDelete(BIN_CF, cq, fw.columnVisibility) mutation.putDelete(FULL_CF, cq, fw.columnVisibility) mutation } } case VisibilityLevel.Attribute => (fw: FeatureToWrite) => { val rows = getRowKeys(fw) // TODO GEOMESA-1254 duplication factor, bin values rows.map { row => val mutation = new Mutation(row) fw.perAttributeValues.foreach(key => mutation.putDelete(key.cf, key.cq, key.vis)) mutation } } } } override def getIdFromRow(sft: SimpleFeatureType): (Array[Byte]) => String = { val offset = getIdRowOffset(sft) (row: Array[Byte]) => new String(row, offset, row.length - offset, StandardCharsets.UTF_8) } // split(1 byte), z value (8 bytes), id (n bytes) private def getPointRowKey(tableSharing: Array[Byte])(ftw: FeatureToWrite): Seq[Array[Byte]] = { import org.locationtech.geomesa.utils.geotools.Conversions.RichSimpleFeature val split = SPLIT_ARRAYS(ftw.idHash % NUM_SPLITS) val id = ftw.feature.getID.getBytes(StandardCharsets.UTF_8) val pt = ftw.feature.point val z = Z2SFC.index(pt.getX, pt.getY).z Seq(Bytes.concat(tableSharing, split, Longs.toByteArray(z), id)) } // split(1 byte), z value (3 bytes), id (n bytes) private def getGeomRowKeys(tableSharing: Array[Byte])(ftw: FeatureToWrite): Seq[Array[Byte]] = { val split = SPLIT_ARRAYS(ftw.idHash % NUM_SPLITS) val geom = ftw.feature.getDefaultGeometry.asInstanceOf[Geometry] val zs = zBox(geom) val id = ftw.feature.getID.getBytes(StandardCharsets.UTF_8) zs.map(z => Bytes.concat(tableSharing, split, Longs.toByteArray(z).take(GEOM_Z_NUM_BYTES), id)).toSeq } // gets a sequence of z values that cover the geometry private def zBox(geom: Geometry): Set[Long] = geom match { case g: Point => Set(Z2SFC.index(g.getX, g.getY).z) case g: LineString => // we flatMap bounds for each line segment so we cover a smaller area (0 until g.getNumPoints).map(g.getPointN).sliding(2).flatMap { case Seq(one, two) => val (xmin, xmax) = minMax(one.getX, two.getX) val (ymin, ymax) = minMax(one.getY, two.getY) zBox(xmin, ymin, xmax, ymax) }.toSet case g: GeometryCollection => (0 until g.getNumGeometries).toSet.map(g.getGeometryN).flatMap(zBox) case g: Geometry => val env = g.getEnvelopeInternal zBox(env.getMinX, env.getMinY, env.getMaxX, env.getMaxY) } // gets a sequence of z values that cover the bounding box private def zBox(xmin: Double, ymin: Double, xmax: Double, ymax: Double): Set[Long] = { val zmin = Z2SFC.index(xmin, ymin).z val zmax = Z2SFC.index(xmax, ymax).z getZPrefixes(zmin, zmax) } private def minMax(a: Double, b: Double): (Double, Double) = if (a < b) (a, b) else (b, a) // gets z values that cover the interval private def getZPrefixes(zmin: Long, zmax: Long): Set[Long] = { val in = scala.collection.mutable.Queue((zmin, zmax)) val out = scala.collection.mutable.HashSet.empty[Long] while (in.nonEmpty) { val (min, max) = in.dequeue() val ZPrefix(zprefix, zbits) = Z2.longestCommonPrefix(min, max) if (zbits < GEOM_Z_NUM_BYTES * 8) { // divide the range into two smaller ones using tropf litmax/bigmin val (litmax, bigmin) = Z2.zdivide(Z2((min + max) / 2), Z2(min), Z2(max)) in.enqueue((min, litmax.z), (bigmin.z, max)) } else { // we've found a prefix that contains our z range // truncate down to the bytes we use so we don't get dupes out.add(zprefix & GEOM_Z_MASK) } } out.toSet } private def sharingPrefix(sft: SimpleFeatureType): Array[Byte] = { val 
sharing = if (sft.isTableSharing) { sft.getTableSharingPrefix.getBytes(StandardCharsets.UTF_8) } else { Array.empty[Byte] } require(sharing.length < 2, s"Expecting only a single byte for table sharing, got ${sft.getTableSharingPrefix}") sharing } // gets the offset into the row for the id bytes def getIdRowOffset(sft: SimpleFeatureType): Int = { val length = if (sft.isPoints) 8 else GEOM_Z_NUM_BYTES val prefix = if (sft.isTableSharing) 2 else 1 // shard + table sharing prefix + length } override def configureTable(sft: SimpleFeatureType, table: String, tableOps: TableOperations): Unit = { import scala.collection.JavaConversions._ tableOps.setProperty(table, Property.TABLE_BLOCKCACHE_ENABLED.getKey, "true") val localityGroups = Seq(BIN_CF, FULL_CF).map(cf => (cf.toString, ImmutableSet.of(cf))).toMap tableOps.setLocalityGroups(table, localityGroups) // drop first split, otherwise we get an empty tablet val splits = if (sft.isTableSharing) { val ts = sft.getTableSharingPrefix.getBytes(StandardCharsets.UTF_8) SPLIT_ARRAYS.drop(1).map(s => new Text(ts ++ s)).toSet } else { SPLIT_ARRAYS.drop(1).map(new Text(_)).toSet } val splitsToAdd = splits -- tableOps.listSplits(table).toSet if (splitsToAdd.nonEmpty) { // noinspection RedundantCollectionConversion tableOps.addSplits(table, ImmutableSortedSet.copyOf(splitsToAdd.toIterable)) } } }
mdzimmerman/geomesa
geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/data/tables/Z2Table.scala
Scala
apache-2.0
9,372
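Z2Table above derives Accumulo row keys from z-order (Morton) values produced by Z2SFC. As a rough standalone illustration of what a 2D z value is, the sketch below interleaves the low 16 bits of two integer grid coordinates; the real library also maps lon/lat onto an integer grid and supports prefix splitting via Z2.longestCommonPrefix and Z2.zdivide, which this toy does not attempt.

object MortonSketch {
  // Interleave the low 16 bits of x and y into one z value
  // (bit i of x goes to bit 2i, bit i of y goes to bit 2i+1).
  def interleave(x: Int, y: Int): Long = {
    def spread(v: Int): Long = {
      var r = v.toLong & 0xFFFFL
      r = (r | (r << 8)) & 0x00FF00FFL
      r = (r | (r << 4)) & 0x0F0F0F0FL
      r = (r | (r << 2)) & 0x33333333L
      r = (r | (r << 1)) & 0x55555555L
      r
    }
    spread(x) | (spread(y) << 1)
  }

  def main(args: Array[String]): Unit = {
    // Nearby cells tend to share leading bits, which is what the prefix-based
    // row ranges in Z2Table rely on.
    println(interleave(3, 5).toBinaryString) // 100111
    println(interleave(3, 6).toBinaryString) // 101101
  }
}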
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.rpc import java.io.IOException import java.net.{BindException, InetAddress} import java.util.{List => JList, Map => JMap, Objects, Random, UUID} import java.util.concurrent.atomic.AtomicBoolean import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer import scala.concurrent.Future import scala.concurrent.duration.Duration import scala.util.{Failure, Success, Try} import org.apache.hadoop.conf.Configuration import org.apache.hadoop.mapred.JobConf import org.apache.hadoop.mapreduce.Job import org.apache.spark.{SecurityManager, SerializableWritable, SparkConf} import org.apache.spark.rpc.netty.NettyRpcEnvFactory import org.apache.spark.search._ import org.apache.spark.util.ThreadUtils import org.apache.carbondata.common.annotations.InterfaceAudience import org.apache.carbondata.common.logging.LogServiceFactory import org.apache.carbondata.core.constants.CarbonCommonConstants import org.apache.carbondata.core.datastore.block.Distributable import org.apache.carbondata.core.datastore.row.CarbonRow import org.apache.carbondata.core.metadata.schema.table.CarbonTable import org.apache.carbondata.core.scan.expression.Expression import org.apache.carbondata.core.util.CarbonProperties import org.apache.carbondata.hadoop.CarbonMultiBlockSplit import org.apache.carbondata.hadoop.api.CarbonInputFormat import org.apache.carbondata.hadoop.util.CarbonInputFormatUtil import org.apache.carbondata.processing.util.CarbonLoaderUtil import org.apache.carbondata.store.worker.Status /** * Master of CarbonSearch. * It provides a Registry service for worker to register. * And it provides search API to fire RPC call to workers. 
*/ @InterfaceAudience.Internal class Master(sparkConf: SparkConf) { private val LOG = LogServiceFactory.getLogService(this.getClass.getCanonicalName) // worker host address map to EndpointRef private val random = new Random private var rpcEnv: RpcEnv = _ private val scheduler: Scheduler = new Scheduler /** start service and listen on port passed in constructor */ def startService(): Unit = { if (rpcEnv == null) { LOG.info("Start search mode master thread") val isStarted: AtomicBoolean = new AtomicBoolean(false) new Thread(new Runnable { override def run(): Unit = { val hostAddress = InetAddress.getLocalHost.getHostAddress var port = CarbonProperties.getSearchMasterPort var exception: BindException = null var numTry = 100 // we will try to create service at worse case 100 times do { try { LOG.info(s"starting registry-service on $hostAddress:$port") val config = RpcEnvConfig( sparkConf, "registry-service", hostAddress, "", port, new SecurityManager(sparkConf), clientMode = false) rpcEnv = new NettyRpcEnvFactory().create(config) numTry = 0 } catch { case e: BindException => // port is occupied, increase the port number and try again exception = e LOG.error(s"start registry-service failed: ${e.getMessage}") port = port + 1 numTry = numTry - 1 } } while (numTry > 0) if (rpcEnv == null) { // we have tried many times, but still failed to find an available port throw exception } val registryEndpoint: RpcEndpoint = new Registry(rpcEnv, Master.this) rpcEnv.setupEndpoint("registry-service", registryEndpoint) if (isStarted.compareAndSet(false, false)) { synchronized { isStarted.compareAndSet(false, true) } } LOG.info("registry-service started") rpcEnv.awaitTermination() } }).start() var count = 0 val countThreshold = 5000 while (isStarted.compareAndSet(false, false) && count < countThreshold) { LOG.info(s"Waiting search mode master to start, retrying $count times") Thread.sleep(10) count = count + 1; } if (count >= countThreshold) { LOG.error(s"Search mode try $countThreshold times to start master but failed") throw new RuntimeException( s"Search mode try $countThreshold times to start master but failed") } else { LOG.info("Search mode master started") } } else { LOG.info("Search mode master has already started") } } def stopService(): Unit = { if (rpcEnv != null) { rpcEnv.shutdown() rpcEnv = null } } def stopAllWorkers(): Unit = { val futures = scheduler.getAllWorkers.toSeq.map { case (address, schedulable) => (address, schedulable.ref.ask[ShutdownResponse](ShutdownRequest("user"))) } futures.foreach { case (address, future) => ThreadUtils.awaitResult(future, Duration.apply("10s")) future.value match { case Some(result) => result match { case Success(response) => scheduler.removeWorker(address) case Failure(throwable) => throw new IOException(throwable.getMessage) } case None => throw new ExecutionTimeoutException } } } /** A new searcher is trying to register, add it to the map and connect to this searcher */ def addWorker(request: RegisterWorkerRequest): RegisterWorkerResponse = { LOG.info(s"Receive Register request from worker ${request.hostAddress}:${request.port} " + s"with ${request.cores} cores") val workerId = UUID.randomUUID().toString val workerAddress = request.hostAddress val workerPort = request.port LOG.info(s"connecting to worker ${request.hostAddress}:${request.port}, workerId $workerId") val endPointRef = rpcEnv.setupEndpointRef(RpcAddress(workerAddress, workerPort), "search-service") scheduler.addWorker(workerAddress, new Schedulable(workerId, workerAddress, workerPort, 
request.cores, endPointRef)) LOG.info(s"worker ${request.hostAddress}:${request.port} registered") RegisterWorkerResponse(workerId) } /** * Execute search by firing RPC call to worker, return the result rows * @param table table to search * @param columns projection column names * @param filter filter expression * @param globalLimit max number of rows required in Master * @param localLimit max number of rows required in Worker * @return */ def search(table: CarbonTable, columns: Array[String], filter: Expression, globalLimit: Long, localLimit: Long): Array[CarbonRow] = { Objects.requireNonNull(table) Objects.requireNonNull(columns) if (globalLimit < 0 || localLimit < 0) { throw new IllegalArgumentException("limit should be positive") } val queryId = random.nextInt var rowCount = 0 val output = new ArrayBuffer[CarbonRow] def onSuccess(result: SearchResult): Unit = { // in case of RPC success, collect all rows in response message if (result.queryId != queryId) { throw new IOException( s"queryId in response does not match request: ${result.queryId} != $queryId") } if (result.status != Status.SUCCESS.ordinal()) { throw new IOException(s"failure in worker: ${ result.message }") } val itor = result.rows.iterator while (itor.hasNext && rowCount < globalLimit) { output += new CarbonRow(itor.next()) rowCount = rowCount + 1 } LOG.info(s"[SearchId:$queryId] accumulated result size $rowCount") } def onFaiure(e: Throwable) = throw new IOException(s"exception in worker: ${ e.getMessage }") def onTimedout() = throw new ExecutionTimeoutException() // prune data and get a mapping of worker hostname to list of blocks, // then add these blocks to the SearchRequest and fire the RPC call val nodeBlockMapping: JMap[String, JList[Distributable]] = pruneBlock(table, columns, filter) val tuple = nodeBlockMapping.asScala.map { case (splitAddress, blocks) => // Build a SearchRequest val split = new SerializableWritable[CarbonMultiBlockSplit]( new CarbonMultiBlockSplit(blocks, splitAddress)) val request = SearchRequest(queryId, split, table.getTableInfo, columns, filter, localLimit) // Find an Endpoind and send the request to it // This RPC is non-blocking so that we do not need to wait before send to next worker scheduler.sendRequestAsync[SearchResult](splitAddress, request) } // loop to get the result of each Worker tuple.foreach { case (worker: Schedulable, future: Future[SearchResult]) => // if we have enough data already, we do not need to collect more result if (rowCount < globalLimit) { // wait for worker val timeout = CarbonProperties .getInstance() .getProperty(CarbonCommonConstants.CARBON_SEARCH_QUERY_TIMEOUT, CarbonCommonConstants.CARBON_SEARCH_QUERY_TIMEOUT_DEFAULT) ThreadUtils.awaitResult(future, Duration.apply(timeout)) LOG.info(s"[SearchId:$queryId] receive search response from worker " + s"${worker.address}:${worker.port}") try { future.value match { case Some(response: Try[SearchResult]) => response match { case Success(result) => onSuccess(result) case Failure(e) => onFaiure(e) } case None => onTimedout() } } finally { worker.workload.decrementAndGet() } } } output.toArray } /** * Prune data by using CarbonInputFormat.getSplit * Return a mapping of host address to list of block */ private def pruneBlock( table: CarbonTable, columns: Array[String], filter: Expression): JMap[String, JList[Distributable]] = { val jobConf = new JobConf(new Configuration) val job = new Job(jobConf) val format = CarbonInputFormatUtil.createCarbonTableInputFormat( job, table, columns, filter, null, null) // We will do FG 
pruning in reader side, so don't do it here CarbonInputFormat.setFgDataMapPruning(job.getConfiguration, false) val splits = format.getSplits(job) val distributables = splits.asScala.map { split => split.asInstanceOf[Distributable] } CarbonLoaderUtil.nodeBlockMapping( distributables.asJava, -1, getWorkers.asJava, CarbonLoaderUtil.BlockAssignmentStrategy.BLOCK_NUM_FIRST, null) } /** return hostname of all workers */ def getWorkers: Seq[String] = scheduler.getAllWorkers.map(_._1).toSeq } // Exception if execution timed out in search mode class ExecutionTimeoutException extends RuntimeException
jatin9896/incubator-carbondata
store/search/src/main/scala/org/apache/spark/rpc/Master.scala
Scala
apache-2.0
11,610
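Master.startService above retries RpcEnv creation on BindException with an incremented port. The sketch below shows the same retry-on-bind-failure loop against a plain java.net.ServerSocket so it can run without Spark; the object name and port numbers are made up for illustration.

import java.net.{BindException, ServerSocket}

object BindRetrySketch {
  def bindWithRetry(startPort: Int, maxTries: Int): ServerSocket = {
    var port = startPort
    var triesLeft = maxTries
    var last: BindException = null
    var socket: ServerSocket = null
    while (socket == null && triesLeft > 0) {
      try {
        socket = new ServerSocket(port)
      } catch {
        case e: BindException =>
          last = e        // remember the failure and move to the next port
          port += 1
          triesLeft -= 1
      }
    }
    if (socket == null) throw last else socket
  }

  def main(args: Array[String]): Unit = {
    val s = bindWithRetry(25600, 100)
    println(s"bound on port ${s.getLocalPort}")
    s.close()
  }
}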
package genericFunctionRDD /** * Created by merlin on 2/9/16. */ import scala.reflect.ClassTag abstract class SpatialRDDPartition [K, V] extends Serializable { protected implicit def kTag: ClassTag[K] protected implicit def vTag: ClassTag[V] def size: Long def isDefined(k: K): Boolean def iterator: Iterator[(K, V)] /** * Range search: find the elements whose value under the given function falls between begin and end, and return them as an iterator * that can be consumed by other RDDs. */ def rangefilter(begin:Double,end:Double,f: (K, V) => Double):Iterator[(K,V)] /** * Top-k search: return the k elements with the smallest values of the predefined function. */ def topMin(k:Int, f: (K, V) => Double):Iterator[(K,V)] /** * Top-k search: return the k elements with the largest values of the predefined function, as an iterator * that can be consumed by other RDDs. */ def topMax(k:Int,f: (K, V) => Double):Iterator[(K,V)] def getSmallest(UDF:String, DERUDF:Array[String]):Float }
merlintang/genericFunctionOverSpark
src/main/scala/genericFunctionRDD/SpatialRDDPartition.scala
Scala
apache-2.0
1,012
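SpatialRDDPartition above only declares the contract. The hypothetical in-memory helpers below illustrate what rangefilter and topMin are expected to do over an iterator of key/value pairs; they are not the project's actual partition implementation.

object PartitionContractSketch {
  // Filter to the elements whose score f(k, v) falls inside [begin, end].
  def rangefilter[K, V](data: Iterator[(K, V)], begin: Double, end: Double,
                        f: (K, V) => Double): Iterator[(K, V)] =
    data.filter { case (k, v) => val s = f(k, v); s >= begin && s <= end }

  // Keep the k elements with the smallest scores.
  def topMin[K, V](data: Iterator[(K, V)], k: Int, f: (K, V) => Double): Iterator[(K, V)] =
    data.toSeq.sortBy { case (key, value) => f(key, value) }.take(k).iterator

  def main(args: Array[String]): Unit = {
    def pts = Iterator("a" -> 1.0, "b" -> 4.0, "c" -> 2.5) // fresh iterator per use
    println(topMin(pts, 2, (k: String, v: Double) => v).toList)             // List((a,1.0), (c,2.5))
    println(rangefilter(pts, 2.0, 5.0, (k: String, v: Double) => v).toList) // List((b,4.0), (c,2.5))
  }
}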
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions import java.util.{Objects, UUID} import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute import org.apache.spark.sql.catalyst.expressions.codegen._ import org.apache.spark.sql.catalyst.plans.logical.EventTimeWatermark import org.apache.spark.sql.catalyst.trees.TreePattern import org.apache.spark.sql.catalyst.trees.TreePattern._ import org.apache.spark.sql.catalyst.util.{quoteIfNeeded, METADATA_COL_ATTR_KEY} import org.apache.spark.sql.errors.QueryExecutionErrors import org.apache.spark.sql.types._ import org.apache.spark.util.collection.BitSet import org.apache.spark.util.collection.ImmutableBitSet object NamedExpression { private val curId = new java.util.concurrent.atomic.AtomicLong() private[expressions] val jvmId = UUID.randomUUID() def newExprId: ExprId = ExprId(curId.getAndIncrement(), jvmId) def unapply(expr: NamedExpression): Option[(String, DataType)] = Some((expr.name, expr.dataType)) } /** * A globally unique id for a given named expression. * Used to identify which attribute output by a relation is being * referenced in a subsequent computation. * * The `id` field is unique within a given JVM, while the `uuid` is used to uniquely identify JVMs. */ case class ExprId(id: Long, jvmId: UUID) { override def equals(other: Any): Boolean = other match { case ExprId(id, jvmId) => this.id == id && this.jvmId == jvmId case _ => false } override def hashCode(): Int = id.hashCode() } object ExprId { def apply(id: Long): ExprId = ExprId(id, NamedExpression.jvmId) } /** * An [[Expression]] that is named. */ trait NamedExpression extends Expression { /** We should never fold named expressions in order to not remove the alias. */ override def foldable: Boolean = false def name: String def exprId: ExprId /** * Returns a dot separated fully qualified name for this attribute. Given that there can be * multiple qualifiers, it is possible that there are other possible way to refer to this * attribute. */ def qualifiedName: String = (qualifier :+ name).mkString(".") /** * Optional qualifier for the expression. * Qualifier can also contain the fully qualified information, for e.g, Sequence of string * containing the database and the table name * * For now, since we do not allow using original table name to qualify a column name once the * table is aliased, this can only be: * * 1. Empty Seq: when an attribute doesn't have a qualifier, * e.g. top level attributes aliased in the SELECT clause, or column from a LocalRelation. * 2. Seq with a Single element: either the table name or the alias name of the table. * 3. Seq with 2 elements: database name and table name * 4. 
Seq with 3 elements: catalog name, database name and table name */ def qualifier: Seq[String] def toAttribute: Attribute /** Returns the metadata when an expression is a reference to another expression with metadata. */ def metadata: Metadata = Metadata.empty /** Returns a copy of this expression with a new `exprId`. */ def newInstance(): NamedExpression } abstract class Attribute extends LeafExpression with NamedExpression with NullIntolerant { @transient override lazy val references: AttributeSet = AttributeSet(this) def withNullability(newNullability: Boolean): Attribute def withQualifier(newQualifier: Seq[String]): Attribute def withName(newName: String): Attribute def withMetadata(newMetadata: Metadata): Attribute def withExprId(newExprId: ExprId): Attribute def withDataType(newType: DataType): Attribute override def toAttribute: Attribute = this def newInstance(): Attribute } /** * Used to assign a new name to a computation. * For example the SQL expression "1 + 1 AS a" could be represented as follows: * Alias(Add(Literal(1), Literal(1)), "a")() * * Note that exprId and qualifiers are in a separate parameter list because * we only pattern match on child and name. * * Note that when creating a new Alias, all the [[AttributeReference]] that refer to * the original alias should be updated to the new one. * * @param child The computation being performed * @param name The name to be associated with the result of computing [[child]]. * @param exprId A globally unique id used to check if an [[AttributeReference]] refers to this * alias. Auto-assigned if left blank. * @param qualifier An optional Seq of string that can be used to refer to this attribute in a * fully qualified way. Consider the examples tableName.name, subQueryAlias.name. * tableName and subQueryAlias are possible qualifiers. * @param explicitMetadata Explicit metadata associated with this alias that overwrites child's. * @param nonInheritableMetadataKeys Keys of metadata entries that are supposed to be removed when * inheriting the metadata from the child. */ case class Alias(child: Expression, name: String)( val exprId: ExprId = NamedExpression.newExprId, val qualifier: Seq[String] = Seq.empty, val explicitMetadata: Option[Metadata] = None, val nonInheritableMetadataKeys: Seq[String] = Seq.empty) extends UnaryExpression with NamedExpression { final override val nodePatterns: Seq[TreePattern] = Seq(ALIAS) // Alias(Generator, xx) need to be transformed into Generate(generator, ...) override lazy val resolved = childrenResolved && checkInputDataTypes().isSuccess && !child.isInstanceOf[Generator] override def eval(input: InternalRow): Any = child.eval(input) /** Just a simple passthrough for code generation. 
*/ override def genCode(ctx: CodegenContext): ExprCode = child.genCode(ctx) override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { throw QueryExecutionErrors.doGenCodeOfAliasShouldNotBeCalledError } override def dataType: DataType = child.dataType override def nullable: Boolean = child.nullable override def metadata: Metadata = { explicitMetadata.getOrElse { child match { case named: NamedExpression => val builder = new MetadataBuilder().withMetadata(named.metadata) nonInheritableMetadataKeys.foreach(builder.remove) builder.build() case _ => Metadata.empty } } } def withName(newName: String): NamedExpression = { Alias(child, newName)( exprId = exprId, qualifier = qualifier, explicitMetadata = explicitMetadata, nonInheritableMetadataKeys = nonInheritableMetadataKeys) } def newInstance(): NamedExpression = Alias(child, name)( qualifier = qualifier, explicitMetadata = explicitMetadata, nonInheritableMetadataKeys = nonInheritableMetadataKeys) override def toAttribute: Attribute = { if (resolved) { AttributeReference(name, child.dataType, child.nullable, metadata)(exprId, qualifier) } else { UnresolvedAttribute(name) } } /** Used to signal the column used to calculate an eventTime watermark (e.g. a#1-T{delayMs}) */ private def delaySuffix = if (metadata.contains(EventTimeWatermark.delayKey)) { s"-T${metadata.getLong(EventTimeWatermark.delayKey)}ms" } else { "" } override def toString: String = s"$child AS $name#${exprId.id}$typeSuffix$delaySuffix" override protected final def otherCopyArgs: Seq[AnyRef] = { exprId :: qualifier :: explicitMetadata :: nonInheritableMetadataKeys :: Nil } override def hashCode(): Int = { val state = Seq(name, exprId, child, qualifier, explicitMetadata) state.map(Objects.hashCode).foldLeft(0)((a, b) => 31 * a + b) } override def equals(other: Any): Boolean = other match { case a: Alias => name == a.name && exprId == a.exprId && child == a.child && qualifier == a.qualifier && explicitMetadata == a.explicitMetadata && nonInheritableMetadataKeys == a.nonInheritableMetadataKeys case _ => false } override def sql: String = { val qualifierPrefix = if (qualifier.nonEmpty) qualifier.map(quoteIfNeeded).mkString(".") + "." else "" s"${child.sql} AS $qualifierPrefix${quoteIfNeeded(name)}" } override protected def withNewChildInternal(newChild: Expression): Alias = copy(child = newChild)(exprId, qualifier, explicitMetadata, nonInheritableMetadataKeys) } // Singleton tree pattern BitSet for all AttributeReference instances. object AttributeReferenceTreeBits { val bits: BitSet = new ImmutableBitSet(TreePattern.maxId, ATTRIBUTE_REFERENCE.id) } /** * A reference to an attribute produced by another operator in the tree. * * @param name The name of this attribute, should only be used during analysis or for debugging. * @param dataType The [[DataType]] of this attribute. * @param nullable True if null is a valid value for this attribute. * @param metadata The metadata of this attribute. * @param exprId A globally unique id used to check if different AttributeReferences refer to the * same attribute. * @param qualifier An optional string that can be used to referred to this attribute in a fully * qualified way. Consider the examples tableName.name, subQueryAlias.name. * tableName and subQueryAlias are possible qualifiers. 
*/ case class AttributeReference( name: String, dataType: DataType, nullable: Boolean = true, override val metadata: Metadata = Metadata.empty)( val exprId: ExprId = NamedExpression.newExprId, val qualifier: Seq[String] = Seq.empty[String]) extends Attribute with Unevaluable { override lazy val treePatternBits: BitSet = AttributeReferenceTreeBits.bits /** * Returns true iff the expression id is the same for both attributes. */ def sameRef(other: AttributeReference): Boolean = this.exprId == other.exprId override def equals(other: Any): Boolean = other match { case ar: AttributeReference => name == ar.name && dataType == ar.dataType && nullable == ar.nullable && metadata == ar.metadata && exprId == ar.exprId && qualifier == ar.qualifier case _ => false } override def semanticHash(): Int = { this.exprId.hashCode() } override def hashCode: Int = { // See http://stackoverflow.com/questions/113511/hash-code-implementation var h = 17 h = h * 37 + name.hashCode() h = h * 37 + dataType.hashCode() h = h * 37 + nullable.hashCode() h = h * 37 + metadata.hashCode() h = h * 37 + exprId.hashCode() h = h * 37 + qualifier.hashCode() h } override lazy val preCanonicalized: Expression = { AttributeReference("none", dataType)(exprId) } override def newInstance(): AttributeReference = AttributeReference(name, dataType, nullable, metadata)(qualifier = qualifier) /** * Returns a copy of this [[AttributeReference]] with changed nullability. */ override def withNullability(newNullability: Boolean): AttributeReference = { if (nullable == newNullability) { this } else { AttributeReference(name, dataType, newNullability, metadata)(exprId, qualifier) } } override def withName(newName: String): AttributeReference = { if (name == newName) { this } else { AttributeReference(newName, dataType, nullable, metadata)(exprId, qualifier) } } /** * Returns a copy of this [[AttributeReference]] with new qualifier. */ override def withQualifier(newQualifier: Seq[String]): AttributeReference = { if (newQualifier == qualifier) { this } else { AttributeReference(name, dataType, nullable, metadata)(exprId, newQualifier) } } override def withExprId(newExprId: ExprId): AttributeReference = { if (exprId == newExprId) { this } else { AttributeReference(name, dataType, nullable, metadata)(newExprId, qualifier) } } override def withMetadata(newMetadata: Metadata): AttributeReference = { AttributeReference(name, dataType, nullable, newMetadata)(exprId, qualifier) } override def withDataType(newType: DataType): AttributeReference = { AttributeReference(name, newType, nullable, metadata)(exprId, qualifier) } override protected final def otherCopyArgs: Seq[AnyRef] = { exprId :: qualifier :: Nil } /** Used to signal the column used to calculate an eventTime watermark (e.g. a#1-T{delayMs}) */ private def delaySuffix = if (metadata.contains(EventTimeWatermark.delayKey)) { s"-T${metadata.getLong(EventTimeWatermark.delayKey)}ms" } else { "" } override def toString: String = s"$name#${exprId.id}$typeSuffix$delaySuffix" // Since the expression id is not in the first constructor it is missing from the default // tree string. override def simpleString(maxFields: Int): String = { s"$name#${exprId.id}: ${dataType.simpleString(maxFields)}" } override def sql: String = { val qualifierPrefix = if (qualifier.nonEmpty) qualifier.map(quoteIfNeeded).mkString(".") + "." else "" s"$qualifierPrefix${quoteIfNeeded(name)}" } } /** * A place holder used when printing expressions without debugging information such as the * expression id or the unresolved indicator. 
*/ case class PrettyAttribute( name: String, dataType: DataType = NullType) extends Attribute with Unevaluable { def this(attribute: Attribute) = this(attribute.name, attribute match { case a: AttributeReference => a.dataType case a: PrettyAttribute => a.dataType case _ => NullType }) override def toString: String = name override def sql: String = toString override def withNullability(newNullability: Boolean): Attribute = throw new UnsupportedOperationException override def newInstance(): Attribute = throw new UnsupportedOperationException override def withQualifier(newQualifier: Seq[String]): Attribute = throw new UnsupportedOperationException override def withName(newName: String): Attribute = throw new UnsupportedOperationException override def withMetadata(newMetadata: Metadata): Attribute = throw new UnsupportedOperationException override def qualifier: Seq[String] = throw new UnsupportedOperationException override def exprId: ExprId = throw new UnsupportedOperationException override def withExprId(newExprId: ExprId): Attribute = throw new UnsupportedOperationException override def withDataType(newType: DataType): Attribute = throw new UnsupportedOperationException override def nullable: Boolean = true } /** * A place holder used to hold a reference that has been resolved to a field outside of the current * plan. This is used for correlated subqueries. */ case class OuterReference(e: NamedExpression) extends LeafExpression with NamedExpression with Unevaluable { override def dataType: DataType = e.dataType override def nullable: Boolean = e.nullable override def prettyName: String = "outer" override def sql: String = s"$prettyName(${e.sql})" override def name: String = e.name override def qualifier: Seq[String] = e.qualifier override def exprId: ExprId = e.exprId override def toAttribute: Attribute = e.toAttribute override def newInstance(): NamedExpression = OuterReference(e.newInstance()) final override val nodePatterns: Seq[TreePattern] = Seq(OUTER_REFERENCE) } object VirtualColumn { // The attribute name used by Hive, which has different result than Spark, deprecated. 
val hiveGroupingIdName: String = "grouping__id" val groupingIdName: String = "spark_grouping_id" val groupingIdAttribute: UnresolvedAttribute = UnresolvedAttribute(groupingIdName) } /** * The internal representation of the MetadataAttribute, * it sets `__metadata_col` to `true` in AttributeReference metadata * - apply() will create a metadata attribute reference * - unapply() will check if an attribute reference is the metadata attribute reference */ object MetadataAttribute { def apply(name: String, dataType: DataType, nullable: Boolean = true): AttributeReference = AttributeReference(name, dataType, nullable, new MetadataBuilder().putBoolean(METADATA_COL_ATTR_KEY, value = true).build())() def unapply(attr: AttributeReference): Option[AttributeReference] = { if (attr.metadata.contains(METADATA_COL_ATTR_KEY) && attr.metadata.getBoolean(METADATA_COL_ATTR_KEY)) { Some(attr) } else None } } /** * The internal representation of the FileSourceMetadataAttribute, it sets `__metadata_col` * and `__file_source_metadata_col` to `true` in AttributeReference's metadata * - apply() will create a file source metadata attribute reference * - unapply() will check if an attribute reference is the file source metadata attribute reference */ object FileSourceMetadataAttribute { val FILE_SOURCE_METADATA_COL_ATTR_KEY = "__file_source_metadata_col" def apply(name: String, dataType: DataType, nullable: Boolean = true): AttributeReference = AttributeReference(name, dataType, nullable, new MetadataBuilder() .putBoolean(METADATA_COL_ATTR_KEY, value = true) .putBoolean(FILE_SOURCE_METADATA_COL_ATTR_KEY, value = true).build())() def unapply(attr: AttributeReference): Option[AttributeReference] = attr match { case MetadataAttribute(attr) if attr.metadata.contains(FILE_SOURCE_METADATA_COL_ATTR_KEY) && attr.metadata.getBoolean(FILE_SOURCE_METADATA_COL_ATTR_KEY) => Some(attr) case _ => None } /** * Cleanup the internal metadata information of an attribute if it is * a [[FileSourceMetadataAttribute]], it will remove both [[METADATA_COL_ATTR_KEY]] and * [[FILE_SOURCE_METADATA_COL_ATTR_KEY]] from the attribute [[Metadata]] */ def cleanupFileSourceMetadataInformation(attr: Attribute): Attribute = attr match { case FileSourceMetadataAttribute(attr) => attr.withMetadata( new MetadataBuilder().withMetadata(attr.metadata) .remove(METADATA_COL_ATTR_KEY) .remove(FILE_SOURCE_METADATA_COL_ATTR_KEY) .build() ) case attr => attr } }
ueshin/apache-spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/namedExpressions.scala
Scala
apache-2.0
18,915
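The ExprId machinery above combines a per-JVM AtomicLong counter with a per-JVM UUID so ids stay distinguishable across JVMs. A self-contained restatement of that scheme, with an illustrative Id case class that is not Spark's:

import java.util.UUID
import java.util.concurrent.atomic.AtomicLong

object ExprIdSketch {
  // Ids are unique within a JVM via the counter; the per-JVM UUID keeps ids from
  // different JVMs from colliding, mirroring NamedExpression.newExprId / ExprId.
  final case class Id(id: Long, jvmId: UUID)

  private val counter = new AtomicLong()
  private val jvmId = UUID.randomUUID()

  def newId(): Id = Id(counter.getAndIncrement(), jvmId)

  def main(args: Array[String]): Unit = {
    val a = newId()
    val b = newId()
    println(a == b)        // false: different counter values
    println(a == a.copy()) // true: equality is by (id, jvmId)
  }
}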
package com.monsanto.arch.cloudformation.model import spray.json.{JsValue, JsonWriter} /** * Wrapper for anything that can be written to Json. Useful to allow specifying * a Map of String -> JsonWritable that can take any value that can be written out as Json. */ case class JsonWritable[T: JsonWriter](thing: T) { implicit val fmt = implicitly[JsonWriter[T]] } object JsonWritable { import scala.language.implicitConversions implicit def fmt[T] = new JsonWriter[JsonWritable[T]] { override def write(obj: JsonWritable[T]): JsValue = obj.fmt.write(obj.thing) } implicit def wrap[T: JsonWriter](thing: T) = JsonWritable(thing) }
MonsantoCo/cloudformation-template-generator
src/main/scala/com/monsanto/arch/cloudformation/model/JsonUtils.scala
Scala
bsd-3-clause
655
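JsonWritable above captures a spray-json JsonWriter inside the wrapper so heterogeneous values can live in one map type. The same pattern is shown below with a tiny hand-rolled Writer type class; all names here are hypothetical stand-ins, not spray-json's API.

trait Writer[T] { def write(t: T): String }

case class Writable[T: Writer](thing: T) {
  val writer: Writer[T] = implicitly[Writer[T]]
  def render: String = writer.write(thing)
}

object WritableDemo {
  implicit val intWriter: Writer[Int] =
    new Writer[Int] { def write(t: Int): String = t.toString }
  implicit val stringWriter: Writer[String] =
    new Writer[String] { def write(t: String): String = "\"" + t + "\"" }

  def main(args: Array[String]): Unit = {
    // Each value carries its own Writer instance, so a heterogeneous map still renders.
    val values: Map[String, Writable[_]] = Map("count" -> Writable(1), "name" -> Writable("x"))
    values.foreach { case (key, w) => println(s"$key: ${w.render}") }
  }
}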
package com.imaginea.activegrid.core.models import org.scalamock.scalatest.MockFactory import org.scalatest.{Suite, PrivateMethodTester, FlatSpec} /** * Created by ranjithrajd on 18/11/16. */ class SSHBasedStrategyTest extends FlatSpec with PrivateMethodTester with MockFactory with Suite { "resolveOS " should "return osinfo tag" in { val strategy = SSHBasedStrategy(Builder.topology,List.empty,false) val resolveOS = PrivateMethod[List[KeyValueInfo]]('resolveOS) val result = strategy.invokePrivate(resolveOS(Some("ubuntu"),Builder.instance1)) assert(result.size == 1) assert(result.contains(KeyValueInfo(None,"osinfo","ubuntu"))) } "toProcessMap " should "return Map of process id and name" in { val processList = "1513/mongod;1268/mysqld;5872/java;2880/redis-server;5066/java;" val strategy = SSHBasedStrategy(Builder.topology,List.empty,false) val toProcessMap = PrivateMethod[Map[String,String]]('toProcessMap) val result = strategy.invokePrivate(toProcessMap(processList)) assert(result.size == 3) assert(result.contains("5872")) //mysqld assert(!result.contains("2880")) //redis-server } "connectionResolver " should " takes the process list and instance and return the InstanceConnection" in { val processList = "74.125.68.188:;172.217.26.197:;13.107.6.159:;" val strategy = SSHBasedStrategy(Builder.topology,List.empty,false) val connectionResolver = PrivateMethod[Set[InstanceConnection]]('connectionResolver) val result = strategy.invokePrivate(connectionResolver(Some(processList),Builder.instance2)) assert(result.size == 1) val expected = InstanceConnection(None,"i-69c98e7e","i-f6d92ce0",List()) assert(result.contains(expected)) } "setRole " should " return role tag KeyValueInfo" in { val strategy = SSHBasedStrategy(Builder.topology,List.empty,false) val setRole = PrivateMethod[Option[KeyValueInfo]]('roleTag) val result = strategy.invokePrivate(setRole(Builder.instance2,Builder.software2)) val expected = KeyValueInfo(None,"roles","WEB") assert(result.isDefined) assert(result.contains(expected)) } /*Integration test with SSH //applicationDetails findProcessName resolveApplication grepInstanceDetails resolveProcesses */ } object Builder{ val instance1 = new Instance( id = None, instanceId = Some("i-f6d92ce0"), name = "74.125.68.188:", state = Some("Running"), instanceType = Some("t2.small"), platform = None, architecture = None, publicDnsName = None, launchTime = None, memoryInfo = None, rootDiskInfo = None, tags = List.empty, sshAccessInfo = None, liveConnections = List.empty, estimatedConnections = List.empty, processes = Set.empty, image = None, existingUsers = List.empty, account = None, availabilityZone = None, privateDnsName = None, privateIpAddress = Some("74.125.68.188:"), publicIpAddress = Some("74.125.68.188:"), elasticIP = None, monitoring = None, rootDeviceType = None, blockDeviceMappings = List.empty, securityGroups = List.empty, reservedInstance = true, region = None) val instance2 = new Instance( id = None, instanceId = Some("i-69c98e7e"), name = "172.217.26.197:", state = Some("Running"), instanceType = Some("t2.small"), platform = None, architecture = None, publicDnsName = None, launchTime = None, memoryInfo = None, rootDiskInfo = None, tags = List.empty, sshAccessInfo = None, liveConnections = List.empty, estimatedConnections = List.empty, processes = Set.empty, image = None, existingUsers = List.empty, account = None, availabilityZone = None, privateDnsName = None, privateIpAddress = Some("172.217.26.197:"), publicIpAddress = Some("172.217.26.197:"), elasticIP = None, monitoring = 
None, rootDeviceType = None, blockDeviceMappings = List.empty, securityGroups = List.empty, reservedInstance = true, region = None) val software1 = Software(None,Some("1.0"),"Tomcat","Tomcat",None,"8080",List("Tomcat"),true) val software2 = Software(None,Some("5.0"),"PServer","Pramati",None,"9000",List("PServer"),true) val softwares = List(software1,software2) val site1 = new Site1( id = None, siteName = "AWS", instances = List(instance1), reservedInstanceDetails = List.empty, filters = List.empty, loadBalancers = List.empty, scalingGroups = List.empty, applications = List.empty, groupsList = List.empty, groupBy = "groupBy", scalingPolicies = List.empty ) val topology = new Topology(site1) } //class SSHSessionWrapper extends SSHSession("127.0.0.0","aws","/tmp",None,None)
eklavya/activeGrid
src/test/scala/com/imaginea/activegrid/core/models/SSHBasedStrategyTest.scala
Scala
apache-2.0
4,788
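The toProcessMap test above feeds in a ';'-separated list of "pid/name" entries. A naive standalone parser for that format is sketched below; note that the real private method evidently also filters to processes of interest (the test keeps 3 of 5 entries), which this sketch does not attempt.

object ProcessListSketch {
  // Split "pid/name;pid/name;..." into a Map(pid -> name).
  def toProcessMap(processList: String): Map[String, String] =
    processList
      .split(";")
      .toSeq
      .map(_.trim)
      .filter(_.contains("/"))
      .map { entry =>
        val Array(pid, name) = entry.split("/", 2)
        pid -> name
      }
      .toMap

  def main(args: Array[String]): Unit = {
    println(toProcessMap("1513/mongod;1268/mysqld;5872/java;"))
    // Map(1513 -> mongod, 1268 -> mysqld, 5872 -> java)
  }
}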
package org.http4s package headers import org.http4s.parser.HttpHeaderParser import org.http4s.util.Writer import java.lang.{Long => JLong} object `X-B3-ParentSpanId` extends HeaderKey.Internal[`X-B3-ParentSpanId`] with HeaderKey.Singleton { override def parse(s: String): ParseResult[`X-B3-ParentSpanId`] = HttpHeaderParser.X_B3_PARENTSPANID(s) } final case class `X-B3-ParentSpanId`(id: Long) extends Header.Parsed { override def key: `X-B3-ParentSpanId`.type = `X-B3-ParentSpanId` override def renderValue(writer: Writer): writer.type = xB3RenderValueImpl(writer, id) }
ZizhengTai/http4s
core/src/main/scala/org/http4s/headers/X-B3-ParentSpanId.scala
Scala
apache-2.0
592
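B3 propagation headers conventionally carry span ids as 16 lower-hex characters. Since xB3RenderValueImpl is not shown in this row, the rendering below is an assumption about that wire format rather than the actual http4s implementation:

object B3RenderSketch {
  // Zero-padded, 16-character lower-hex rendering of a 64-bit id.
  def renderSpanId(id: Long): String = f"$id%016x"

  def main(args: Array[String]): Unit = {
    println(renderSpanId(591L)) // 000000000000024f
    println(renderSpanId(-1L))  // ffffffffffffffff
  }
}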
/** * Digi-Lib-Mesh - distributed mesh library for Digi components * * Copyright (c) 2012-2013 Alexey Aksenov [email protected] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.digimead.digi.lib.mesh.endpoint import java.util.UUID import org.digimead.digi.lib.DependencyInjection import org.digimead.digi.lib.log.api.Loggable import org.digimead.lib.test.LoggingHelper import org.scalatest.FunSpec import org.scalatest.matchers.ShouldMatchers class EndpointSpec extends FunSpec with ShouldMatchers with LoggingHelper with Loggable { after { adjustLoggingAfter } before { DependencyInjection(org.digimead.digi.lib.mesh.defaultFakeHexapod ~ org.digimead.digi.lib.mesh.default ~ org.digimead.digi.lib.default, false) adjustLoggingBefore } describe("An Endpoint") { it("should perform proper comparison of nature based on address") { val ep1 = new LocalEndpoint.Nature(UUID.fromString("00000000-0000-0000-0000-000000000000")) val ep2 = new LocalEndpoint.Nature(UUID.randomUUID()) val ep3 = new LocalEndpoint.Nature(UUID.fromString("00000000-0000-0000-0000-000000000000")) ep1 should not be (ep2) ep1 should be(ep3) } } override def beforeAll(configMap: Map[String, Any]) { adjustLoggingBeforeAll(configMap) } }
ezh/digi-lib-mesh
src/test/scala/org/digimead/digi/lib/mesh/endpoint/EndpointSpec.scala
Scala
apache-2.0
1,801
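The spec above checks that two LocalEndpoint.Nature values are equal exactly when they wrap the same address UUID. The stand-in case class below reproduces that property with plain case-class equality; it is not the mesh library's Nature type.

import java.util.UUID

object NatureEqualitySketch {
  // Case-class equality is structural, so two Nature values are equal iff their
  // address UUIDs are equal -- the property the spec above asserts.
  final case class Nature(address: UUID)

  def main(args: Array[String]): Unit = {
    val zero = UUID.fromString("00000000-0000-0000-0000-000000000000")
    val a = Nature(zero)
    val b = Nature(UUID.randomUUID())
    val c = Nature(zero)
    println(a == b) // false
    println(a == c) // true
  }
}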
package queries import java.time.{LocalDate, LocalDateTime} import datamodel.Priority import datamodel.Priority.Priority import datamodel.columnDataMappers._ import datamodel.dataModel._ import slick.driver.H2Driver.api._ object queries { val selectAllTasksQuery: Query[TaskTable, Task, Seq] = Tasks // val findAllTaskTitleQuery = Tasks.map(taskTable => taskTable.title) val selectAllTaskTitleQuery: Query[Rep[String], String, Seq] = Tasks.map(_.title) val selectMultipleColumnsQuery: Query[(Rep[String], Rep[Priority], Rep[LocalDateTime]), (String, Priority, LocalDateTime), Seq] = Tasks.map(t => (t.title, t.priority, t.createdAt)) val selectHighPriorityTasksQuery: Query[Rep[String], String, Seq] = Tasks.filter(_.priority === Priority.HIGH).map(_.title) def findAllTasksPageQuery(skip: Int, limit: Int) = Tasks.drop(skip).take(limit) val selectTasksSortedByDueDateDescQuery = Tasks.sortBy(_.dueBy.desc) val findAllDueTasks = Tasks.filter(_.dueBy >= LocalDate.now().atStartOfDay()) val selectAllTaskTitlesDueToday = Tasks .filter(_.dueBy > LocalDate.now().atStartOfDay()) .filter(_.dueBy < LocalDate.now().atStartOfDay().plusDays(1)) .map(_.title) val selectTasksBetweenTodayAndSameDateNextMonthQuery = Tasks.filter(t => t.dueBy.between(LocalDateTime.now(), LocalDateTime.now().plusMonths(1))) val selectAllTasksDueToday = Tasks .filter(_.dueBy > LocalDate.now().atStartOfDay()) .filter(_.dueBy < LocalDate.now().atStartOfDay().plusDays(1)) val checkIfAnyHighPriorityTaskExistsToday = selectAllTasksDueToday.filter(_.priority === Priority.HIGH).exists }
shekhargulati/52-technologies-in-2016
05-slick/tasky/src/main/scala/queries/queries.scala
Scala
mit
1,616
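The "due today" Slick queries above bound dueBy strictly between today's start of day and tomorrow's start of day. The same window restated in plain Scala over an in-memory Task stand-in (no Slick involved; the Task case class here is illustrative, not the mapped row type):

import java.time.{LocalDate, LocalDateTime}

object DueTodaySketch {
  final case class Task(title: String, dueBy: LocalDateTime)

  // Mirrors the strict (start, end) window from the Slick query: dueBy strictly after
  // today's start of day and strictly before tomorrow's start of day.
  def dueToday(tasks: Seq[Task], today: LocalDate): Seq[Task] = {
    val start = today.atStartOfDay()
    val end = start.plusDays(1)
    tasks.filter(t => t.dueBy.isAfter(start) && t.dueBy.isBefore(end))
  }

  def main(args: Array[String]): Unit = {
    val today = LocalDate.now()
    val tasks = Seq(
      Task("write report", today.atTime(15, 0)),
      Task("plan sprint", today.plusDays(2).atTime(9, 0))
    )
    dueToday(tasks, today).foreach(t => println(t.title)) // write report
  }
}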
package com.stefansavev.randomprojections.implementation class SplitIntoKProjection { } import java.util.Random import com.stefansavev.randomprojections.datarepr.dense.DataFrameView import com.stefansavev.randomprojections.datarepr.sparse.SparseVector import com.stefansavev.randomprojections.utils.RandomUtils import scala.collection.mutable.ArrayBuffer case class SplitIntoKProjectionStrategy(rnd: Random, numCols: Int, k: Int) extends ProjectionStrategy{ def chooseKPoints(k: Int, pointIds: Array[Int], view: DataFrameView): Array[Int] = { RandomUtils.shuffleInts(rnd, pointIds).take(k) } def chooseKDimensions(k: Int): Array[Int] = { val columns = Array.range(0, numCols) RandomUtils.shuffleInts(rnd, columns).take(k).sorted } def generateRandomVector(columnIds: Array[Int]): SparseVector = { val signs = columnIds.map(_ => (if (rnd.nextDouble() >= 0.5) 1.0 else -1.0)) var sum = 0.0 var i = 0 while(i < signs.length){ val v = signs(i) sum += v*v i += 1 } sum = Math.sqrt(sum) i = 0 while(i < signs.length){ signs(i) /= sum i += 1 } val sparseVec = new SparseVector(numCols, columnIds,signs) sparseVec } def generateKRandomVectors(num: Int, columnIds: Array[Int]): Array[SparseVector] = { val buff = new ArrayBuffer[SparseVector]() for(i <- 0 until num){ buff += generateRandomVector(columnIds) } buff.toArray } def nextRandomProjection(depth: Int, view: DataFrameView, projectionVector: AbstractProjectionVector): AbstractProjectionVector = { val useK = HadamardUtils.largestPowerOf2(k) val chosenDim = chooseKDimensions(useK) val randomVector = generateRandomVector(chosenDim) val proj = new HadamardProjectionVector(randomVector) proj } } case class SplitIntoKProjectionSettings(k: Int) class SplitIntoKProjectionBuilder(builderSettings: SplitIntoKProjectionSettings) extends ProjectionStrategyBuilder{ type T = SplitIntoKProjectionStrategy val splitStrategy: DatasetSplitStrategy = new HadamardProjectionSplitStrategy() def build(settings: IndexSettings, rnd: Random, dataFrameView:DataFrameView): T = SplitIntoKProjectionStrategy(rnd, dataFrameView.numCols, builderSettings.k) def datasetSplitStrategy: DatasetSplitStrategy = splitStrategy }
codeaudit/random-projections-at-berlinbuzzwords
src/main/scala/com/stefansavev/randomprojections/implementation/SplitIntoKProjection.scala
Scala
apache-2.0
2,333
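The core of generateRandomVector above is a random ±1 direction, L2-normalised so the projection keeps its scale; a small self-contained sketch of that idea (names and values here are illustrative, not part of the repository):

// Sketch only: a normalised random-sign direction and a dot-product projection.
import java.util.Random

object RandomSignProjectionSketch extends App {

  def randomSignVector(rnd: Random, dims: Int): Array[Double] = {
    val signs = Array.fill(dims)(if (rnd.nextDouble() >= 0.5) 1.0 else -1.0)
    val norm  = math.sqrt(signs.map(s => s * s).sum) // = sqrt(dims) for +/-1 entries
    signs.map(_ / norm)
  }

  def project(point: Array[Double], direction: Array[Double]): Double =
    point.zip(direction).map { case (a, b) => a * b }.sum

  val rnd       = new Random(42)
  val direction = randomSignVector(rnd, dims = 8)
  val point     = Array(0.5, -1.0, 2.0, 0.0, 1.5, -0.5, 3.0, 1.0)
  println(f"projection = ${project(point, direction)}%.4f")
}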
/* * Copyright 2021 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package uk.gov.hmrc.play.asyncmvc.async import akka.actor.Actor import uk.gov.hmrc.play.asyncmvc.model.{TaskCache, StatusCodes} import play.api.Logger import uk.gov.hmrc.time.DateTimeUtils import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future import uk.gov.hmrc.http.HeaderCarrier /** * The AsyncTask is responsible for processing an AsyncMVCAsyncActor message, where is message encapsulates a Future to be processed. * @tparam OUTPUT - The Type the Task will return. */ trait AsyncTask[OUTPUT] extends LogWrapper { /** * Object encapsulates the message handling and the definition of the Akka message. */ object AsyncMVCAsyncActor { case class AsyncMessage(id: String, asyncFunction: HeaderCarrier => Future[OUTPUT], jsonToString: OUTPUT => String, headerCarrier: Option[HeaderCarrier], startTime: Long) { def invokeAsyncFunction(startTime:Long)(implicit hc: HeaderCarrier, sessionCache: Cache[TaskCache]): Future[Unit] = { asyncFunction(hc).flatMap(jsonResult => { // Attempt to save the JSON result from the Future to cache. Indicates the background Future has completed processing. val task = TaskCache(id, StatusCodes.Complete, Some(jsonToString(jsonResult)), startTime, DateTimeUtils.now.getMillis) sessionCache.put(id, task).map { dataMap => Logger.info(wrap(s"The async task Id [${this.id}] is complete and cache updated.")) }.recover { case e: Exception => Logger.error(wrap(s"Failed to update cache for task Id [${this.id}].")) // The client will timeout waiting! } }) } } } /** * Defines an Actor which is responsible for processing the AsyncMVCAsyncActor.AsyncMessage message. * @param sessionCache - The cache used to update the status of the task associated with the message. * @param clientTimeout - The maximum amount of time a client will wait for the message to be processed. */ class AsyncMVCAsyncActor(sessionCache:Cache[TaskCache], clientTimeout:Long) extends Actor { import AsyncMVCAsyncActor.AsyncMessage def receive = { case asyncTask @ AsyncMessage(id, _, _, hc, _) => processMessage(asyncTask)(hc.getOrElse(throw new Exception("No HeaderCarrier found in message!")), sessionCache) case unknown @ _ => Logger.info(wrap(s"Unknown message received! $unknown")) } private def processMessage(asyncTask:AsyncMessage)(implicit headerCarrier: HeaderCarrier, sessionCache: Cache[TaskCache]): Unit = { Logger.info(wrap(s"Picked up a new async task with Id [${asyncTask.id}]")) val timeout = DateTimeUtils.now.getMillis > (asyncTask.startTime + clientTimeout) // Check if the client has timed out already waiting for the task, no point starting if no client! 
val status = if (timeout) StatusCodes.Timeout else StatusCodes.Running val task = TaskCache(asyncTask.id, status, None, asyncTask.startTime, if (timeout) DateTimeUtils.now.getMillis else 0) invokeAsyncTaskFuture(asyncTask, task) } private def invokeAsyncTaskFuture(asyncMessage:AsyncMessage, task:TaskCache)(implicit hc:HeaderCarrier, sessionCache: Cache[TaskCache]) : Unit = { Logger.info(wrap(s"Invoking Future for Id [${asyncMessage.id}]")) val time = DateTimeUtils.now.getMillis decreaseThrottle(asyncMessage, task) { asyncMessage.invokeAsyncFunction(time) } } private def saveError(asyncTask:TaskCache, e:Exception)(implicit hc:HeaderCarrier) = { Logger.error(wrap(s"Task failed to process and error status recorded for Id [${asyncTask.id}]"), e) // Note: Do not wait for future! sessionCache.put(asyncTask.id, asyncTask.copy(status=StatusCodes.Error, complete=DateTimeUtils.now.getMillis)).recover { case e: Exception => Logger.error(wrap(s"saveError: Failed to save the task error status to cache! Task Id [${asyncTask.id}]. Exception $e")) throw new Exception("Failed to save to cache!") } } private def decreaseThrottle(asyncMessage:AsyncMessage, task:TaskCache)(action: => Future[Unit])(implicit hc:HeaderCarrier, sessionCache: Cache[TaskCache]) { val time = DateTimeUtils.now.getMillis def decreaseThrottle = Throttle.down() try { if (task.status != StatusCodes.Timeout) { action.map(_ => { decreaseThrottle Logger.info(wrap(s"Future completed processing. Time spent processing future for Id [${asyncMessage.id}] is ${DateTimeUtils.now.getMillis - time}")) }).recover { case e: Exception => decreaseThrottle saveError(task, e) case _ => // Leave for akka to handle. } } else { decreaseThrottle Logger.error(wrap(s"Client has timed out waiting to start task! Id [${asyncMessage.id}]")) } } catch { // Note: Function could throw an exception before Future is successfully running! case e: Exception => decreaseThrottle saveError(task, e) } } } } trait LogWrapper { def wrap(message:String) = s"play-async - $message" }
hmrc/play-async
src/main/scala/uk/gov/hmrc/play/asyncmvc/async/AsyncTask.scala
Scala
apache-2.0
5,869
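A much-reduced sketch of the pattern AsyncMVCAsyncActor implements above: an actor picks up a message describing background work, runs the Future off the request path, and records the outcome when it completes. Actor and message names are made up, and the caching, throttling and timeout handling of play-async are omitted.

// Sketch only: offloading a Future to an actor and reacting to its completion.
import akka.actor.{Actor, ActorSystem, Props}
import scala.concurrent.Future

object AsyncTaskSketch extends App {

  case class RunTask(id: String, work: () => Future[String])

  class TaskRunner extends Actor {
    import context.dispatcher // execution context for the Future callbacks
    def receive = {
      case RunTask(id, work) =>
        work()
          .map(result => println(s"task $id completed: $result"))
          .recover { case e => println(s"task $id failed: ${e.getMessage}") }
    }
  }

  val system = ActorSystem("sketch")
  val runner = system.actorOf(Props[TaskRunner](), "task-runner")

  runner ! RunTask("1", () => Future.successful("done"))

  Thread.sleep(500)
  system.terminate()
}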
package scodec package codecs class DiscriminatorCodecTest extends CodecSuite { "the discriminator combinators" should { "support building a codec using typecases" in { val codec = discriminated[AnyVal].by(uint8) .typecase(0, int32) .typecase(1, bool) roundtrip(codec, true) roundtrip(codec, false) roundtrip(codec, 1) roundtrip(codec, Int.MaxValue) codec.sizeBound shouldBe SizeBound.bounded(9, 40) } "support building a codec using partial functions and subtyping" in { val codec = discriminated[AnyVal].by(uint8) .\ (0) { case i: Int => i } (int32) .\ (1) { case b: Boolean => b } (bool) roundtrip(codec, true) roundtrip(codec, false) roundtrip(codec, 1) roundtrip(codec, Int.MaxValue) } "support building a codec using A => Option[B] and subtyping" in { val codec = discriminated[AnyVal].by(uint8) ./ (0) { v => v match { case i: Int => Some(i); case _ => None }} (int32) ./ (1) { v => v match { case b: Boolean => Some(b); case _ => None }} (bool) roundtrip(codec, true) roundtrip(codec, false) roundtrip(codec, 1) roundtrip(codec, Int.MaxValue) } "support building a codec for an enumeration" in { sealed trait Direction case object North extends Direction case object South extends Direction case object East extends Direction case object West extends Direction val codec = mappedEnum(uint8, North -> 1, South -> 2, East -> 3, West -> 4) roundtrip(codec, North) roundtrip(codec, South) roundtrip(codec, East) roundtrip(codec, West) } "support building a codec for an enumeration with preserved reserved values" in { trait Color case object Red extends Color case object Green extends Color case object Blue extends Color case class Reserved(value: Int) extends Color val nonReserved: Codec[Color] = mappedEnum(uint8, Red -> 1, Green -> 2, Blue -> 3) val reserved: Codec[Reserved] = uint8.widenOpt(Reserved.apply, Reserved.unapply) val codec: Codec[Color] = choice(nonReserved, reserved.upcast[Color]) roundtrip(codec, Red) roundtrip(codec, Green) roundtrip(codec, Blue) roundtrip(codec, Reserved(255)) roundtrip(codec, Reserved(4)) } "support building a codec for an enumeration with preserved reserved values, and reserved values are not in the type hierarchy" in { trait Color case object Red extends Color case object Green extends Color case object Blue extends Color case class Reserved(value: Int) val nonReserved: Codec[Color] = mappedEnum(uint8, Red -> 1, Green -> 2, Blue -> 3) val reserved: Codec[Reserved] = uint8.widenOpt(Reserved.apply, Reserved.unapply) val codec: Codec[Either[Reserved, Color]] = choice( nonReserved.xmapc(Right.apply)(_.b).upcast[Either[Reserved, Color]], reserved.xmapc(Left.apply)(_.a).upcast[Either[Reserved, Color]] ) roundtrip(codec, Right(Red)) roundtrip(codec, Right(Green)) roundtrip(codec, Right(Blue)) roundtrip(codec, Left(Reserved(255))) roundtrip(codec, Left(Reserved(4))) } "support building a codec for an ADT" in { sealed trait Direction case object Stay extends Direction case class Go(units: Int) extends Direction val stayCodec = provide(Stay) val goCodec = int32.widenOpt[Go](Go.apply, Go.unapply) val codec = discriminated[Direction].by(uint8). typecase(0, stayCodec). 
typecase(1, goCodec) roundtrip(codec, Stay) roundtrip(codec, Go(42)) } "support building a codec for recusive ADTs - e.g., trees" in { sealed trait Tree case class Node(l: Tree, r: Tree) extends Tree case class Leaf(n: Int) extends Tree def treeCodec: Codec[Tree] = lazily { discriminated[Tree].by(bool) .| (false) { case l @ Leaf(n) => n } (Leaf.apply) (int32) .| (true) { case n @ Node(l, r) => (l, r) } { case (l, r) => Node(l, r) } (treeCodec ~ treeCodec) } roundtrip(treeCodec, Leaf(42)) roundtrip(treeCodec, Node(Leaf(42), Node(Leaf(1), Leaf(2)))) } "error when matching discriminator for encoding is not found" in { val codec = discriminated[AnyVal].by(uint8) .typecase(0, bool) roundtrip(codec, true) roundtrip(codec, false) encodeError(codec, 1, new Err.MatchingDiscriminatorNotFound(1)) encodeError(codec, Int.MaxValue, new Err.MatchingDiscriminatorNotFound(Int.MaxValue)) } "support framing value codecs" in { sealed trait Direction case object Stay extends Direction case class Go(units: Int) extends Direction case class Annotate(message: String) extends Direction val stayCodec = provide(Stay) val goCodec = int32.widenOpt[Go](Go.apply, Go.unapply) val annotateCodec = ascii.widenOpt[Annotate](Annotate.apply, Annotate.unapply) val codec = discriminated[Direction].by(uint8). typecase(0, stayCodec). typecase(1, goCodec). typecase(2, annotateCodec). framing(new CodecTransformation { def apply[X](c: Codec[X]) = variableSizeBytes(uint8, c) }) roundtrip(list(codec), List(Stay, Go(1), Annotate("Hello"), Go(2), Stay)) } } }
alvaroc1/scodec
shared/src/test/scala/scodec/codecs/DiscriminatorCodecTest.scala
Scala
bsd-3-clause
5,501
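Outside the test suite, the same discriminated combinator reads like this: an 8-bit tag selects which codec handles the payload. The ADT and object names below are made up; the sketch assumes scodec-core on the classpath and uses xmap where the tests above use widenOpt.

// Sketch only: a two-case ADT encoded behind a uint8 discriminator.
import scodec.Codec
import scodec.codecs._

object DiscriminatorSketch extends App {

  sealed trait Direction
  case object Stay extends Direction
  case class Go(units: Int) extends Direction

  val codec: Codec[Direction] = discriminated[Direction].by(uint8)
    .typecase(0, provide(Stay))
    .typecase(1, int32.xmap[Go](Go.apply, _.units))

  val bits = codec.encode(Go(42)).require
  println(bits.toBin)                       // 8-bit tag 1 followed by a 32-bit payload
  println(codec.decode(bits).require.value) // Go(42)
}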
package example

import akka.actor.{ActorSystem, Props}
import akka.http.scaladsl.server.directives.Credentials
import example.Commands.{CheckBalance, Deposit, OpenAccount, Withdraw}
import example.Events.{AccountOpened, BalanceChecked, Deposited, Withdrawn}
import fission.Fission
import fission.Fission._
import fission.auth.Principal
import fission.http.Request
import fission.message.{Command, Nack}
import fission.model.{Aggregate, Event, State}
import fission.router.Router
import org.json4s.JsonDSL._
import scaldi.Module
import scaldi.akka.AkkaInjectable

/**
  * @author David Caseria
  */
object Main extends App with AkkaInjectable {

  implicit val module = new AppModule :: Fission.module

  implicit val system = inject[ActorSystem]

  Fission()
}

class AppModule extends Module {

  // Create an example bank system
  bind[ActorSystem] to ActorSystem("BankSystem") destroyWith (_.terminate())

  // Resolve credentials to a principal
  bind[Authenticator] to ((credentials: Credentials) => {
    credentials match {
      case Credentials.Provided(token) => Some(User(token))
      case _ => None
    }
  })

  // Map requests to commands
  bind[RequestMapper] to ((request: Request) => {
    request.method match {
      case "CheckBalance" => request.mapTo[CheckBalance]
      case "Deposit" => request.mapTo[Deposit]
      case "OpenAccount" => request.mapTo[OpenAccount]
      case "Withdraw" => request.mapTo[Withdraw]
    }
  })

  // Router to forward commands to aggregates
  binding toProvider new Router() {
    override def route = {
      case CheckBalance(userId) => reference(Account.props(userId))
      case Deposit(userId, _) => reference(Account.props(userId))
      case OpenAccount(userId) => reference(Account.props(userId))
      case Withdraw(userId, _) => reference(Account.props(userId))
    }
  }
}

class Account(val id: String) extends Aggregate(new AccountState()) {

  override def persistenceId = s"account-$id"

  override def applyEvent = {
    case Deposited(amount) => state.balance + amount
    case Withdrawn(amount) => state.balance - amount
    case _ =>
  }

  override def receiveCommand = {
    case OpenAccount(userId) => acknowledge(AccountOpened(userId))
    case Deposit(_, amount) => acknowledge(Deposited(amount))
    case Withdraw(_, amount) =>
      if (state.balance > amount) {
        acknowledge(Withdrawn(amount))
      } else {
        sender() ! Nack(-32000, "Overdraft", Some(("amount" -> amount) ~ ("balance" -> state.balance)))
      }
    case CheckBalance(_) => acknowledge(BalanceChecked(state.balance))
  }
}

object Account {
  def props(id: String) = Props(new Account(id))
}

class AccountState extends State {
  var balance = BigDecimal("0.00")
}

object Commands {

  case class OpenAccount(userId: String) extends Command

  case class Deposit(userId: String, amount: BigDecimal) extends Command

  case class Withdraw(userId: String, amount: BigDecimal) extends Command

  // This is just an example. Use CQRS instead!
  case class CheckBalance(userId: String) extends Command
}

object Events {

  case class AccountOpened(userId: String) extends Event

  case class Deposited(amount: BigDecimal) extends Event

  case class Withdrawn(amount: BigDecimal) extends Event

  case class BalanceChecked(balance: BigDecimal) extends Event
}

case class User(id: String) extends Principal {

  override def authorize = {
    case CheckBalance(userId) => id == userId
    case Deposit(userId, _) => id == userId
    case OpenAccount(userId) => id == userId
    case Withdraw(userId, _) => id == userId
  }
}
davidcaseria/fission
src/test/scala/example/Main.scala
Scala
mit
3,595
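Stripped of fission and Akka persistence, the Account aggregate above boils down to folding a stream of events into state. A pure-Scala sketch of that replay, with local stand-in event types rather than fission's:

// Sketch only: event replay as a fold, no actors or persistence involved.
object AccountFoldSketch extends App {

  sealed trait Event
  case class Deposited(amount: BigDecimal) extends Event
  case class Withdrawn(amount: BigDecimal) extends Event

  def replay(events: List[Event]): BigDecimal =
    events.foldLeft(BigDecimal("0.00")) {
      case (balance, Deposited(amount)) => balance + amount
      case (balance, Withdrawn(amount)) => balance - amount
    }

  println(replay(List(Deposited(100), Withdrawn(30), Deposited(5)))) // 75.00
}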
package week4.primitives

abstract class Nat {
  def isZero: Boolean
  def predecessor: Nat
  def successor: Nat = new Succ(this)
  def + (that: Nat): Nat
  def - (that: Nat): Nat
}
juliocnsouzadev/scala_datascience
fuctional_programming_principles/fpps-notes/src/week4/primitives/Nat.scala
Scala
mit
193
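The file above references Succ without defining it; one common way to complete the Peano-number exercise looks like the following. This is a typical solution under that assumption, not necessarily the author's.

// Sketch only: Zero and Succ completing the Nat interface above.
abstract class Nat { // as defined above
  def isZero: Boolean
  def predecessor: Nat
  def successor: Nat = new Succ(this)
  def + (that: Nat): Nat
  def - (that: Nat): Nat
}

object Zero extends Nat {
  def isZero = true
  def predecessor = throw new Error("Zero.predecessor")
  def + (that: Nat) = that
  def - (that: Nat) = if (that.isZero) this else throw new Error("negative number")
}

class Succ(n: Nat) extends Nat {
  def isZero = false
  def predecessor = n
  def + (that: Nat) = new Succ(n + that)
  def - (that: Nat) = if (that.isZero) this else n - that.predecessor
}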
package utils

import org.junit.Assert._
import org.junit.Test
import org.apache.commons.lang.RandomStringUtils

class ParseUtilsTest {

  @Test
  def testAsLong() {
    val value = ParseUtils.asLong("10").get
    assertEquals(10L, value)
  }

  @Test
  def testAsLongWithNotANumber() {
    val value = ParseUtils.asLong(RandomStringUtils.randomAlphabetic(2)).get
    assertEquals(0L, value)
  }

  @Test
  def testAsInt() = {
    val value = ParseUtils.asInt("10").get
    assertEquals(10, value)
  }

  @Test
  def testAsIntWithNotANumber() = {
    val value = ParseUtils.asInt(RandomStringUtils.randomAlphabetic(2)).get
    assertEquals(0, value)
  }

  @Test
  def testParseGroupFirst() = {
    val matchedValue = RandomStringUtils.randomAlphabetic(5)
    val input = "data1=1, data2=" + matchedValue + ", data3=3,"
    val pattern = "data2=(.*?),"
    val result = ParseUtils.parseGroupFirst(input, pattern)
    assertEquals(matchedValue, result)
  }

  @Test
  def testParseGroupFirstWhenMatchedValueContainsNewLine() = {
    val matchedValue = RandomStringUtils.randomAlphabetic(5) + "\\n\\r" + RandomStringUtils.randomAlphabetic(5)
    val input = "data1=1, data2=" + matchedValue + ", data3=3,"
    val pattern = "data2=(.*?),"
    val result = ParseUtils.parseGroupFirst(input, pattern, true)
    assertEquals(matchedValue, result)
  }

  @Test
  def testParseGroupFirstWhenNoMatch() = {
    val input = RandomStringUtils.randomAscii(100)
    val pattern = "(Í)"
    val result = ParseUtils.parseGroupFirst(input, pattern, true)
    assertEquals("", result)
  }
}
tiborbotos/domino
domino-crawler/src/test/scala/utils/ParseUtilsTest.scala
Scala
lgpl-3.0
1,579
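A minimal ParseUtils that would satisfy the tests above, shown only to make the expected behaviour concrete; the real domino implementation may differ. Note that, per the *WithNotANumber tests, failed parses yield Some(0) rather than None.

// Sketch only: a plausible implementation consistent with the tests above.
import scala.util.Try

object ParseUtils {

  def asLong(s: String): Option[Long] = Some(Try(s.toLong).getOrElse(0L))

  def asInt(s: String): Option[Int] = Some(Try(s.toInt).getOrElse(0))

  // Returns the first capture group of the first match, or "" when nothing matches.
  def parseGroupFirst(input: String, pattern: String, dotAll: Boolean = false): String = {
    val flags = if (dotAll) "(?s)" else ""
    (flags + pattern).r.findFirstMatchIn(input).map(_.group(1)).getOrElse("")
  }
}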
package org.broadinstitute.clio.transfer.model.ubam

import org.broadinstitute.clio.transfer.model.IndexKey
import org.broadinstitute.clio.util.model.Location

case class UbamKey(
  location: Location,
  flowcellBarcode: String,
  lane: Int,
  libraryName: String
) extends IndexKey {
  override def getUrlSegments: Seq[String] =
    Seq(location.entryName, flowcellBarcode, lane.toString, libraryName)
}
broadinstitute/clio
clio-transfer-model/src/main/scala/org/broadinstitute/clio/transfer/model/ubam/UbamKey.scala
Scala
bsd-3-clause
406
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.deploy.k8s.submit.submitsteps import java.io.{File, RandomAccessFile} import com.google.common.base.Charsets import com.google.common.io.{BaseEncoding, Files} import io.fabric8.kubernetes.api.model.{Container, ContainerBuilder, HasMetadata, Pod, PodBuilder, Secret} import org.junit.Test import org.mockito.{Mock, MockitoAnnotations} import org.scalatest.BeforeAndAfter import scala.collection.JavaConverters._ import org.apache.spark.{SparkConf, SparkFunSuite} import org.apache.spark.deploy.k8s.config._ import org.apache.spark.deploy.k8s.constants._ import org.apache.spark.deploy.k8s.submit.MountSmallFilesBootstrap import org.apache.spark.util.Utils private[spark] class MountSmallLocalFilesStepSuite extends SparkFunSuite with BeforeAndAfter { private val FIRST_TEMP_FILE_NAME = "file1.txt" private val SECOND_TEMP_FILE_NAME = "file2.txt" private val FIRST_TEMP_FILE_CONTENTS = "123" private val SECOND_TEMP_FILE_CONTENTS = "456" private val REMOTE_FILE_URI = "hdfs://localhost:9000/file3.txt" private val SECRET_NAME = "secret" private var tempFolder: File = _ private val mountSmallFilesBootstrap = new DummyMountSmallFilesBootstrap before { MockitoAnnotations.initMocks(this) tempFolder = Utils.createTempDir() } after { tempFolder.delete() } test("Local files should be added to the secret.") { val firstTempFile = createTempFileWithContents( tempFolder, FIRST_TEMP_FILE_NAME, FIRST_TEMP_FILE_CONTENTS) val secondTempFile = createTempFileWithContents( tempFolder, SECOND_TEMP_FILE_NAME, SECOND_TEMP_FILE_CONTENTS) val sparkFiles = Seq( s"file://${firstTempFile.getAbsolutePath}", secondTempFile.getAbsolutePath, REMOTE_FILE_URI) val configurationStep = new MountSmallLocalFilesStep( sparkFiles, SECRET_NAME, MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH, mountSmallFilesBootstrap) val baseDriverSpec = new KubernetesDriverSpec( new PodBuilder().build(), new ContainerBuilder().build(), Seq.empty[HasMetadata], new SparkConf(false)) val configuredDriverSpec = configurationStep.configureDriver(baseDriverSpec) assert(configuredDriverSpec.otherKubernetesResources.size === 1) assert(configuredDriverSpec.otherKubernetesResources(0).isInstanceOf[Secret]) val localFilesSecret = configuredDriverSpec.otherKubernetesResources(0).asInstanceOf[Secret] assert(localFilesSecret.getMetadata.getName === SECRET_NAME) val expectedSecretContents = Map( FIRST_TEMP_FILE_NAME -> BaseEncoding.base64().encode( FIRST_TEMP_FILE_CONTENTS.getBytes(Charsets.UTF_8)), SECOND_TEMP_FILE_NAME -> BaseEncoding.base64().encode( SECOND_TEMP_FILE_CONTENTS.getBytes(Charsets.UTF_8))) assert(localFilesSecret.getData.asScala === expectedSecretContents) assert(configuredDriverSpec.driverPod.getMetadata.getLabels.asScala === Map(mountSmallFilesBootstrap.LABEL_KEY -> 
mountSmallFilesBootstrap.LABEL_VALUE)) assert(configuredDriverSpec.driverContainer.getEnv.size() === 1) assert(configuredDriverSpec.driverContainer.getEnv.get(0).getName === mountSmallFilesBootstrap.ENV_KEY) assert(configuredDriverSpec.driverContainer.getEnv.get(0).getValue === mountSmallFilesBootstrap.ENV_VALUE) assert(configuredDriverSpec.driverSparkConf.get( EXECUTOR_SUBMITTED_SMALL_FILES_SECRET) === Some(SECRET_NAME)) assert(configuredDriverSpec.driverSparkConf.get( EXECUTOR_SUBMITTED_SMALL_FILES_SECRET_MOUNT_PATH) === Some(MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH)) } test("Using large files should throw an exception.") { val largeTempFileContents = BaseEncoding.base64().encode(new Array[Byte](10241)) val largeTempFile = createTempFileWithContents(tempFolder, "large.txt", largeTempFileContents) val configurationStep = new MountSmallLocalFilesStep( Seq(largeTempFile.getAbsolutePath), SECRET_NAME, MOUNTED_SMALL_FILES_SECRET_MOUNT_PATH, mountSmallFilesBootstrap) val baseDriverSpec = new KubernetesDriverSpec( new PodBuilder().build(), new ContainerBuilder().build(), Seq.empty[HasMetadata], new SparkConf(false)) try { configurationStep.configureDriver(baseDriverSpec) fail("Using the small local files mounter should not be allowed with big files.") } catch { case e: Throwable => assert(e.getMessage === s"requirement failed: Total size of all files submitted must be less than" + s" ${MountSmallLocalFilesStep.MAX_SECRET_BUNDLE_SIZE_BYTES_STRING} if you do not" + s" use a resource staging server. The total size of all submitted local" + s" files is ${Utils.bytesToString(largeTempFile.length())}. Please install a" + s" resource staging server and configure your application to use it via" + s" ${RESOURCE_STAGING_SERVER_URI.key}" ) } } private def createTempFileWithContents( root: File, fileName: String, fileContents: String): File = { val tempFile = new File(root, fileName) tempFile.createNewFile() Files.write(fileContents, tempFile, Charsets.UTF_8) tempFile } private class DummyMountSmallFilesBootstrap extends MountSmallFilesBootstrap { val LABEL_KEY = "smallFilesLabelKey" val LABEL_VALUE = "smallFilesLabelValue" val ENV_KEY = "smallFilesEnvKey" val ENV_VALUE = "smallFilesEnvValue" override def mountSmallFilesSecret(pod: Pod, container: Container): (Pod, Container) = { val editedPod = new PodBuilder(pod) .editOrNewMetadata() .addToLabels(LABEL_KEY, LABEL_VALUE) .endMetadata() .build() val editedContainer = new ContainerBuilder(container) .addNewEnv() .withName(ENV_KEY) .withValue(ENV_VALUE) .endEnv() .build() (editedPod, editedContainer) } } }
publicRoman/spark
resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/submit/submitsteps/MountSmallLocalFilesStepSuite.scala
Scala
apache-2.0
6,825
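The encoding the suite above asserts on is simply file name mapped to the base64 of the file's bytes. A self-contained sketch of that mapping using only the JDK; the helper and object names are made up and this is not Spark's implementation.

// Sketch only: builds the name -> base64(content) map a small-files Secret would carry.
import java.nio.file.{Files, Path}
import java.util.Base64

object SmallFilesSecretSketch extends App {

  def secretData(files: Seq[Path]): Map[String, String] =
    files.map { p =>
      p.getFileName.toString -> Base64.getEncoder.encodeToString(Files.readAllBytes(p))
    }.toMap

  val tmp = Files.createTempFile("file1", ".txt")
  Files.write(tmp, "123".getBytes("UTF-8"))
  println(secretData(Seq(tmp))) // e.g. Map(file1<random>.txt -> MTIz)
}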
/* Copyright 2012 Georgia Tech Research Institute Author: [email protected] This file is part of org.gtri.util.scala.xmlbuilder library. org.gtri.util.scala.xmlbuilder library is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. org.gtri.util.scala.xmlbuilder library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with org.gtri.util.scala.xmlbuilder library. If not, see <http://www.gnu.org/licenses/>. */ package org.gtri.util.scala import org.gtri.util.xsddatatypes._ import org.gtri.util.xsddatatypes.XsdQName._ import annotation.tailrec package object xmlbuilder { type DiagnosticLocator = Any implicit class implicitXmlElementStackPrefixToNamespaceURIResolver(stack: Seq[PrefixToNamespaceURIResolver]) extends PrefixToNamespaceURIResolver { // Note: rejected this b/c it calls getNamespaceURIForPrefix twice // def getNamespaceURIForPrefix(prefix: XsdNCName) = stack.find(_.getNamespaceURIForPrefix(prefix) != null).map(_.getNamespaceURIForPrefix(prefix)).orNull def getNamespaceURIForPrefix(prefix: XsdNCName) = doGetNamespaceURIForPrefix(stack, prefix) @tailrec private def doGetNamespaceURIForPrefix(stack: Seq[PrefixToNamespaceURIResolver], prefix: XsdNCName) : XsdAnyURI = { stack match { case Nil => null case head :: tail => val result = head.getNamespaceURIForPrefix(prefix) // Note: Have to use if/else to get tailrec to work here if(result != null) { result } else { doGetNamespaceURIForPrefix(tail, prefix) } } } } implicit class implicitXmlElementStackNamespaceURIToPrefixResolver(stack: Seq[NamespaceURIToPrefixResolver]) extends NamespaceURIToPrefixResolver { def isValidPrefixForNamespaceURI(prefix: XsdNCName, namespaceURI: XsdAnyURI) = doIsValidPrefixForNamespaceURI(stack, prefix, namespaceURI) def getPrefixForNamespaceURI(namespaceURI: XsdAnyURI) : XsdNCName = doGetPrefixForNamespaceURI(stack, namespaceURI) @tailrec private def doIsValidPrefixForNamespaceURI(stack : Seq[NamespaceURIToPrefixResolver], prefix: XsdNCName, namespaceURI: XsdAnyURI) : Boolean = { stack match { case Nil => false case head :: tail => // Note: Have to use if/else to get tailrec to work here if(head.isValidPrefixForNamespaceURI(prefix, namespaceURI)) { true } else { doIsValidPrefixForNamespaceURI(tail, prefix, namespaceURI) } } } @tailrec private def doGetPrefixForNamespaceURI(stack : Seq[NamespaceURIToPrefixResolver], namespaceURI: XsdAnyURI) : XsdNCName = { stack match { case Nil => null case head :: tail => val result = head.getPrefixForNamespaceURI(namespaceURI) // Note: Have to use if/else to get tailrec to work here if(result != null) { result } else { doGetPrefixForNamespaceURI(tail, namespaceURI) } } } } }
gtri-iead/org.gtri.util.scala
xmlbuilder/src/main/scala/org/gtri/util/scala/xmlbuilder/package.scala
Scala
gpl-3.0
3,490
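Both implicit classes above are instances of the same "first non-null answer in the stack wins" lookup; they destructure the Seq with ::/Nil, which assumes the stack is backed by a List. The generic sketch below uses headOption/tail so any Seq works, and stays tail-recursive; the names are illustrative only.

// Sketch only: a generic first-defined-answer lookup over a stack of scopes.
import scala.annotation.tailrec

object FirstDefinedSketch extends App {

  @tailrec
  def firstDefined[A, B](stack: Seq[A], lookup: A => Option[B]): Option[B] =
    stack.headOption match {
      case None => None
      case Some(head) =>
        lookup(head) match {
          case found @ Some(_) => found
          case None            => firstDefined(stack.tail, lookup)
        }
    }

  val scopes = Seq(Map("a" -> 1), Map("b" -> 2), Map("a" -> 3))
  println(firstDefined(scopes, (m: Map[String, Int]) => m.get("b"))) // Some(2)
  println(firstDefined(scopes, (m: Map[String, Int]) => m.get("c"))) // None
}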
package io.iohk.ethereum.blockchain.sync.regular import java.net.InetSocketAddress import akka.actor.{ActorRef, ActorSystem, PoisonPill} import akka.pattern.ask import akka.testkit.TestActor.AutoPilot import akka.testkit.{TestKitBase, TestProbe} import akka.util.{ByteString, Timeout} import cats.Eq import cats.implicits._ import io.iohk.ethereum.BlockHelpers import io.iohk.ethereum.blockchain.sync.PeerListSupport.PeersMap import io.iohk.ethereum.blockchain.sync._ import io.iohk.ethereum.consensus.blocks.CheckpointBlockGenerator import io.iohk.ethereum.domain.BlockHeaderImplicits._ import io.iohk.ethereum.domain._ import io.iohk.ethereum.security.SecureRandomBuilder import io.iohk.ethereum.ledger._ import io.iohk.ethereum.network.EtcPeerManagerActor.{PeerInfo, RemoteStatus} import io.iohk.ethereum.network.PeerEventBusActor.PeerEvent.MessageFromPeer import io.iohk.ethereum.network.PeerEventBusActor.Subscribe import io.iohk.ethereum.network.p2p.Message import io.iohk.ethereum.network.p2p.messages.PV62._ import io.iohk.ethereum.network.p2p.messages.PV63.{GetNodeData, NodeData} import io.iohk.ethereum.network.p2p.messages.PV64.NewBlock import io.iohk.ethereum.network.p2p.messages.ProtocolVersions import io.iohk.ethereum.network.{Peer, PeerId} import io.iohk.ethereum.utils.Config.SyncConfig import monix.eval.Task import monix.reactive.Observable import monix.reactive.subjects.ReplaySubject import org.scalamock.scalatest.AsyncMockFactory import org.scalatest.matchers.should.Matchers import scala.collection.mutable import scala.concurrent.duration.{DurationInt, FiniteDuration} import scala.concurrent.{ExecutionContext, Future} import scala.math.BigInt import scala.reflect.ClassTag // Fixture classes are wrapped in a trait due to problems with making mocks available inside of them trait RegularSyncFixtures { self: Matchers with AsyncMockFactory => class RegularSyncFixture(_system: ActorSystem) extends TestKitBase with EphemBlockchainTestSetup with TestSyncConfig with SecureRandomBuilder { implicit lazy val timeout: Timeout = remainingOrDefault implicit override lazy val system: ActorSystem = _system override lazy val syncConfig: SyncConfig = defaultSyncConfig.copy(blockHeadersPerRequest = 2, blockBodiesPerRequest = 2) val handshakedPeers: PeersMap = (0 to 5).toList.map((peerId _).andThen(getPeer)).fproduct(getPeerInfo(_)).toMap val defaultPeer: Peer = peerByNumber(0) val etcPeerManager: TestProbe = TestProbe() val peerEventBus: TestProbe = TestProbe() val ommersPool: TestProbe = TestProbe() val pendingTransactionsManager: TestProbe = TestProbe() val checkpointBlockGenerator: CheckpointBlockGenerator = new CheckpointBlockGenerator() val peersClient: TestProbe = TestProbe() lazy val regularSync: ActorRef = system.actorOf( RegularSync .props( peersClient.ref, etcPeerManager.ref, peerEventBus.ref, ledger, blockchain, validators.blockValidator, syncConfig, ommersPool.ref, pendingTransactionsManager.ref, checkpointBlockGenerator, system.scheduler ) .withDispatcher("akka.actor.default-dispatcher") ) val defaultTd = 12345 val testBlocks: List[Block] = BlockHelpers.generateChain(20, BlockHelpers.genesis) val testBlocksChunked: List[List[Block]] = testBlocks.grouped(syncConfig.blockHeadersPerRequest).toList override lazy val ledger = new TestLedgerImpl blockchain.save( block = BlockHelpers.genesis, receipts = Nil, weight = ChainWeight.totalDifficultyOnly(10000), saveAsBestBlock = true ) // scalastyle:on magic.number def done(): Unit = regularSync ! 
PoisonPill def peerId(number: Int): PeerId = PeerId(s"peer_$number") def getPeer(id: PeerId): Peer = Peer(new InetSocketAddress("127.0.0.1", 0), TestProbe(id.value).ref, incomingConnection = false) def getPeerInfo(peer: Peer, protocolVersion: Int = ProtocolVersions.PV64): PeerInfo = { val status = RemoteStatus( protocolVersion, 1, ChainWeight.totalDifficultyOnly(1), ByteString(s"${peer.id}_bestHash"), ByteString("unused") ) PeerInfo( status, forkAccepted = true, chainWeight = status.chainWeight, maxBlockNumber = 0, bestBlockHash = status.bestHash ) } def peerByNumber(number: Int): Peer = handshakedPeers.keys.toList.sortBy(_.id.value).apply(number) def blockHeadersChunkRequest(fromChunk: Int): PeersClient.Request[GetBlockHeaders] = { val block = testBlocksChunked(fromChunk).headNumberUnsafe blockHeadersRequest(block) } def blockHeadersRequest(fromBlock: BigInt): PeersClient.Request[GetBlockHeaders] = PeersClient.Request.create( GetBlockHeaders( Left(fromBlock), syncConfig.blockHeadersPerRequest, skip = 0, reverse = false ), PeersClient.BestPeer ) def fishForBlacklistPeer(peer: Peer): PeersClient.BlacklistPeer = peersClient.fishForSpecificMessage() { case msg @ PeersClient.BlacklistPeer(id, _) if id == peer.id => msg } val getSyncStatus: Task[SyncProtocol.Status] = Task.deferFuture((regularSync ? SyncProtocol.GetStatus).mapTo[SyncProtocol.Status]) def pollForStatus(predicate: SyncProtocol.Status => Boolean) = Observable .repeatEvalF(getSyncStatus.delayExecution(10.millis)) .takeWhileInclusive(predicate andThen (!_)) .lastL .timeout(remainingOrDefault) def fishForStatus[B](picker: PartialFunction[SyncProtocol.Status, B]) = Observable .repeatEvalF(getSyncStatus.delayExecution(10.millis)) .collect(picker) .firstL .timeout(remainingOrDefault) class TestLedgerImpl extends LedgerImpl(blockchain, blockchainConfig, syncConfig, consensus, system.dispatcher) { protected val results = mutable.Map[ByteString, () => Future[BlockImportResult]]() protected val importedBlocksSet = mutable.Set[Block]() private val importedBlocksSubject = ReplaySubject[Block]() val importedBlocks: Observable[Block] = importedBlocksSubject override def importBlock( block: Block )(implicit blockExecutionContext: ExecutionContext): Future[BlockImportResult] = { importedBlocksSet.add(block) results(block.hash)().flatTap(_ => importedBlocksSubject.onNext(block)) } override def getBlockByHash(hash: ByteString): Option[Block] = importedBlocksSet.find(_.hash == hash) def setImportResult(block: Block, result: () => Future[BlockImportResult]): Unit = results(block.header.hash) = result def didTryToImportBlock(predicate: Block => Boolean): Boolean = importedBlocksSet.exists(predicate) def didTryToImportBlock(block: Block): Boolean = didTryToImportBlock(_.hash == block.hash) def bestBlock: Block = importedBlocksSet.maxBy(_.number) } class PeersClientAutoPilot(blocks: List[Block] = testBlocks) extends AutoPilot { def run(sender: ActorRef, msg: Any): AutoPilot = overrides(sender).orElse(defaultHandlers(sender)).apply(msg).getOrElse(defaultAutoPilot) def overrides(sender: ActorRef): PartialFunction[Any, Option[AutoPilot]] = PartialFunction.empty def defaultHandlers(sender: ActorRef): PartialFunction[Any, Option[AutoPilot]] = { case PeersClient.Request(GetBlockHeaders(Left(minBlock), amount, _, _), _, _) => val maxBlock = minBlock + amount val matchingHeaders = blocks .filter(b => { val nr = b.number minBlock <= nr && nr < maxBlock }) .map(_.header) .sortBy(_.number) sender ! 
PeersClient.Response(defaultPeer, BlockHeaders(matchingHeaders)) None case PeersClient.Request(GetBlockBodies(hashes), _, _) => val matchingBodies = hashes.flatMap(hash => blocks.find(_.hash == hash)).map(_.body) sender ! PeersClient.Response(defaultPeer, BlockBodies(matchingBodies)) None case PeersClient.Request(GetNodeData(hash :: Nil), _, _) => sender ! PeersClient.Response( defaultPeer, NodeData(List(ByteString(blocks.byHashUnsafe(hash).header.toBytes: Array[Byte]))) ) None case _ => None } def defaultAutoPilot: AutoPilot = this } implicit class ListOps[T](list: List[T]) { def get(index: Int): Option[T] = if (list.isDefinedAt(index)) { Some(list(index)) } else { None } } // TODO: consider extracting it somewhere closer to domain implicit class BlocksListOps(blocks: List[Block]) { def headNumberUnsafe: BigInt = blocks.head.number def headNumber: Option[BigInt] = blocks.headOption.map(_.number) def headers: List[BlockHeader] = blocks.map(_.header) def hashes: List[ByteString] = headers.map(_.hash) def bodies: List[BlockBody] = blocks.map(_.body) def numbers: List[BigInt] = blocks.map(_.number) def numberAt(index: Int): Option[BigInt] = blocks.get(index).map(_.number) def numberAtUnsafe(index: Int): BigInt = numberAt(index).get def byHash(hash: ByteString): Option[Block] = blocks.find(_.hash == hash) def byHashUnsafe(hash: ByteString): Block = byHash(hash).get } // TODO: consider extracting it into common test environment implicit class TestProbeOps(probe: TestProbe) { def expectMsgEq[T: Eq](msg: T): T = expectMsgEq(remainingOrDefault, msg) def expectMsgEq[T: Eq](max: FiniteDuration, msg: T): T = { val received = probe.expectMsgClass(max, msg.getClass) assert(Eq[T].eqv(received, msg), s"Expected ${msg}, got ${received}") received } def fishForSpecificMessageMatching[T]( max: FiniteDuration = probe.remainingOrDefault )(predicate: Any => Boolean): T = probe.fishForSpecificMessage(max) { case msg if predicate(msg) => msg.asInstanceOf[T] } def fishForMsgEq[T: Eq: ClassTag](msg: T, max: FiniteDuration = probe.remainingOrDefault): T = probe.fishForSpecificMessageMatching[T](max)(x => implicitly[ClassTag[T]].runtimeClass.isInstance(x) && Eq[T].eqv(msg, x.asInstanceOf[T]) ) def expectMsgAllOfEq[T1: Eq, T2: Eq](msg1: T1, msg2: T2): (T1, T2) = expectMsgAllOfEq(remainingOrDefault, msg1, msg2) def expectMsgAllOfEq[T1: Eq, T2: Eq](max: FiniteDuration, msg1: T1, msg2: T2): (T1, T2) = { val received = probe.receiveN(2, max) ( received.find(m => Eq[T1].eqv(msg1, m.asInstanceOf[T1])).get.asInstanceOf[T1], received.find(m => Eq[T2].eqv(msg2, m.asInstanceOf[T2])).get.asInstanceOf[T2] ) } } implicit def eqInstanceForPeersClientRequest[T <: Message]: Eq[PeersClient.Request[T]] = (x, y) => x.message == y.message && x.peerSelector == y.peerSelector } class OnTopFixture(system: ActorSystem) extends RegularSyncFixture(system) { val newBlock: Block = BlockHelpers.generateBlock(testBlocks.last) override lazy val ledger: TestLedgerImpl = stub[TestLedgerImpl] var blockFetcher: ActorRef = _ var importedNewBlock = false var importedLastTestBlock = false (ledger.resolveBranch _).when(*).returns(NewBetterBranch(Nil)) (ledger .importBlock(_: Block)(_: ExecutionContext)) .when(*, *) .onCall((block, _) => { if (block == newBlock) { importedNewBlock = true Future.successful( BlockImportedToTop(List(BlockData(newBlock, Nil, ChainWeight(0, newBlock.number)))) ) } else { if (block == testBlocks.last) { importedLastTestBlock = true } Future.successful(BlockImportedToTop(Nil)) } }) peersClient.setAutoPilot(new 
PeersClientAutoPilot) def waitForSubscription(): Unit = { peerEventBus.expectMsgClass(classOf[Subscribe]) blockFetcher = peerEventBus.sender() } def sendLastTestBlockAsTop(): Unit = sendNewBlock(testBlocks.last) def sendNewBlock(block: Block = newBlock, peer: Peer = defaultPeer): Unit = blockFetcher ! MessageFromPeer(NewBlock(block, ChainWeight.totalDifficultyOnly(block.number)), peer.id) def goToTop(): Unit = { regularSync ! SyncProtocol.Start waitForSubscription() sendLastTestBlockAsTop() awaitCond(importedLastTestBlock) } } }
input-output-hk/etc-client
src/test/scala/io/iohk/ethereum/blockchain/sync/regular/RegularSyncFixtures.scala
Scala
mit
12,710
/* * Copyright (c) 2012 Orderly Ltd. All rights reserved. * * This program is licensed to you under the Apache License Version 2.0, * and you may not use this file except in compliance with the Apache License Version 2.0. * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, * software distributed under the Apache License Version 2.0 is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. */ package co.orderly.narcolepsy package marshallers /** * Parent marshalling trait, extended by any marshaller */ sealed trait Marshaller /** * A MultiMarshaller can choose between different marshallers based on * the supplied content type. */ trait MultiMarshaller extends Marshaller { /** * Abstract method to marshal a given representation into a string, * based on the supplied content type. */ def fromRepresentation[R <: Representation](contentType: String, representation: R): String } /** * A ContentTypeMarshaller should be extended by any single-format marshaller * which can only handle one incoming content type. * * ContentTypeMarshaller extends MultiMarshaller so it can be used in places * where a MultiMarshaller is expected (e.g. assigning a marshaller to a * Narcolepsy Client). */ trait ContentTypeMarshaller extends MultiMarshaller { /** * Take the contentType argument and discard it */ def fromRepresentation[R <: Representation](contentType: String, representation: R): String = fromRepresentation(representation) /** * Abstract method to marshal a given representation into a string */ def fromRepresentation[R <: Representation](representation: R): String }
orderly/narcolepsy-scala
src/main/scala/co/orderly/narcolepsy/marshallers/Marshaller.scala
Scala
apache-2.0
1,924
package aiouniya.spark import com.redislabs.provider.redis.{RedisConfig, RedisEndpoint} import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileSystem, Path} import org.apache.hadoop.hbase.client.Scan import org.apache.hadoop.hbase.protobuf.ProtobufUtil import org.apache.hadoop.hbase.util.Base64 import org.apache.hadoop.io.compress.{CompressionCodec, GzipCodec} import org.apache.spark.SparkContext import org.apache.spark.rdd.RDD import scala.collection.mutable.ListBuffer import scala.reflect.ClassTag /** * Created by qiyu.wang on 2017/10/18. */ object MyRDDFunctions { def convertScanToString(scan: Scan) = { val proto = ProtobufUtil.toScan(scan) Base64.encodeBytes(proto.toByteArray()) } def getGrade(feature: (Int, Int, Int, ListBuffer[Int], ListBuffer[String], Int, ListBuffer[String], ListBuffer[String], String)) = { var grade = 0 grade += (feature._3 + feature._6) if (feature._2 * 100 < feature._1) grade -= 1 if (feature._4.contains(1)) grade += 1 else if (feature._5.size >= 3) grade -= 1 ((feature._9, grade), (feature._7.size, feature._8.size)) } def saveAsTextFileWithCheck[T](toSave: RDD[T], path: String, codec: Class[_ <: CompressionCodec]) { val hdconf = new Configuration() val hdfs = FileSystem.get(hdconf) try { hdfs.delete(new Path(path), true) } catch { case _: Throwable => {} } toSave.saveAsTextFile(path, codec) } implicit class MRDDFunctions[T](val self: RDD[T]) extends AnyVal { def saveAsTextFileWithCheck(path: String, codec: Class[_ <: CompressionCodec]) = MyRDDFunctions.saveAsTextFileWithCheck(self, path, codec) } implicit class UserScoreFunctions[T](val self: RDD[T]) extends AnyVal { @deprecated def calcScore(): RDD[(String, ((String, Int), (Int, Int)))] = { val features = UserScore.getFeaturesFromLog(self.asInstanceOf[RDD[(String, String)]]) val retval = features.mapValues(getGrade) retval } def calcScoreWithFeatures(): RDD[(String, ((String, Int), (Int, Int)))] = { val features = self.asInstanceOf[RDD[String]].map(UserScore.getFeaturesFromHistory) val retval = features.mapValues(getGrade) retval } } implicit class RedisFunctions[T](val self: RDD[(String, T)]) extends AnyVal { def toShardRedis(partNum: Int)(implicit ev: ClassTag[T]) = MyRedisFunctions.toShardRedis(self, partNum) } implicit class SparkContextFunctions[T](val self: SparkContext) extends AnyVal { def delPath(path: String) { val hdconf = self.hadoopConfiguration val hdfs = FileSystem.get(hdconf) try { hdfs.delete(new Path(path), true) } catch { case _: Throwable => {} } } def toRedisKV[T: ClassTag](src: RDD[(String, T)], delimIdx: Int, partNum: Int) (implicit redisConfig: RedisConfig = new RedisConfig(new RedisEndpoint(self.getConf))) = new MyRedisContext(self, redisConfig, partNum).setKV(src, delimIdx) def toRedisHash(src: RDD[(String, (String, String))]) (implicit redisConfig: RedisConfig = new RedisConfig(new RedisEndpoint(self.getConf))) = new MyRedisContext(self, redisConfig).setHash(src) def fromRedisHash[T: ClassTag](src: RDD[(String, T)], delimIdx: Int, partNum: Int) (implicit redisConfig: RedisConfig = new RedisConfig(new RedisEndpoint(self.getConf))) = new MyRedisContext(self, redisConfig, partNum).getHash(src, delimIdx) def calcScore[T: ClassTag](src: RDD[(String, T)], outputDir: String): RDD[(String, (String, Int))] = { val features = src.asInstanceOf[RDD[ (String, (Int, Int, Int, ListBuffer[Int], ListBuffer[String], Int, ListBuffer[String], ListBuffer[String], String)) ]] val fs = FileSystem.get(self.hadoopConfiguration) if (outputDir.endsWith(java.io.File.pathSeparator)) 
outputDir.substring(0, outputDir.lastIndexOf(java.io.File.pathSeparator)) val outputPath = new Path(outputDir) val newFeatures = if (fs.exists(outputPath) && fs.getContentSummary(outputPath).getLength > 0) { val tempDir = outputDir + "_temp" val tempPath = new Path(tempDir) fs.delete(tempPath, true) fs.rename(outputPath, tempPath) val featureHistory = self.textFile(tempDir) .map(UserScore.getFeaturesFromHistory) .reduceByKey((x, _) => x) // featureHistory.union(features).reduceByKey((v1, v2) => // UserScore.mergeFeatures(v1, v2)) val unioned = features.cogroup(featureHistory) unioned.mapValues { case (newVal, oldVal) => if (newVal.isEmpty) oldVal.toIterator.next else if (oldVal.isEmpty) newVal.toIterator.next else UserScore.mergeFeatures(newVal.toIterator.next, oldVal.toIterator.next) } } else features newFeatures.map(tup => tup._1 + "\t" + tup._2.productIterator.map(v => v match { case v: ListBuffer[Any] => v.mkString(",") case v => v.toString } ).mkString("\t")).coalesce(400).saveAsTextFile(outputDir, classOf[GzipCodec]) val retval = features.mapValues(t => getGrade(t)._1) retval } def forPath(path: String*): Loaders.Context = Loaders.forPath(self, path.toList) } }
7u/spark-learning
spark.learning/src/main/scala/aiouniya/spark/MyRDDFunctions.scala
Scala
apache-2.0
5,531
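The file above leans on the implicit-class "extension method" pattern (MRDDFunctions, RedisFunctions, SparkContextFunctions) to bolt helpers onto RDD and SparkContext. The same pattern on a plain collection, so it runs without Spark or Redis; names are illustrative only.

// Sketch only: an AnyVal implicit class adding an extension method to Seq[Double].
object ImplicitOpsSketch {

  implicit class SeqStatsOps(val self: Seq[Double]) extends AnyVal {
    def meanOption: Option[Double] =
      if (self.isEmpty) None else Some(self.sum / self.size)
  }

  def main(args: Array[String]): Unit = {
    println(Seq(1.0, 2.0, 3.0).meanOption) // Some(2.0)
    println(Seq.empty[Double].meanOption)  // None
  }
}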
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.test import java.io.File import java.util.Locale import java.util.concurrent.ConcurrentLinkedQueue import scala.collection.JavaConverters._ import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.Path import org.apache.parquet.hadoop.ParquetFileReader import org.apache.parquet.hadoop.util.HadoopInputFile import org.apache.parquet.schema.PrimitiveType import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName import org.apache.parquet.schema.Type.Repetition import org.scalatest.BeforeAndAfter import org.apache.spark.{SparkConf, SparkContext} import org.apache.spark.internal.io.FileCommitProtocol.TaskCommitMessage import org.apache.spark.internal.io.HadoopMapReduceCommitProtocol import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart} import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.plans.logical.{AppendData, LogicalPlan, OverwriteByExpression} import org.apache.spark.sql.execution.QueryExecution import org.apache.spark.sql.execution.datasources.DataSourceUtils import org.apache.spark.sql.execution.datasources.noop.NoopDataSource import org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.sources._ import org.apache.spark.sql.types._ import org.apache.spark.sql.util.QueryExecutionListener import org.apache.spark.util.Utils object LastOptions { var parameters: Map[String, String] = null var schema: Option[StructType] = null var saveMode: SaveMode = null def clear(): Unit = { parameters = null schema = null saveMode = null } } /** Dummy provider. */ class DefaultSource extends RelationProvider with SchemaRelationProvider with CreatableRelationProvider { case class FakeRelation(sqlContext: SQLContext) extends BaseRelation { override def schema: StructType = StructType(Seq(StructField("a", StringType))) } override def createRelation( sqlContext: SQLContext, parameters: Map[String, String], schema: StructType ): BaseRelation = { LastOptions.parameters = parameters LastOptions.schema = Some(schema) FakeRelation(sqlContext) } override def createRelation( sqlContext: SQLContext, parameters: Map[String, String] ): BaseRelation = { LastOptions.parameters = parameters LastOptions.schema = None FakeRelation(sqlContext) } override def createRelation( sqlContext: SQLContext, mode: SaveMode, parameters: Map[String, String], data: DataFrame): BaseRelation = { LastOptions.parameters = parameters LastOptions.schema = None LastOptions.saveMode = mode FakeRelation(sqlContext) } } /** Dummy provider with only RelationProvider and CreatableRelationProvider. 
*/ class DefaultSourceWithoutUserSpecifiedSchema extends RelationProvider with CreatableRelationProvider { case class FakeRelation(sqlContext: SQLContext) extends BaseRelation { override def schema: StructType = StructType(Seq(StructField("a", StringType))) } override def createRelation( sqlContext: SQLContext, parameters: Map[String, String]): BaseRelation = { FakeRelation(sqlContext) } override def createRelation( sqlContext: SQLContext, mode: SaveMode, parameters: Map[String, String], data: DataFrame): BaseRelation = { FakeRelation(sqlContext) } } object MessageCapturingCommitProtocol { val commitMessages = new ConcurrentLinkedQueue[TaskCommitMessage]() } class MessageCapturingCommitProtocol(jobId: String, path: String) extends HadoopMapReduceCommitProtocol(jobId, path) { // captures commit messages for testing override def onTaskCommit(msg: TaskCommitMessage): Unit = { MessageCapturingCommitProtocol.commitMessages.offer(msg) } } class DataFrameReaderWriterSuite extends QueryTest with SharedSparkSession with BeforeAndAfter { import testImplicits._ override def sparkConf: SparkConf = super.sparkConf .setAppName("test") .set("spark.sql.parquet.columnarReaderBatchSize", "4096") .set("spark.sql.sources.useV1SourceList", "avro") .set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin") .set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096") //.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager") .set("spark.memory.offHeap.enabled", "true") .set("spark.memory.offHeap.size", "50m") .set("spark.sql.join.preferSortMergeJoin", "false") .set("spark.sql.columnar.codegen.hashAggregate", "false") .set("spark.oap.sql.columnar.wholestagecodegen", "false") .set("spark.sql.columnar.window", "false") .set("spark.unsafe.exceptionOnMemoryLeak", "false") //.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/") .set("spark.sql.columnar.sort.broadcastJoin", "true") .set("spark.oap.sql.columnar.preferColumnar", "true") private val userSchema = new StructType().add("s", StringType) private val userSchemaString = "s STRING" private val textSchema = new StructType().add("value", StringType) private val data = Seq("1", "2", "3") private val dir = Utils.createTempDir(namePrefix = "input").getCanonicalPath before { Utils.deleteRecursively(new File(dir)) } test("writeStream cannot be called on non-streaming datasets") { val e = intercept[AnalysisException] { spark.read .format("org.apache.spark.sql.test") .load() .writeStream .start() } Seq("'writeStream'", "only", "streaming Dataset/DataFrame").foreach { s => assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT))) } } test("resolve default source") { spark.read .format("org.apache.spark.sql.test") .load() .write .format("org.apache.spark.sql.test") .save() } test("resolve default source without extending SchemaRelationProvider") { spark.read .format("org.apache.spark.sql.test.DefaultSourceWithoutUserSpecifiedSchema") .load() .write .format("org.apache.spark.sql.test.DefaultSourceWithoutUserSpecifiedSchema") .save() } test("resolve full class") { spark.read .format("org.apache.spark.sql.test.DefaultSource") .load() .write .format("org.apache.spark.sql.test") .save() } test("options") { val map = new java.util.HashMap[String, String] map.put("opt3", "3") val df = spark.read .format("org.apache.spark.sql.test") .option("opt1", "1") .options(Map("opt2" -> "2")) .options(map) .load() assert(LastOptions.parameters("opt1") == "1") assert(LastOptions.parameters("opt2") == "2") 
assert(LastOptions.parameters("opt3") == "3") LastOptions.clear() df.write .format("org.apache.spark.sql.test") .option("opt1", "1") .options(Map("opt2" -> "2")) .options(map) .save() assert(LastOptions.parameters("opt1") == "1") assert(LastOptions.parameters("opt2") == "2") assert(LastOptions.parameters("opt3") == "3") } test("pass partitionBy as options") { Seq(1).toDF.write .format("org.apache.spark.sql.test") .partitionBy("col1", "col2") .save() val partColumns = LastOptions.parameters(DataSourceUtils.PARTITIONING_COLUMNS_KEY) assert(DataSourceUtils.decodePartitioningColumns(partColumns) === Seq("col1", "col2")) } test ("SPARK-29537: throw exception when user defined a wrong base path") { withTempPath { p => val path = new Path(p.toURI).toString Seq((1, 1), (2, 2)).toDF("c1", "c2") .write.partitionBy("c1").mode(SaveMode.Overwrite).parquet(path) val wrongBasePath = new File(p, "unknown") // basePath must be a directory wrongBasePath.mkdir() val msg = intercept[IllegalArgumentException] { spark.read.option("basePath", wrongBasePath.getCanonicalPath).parquet(path) }.getMessage assert(msg === s"Wrong basePath ${wrongBasePath.getCanonicalPath} for the root path: $path") } } test("save mode") { spark.range(10).write .format("org.apache.spark.sql.test") .mode(SaveMode.ErrorIfExists) .save() assert(LastOptions.saveMode === SaveMode.ErrorIfExists) spark.range(10).write .format("org.apache.spark.sql.test") .mode(SaveMode.Append) .save() assert(LastOptions.saveMode === SaveMode.Append) // By default the save mode is `ErrorIfExists` for data source v1. spark.range(10).write .format("org.apache.spark.sql.test") .save() assert(LastOptions.saveMode === SaveMode.ErrorIfExists) spark.range(10).write .format("org.apache.spark.sql.test") .mode("default") .save() assert(LastOptions.saveMode === SaveMode.ErrorIfExists) } test("save mode for data source v2") { var plan: LogicalPlan = null val listener = new QueryExecutionListener { override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = { plan = qe.analyzed } override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = {} } spark.listenerManager.register(listener) try { // append mode creates `AppendData` spark.range(10).write .format(classOf[NoopDataSource].getName) .mode(SaveMode.Append) .save() sparkContext.listenerBus.waitUntilEmpty() assert(plan.isInstanceOf[AppendData]) // overwrite mode creates `OverwriteByExpression` spark.range(10).write .format(classOf[NoopDataSource].getName) .mode(SaveMode.Overwrite) .save() sparkContext.listenerBus.waitUntilEmpty() assert(plan.isInstanceOf[OverwriteByExpression]) // By default the save mode is `ErrorIfExists` for data source v2. val e = intercept[AnalysisException] { spark.range(10).write .format(classOf[NoopDataSource].getName) .save() } assert(e.getMessage.contains("ErrorIfExists")) val e2 = intercept[AnalysisException] { spark.range(10).write .format(classOf[NoopDataSource].getName) .mode("default") .save() } assert(e2.getMessage.contains("ErrorIfExists")) } finally { spark.listenerManager.unregister(listener) } } test("Throw exception on unsafe table insertion with strict casting policy") { withSQLConf( SQLConf.USE_V1_SOURCE_LIST.key -> "parquet", SQLConf.STORE_ASSIGNMENT_POLICY.key -> SQLConf.StoreAssignmentPolicy.STRICT.toString) { withTable("t") { sql("create table t(i int, d double) using parquet") // Calling `saveAsTable` to an existing table with append mode results in table insertion. 
var msg = intercept[AnalysisException] { Seq((1L, 2.0)).toDF("i", "d").write.mode("append").saveAsTable("t") }.getMessage assert(msg.contains("Cannot safely cast 'i': bigint to int")) // Insert into table successfully. Seq((1, 2.0)).toDF("i", "d").write.mode("append").saveAsTable("t") // The API `saveAsTable` matches the fields by name. Seq((4.0, 3)).toDF("d", "i").write.mode("append").saveAsTable("t") checkAnswer(sql("select * from t"), Seq(Row(1, 2.0), Row(3, 4.0))) } } } test("Throw exception on unsafe cast with ANSI casting policy") { withSQLConf( SQLConf.USE_V1_SOURCE_LIST.key -> "parquet", SQLConf.STORE_ASSIGNMENT_POLICY.key -> SQLConf.StoreAssignmentPolicy.ANSI.toString) { withTable("t") { sql("create table t(i int, d double) using parquet") // Calling `saveAsTable` to an existing table with append mode results in table insertion. var msg = intercept[AnalysisException] { Seq(("a", "b")).toDF("i", "d").write.mode("append").saveAsTable("t") }.getMessage assert(msg.contains("Cannot safely cast 'i': string to int") && msg.contains("Cannot safely cast 'd': string to double")) msg = intercept[AnalysisException] { Seq((true, false)).toDF("i", "d").write.mode("append").saveAsTable("t") }.getMessage assert(msg.contains("Cannot safely cast 'i': boolean to int") && msg.contains("Cannot safely cast 'd': boolean to double")) } } } test("test path option in load") { spark.read .format("org.apache.spark.sql.test") .option("intOpt", 56) .load("/test") assert(LastOptions.parameters("intOpt") == "56") assert(LastOptions.parameters("path") == "/test") LastOptions.clear() spark.read .format("org.apache.spark.sql.test") .option("intOpt", 55) .load() assert(LastOptions.parameters("intOpt") == "55") assert(!LastOptions.parameters.contains("path")) LastOptions.clear() spark.read .format("org.apache.spark.sql.test") .option("intOpt", 54) .load("/test", "/test1", "/test2") assert(LastOptions.parameters("intOpt") == "54") assert(!LastOptions.parameters.contains("path")) } test("test different data types for options") { val df = spark.read .format("org.apache.spark.sql.test") .option("intOpt", 56) .option("boolOpt", false) .option("doubleOpt", 6.7) .load("/test") assert(LastOptions.parameters("intOpt") == "56") assert(LastOptions.parameters("boolOpt") == "false") assert(LastOptions.parameters("doubleOpt") == "6.7") LastOptions.clear() df.write .format("org.apache.spark.sql.test") .option("intOpt", 56) .option("boolOpt", false) .option("doubleOpt", 6.7) .save("/test") assert(LastOptions.parameters("intOpt") == "56") assert(LastOptions.parameters("boolOpt") == "false") assert(LastOptions.parameters("doubleOpt") == "6.7") } test("check jdbc() does not support partitioning, bucketBy or sortBy") { val df = spark.read.text(Utils.createTempDir(namePrefix = "text").getCanonicalPath) var w = df.write.partitionBy("value") var e = intercept[AnalysisException](w.jdbc(null, null, null)) Seq("jdbc", "partitioning").foreach { s => assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT))) } w = df.write.bucketBy(2, "value") e = intercept[AnalysisException](w.jdbc(null, null, null)) Seq("jdbc", "does not support bucketBy right now").foreach { s => assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT))) } w = df.write.sortBy("value") e = intercept[AnalysisException](w.jdbc(null, null, null)) Seq("sortBy must be used together with bucketBy").foreach { s => assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT))) } w = df.write.bucketBy(2, 
"value").sortBy("value") e = intercept[AnalysisException](w.jdbc(null, null, null)) Seq("jdbc", "does not support bucketBy and sortBy right now").foreach { s => assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT))) } } test("prevent all column partitioning") { withTempDir { dir => val path = dir.getCanonicalPath intercept[AnalysisException] { spark.range(10).write.format("parquet").mode("overwrite").partitionBy("id").save(path) } intercept[AnalysisException] { spark.range(10).write.format("csv").mode("overwrite").partitionBy("id").save(path) } } } test("load API") { spark.read.format("org.apache.spark.sql.test").load() spark.read.format("org.apache.spark.sql.test").load(dir) spark.read.format("org.apache.spark.sql.test").load(dir, dir, dir) spark.read.format("org.apache.spark.sql.test").load(Seq(dir, dir): _*) Option(dir).map(spark.read.format("org.apache.spark.sql.test").load) } test("write path implements onTaskCommit API correctly") { withSQLConf( SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key -> classOf[MessageCapturingCommitProtocol].getCanonicalName) { withTempDir { dir => val path = dir.getCanonicalPath MessageCapturingCommitProtocol.commitMessages.clear() spark.range(10).repartition(10).write.mode("overwrite").parquet(path) assert(MessageCapturingCommitProtocol.commitMessages.size() == 10) } } } test("read a data source that does not extend SchemaRelationProvider") { val dfReader = spark.read .option("from", "1") .option("TO", "10") .format("org.apache.spark.sql.sources.SimpleScanSource") // when users do not specify the schema checkAnswer(dfReader.load(), spark.range(1, 11).toDF()) // when users specify a wrong schema val inputSchema = new StructType().add("s", IntegerType, nullable = false) val e = intercept[AnalysisException] { dfReader.schema(inputSchema).load() } assert(e.getMessage.contains("The user-specified schema doesn't match the actual schema")) } test("read a data source that does not extend RelationProvider") { val dfReader = spark.read .option("from", "1") .option("TO", "10") .option("option_with_underscores", "someval") .option("option.with.dots", "someval") .format("org.apache.spark.sql.sources.AllDataTypesScanSource") // when users do not specify the schema val e = intercept[AnalysisException] { dfReader.load() } assert(e.getMessage.contains("A schema needs to be specified when using")) // when users specify the schema val inputSchema = new StructType().add("s", StringType, nullable = false) assert(dfReader.schema(inputSchema).load().count() == 10) } ignore("text - API and behavior regarding schema") { // Writer spark.createDataset(data).write.mode(SaveMode.Overwrite).text(dir) testRead(spark.read.text(dir), data, textSchema) // Reader, without user specified schema testRead(spark.read.text(), Seq.empty, textSchema) testRead(spark.read.text(dir, dir, dir), data ++ data ++ data, textSchema) testRead(spark.read.text(Seq(dir, dir): _*), data ++ data, textSchema) // Test explicit calls to single arg method - SPARK-16009 testRead(Option(dir).map(spark.read.text).get, data, textSchema) // Reader, with user specified schema, should just apply user schema on the file data testRead(spark.read.schema(userSchema).text(), Seq.empty, userSchema) testRead(spark.read.schema(userSchema).text(dir), data, userSchema) testRead(spark.read.schema(userSchema).text(dir, dir), data ++ data, userSchema) testRead(spark.read.schema(userSchema).text(Seq(dir, dir): _*), data ++ data, userSchema) } ignore("textFile - API and behavior regarding schema") { 
spark.createDataset(data).write.mode(SaveMode.Overwrite).text(dir) // Reader, without user specified schema testRead(spark.read.textFile().toDF(), Seq.empty, textSchema) testRead(spark.read.textFile(dir).toDF(), data, textSchema) testRead(spark.read.textFile(dir, dir).toDF(), data ++ data, textSchema) testRead(spark.read.textFile(Seq(dir, dir): _*).toDF(), data ++ data, textSchema) // Test explicit calls to single arg method - SPARK-16009 testRead(Option(dir).map(spark.read.text).get, data, textSchema) // Reader, with user specified schema, should just apply user schema on the file data val e = intercept[AnalysisException] { spark.read.schema(userSchema).textFile() } assert(e.getMessage.toLowerCase(Locale.ROOT).contains( "user specified schema not supported")) intercept[AnalysisException] { spark.read.schema(userSchema).textFile(dir) } intercept[AnalysisException] { spark.read.schema(userSchema).textFile(dir, dir) } intercept[AnalysisException] { spark.read.schema(userSchema).textFile(Seq(dir, dir): _*) } } ignore("csv - API and behavior regarding schema") { // Writer spark.createDataset(data).toDF("str").write.mode(SaveMode.Overwrite).csv(dir) val df = spark.read.csv(dir) checkAnswer(df, spark.createDataset(data).toDF()) val schema = df.schema // Reader, without user specified schema val message = intercept[AnalysisException] { testRead(spark.read.csv(), Seq.empty, schema) }.getMessage assert(message.contains("Unable to infer schema for CSV. It must be specified manually.")) testRead(spark.read.csv(dir), data, schema) testRead(spark.read.csv(dir, dir), data ++ data, schema) testRead(spark.read.csv(Seq(dir, dir): _*), data ++ data, schema) // Test explicit calls to single arg method - SPARK-16009 testRead(Option(dir).map(spark.read.csv).get, data, schema) // Reader, with user specified schema, should just apply user schema on the file data testRead(spark.read.schema(userSchema).csv(), Seq.empty, userSchema) testRead(spark.read.schema(userSchema).csv(dir), data, userSchema) testRead(spark.read.schema(userSchema).csv(dir, dir), data ++ data, userSchema) testRead(spark.read.schema(userSchema).csv(Seq(dir, dir): _*), data ++ data, userSchema) } ignore("json - API and behavior regarding schema") { // Writer spark.createDataset(data).toDF("str").write.mode(SaveMode.Overwrite).json(dir) val df = spark.read.json(dir) checkAnswer(df, spark.createDataset(data).toDF()) val schema = df.schema // Reader, without user specified schema intercept[AnalysisException] { testRead(spark.read.json(), Seq.empty, schema) } testRead(spark.read.json(dir), data, schema) testRead(spark.read.json(dir, dir), data ++ data, schema) testRead(spark.read.json(Seq(dir, dir): _*), data ++ data, schema) // Test explicit calls to single arg method - SPARK-16009 testRead(Option(dir).map(spark.read.json).get, data, schema) // Reader, with user specified schema, data should be nulls as schema in file different // from user schema val expData = Seq[String](null, null, null) testRead(spark.read.schema(userSchema).json(), Seq.empty, userSchema) testRead(spark.read.schema(userSchema).json(dir), expData, userSchema) testRead(spark.read.schema(userSchema).json(dir, dir), expData ++ expData, userSchema) testRead(spark.read.schema(userSchema).json(Seq(dir, dir): _*), expData ++ expData, userSchema) } ignore("parquet - API and behavior regarding schema") { // Writer spark.createDataset(data).toDF("str").write.mode(SaveMode.Overwrite).parquet(dir) val df = spark.read.parquet(dir) checkAnswer(df, spark.createDataset(data).toDF()) val schema 
= df.schema // Reader, without user specified schema intercept[AnalysisException] { testRead(spark.read.parquet(), Seq.empty, schema) } testRead(spark.read.parquet(dir), data, schema) testRead(spark.read.parquet(dir, dir), data ++ data, schema) testRead(spark.read.parquet(Seq(dir, dir): _*), data ++ data, schema) // Test explicit calls to single arg method - SPARK-16009 testRead(Option(dir).map(spark.read.parquet).get, data, schema) // Reader, with user specified schema, data should be nulls as schema in file different // from user schema val expData = Seq[String](null, null, null) testRead(spark.read.schema(userSchema).parquet(), Seq.empty, userSchema) testRead(spark.read.schema(userSchema).parquet(dir), expData, userSchema) testRead(spark.read.schema(userSchema).parquet(dir, dir), expData ++ expData, userSchema) testRead( spark.read.schema(userSchema).parquet(Seq(dir, dir): _*), expData ++ expData, userSchema) } test("orc - API and behavior regarding schema") { withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "native") { // Writer spark.createDataset(data).toDF("str").write.mode(SaveMode.Overwrite).orc(dir) val df = spark.read.orc(dir) checkAnswer(df, spark.createDataset(data).toDF()) val schema = df.schema // Reader, without user specified schema intercept[AnalysisException] { testRead(spark.read.orc(), Seq.empty, schema) } testRead(spark.read.orc(dir), data, schema) testRead(spark.read.orc(dir, dir), data ++ data, schema) testRead(spark.read.orc(Seq(dir, dir): _*), data ++ data, schema) // Test explicit calls to single arg method - SPARK-16009 testRead(Option(dir).map(spark.read.orc).get, data, schema) // Reader, with user specified schema, data should be nulls as schema in file different // from user schema val expData = Seq[String](null, null, null) testRead(spark.read.schema(userSchema).orc(), Seq.empty, userSchema) testRead(spark.read.schema(userSchema).orc(dir), expData, userSchema) testRead(spark.read.schema(userSchema).orc(dir, dir), expData ++ expData, userSchema) testRead( spark.read.schema(userSchema).orc(Seq(dir, dir): _*), expData ++ expData, userSchema) } } test("column nullability and comment - write and then read") { withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "native") { Seq("json", "orc", "parquet", "csv").foreach { format => val schema = StructType( StructField("cl1", IntegerType, nullable = false).withComment("test") :: StructField("cl2", IntegerType, nullable = true) :: StructField("cl3", IntegerType, nullable = true) :: Nil) val row = Row(3, null, 4) val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema) // if we write and then read, the read will enforce schema to be nullable val tableName = "tab" withTable(tableName) { df.write.format(format).mode("overwrite").saveAsTable(tableName) // Verify the DDL command result: DESCRIBE TABLE checkAnswer( sql(s"desc $tableName").select("col_name", "comment").where($"comment" === "test"), Row("cl1", "test") :: Nil) // Verify the schema val expectedFields = schema.fields.map(f => f.copy(nullable = true)) assert(spark.table(tableName).schema === schema.copy(fields = expectedFields)) } } } } test("parquet - column nullability -- write only") { val schema = StructType( StructField("cl1", IntegerType, nullable = false) :: StructField("cl2", IntegerType, nullable = true) :: Nil) val row = Row(3, 4) val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema) withTempPath { dir => val path = dir.getAbsolutePath df.write.mode("overwrite").parquet(path) val file = 
SpecificParquetRecordReaderBase.listDirectory(dir).get(0) val hadoopInputFile = HadoopInputFile.fromPath(new Path(file), new Configuration()) val f = ParquetFileReader.open(hadoopInputFile) val parquetSchema = f.getFileMetaData.getSchema.getColumns.asScala .map(_.getPrimitiveType) f.close() // the write keeps nullable info from the schema val expectedParquetSchema = Seq( new PrimitiveType(Repetition.REQUIRED, PrimitiveTypeName.INT32, "cl1"), new PrimitiveType(Repetition.OPTIONAL, PrimitiveTypeName.INT32, "cl2") ) assert (expectedParquetSchema === parquetSchema) } } ignore("SPARK-17230: write out results of decimal calculation") { val df = spark.range(99, 101) .selectExpr("id", "cast(id as long) * cast('1.0' as decimal(38, 18)) as num") df.write.mode(SaveMode.Overwrite).parquet(dir) val df2 = spark.read.parquet(dir) checkAnswer(df2, df) } private def testRead( df: => DataFrame, expectedResult: Seq[String], expectedSchema: StructType): Unit = { checkAnswer(df, spark.createDataset(expectedResult).toDF()) assert(df.schema === expectedSchema) } test("saveAsTable with mode Append should not fail if the table not exists " + "but a same-name temp view exist") { withTable("same_name") { withTempView("same_name") { spark.range(10).createTempView("same_name") spark.range(20).write.mode(SaveMode.Append).saveAsTable("same_name") assert( spark.sessionState.catalog.tableExists(TableIdentifier("same_name", Some("default")))) } } } test("saveAsTable with mode Append should not fail if the table already exists " + "and a same-name temp view exist") { withTable("same_name") { withTempView("same_name") { val format = spark.sessionState.conf.defaultDataSourceName sql(s"CREATE TABLE same_name(id LONG) USING $format") spark.range(10).createTempView("same_name") spark.range(20).write.mode(SaveMode.Append).saveAsTable("same_name") checkAnswer(spark.table("same_name"), spark.range(10).toDF()) checkAnswer(spark.table("default.same_name"), spark.range(20).toDF()) } } } test("saveAsTable with mode ErrorIfExists should not fail if the table not exists " + "but a same-name temp view exist") { withTable("same_name") { withTempView("same_name") { spark.range(10).createTempView("same_name") spark.range(20).write.mode(SaveMode.ErrorIfExists).saveAsTable("same_name") assert( spark.sessionState.catalog.tableExists(TableIdentifier("same_name", Some("default")))) } } } test("saveAsTable with mode Overwrite should not drop the temp view if the table not exists " + "but a same-name temp view exist") { withTable("same_name") { withTempView("same_name") { spark.range(10).createTempView("same_name") spark.range(20).write.mode(SaveMode.Overwrite).saveAsTable("same_name") assert(spark.sessionState.catalog.getTempView("same_name").isDefined) assert( spark.sessionState.catalog.tableExists(TableIdentifier("same_name", Some("default")))) } } } test("saveAsTable with mode Overwrite should not fail if the table already exists " + "and a same-name temp view exist") { withTable("same_name") { withTempView("same_name") { sql("CREATE TABLE same_name(id LONG) USING parquet") spark.range(10).createTempView("same_name") spark.range(20).write.mode(SaveMode.Overwrite).saveAsTable("same_name") checkAnswer(spark.table("same_name"), spark.range(10).toDF()) checkAnswer(spark.table("default.same_name"), spark.range(20).toDF()) } } } test("saveAsTable with mode Ignore should create the table if the table not exists " + "but a same-name temp view exist") { withTable("same_name") { withTempView("same_name") { spark.range(10).createTempView("same_name") 
spark.range(20).write.mode(SaveMode.Ignore).saveAsTable("same_name") assert( spark.sessionState.catalog.tableExists(TableIdentifier("same_name", Some("default")))) } } } ignore("SPARK-18510: use user specified types for partition columns in file sources") { import org.apache.spark.sql.functions.udf withTempDir { src => val createArray = udf { (length: Long) => for (i <- 1 to length.toInt) yield i.toString } spark.range(4).select(createArray('id + 1) as 'ex, 'id, 'id % 4 as 'part).coalesce(1).write .partitionBy("part", "id") .mode("overwrite") .parquet(src.toString) // Specify a random ordering of the schema, partition column in the middle, etc. // Also let's say that the partition columns are Strings instead of Longs. // partition columns should go to the end val schema = new StructType() .add("id", StringType) .add("ex", ArrayType(StringType)) val df = spark.read .schema(schema) .format("parquet") .load(src.toString) assert(df.schema.toList === List( StructField("ex", ArrayType(StringType)), StructField("part", IntegerType), // inferred partitionColumn dataType StructField("id", StringType))) // used user provided partitionColumn dataType checkAnswer( df, // notice how `part` is ordered before `id` Row(Array("1"), 0, "0") :: Row(Array("1", "2"), 1, "1") :: Row(Array("1", "2", "3"), 2, "2") :: Row(Array("1", "2", "3", "4"), 3, "3") :: Nil ) } } test("SPARK-18899: append to a bucketed table using DataFrameWriter with mismatched bucketing") { withTable("t") { Seq(1 -> "a", 2 -> "b").toDF("i", "j").write.bucketBy(2, "i").saveAsTable("t") val e = intercept[AnalysisException] { Seq(3 -> "c").toDF("i", "j").write.bucketBy(3, "i").mode("append").saveAsTable("t") } assert(e.message.contains("Specified bucketing does not match that of the existing table")) } } test("SPARK-18912: number of columns mismatch for non-file-based data source table") { withTable("t") { sql("CREATE TABLE t USING org.apache.spark.sql.test.DefaultSource") val e = intercept[AnalysisException] { Seq(1 -> "a").toDF("a", "b").write .format("org.apache.spark.sql.test.DefaultSource") .mode("append").saveAsTable("t") } assert(e.message.contains("The column number of the existing table")) } } test("SPARK-18913: append to a table with special column names") { withTable("t") { Seq(1 -> "a").toDF("x.x", "y.y").write.saveAsTable("t") Seq(2 -> "b").toDF("x.x", "y.y").write.mode("append").saveAsTable("t") checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Nil) } } test("SPARK-16848: table API throws an exception for user specified schema") { withTable("t") { val schema = StructType(StructField("a", StringType) :: Nil) val e = intercept[AnalysisException] { spark.read.schema(schema).table("t") }.getMessage assert(e.contains("User specified schema not supported with `table`")) } } ignore("SPARK-20431: Specify a schema by using a DDL-formatted string") { spark.createDataset(data).write.mode(SaveMode.Overwrite).text(dir) testRead(spark.read.schema(userSchemaString).text(), Seq.empty, userSchema) testRead(spark.read.schema(userSchemaString).text(dir), data, userSchema) testRead(spark.read.schema(userSchemaString).text(dir, dir), data ++ data, userSchema) testRead(spark.read.schema(userSchemaString).text(Seq(dir, dir): _*), data ++ data, userSchema) } test("SPARK-20460 Check name duplication in buckets") { Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) => withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { var errorMsg = intercept[AnalysisException] { Seq((1, 1)).toDF("col", 
c0).write.bucketBy(2, c0, c1).saveAsTable("t") }.getMessage assert(errorMsg.contains("Found duplicate column(s) in the bucket definition")) errorMsg = intercept[AnalysisException] { Seq((1, 1)).toDF("col", c0).write.bucketBy(2, "col").sortBy(c0, c1).saveAsTable("t") }.getMessage assert(errorMsg.contains("Found duplicate column(s) in the sort definition")) } } } ignore("SPARK-20460 Check name duplication in schema") { def checkWriteDataColumnDuplication( format: String, colName0: String, colName1: String, tempDir: File): Unit = { val errorMsg = intercept[AnalysisException] { Seq((1, 1)).toDF(colName0, colName1).write.format(format).mode("overwrite") .save(tempDir.getAbsolutePath) }.getMessage assert(errorMsg.contains("Found duplicate column(s) when inserting into")) } def checkReadUserSpecifiedDataColumnDuplication( df: DataFrame, format: String, colName0: String, colName1: String, tempDir: File): Unit = { val testDir = Utils.createTempDir(tempDir.getAbsolutePath) df.write.format(format).mode("overwrite").save(testDir.getAbsolutePath) val errorMsg = intercept[AnalysisException] { spark.read.format(format).schema(s"$colName0 INT, $colName1 INT") .load(testDir.getAbsolutePath) }.getMessage assert(errorMsg.contains("Found duplicate column(s) in the data schema:")) } def checkReadPartitionColumnDuplication( format: String, colName0: String, colName1: String, tempDir: File): Unit = { val testDir = Utils.createTempDir(tempDir.getAbsolutePath) Seq(1).toDF("col").write.format(format).mode("overwrite") .save(s"${testDir.getAbsolutePath}/$colName0=1/$colName1=1") val errorMsg = intercept[AnalysisException] { spark.read.format(format).load(testDir.getAbsolutePath) }.getMessage assert(errorMsg.contains("Found duplicate column(s) in the partition schema:")) } Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) => withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) { withTempDir { src => // Check CSV format checkWriteDataColumnDuplication("csv", c0, c1, src) checkReadUserSpecifiedDataColumnDuplication( Seq((1, 1)).toDF("c0", "c1"), "csv", c0, c1, src) // If `inferSchema` is true, a CSV format is duplicate-safe (See SPARK-16896) var testDir = Utils.createTempDir(src.getAbsolutePath) Seq("a,a", "1,1").toDF().coalesce(1).write.mode("overwrite").text(testDir.getAbsolutePath) val df = spark.read.format("csv").option("inferSchema", true).option("header", true) .load(testDir.getAbsolutePath) checkAnswer(df, Row(1, 1)) checkReadPartitionColumnDuplication("csv", c0, c1, src) // Check JSON format checkWriteDataColumnDuplication("json", c0, c1, src) checkReadUserSpecifiedDataColumnDuplication( Seq((1, 1)).toDF("c0", "c1"), "json", c0, c1, src) // Inferred schema cases testDir = Utils.createTempDir(src.getAbsolutePath) Seq(s"""{"$c0":3, "$c1":5}""").toDF().write.mode("overwrite") .text(testDir.getAbsolutePath) val errorMsg = intercept[AnalysisException] { spark.read.format("json").option("inferSchema", true).load(testDir.getAbsolutePath) }.getMessage assert(errorMsg.contains("Found duplicate column(s) in the data schema:")) checkReadPartitionColumnDuplication("json", c0, c1, src) // Check Parquet format checkWriteDataColumnDuplication("parquet", c0, c1, src) checkReadUserSpecifiedDataColumnDuplication( Seq((1, 1)).toDF("c0", "c1"), "parquet", c0, c1, src) checkReadPartitionColumnDuplication("parquet", c0, c1, src) // Check ORC format checkWriteDataColumnDuplication("orc", c0, c1, src) checkReadUserSpecifiedDataColumnDuplication( Seq((1, 1)).toDF("c0", "c1"), "orc", 
c0, c1, src) checkReadPartitionColumnDuplication("orc", c0, c1, src) } } } } test("Insert overwrite table command should output correct schema: basic") { withTable("tbl", "tbl2") { withView("view1") { val df = spark.range(10).toDF("id") df.write.format("parquet").saveAsTable("tbl") spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl") spark.sql("CREATE TABLE tbl2(ID long) USING parquet") spark.sql("INSERT OVERWRITE TABLE tbl2 SELECT ID FROM view1") val identifier = TableIdentifier("tbl2") val location = spark.sessionState.catalog.getTableMetadata(identifier).location.toString val expectedSchema = StructType(Seq(StructField("ID", LongType, true))) assert(spark.read.parquet(location).schema == expectedSchema) checkAnswer(spark.table("tbl2"), df) } } } test("Insert overwrite table command should output correct schema: complex") { withTable("tbl", "tbl2") { withView("view1") { val df = spark.range(10).map(x => (x, x.toInt, x.toInt)).toDF("col1", "col2", "col3") df.write.format("parquet").saveAsTable("tbl") spark.sql("CREATE VIEW view1 AS SELECT * FROM tbl") spark.sql("CREATE TABLE tbl2(COL1 long, COL2 int, COL3 int) USING parquet PARTITIONED " + "BY (COL2) CLUSTERED BY (COL3) INTO 3 BUCKETS") spark.sql("INSERT OVERWRITE TABLE tbl2 SELECT COL1, COL2, COL3 FROM view1") val identifier = TableIdentifier("tbl2") val location = spark.sessionState.catalog.getTableMetadata(identifier).location.toString val expectedSchema = StructType(Seq( StructField("COL1", LongType, true), StructField("COL3", IntegerType, true), StructField("COL2", IntegerType, true))) assert(spark.read.parquet(location).schema == expectedSchema) checkAnswer(spark.table("tbl2"), df) } } } test("Create table as select command should output correct schema: basic") { withTable("tbl", "tbl2") { withView("view1") { val df = spark.range(10).toDF("id") df.write.format("parquet").saveAsTable("tbl") spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl") spark.sql("CREATE TABLE tbl2 USING parquet AS SELECT ID FROM view1") val identifier = TableIdentifier("tbl2") val location = spark.sessionState.catalog.getTableMetadata(identifier).location.toString val expectedSchema = StructType(Seq(StructField("ID", LongType, true))) assert(spark.read.parquet(location).schema == expectedSchema) checkAnswer(spark.table("tbl2"), df) } } } ignore("Create table as select command should output correct schema: complex") { withTable("tbl", "tbl2") { withView("view1") { val df = spark.range(10).map(x => (x, x.toInt, x.toInt)).toDF("col1", "col2", "col3") df.write.format("parquet").saveAsTable("tbl") spark.sql("CREATE VIEW view1 AS SELECT * FROM tbl") spark.sql("CREATE TABLE tbl2 USING parquet PARTITIONED BY (COL2) " + "CLUSTERED BY (COL3) INTO 3 BUCKETS AS SELECT COL1, COL2, COL3 FROM view1") val identifier = TableIdentifier("tbl2") val location = spark.sessionState.catalog.getTableMetadata(identifier).location.toString val expectedSchema = StructType(Seq( StructField("COL1", LongType, true), StructField("COL3", IntegerType, true), StructField("COL2", IntegerType, true))) assert(spark.read.parquet(location).schema == expectedSchema) checkAnswer(spark.table("tbl2"), df) } } } ignore("use Spark jobs to list files") { withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "1") { withTempDir { dir => val jobDescriptions = new ConcurrentLinkedQueue[String]() val jobListener = new SparkListener { override def onJobStart(jobStart: SparkListenerJobStart): Unit = { jobDescriptions.add(jobStart.properties.getProperty(SparkContext.SPARK_JOB_DESCRIPTION)) } } 
sparkContext.addSparkListener(jobListener) try { spark.range(0, 3).map(i => (i, i)) .write.partitionBy("_1").mode("overwrite").parquet(dir.getCanonicalPath) // normal file paths checkDatasetUnorderly( spark.read.parquet(dir.getCanonicalPath).as[(Long, Long)], 0L -> 0L, 1L -> 1L, 2L -> 2L) sparkContext.listenerBus.waitUntilEmpty() assert(jobDescriptions.asScala.toList.exists( _.contains("Listing leaf files and directories for 3 paths"))) } finally { sparkContext.removeSparkListener(jobListener) } } } } }
Intel-bigdata/OAP
oap-native-sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
Scala
apache-2.0
44,621
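The suite above exercises DataFrameReader/Writer behaviour such as save modes, options and partitioning. As a hedged, standalone illustration of the save-mode semantics those tests assert (not part of the suite itself), the sketch below writes with the default ErrorIfExists mode and then appends, assuming a local SparkSession and a hypothetical /tmp output path:

import org.apache.spark.sql.{SaveMode, SparkSession}

object SaveModeSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("save-mode-sketch").getOrCreate()
    import spark.implicits._

    val path = "/tmp/save-mode-sketch" // hypothetical output path

    // First write: ErrorIfExists is the default DataFrameWriter mode,
    // so this succeeds only while the path does not exist yet.
    Seq(1, 2, 3).toDF("id").write.mode(SaveMode.ErrorIfExists).parquet(path)

    // Subsequent writes choose a mode explicitly, mirroring the
    // `mode(...)` assertions in the suite above.
    Seq(4, 5).toDF("id").write.mode(SaveMode.Append).parquet(path)

    spark.read.parquet(path).show()
    spark.stop()
  }
}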
class C {
  def =+(n: Int): C = new C
  def =++(n: Int): C = new C
}

var v = new C

v /* resolved: false */ =+= 1
v /* resolved: false */ =++= 1
v /* line: 2 */ =+ 1
v /* line: 3 */ =++ 1
katejim/intellij-scala
testdata/resolve2/function/assignment/StartsWithEqual.scala
Scala
apache-2.0
191
package io.youi.task

import scala.concurrent.duration.FiniteDuration

case class AnimateIn(get: () => Double,
                     apply: Double => Unit,
                     destination: () => Double,
                     duration: FiniteDuration,
                     easing: Easing) extends DurationTask {
  private var initialPosition: Double = 0.0

  def easing(easing: Easing): AnimateIn = copy(easing = easing)

  override def time: FiniteDuration = duration

  override def act(delta: Double, elapsed: Double, progress: Double, reset: Boolean): Unit = {
    if (reset) {
      initialPosition = get()
    }
    val endPosition = destination()
    val length = endPosition - initialPosition
    val eased = easing.calculate(progress)
    val adjust = length * eased
    val value = initialPosition + adjust
    apply(value)
  }
}
outr/youi
ui/shared/src/main/scala/io/youi/task/AnimateIn.scala
Scala
mit
838
package com.goyeau.kubernetes.client.crd

import cats.syntax.either._
import io.circe.syntax._
import io.circe.{Decoder, Encoder}
import io.k8s.apiextensionsapiserver.pkg.apis.apiextensions.v1.JSONSchemaProps

trait JSONSchemaPropsOrArray
case class SchemaNotArrayValue(value: JSONSchemaProps) extends JSONSchemaPropsOrArray
case class ArrayValue(value: Array[JSONSchemaProps]) extends JSONSchemaPropsOrArray

object JSONSchemaPropsOrArray {
  implicit val encode: Encoder[JSONSchemaPropsOrArray] = {
    case SchemaNotArrayValue(schema) => schema.asJson
    case ArrayValue(array)           => array.asJson
  }

  implicit val decode: Decoder[JSONSchemaPropsOrArray] = cursor => {
    val decodeSchema = cursor.as[JSONSchemaProps].map(SchemaNotArrayValue.apply)
    val decodeArray  = cursor.as[Array[JSONSchemaProps]].map(ArrayValue.apply)
    decodeSchema.leftFlatMap(_ => decodeArray)
  }
}
joan38/kubernetes-client
kubernetes-client/src/com/goyeau/kubernetes/client/crd/JSONSchemaPropsOrArray.scala
Scala
apache-2.0
897
/**
 * Copyright (C) 2015-2016 DANS - Data Archiving and Networked Services ([email protected])
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package nl.knaw.dans.easy.stage.dataset

import java.io.File

import nl.knaw.dans.easy.stage.lib.Util.loadXML
import nl.knaw.dans.easy.stage.{ RejectedDepositException, Settings }
import nl.knaw.dans.lib.logging.DebugEnhancedLogging

import scala.sys.error
import scala.util.Try
import scala.xml.{ Elem, NodeSeq }

object Util extends DebugEnhancedLogging {

  class CompositeException(throwables: Seq[Throwable])
    extends RuntimeException(throwables.foldLeft("Multiple failures:")((msg, t) => s"$msg\n${t.getClass}: ${t.getMessage}, ${getFirstDansFrame(t)}"))

  private def getFirstDansFrame(t: Throwable): String = {
    if (t.getStackTrace.length > 0) {
      val st = t.getStackTrace
      st.find(_.getClassName.contains("nl.knaw.dans")) match {
        case Some(el) => s"${el.getClassName}.${el.getMethodName} (${el.getFileName}, ${el.getLineNumber})"
        case None => "<No DANS code in stacktrace ?>"
      }
    }
    else "<Unknown error location>"
  }

  /**
   * Load file metadata XML file and extract the metadata for the specified file.
   * Use this as input for further processing and extraction of sub-elements like title and mime type.
   *
   * @param filePath Path to the file, relative to the bag
   * @param s Settings
   * @return File metadata (XML Nodes) for the specified file
   */
  def readFileMetadata(filePath: String)(implicit s: Settings): Try[NodeSeq] = Try {
    for {
      file <- loadBagXML("metadata/files.xml") \\ "files" \ "file"
      if (file \@ "filepath") == filePath
    } yield file
  }

  def readMimeType(fileMetadata: NodeSeq)(implicit s: Settings): Try[String] = Try {
    val mimes = fileMetadata \ "format"
    if (mimes.size != 1)
      throw RejectedDepositException(s"format element doesn't exist for the file, or isn't unique.")
    mimes.head.text
  }

  def readTitle(fileMetadata: NodeSeq)(implicit s: Settings): Try[Option[String]] = Try {
    val titles = fileMetadata \ "title"
    if (titles.size == 1) Option(titles.head.text)
    else None
  }

  def readAccessRights(fileMetadata: NodeSeq)(implicit s: Settings): Try[Option[String]] = Try {
    val rights = fileMetadata \ "accessRights"
    if (rights.size == 1) Option(rights.head.text)
    else None
  }

  def readAudiences()(implicit s: Settings): Try[Seq[String]] = Try {
    trace(())
    for {
      audience <- loadBagXML("metadata/dataset.xml") \\ "DDM" \ "profile" \ "audience"
    } yield audience.text
  }

  def loadBagXML(fileName: String)(implicit s: Settings): Elem = {
    val metadataFile = new File(s.bagitDir, fileName)
    if (!metadataFile.exists) {
      error(s"Unable to find `$fileName` in bag.")
    }
    loadXML(metadataFile)
  }
}
PaulBoon/easy-stage-dataset
src/main/scala/nl/knaw/dans/easy/stage/dataset/Util.scala
Scala
apache-2.0
3,340
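readFileMetadata and the read* helpers above are thin wrappers over scala-xml selection. Below is a minimal sketch of the same selection pattern against an inline XML literal, independent of the project's Settings and loadBagXML plumbing; the XML content and the filepath value are hypothetical:

import scala.xml.{Elem, NodeSeq, XML}

object FilesXmlSketch extends App {
  // Hypothetical stand-in for metadata/files.xml
  val filesXml: Elem = XML.loadString(
    """<files>
      |  <file filepath="data/report.pdf">
      |    <format>application/pdf</format>
      |    <title>Quarterly report</title>
      |  </file>
      |</files>""".stripMargin)

  // Same selection pattern as readFileMetadata: descend to <file> and match on @filepath
  val fileMetadata: NodeSeq =
    (filesXml \\ "file").filter(f => (f \@ "filepath") == "data/report.pdf")

  // Same extraction pattern as readMimeType / readTitle
  val mimeType = (fileMetadata \ "format").headOption.map(_.text)
  val title    = (fileMetadata \ "title").headOption.map(_.text)

  println(s"mime=$mimeType title=$title")
}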
package breeze.linalg

import breeze.generic.UFunc
import breeze.macros.cforRange

/**
 * Computes the determinant of the given real matrix.
 */
object det extends UFunc {
  implicit def canDetUsingLU[T](
      implicit luImpl: LU.primitive.Impl[T, (DenseMatrix[Double], Array[Int])]): Impl[T, Double] = {
    new Impl[T, Double] {
      def apply(X: T): Double = {
        // For triangular N-by-N matrices X, the determinant of X equals the product
        // of the diagonal elements X(i,i) where 0 <= i < N.
        // Since det(AB) = det(A) * det(B), the LU factorization is well-suited for
        // the computation of the determinant of general N-by-N matrices.
        val (m: DenseMatrix[Double], ipiv: Array[Int]) = LU.primitive(X)

        // Count the number of exchanged rows. ipiv contains an array of swapped indices,
        // but it also contains indices that weren't swapped. To count the swapped
        // indices, we have to compare them against their position within the array. A
        // final complication is that the array indices are 1-based, due to the LU call
        // into LAPACK.
        val numExchangedRows = ipiv.map(_ - 1).zipWithIndex.count { piv =>
          piv._1 != piv._2
        }

        var acc = if (numExchangedRows % 2 == 1) -1.0 else 1.0
        cforRange(0 until m.rows) { i =>
          acc *= m(i, i)
        }

        acc
      }
    }
  }
}
scalanlp/breeze
math/src/main/scala/breeze/linalg/functions/det.scala
Scala
apache-2.0
1,403
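A small usage sketch for the det UFunc above, assuming breeze is on the classpath (this is not part of the breeze sources): for a 2x2 matrix ((a, b), (c, d)) the determinant is a*d - b*c, so the call should print a value very close to -2.0.

import breeze.linalg.{DenseMatrix, det}

object DetSketch extends App {
  // det of ((1, 2), (3, 4)) = 1*4 - 2*3 = -2
  val m = DenseMatrix((1.0, 2.0), (3.0, 4.0))
  println(det(m))
}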
package model

case class Status(status: String, time: String, hostname: String)
jmarin/microservice-template
src/main/scala/model/Status.scala
Scala
apache-2.0
80
/**
 * For copyright information see the LICENSE document.
 */

package entice.server.world.systems

import entice.server._
import entice.server.utils._
import entice.server.world._
import entice.server.physics._, Geometry._
import entice.protocol._, MoveState._

import akka.actor._
import shapeless._

class MovementSystem(
    stopWatch: StopWatch)
  extends System[Position :: Movement :: HNil]
  with Actor
  with Subscriber
  with Worlds
  with ActorLogging {

  val subscriptions = classOf[Tick] :: Nil

  override def preStart { register }

  def receive = {
    case MessageEvent(_, Tick()) => worlds.getAll foreach update
  }

  override def update(world: World) {
    val timeDiff = stopWatch.current

    entities(world)
      .filter { e =>
        val curPos = e[Position].pos
        val curGoal = e[Movement].goal
        (e[Movement].moveState != NotMoving
          && curGoal - curPos != Coord2D(0, 0))
      }
      .foreach { e =>
        val curPos = e[Position].pos
        val curGoal = e[Movement].goal
        val curDir = curGoal - curPos

        // TODO: add movementspeed here, add trapezoid
        // the 0.288 is actually distance per millisecond, so
        // it would be 288 terrain tiles per second
        val nextPos = curPos + ((curDir.unit * 0.288F) * timeDiff)

        // check out the next position and set it
        // world.pmap.nextValidPosition(curPos, nextPos) match {
        //   case Some(pos) if (pos != curPos) =>
        //     // set the new position
        //     e.set[Position](e[Position].copy(pos = pos))
        //     // get a new goal
        //     world.pmap.farthestPosition(pos, curDir) match {
        //       case Some(goal) if (goal != pos) =>
        //         e.set[Movement](e[Movement].copy(goal = goal))
        //       case _ =>
        //         e.set[Movement](e[Movement].copy(goal = pos, state = NotMoving.toString))
        //     }
        //   case _ =>
        //     e.set[Movement](e[Movement].copy(goal = curPos, state = NotMoving.toString))
        // }

        world.collisionMesh.farthestPosition(curPos, curDir) match {
          // we are already there
          case Some(goal) if (curPos == goal) =>
            e.set[Movement](e[Movement].copy(goal = curPos, state = NotMoving.toString))

          // we can walk there (or almost there), then stop
          case Some(goal) if ((nextPos == goal) || ((goal - curPos).len < (nextPos - curPos).len)) =>
            e.set[Position](e[Position].copy(pos = goal))
            e.set[Movement](e[Movement].copy(goal = goal, state = NotMoving.toString))

          // we can walk on further
          case Some(goal) =>
            e.set[Position](e[Position].copy(pos = nextPos))
            e.set[Movement](e[Movement].copy(goal = goal))

          // our position and/or direction is invalid
          case _ =>
            log.info("Entity at invalid position. [TimeDelta: " + timeDiff + " | Position: " + curPos + " | Direction: " + curDir + " | Next Position: " + nextPos + "]")
            e.set[Movement](e[Movement].copy(goal = curPos, state = NotMoving.toString))
        }
      }

    // NOTE: yes, it IS crucial to reset the timer at this point :P
    stopWatch.reset
  }
}
entice/old-server
src/main/scala/entice/server/world/systems/MovementSystem.scala
Scala
bsd-3-clause
3,751
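The heart of MovementSystem.update is a per-tick linear step from the current position toward the goal at a fixed speed, clamped at the goal. Below is a simplified, self-contained sketch of that step, using a hypothetical Vec2 in place of the project's Coord2D and the same 0.288 distance-per-millisecond constant:

object MovementStepSketch extends App {
  // Hypothetical 2D vector, standing in for the project's Coord2D
  final case class Vec2(x: Float, y: Float) {
    def -(o: Vec2): Vec2 = Vec2(x - o.x, y - o.y)
    def +(o: Vec2): Vec2 = Vec2(x + o.x, y + o.y)
    def *(s: Float): Vec2 = Vec2(x * s, y * s)
    def len: Float = math.sqrt(x * x + y * y).toFloat
    def unit: Vec2 = if (len == 0) this else Vec2(x / len, y / len)
  }

  // Distance covered per millisecond, matching the 0.288F constant above
  val speed = 0.288f

  /** One movement tick: step from `pos` toward `goal`, clamping at the goal. */
  def step(pos: Vec2, goal: Vec2, deltaMillis: Float): Vec2 = {
    val dir = goal - pos
    val next = pos + (dir.unit * speed * deltaMillis)
    if ((next - pos).len >= dir.len) goal else next // don't overshoot the goal
  }

  println(step(Vec2(0, 0), Vec2(10, 0), deltaMillis = 16f)) // ~ Vec2(4.6, 0)
}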
/*
   Copyright 2014 - 2015 Janek Bogucki

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/
package com.scalacraft.domain.v2.internal

import scala.reflect.runtime.universe._

/**
 * `Reflections`
 */
object Reflections {

  /**
   * Extract the declared constructors for the given type.
   * @tparam T The type to reflect on
   * @return A list of declared constructors
   */
  def declaredConstructors[T: TypeTag]: Iterable[MethodSymbol] =
    typeOf[T].
      decls.
      filter(_.isMethod).
      map(_.asMethod).
      filter(_.isConstructor)
}
janekdb/scalacraft-domain
src/main/scala/com/scalacraft/domain/v2/internal/Reflections.scala
Scala
apache-2.0
1,094
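Reflections.declaredConstructors simply filters a type's declarations down to its constructor symbols. Here is a standalone sketch of the same reflection pattern applied to a sample class with an auxiliary constructor; the Point class is illustrative only and not part of scalacraft-domain:

import scala.reflect.runtime.universe._

// A small sample class with a primary and an auxiliary constructor
class Point(val x: Int, val y: Int) {
  def this(x: Int) = this(x, 0)
}

object ConstructorsSketch extends App {
  // Same reflection pattern as Reflections.declaredConstructors[T]
  val constructors: Iterable[MethodSymbol] =
    typeOf[Point].decls
      .filter(_.isMethod)
      .map(_.asMethod)
      .filter(_.isConstructor)

  constructors.foreach(c => println(c.typeSignature)) // prints both constructor signatures
}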
/** * Copyright 2012-2013 StackMob * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.stackmob.scaliak.tests import org.specs2._ import mock._ import scalaz._ import scalaz.NonEmptyList._ import Scalaz._ import scalaz.effect.IO import com.basho.riak.client.query.functions.NamedErlangFunction import com.basho.riak.client.cap.{UnresolvedConflictException, VClock, Quorum} import org.mockito.{Matchers => MM} import com.stackmob.scaliak._ import com.basho.riak.client.raw._ import com.basho.riak.client.{RiakLink, IRiakObject} import java.util.Date import com.basho.riak.client.query.indexes.{IntIndex, BinIndex} import query.indexes.{IntValueQuery, BinValueQuery, IndexQuery} // TODO: these specs really cover both ReadObject and ScaliakBucket, they should be split up class ScaliakBucketSpecs extends Specification with Mockito with util.MockRiakUtils { def is = "Scaliak Bucket".title ^ """ This class provides the primary functionality for fetching data from and storing data in Riak. """ ^ p^ "Fetching Data" ^ "Fetching with No Conversion" ^ "When the key being fetched is missing returns None" ! skipped ^ "When the key being fetched exists" ^ "When there are no conflicts" ^ "returns a ReadObject whose key is the same as the one fetched" ! simpleFetch.someWKey ^ "can get the stored bytes by calling getBytes on the returned object" ! simpleFetch.testGetBytes ^ "calling stringValue on the returned object returns the string value" ! simpleFetch.testStringValue ^ "the returned object has the same bucket name as the one used to fetch it"! simpleFetch.testBucketName ^ "the returned object has a vclock" ! simpleFetch.testVClock ^ "calling vclockString returns the vclock as a string" ! simpleFetch.testVClockStr ^ "the returned object has a vTag" ! simpleFetch.testVTag ^ "the returned object has a lastModified timestamp" ! simpleFetch.testLastModified ^ "the returned object has a content type" ! simpleFetch.tContentType ^ "if the fetched object has an empty list of links" ^ "links returns None" ! simpleFetch.testEmptyLinksIsNone ^ "hasLinks returns false" ! simpleFetch.testEmptyLinkHasLinksIsFalse ^ "numLinks returns 0" ! simpleFetch.testEmptyLinksReturnsZeroNumLinks ^ "containsLink returns false for any link" ! simpleFetch.testEmptyLinksContainsLinkReturnsFalse ^p^ "if the fetched object has a non-empty list of links" ^ "links returns Some -- a non-empty list of links converted to scaliak" ! nonEmptyLinkFetch.testConvertedLinks ^ "hasLinks returns some" ! nonEmptyLinkFetch.testHasLinks ^ "numLinks returns the number of links in the fetched object" ! nonEmptyLinkFetch.testNumLinks ^ "containsLink returns true for a link that exists" ! nonEmptyLinkFetch.testContainsLinkTrueIfContained ^ "containsLink returns false for a link that does not exist" ! nonEmptyLinkFetch.testContainsLinkFalseIfNotContained ^p^ "if the fetched object does not have metadata" ^ "metadata returns an empty Map[String, String]" ! simpleFetch.testEmptyMetadataMap ^ "hasMetadata returns false" ! 
simpleFetch.testEmptyMetadataHasMetadataReturnsFalse ^ "containsMetadata returns false for any key" ! simpleFetch.testEmptyMetadataContainsMetadataReturnsFalse ^ "getMetadata returns None for any key" ! simpleFetch.testEmptyMetadataGetReturnsNone ^ "if the fetched object has metadata" ^ "metadata returns a Map[String, String] w/ data from fetched obj" ! nonEmptyMetadataFetch.testHasCorrectMetadata ^ "hasMetadata returns true" ! nonEmptyMetadataFetch.testHasMetadataIsTrue ^ "containsMetadata returns true for a key in the metadata map" ! nonEmptyMetadataFetch.testContainsMetadataForExistingKey ^ "containsMetadata returns false for a key in the metadata map" ! nonEmptyMetadataFetch.testContainsMetadataForMissingKey ^ "getMetadata returns Some containing the string if key exists" ! nonEmptyMetadataFetch.testGetMetadataForExistingKey ^ "getMetadata returns None if key does not exist" ! nonEmptyMetadataFetch.testGetMetadataForMissingKey ^p^ "if the fetched object does not have bin indexes" ^ "binIndexes is empty" ! simpleFetch.testEmptyBinIndexes ^ "binIndex returns None for any index name" ! simpleFetch.testEmptyBinIndexesGetIndexReturnsNone ^p^ "if the fetched object has bin indexes" ^ "binIndexes returns a Map[BinIndex,Set[String]] containing all bin idxs"! nonEmptyBinIndexesFetch.testHasAll ^ "binIndex returns Some(Set[String]) for an index string that exists" ! nonEmptyBinIndexesFetch.testGetBinIndexExists ^ "binIndex returns None for an index string that d.n.e" ! nonEmptyBinIndexesFetch.testGetBinIndexMissing ^p^ "if the fetched object does not have int indexes" ^ "intIndexes is empty" ! simpleFetch.testEmptyIntIndexes ^ "intIndex returns None for any index name" ! simpleFetch.testEmptyIntIndexesGetReturnsNone ^p^ "if the fetched object has int indexes" ^ "intIndexes returns a Map[IntIndex,Set[Int]] containing all int idxs" ! nonEmptyIntIndexesFetch.testHasAll ^ "intIndex returns Some(Set[Int]) for an index string that exists" ! nonEmptyIntIndexesFetch.testGetIntIndexExists ^ "intIndex returns None for an index that d.n.e" ! nonEmptyIntIndexesFetch.testGetIntIndexMissing ^ p^ "When there are conflicts" ^ "the default conflict resolver throws an UnresolvedConflictException" ! conflictedFetch.testDefaultConflictRes ^ p^p^p^ "Fetching with Conversion" ^ "When the key being fetched is missing returns None" ! skipped ^ "when the key being fetched exists" ^ "when there are no conflicts" ^ "when the conversion succeeds" ^ "returns the object of type T when converter is supplied explicitly" ! simpleFetch.testConversionExplicit ^ "returns the object of type T when converter is supplied implicitly" ! simpleFetch.testConversionImplicit ^ p^p^p^p^ "Setting the r value for the request" ^ "if not set the generated meta has a null r value" ! riakArguments.testDefaultR ^ "if set the generated meta has the given r value" ! riakArguments.testPassedR ^p^ "Setting the pr value for the request" ^ "if not set the generated meta has a null pr value" ! riakArguments.testDefaultPR ^ "if set the generated meta has the given pr value" ! riakArguments.testPassedPR ^p^ "Setting the notFoundOk value for the request" ^ "if not set the generated meta has a null notFoundOk value" ! riakArguments.testDefaultNotFoundOk ^ "if set the generated meta has the given notFoundOk value" ! riakArguments.testPassedNotFoundOk ^p^ "Setting the basicQuorum value for the request" ^ "if not set the generated meta has a null basicQuorum value" ! 
riakArguments.testDefaultBasicQuorum ^ "if set the generated meta has the given basicQuorum value" ! riakArguments.testPassedBasicQuorum ^p^ "Setting the returnDeletedVClock value for the request" ^ "if not set the generated meta has a null returnDeletedVClock value" ! riakArguments.testDefaultReturnedVClock ^ "if set the generated meta has the given returnDeletedVClock value" ! riakArguments.testPassedReturnedVClock ^p^ "Setting the modifiedSince value for the request" ^ "if not set the generated meta has a null modifiedSince value" ! riakArguments.testDefaultModifiedSince ^ "if set the generated meta has the given modifiedSince value" ! riakArguments.testPassedModifiedSince ^p^ "Setting the ifModified value for the request" ^ "if not set the generated meta has a null ifModified value" ! riakArguments.testDefaultIfModified ^ "if set the generated meta has the given ifModified value" ! riakArguments.testPassedIfModified ^ "fetchDangerous is fetch but without default exception handling" ! simpleFetch.testDangerous ^ "fetchUnsafe calls unsafePerformIO immediately" ! simpleFetch.testUnsafe ^ endp^ "Writing Data" ^ "With No Conversion" ^ "When the Key Being Fetched Does Not Exist" ^ """Given the default "Clobber Mutation" """ ^ "Writes the ReadObject as passed in (converted to an IRiakObject)" ! writeMissing.performsWrite ^ "returns Success(None) when return body is false (default)" ! writeMissing.noReturnBody ^ "returns successfully with the stored object as a ReadObject instance" ! writeMissingReturnBody.noConversion ^ p^ "Given a mutator other than the default" ^ "Writes the ReadObject as returned from the mutator" ! writeMissing.customMutator ^ p^p^ "When the Key Being Fetched Exists" ^ """Given the default "Clobber Mutator" """ ^ "Writes the ReadObject as passed in (converted to an IRiakObject)" ! writeExisting.performsWrite ^ "returns Success(None) when return body is false (default)" ! writeExisting.noReturnBody ^ "returns successfully with the stored object as a ReadObject instance" ! writeExistingReturnBody.noConversion ^ p^ "Given a mutator other than the default" ^ "Writes the ReadObject as returned from the mutator" ! writeExisting.customMutator ^ p^p^ "Can update the links on an object" ! writeExisting.testUpdateLinks ^ "Can update the metadata on an object" ! writeExisting.testUpdateMetadata ^ "Can update the binary indexes on an object" ! writeExisting.testUpdateBinIndexes ^ "Can update the integer indexes on an object" ! writeExisting.testUpdateIntIndexes ^ p^ "With Conversion" ^ "When the Key Being Fetched Does Not Exist" ^ """Given the default "Clobber Mutation" """ ^ "Writes object converted to a WriteObject then a ReadObject" ! writeMissing.domainObject ^p^ "Given a mutator other than the default" ^ "Writes the object as returned from the mutator, converting it afterwards"! writeMissing.domainObjectCustomMutator ^p^p^ "When the Key Being Fetched Exists" ^ """Given the default "Clobber Mutation" """ ^ "Writes object converted to a WriteObject then a ReadObject" ! writeExisting.domainObject ^p^ "Given a mutator other than the default" ^ "Writes the object as returned from the mutator, converting it afterwards"! writeExisting.domainObjectCustomMutator ^ p^p^p^ "Without Reading First" ^ "Does not call read before writing the data" ! writeOnly.writesButNoRead ^ "Preserves the VClock from the generated WriteObject if there is one"! writeOnly.usesPartialScaliakObjectVClock ^ "Setting the w value for the request" ^ "if not set the generated meta has a null w value" ! 
riakArguments.testDefaultWPut ^ "if set the generated meta has the given w value" ! riakArguments.testPassedWPut ^p^ "Setting the pw value for the request" ^ "if not set the generated meta has a null pw value" ! riakArguments.testDefaultPWPut ^ "if set the generated meta has the given pw value" ! riakArguments.testPassedPWPut ^p^ "Setting the dw value for the request" ^ "if not set the generated meta has a null dw value" ! riakArguments.testDefaultDWPut ^ "if set the generated meta has the given dw value" ! riakArguments.testPassedDWPut ^p^ "Setting the return body value" ^ "if not set the generated meta has a false return body" ! riakArguments.testDefaultReturnBodyPut ^ "if set the generated meta has the given return body" ! riakArguments.testPassedReturnBodyPut ^p^ p^ "Read Properties" ^ "Setting the r value for the request" ^ "if not set the generated meta has a null r value" ! riakArguments.testWriteDefaultR ^ "if set the generated meta has the given r value" ! riakArguments.testPassedRWrite ^p^ "Setting the pr value for the request" ^ "if not set the generated meta has a null pr value" ! riakArguments.testWriteDefaultPR ^ "if set the generated meta has the given pr value" ! riakArguments.testPassedPRWrite ^p^ "Setting the notFoundOk value for the request" ^ "if not set the generated meta has a null notFoundOk value" ! riakArguments.testWriteDefaultNotFoundOk ^ "if set the generated meta has the given notFoundOk value" ! riakArguments.testPassedNotFoundOkWrite ^p^ "Setting the basicQuorum value for the request" ^ "if not set the generated meta has a null basicQuorum value" ! riakArguments.testWriteDefaultBasicQuorum ^ "if set the generated meta has the given basicQuorum value" ! riakArguments.testPassedBasicQuorumWrite ^p^ "Setting the returnDeletedVClock value for the request" ^ "if not set the generated meta has a null returnDeletedVClock value" ! riakArguments.testWriteDefaultReturnedVClock ^ "if set the generated meta has the given returnDeletedVClock value" ! riakArguments.testPassedReturnVClockWrite ^p^p^ "Write Properties" ^ "Setting the w value for the request" ^ "if not set the generated meta has a null w value" ! riakArguments.testDefaultW ^ "if set the generated meta has the given w value" ! riakArguments.testPassedW ^p^ "Setting the pw value for the request" ^ "if not set the generated meta has a null pw value" ! riakArguments.testDefaultPW ^ "if set the generated meta has the given pw value" ! riakArguments.testPassedPW ^p^ "Setting the dw value for the request" ^ "if not set the generated meta has a null dw value" ! riakArguments.testDefaultDW ^ "if set the generated meta has the given dw value" ! riakArguments.testPassedDW ^p^ "Setting the return body value" ^ "if not set the generated meta has a false return body" ! riakArguments.testDefaultReturnBody ^ "if set the generated meta has the given return body" ! riakArguments.testPassedReturnBody ^p^ "Setting if none match" ^ "if not set the generated meta has a null array of etags" ! riakArguments.testDefaultIfNoneMatch ^ "if set the generated meta has a array of etags containing only the vtag" ! riakArguments.testPassedIfNoneMatch ^p^ "Setting if not modified" ^ "if not set the generated meta has a null last modified timestamp" ! riakArguments.testDefaultIfNotModified ^ "if set the genereated meta has the last modified timestamp set" ! riakArguments.testPassedIfNotModified ^ endp^ "Deleting Data" ^ "By Key" ^ "Uses the raw client passing in the bucket name, key and delete meta" ! 
deleteByKey.test ^p^ "By ReadObject" ^ "Deletes the object by its key" ! deleteScaliakObject.test ^p^ "By Domain Object" ^ "Deletes the object by its key" ! deleteDomainObject.test ^p^ "If fetch before delete is true (defaults to false)" ^ "Adds the returned vClock to the delete meta" ! deleteWithFetchBefore.testAddsVclock ^ endp^ "Fetching by Index" ^ "by value" ^ "if the value is a string a BinValueQuery is performed" ! fetchIndexByValue.testBinValueGeneratesBinQuery ^ "if the value is an integer a IntValueQuery is performed" ! fetchIndexByValue.testIntValueGeneratesIntQuery ^ "returns the results of the query, a List[String] as returned by the client" ! fetchIndexByValue.testReturnsKeys ^ end // TODO: updating metadata class DummyDomainObject(val someField: String) val dummyWriteVal = "dummy" val dummyDomainConverter = ScaliakConverter.newConverter[DummyDomainObject]( o => (new DummyDomainObject(o.key)).successNel[Throwable], o => WriteObject(o.someField, dummyWriteVal.getBytes) ) val mutationValueAddition = "abc" val dummyDomainMutation = ScaliakMutation.newMutation[DummyDomainObject] { (mbOld, newObj) => { new DummyDomainObject(newObj.someField + mutationValueAddition) } } object fetchIndexByValue extends context { val rawClient = mock[RawClientWithStreaming] // not used in these tests val bucket = createBucket // not used in these tests class IndexQueryExtractor extends util.MockitoArgumentExtractor[IndexQuery] val testIdx = "idx1" def testBinValueGeneratesBinQuery = { val client = mock[RawClientWithStreaming] val b = createBucketWithClient(client) val testBinVal = "someval" val extractor = new IndexQueryExtractor val testResults: java.util.List[String] = new java.util.LinkedList[String]() testResults.add("key1") testResults.add("key12") client.fetchIndex(MM.argThat(extractor)) returns testResults b.fetchIndexByValue(index = testIdx, value = testBinVal).unsafePerformIO().toOption // execute extractor.argument must beSome.like { case obj: BinValueQuery => (obj.getBucket must beEqualTo(bucket.name)) and (obj.getIndex must beEqualTo(testIdx + "_bin")) and (obj.getValue must beEqualTo(testBinVal)) } } def testIntValueGeneratesIntQuery = { val client = mock[RawClientWithStreaming] val b = createBucketWithClient(client) val testIntVal = 1 val extractor = new IndexQueryExtractor val testResults: java.util.List[String] = new java.util.LinkedList[String]() client.fetchIndex(MM.argThat(extractor)) returns testResults b.fetchIndexByValue(index = testIdx, value = testIntVal).unsafePerformIO().toOption // execute extractor.argument must beSome.like { case obj: IntValueQuery => (obj.getBucket must beEqualTo(bucket.name)) and (obj.getIndex must beEqualTo(testIdx + "_int")) and (obj.getValue must beEqualTo(testIntVal)) } } def testReturnsKeys = { import scala.collection.JavaConverters._ val client = mock[RawClientWithStreaming] val b = createBucketWithClient(client) val indexVal = "string" val testResults: java.util.List[String] = new java.util.LinkedList[String]() testResults.add("1") testResults.add("2") client.fetchIndex(any) returns testResults b.fetchIndexByValue(index = testIdx, value = indexVal).map(_.toOption).unsafePerformIO() must beSome.like { case res => res must beEqualTo(testResults.asScala.toList) } } } object deleteWithFetchBefore extends context { val rawClient = mock[RawClientWithStreaming] val bucket = createBucket class DeleteMetaArgExtractor extends util.MockitoArgumentExtractor[DeleteMeta] lazy val result = bucket.deleteByKey(testKey, fetchBefore = true).unsafePerformIO() val 
extractor = new DeleteMetaArgExtractor (rawClient.delete(MM.eq(testBucket), MM.eq(testKey), MM.argThat(extractor)) throws new NullPointerException("can't stub void methods, tests don't depend on the result anyway")) val mockHeadVClock = mock[VClock] mockHeadVClock.asString returns "fetched" val mockHeadResponse = mockRiakResponse(Array()) mockHeadResponse.getVclock returns mockHeadVClock rawClient.head(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockHeadResponse def testAddsVclock = { result extractor.argument must beSome.like { case meta => meta.getVclock.asString must beEqualTo("fetched") } } } object deleteDomainObject extends context { val rawClient = mock[RawClientWithStreaming] val bucket = createBucket val obj = new DummyDomainObject(testKey) implicit val converter = dummyDomainConverter lazy val result = bucket.delete(obj).unsafePerformIO() def test = { result there was one(rawClient).delete(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[DeleteMeta])) } } object deleteScaliakObject extends context { val rawClient = mock[RawClientWithStreaming] val bucket = createBucket val obj = ReadObject(testKey, testBucket, testContentType, mock[VClock], "".getBytes) lazy val result = bucket.delete(obj).unsafePerformIO() def test = { result there was one(rawClient).delete(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[DeleteMeta])) } } object deleteByKey extends context { val rawClient = mock[RawClientWithStreaming] val bucket = createBucket lazy val result = bucket.deleteByKey(testKey).unsafePerformIO() def test = { result there was one(rawClient).delete(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[DeleteMeta])) } } object writeExistingReturnBody extends writeBase { val mock2VClockStr = "vclock2" val mockStoreObj = mockRiakObj(testBucket, testKey, testStoreObject.getBytes, testContentType, mock2VClockStr) val mockStoreResponse = mockRiakResponse(Array(mockStoreObj)) override lazy val result = bucket.store(testStoreObject, returnBody = true).unsafePerformIO() val mock1VClockStr = "vclock1" val mockFetchObj = mockRiakObj(testBucket, testKey, "abc".getBytes, testContentType, mock1VClockStr) val mockFetchVClockStr = mock1VClockStr rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockRiakResponse(Array(mockFetchObj)) val extractor = new IRiakObjExtractor rawClient.store(MM.argThat(extractor), MM.isA(classOf[StoreMeta])) returns mockStoreResponse def noConversion = { val r = result.toOption | None r aka "the object returned from store or None" must beSome[ReadObject].which { obj => obj.stringValue == testStoreObject.stringValue && obj.vClockString == mock2VClockStr } } } object writeMissingReturnBody extends writeBase { val mock2VClockStr = "vclock2" val mockStoreObj = mockRiakObj(testBucket, testKey, testStoreObject.getBytes, testContentType, mock2VClockStr) val mockStoreResponse = mockRiakResponse(Array(mockStoreObj)) val mockFetchVClockStr = "" rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockRiakResponse(Array()) override lazy val result = bucket.store(testStoreObject, returnBody = true).unsafePerformIO() val extractor = new IRiakObjExtractor rawClient.store(MM.argThat(extractor), MM.isA(classOf[StoreMeta])) returns mockStoreResponse def noConversion = { val r = result.toOption | None r aka "the object returned from store or None" must beSome[ReadObject].which { obj => obj.stringValue == testStoreObject.stringValue && obj.vClockString == mock2VClockStr } } } object writeExisting 
extends writeBase { val mock1Bytes = Array[Byte](1, 2) val mock1VClockStr = "a vclock" val mockRiakObj1 = mockRiakObj(testBucket, testKey, mock1Bytes, testContentType, mock1VClockStr) val mockResponse = mockRiakResponse(Array(mockRiakObj1)) mockResponse.getVclock returns mockRiakObj1.getVClock val mockFetchVClockStr = mockRiakObj1.getVClock.asString rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResponse val extractor = new IRiakObjExtractor val mockStoreResponse = mockRiakResponse(Array()) rawClient.store(MM.argThat(extractor), MM.isA(classOf[StoreMeta])) returns mockStoreResponse // TODO: these should not return null def testUpdateLinks = { result // execute call extractor.argument must beSome.like { case obj => obj.getLinks.toArray must haveSize(testStoreObject.numLinks) } } def testUpdateMetadata = { import scala.collection.JavaConverters._ result // execute call extractor.argument must beSome.like { case obj => obj.getMeta.asScala.toMap must beEqualTo(testStoreObject.metadata) } } def testUpdateBinIndexes = { import scala.collection.JavaConverters._ result // execute call extractor.argument must beSome.like { case obj => obj.allBinIndexes.asScala.mapValues(_.asScala.toSet).toMap must beEqualTo(testStoreObject.binIndexes) } } def testUpdateIntIndexes = { import scala.collection.JavaConverters._ result // execute call extractor.argument must beSome.like { case obj => obj.allIntIndexesV2.asScala.mapValues(_.asScala.toSet).toMap must beEqualTo(testStoreObject.intIndexes) } } def customMutator = { val newRawClient = mock[RawClientWithStreaming] val newBucket = createBucketWithClient(newRawClient) newRawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResponse val newExtractor = new IRiakObjExtractor newRawClient.store(MM.argThat(newExtractor), MM.isA(classOf[StoreMeta])) returns mockStoreResponse var fakeValue: String = "fail" implicit val mutator = ScaliakMutation.newMutation[ReadObject] { (o: Option[ReadObject], n: ReadObject) => { fakeValue = "custom" n.copy(bytes = fakeValue.getBytes) } } newBucket.store(testStoreObject).unsafePerformIO() newExtractor.argument must beSome.like { case obj => obj.getValueAsString must_== fakeValue } } def domainObject = { val newRawClient = mock[RawClientWithStreaming] val newBucket = createBucketWithClient(newRawClient) newRawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResponse val newExtractor = new IRiakObjExtractor newRawClient.store(MM.argThat(newExtractor), MM.isA(classOf[StoreMeta])) returns mockStoreResponse implicit val converter = dummyDomainConverter newBucket.store(new DummyDomainObject(testKey)).unsafePerformIO() newExtractor.argument must beSome.like { case o => o.getValueAsString must beEqualTo(dummyWriteVal) } } def domainObjectCustomMutator = { val newRawClient = mock[RawClientWithStreaming] val newBucket = createBucketWithClient(newRawClient) newRawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResponse val newExtractor = new IRiakObjExtractor newRawClient.store(MM.argThat(newExtractor), MM.isA(classOf[StoreMeta])) returns mockStoreResponse implicit val converter = dummyDomainConverter implicit val mutation = dummyDomainMutation newBucket.store(new DummyDomainObject(testKey)).unsafePerformIO() newExtractor.argument must beSome.like { case o => o.getKey must beEqualTo(testKey + mutationValueAddition) } } } object writeMissing extends writeBase { val mockResponse = 
mockRiakResponse(Array()) mockResponse.getVclock returns null val mockFetchVClockStr = "" rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResponse val extractor = new IRiakObjExtractor val mockStoreResponse = mockRiakResponse(Array()) rawClient.store(MM.argThat(extractor), MM.isA(classOf[StoreMeta])) returns mockStoreResponse def customMutator = { val newRawClient = mock[RawClientWithStreaming] val newBucket = createBucketWithClient(newRawClient) newRawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResponse val newExtractor = new IRiakObjExtractor newRawClient.store(MM.argThat(newExtractor), MM.isA(classOf[StoreMeta])) returns mockStoreResponse var fakeValue: String = "fail" implicit val mutator = ScaliakMutation.newMutation[ReadObject] { (o: Option[ReadObject], n: ReadObject) => { fakeValue = "custom" n.copy(bytes = fakeValue.getBytes) } } newBucket.store(testStoreObject).unsafePerformIO() newExtractor.argument must beSome.like { case obj => obj.getValueAsString must_== fakeValue } } def domainObject = { val newRawClient = mock[RawClientWithStreaming] val newBucket = createBucketWithClient(newRawClient) newRawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResponse val newExtractor = new IRiakObjExtractor newRawClient.store(MM.argThat(newExtractor), MM.isA(classOf[StoreMeta])) returns mockStoreResponse implicit val converter = dummyDomainConverter newBucket.store(new DummyDomainObject(testKey)).unsafePerformIO() newExtractor.argument must beSome.like { case o => o.getValueAsString must beEqualTo(dummyWriteVal) } } def domainObjectCustomMutator = { val newRawClient = mock[RawClientWithStreaming] val newBucket = createBucketWithClient(newRawClient) newRawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResponse val newExtractor = new IRiakObjExtractor newRawClient.store(MM.argThat(newExtractor), MM.isA(classOf[StoreMeta])) returns mockStoreResponse implicit val converter = dummyDomainConverter implicit val mutation = dummyDomainMutation newBucket.store(new DummyDomainObject(testKey)).unsafePerformIO() newExtractor.argument must beSome.like { case o => o.getKey must beEqualTo(testKey + mutationValueAddition) } } } object writeOnly extends writeBase { val mockFetchVClockStr = "test" val extractor = new IRiakObjExtractor def writesButNoRead = { bucket.put(testStoreObject).unsafePerformIO() // execute write only operation there was no(rawClient).fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) andThen one(rawClient).store(MM.isA(classOf[IRiakObject]), MM.isA(classOf[StoreMeta])) } def usesPartialScaliakObjectVClock = { val newRawClient = mock[RawClientWithStreaming] val newBucket = createBucketWithClient(newRawClient) newRawClient.store(MM.argThat(extractor), MM.isA(classOf[StoreMeta])) returns null val mockVClock = mock[VClock] mockVClock.asString returns "test" implicit val testConverter = ScaliakConverter.newConverter[ReadObject]( scObj => (new Exception("who cares")).failNel, obj => WriteObject(obj.key, obj.bytes, vClock = mockVClock.some) ) newBucket.put(testStoreObject).unsafePerformIO() extractor.argument must beSome.like { case o => o.getVClock.asString must_== "test" } } } trait writeBase extends context { val rawClient = mock[RawClientWithStreaming] val bucket = createBucket def mockFetchVClockStr: String def extractor: IRiakObjExtractor class CustomMutation extends ScaliakMutation[ReadObject] { val 
fakeValue = "custom" def apply(old: Option[ReadObject], newObj: ReadObject) = { newObj.copy(bytes = fakeValue.getBytes) } } lazy val result = bucket.store(testStoreObject).unsafePerformIO() val mockVClock = mock[VClock] mockVClock.getBytes returns Array[Byte]() mockVClock.asString returns "" val testStoreObject = new ReadObject( testKey, testBucket, testContentType, mockVClock, "".getBytes, links = nels(ScaliakLink("test", "test", "test")).some, metadata = Map("m1" -> "v1", "m2" -> "v2"), binIndexes = Map(BinIndex.named("idx1") -> Set("a", "b"), BinIndex.named("idx2") -> Set("d", "e")), intIndexes = Map(IntIndex.named("idx1") -> Set(1), IntIndex.named("idx2") -> Set(3,4)) ) class IRiakObjExtractor extends util.MockitoArgumentExtractor[IRiakObject] def performsWrite = { result extractor.argument must beSome.which { obj => (obj.getKey == testKey && obj.getContentType == testContentType && obj.getBucket == testBucket && obj.getValueAsString == testStoreObject.stringValue && ~(Option(obj.getVClockAsString)) == mockFetchVClockStr) // in this last step we are checking if the vclock is null by using string Zero value when it is } } def noReturnBody = { result.toOption must beSome.like { case o => o must beNone } } } object riakArguments extends context { // these aren't really used val rawClient = mock[RawClientWithStreaming] val bucket = createBucket val testVTag = "test" val lastModified = new java.util.Date(System.currentTimeMillis) val mockRiakObj1 = mockRiakObj(testBucket, testKey, "".getBytes("utf-8"), "text/plain", "vclock", vTag = testVTag, lastModified = lastModified) val testStoreObject = new ReadObject( testKey, testBucket, testContentType, null, "".getBytes, links = nels(ScaliakLink("test", "test", "test")).some, metadata = Map("m1" -> "v1", "m2" -> "v2"), vTag = testVTag, lastModified = lastModified ) class FetchMetaExtractor extends util.MockitoArgumentExtractor[FetchMeta] class StoreMetaExtractor extends util.MockitoArgumentExtractor[StoreMeta] def initExtractor(rawClient: RawClient) = { val extractor = new FetchMetaExtractor rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.argThat(extractor)) returns mockRiakResponse(Array()) extractor } def initWriteExtractor(rawClient: RawClient) = { initExtractor(rawClient) val extractor = new StoreMetaExtractor rawClient.store(MM.isA(classOf[IRiakObject]), MM.argThat(extractor)) returns null extractor } def testArg(f: ScaliakBucket => IO[ValidationNel[Throwable, Option[ReadObject]]]): FetchMetaExtractor = { val rawClient = mock[RawClientWithStreaming] val ex = initExtractor(rawClient) val bucket = createBucketWithClient(rawClient) f(bucket).unsafePerformIO() ex } def testWriteArg(f: ScaliakBucket => IO[ValidationNel[Throwable, Option[ReadObject]]]): StoreMetaExtractor = { val rawClient = mock[RawClientWithStreaming] val ex = initWriteExtractor(rawClient) val bucket = createBucketWithClient(rawClient) f(bucket).unsafePerformIO() ex } def testDefaultFetchMetaBase[T](metaProp: FetchMeta => T, f: ScaliakBucket => IO[ValidationNel[Throwable, Option[ReadObject]]]) = { testArg(f).argument must beSome.like { case meta => metaProp(meta) must beNull } } def testDefaultStoreMetaBase[T](metaProp: StoreMeta => T, f: ScaliakBucket => IO[ValidationNel[Throwable, Option[ReadObject]]]) = { testWriteArg(f).argument must beSome.like { case meta => metaProp(meta) must beNull } } def testDefaultFetchMeta[T](metaProp: FetchMeta => T) = testDefaultFetchMetaBase(metaProp, _.fetch(testKey)) def testWriteDefaultFetchMeta[T](metaProp: FetchMeta => T) = 
testDefaultFetchMetaBase(metaProp, _.store(testStoreObject)) def testDefaultStoreMeta[T](metaProp: StoreMeta => T) = testDefaultStoreMetaBase(metaProp, _.store(testStoreObject)) def testPutDefaultStoreMeta[T](metaProp: StoreMeta => T) = testDefaultStoreMetaBase(metaProp, _.put(testStoreObject)) def testPassedFetchMeta[T](f: ScaliakBucket => IO[ValidationNel[Throwable, Option[ReadObject]]], metaProp: FetchMeta => T, expected: T) = { testArg(f).argument must beSome.like { case meta => metaProp(meta) must beEqualTo(expected) } } def testPassedStoreMeta[T](f: ScaliakBucket => IO[ValidationNel[Throwable, Option[ReadObject]]], metaProp: StoreMeta => T, expected: T) = { testWriteArg(f).argument must beSome.like { case meta => metaProp(meta) must beEqualTo(expected) } } def testDefaultR = testDefaultFetchMeta(_.getR) def testDefaultPR = testDefaultFetchMeta(_.getPr) def testDefaultNotFoundOk = testDefaultFetchMeta(_.getNotFoundOK) def testDefaultBasicQuorum = testDefaultFetchMeta(_.getBasicQuorum) def testDefaultReturnedVClock = testDefaultFetchMeta(_.getReturnDeletedVClock) def testDefaultModifiedSince = testDefaultFetchMeta(_.getIfModifiedSince) def testDefaultIfModified = testDefaultFetchMeta(_.getIfModifiedVClock) def testWriteDefaultR = testWriteDefaultFetchMeta(_.getR) def testWriteDefaultPR = testWriteDefaultFetchMeta(_.getPr) def testWriteDefaultNotFoundOk = testWriteDefaultFetchMeta(_.getNotFoundOK) def testWriteDefaultBasicQuorum = testWriteDefaultFetchMeta(_.getBasicQuorum) def testWriteDefaultReturnedVClock = testWriteDefaultFetchMeta(_.getReturnDeletedVClock) def testDefaultW = testDefaultStoreMeta(_.getW) def testDefaultDW = testDefaultStoreMeta(_.getDw) def testDefaultPW = testDefaultStoreMeta(_.getPw) def testDefaultIfNoneMatch = testDefaultStoreMeta(_.getEtags) def testDefaultIfNotModified = testDefaultStoreMeta(_.getLastModified) def testDefaultReturnBody = testWriteArg(_.store(testStoreObject)).argument must beSome.like { case meta => meta.getReturnBody must beEqualTo(false) } def testDefaultWPut = testPutDefaultStoreMeta(_.getW) def testDefaultDWPut = testPutDefaultStoreMeta(_.getDw) def testDefaultPWPut = testPutDefaultStoreMeta(_.getPw) def testDefaultReturnBodyPut = testWriteArg(_.put(testStoreObject)).argument must beSome.like { case meta => meta.getReturnBody must beEqualTo(false) } def testPassedR = testPassedFetchMeta(_.fetch(testKey, r = 3), _.getR, new Quorum(3)) def testPassedPR = testPassedFetchMeta(_.fetch(testKey, pr = 2), _.getPr, new Quorum(2)) def testPassedNotFoundOk = testPassedFetchMeta(_.fetch(testKey, notFoundOk = true), _.getNotFoundOK, true) def testPassedBasicQuorum = testPassedFetchMeta(_.fetch(testKey, basicQuorum = false), _.getBasicQuorum, false) def testPassedReturnedVClock = testPassedFetchMeta(_.fetch(testKey, returnDeletedVClock = true), _.getReturnDeletedVClock, true) def testPassedModifiedSince = testPassedFetchMeta(_.fetch(testKey, ifModifiedSince = testDate), _.getIfModifiedSince, testDate) def testPassedIfModified = testPassedFetchMeta(_.fetch(testKey, ifModified = testVClock), _.getIfModifiedVClock, testVClock) def testPassedRWrite = testPassedFetchMeta(_.store(testStoreObject, r = 2), _.getR, new Quorum(2)) def testPassedPRWrite = testPassedFetchMeta(_.store(testStoreObject, pr = 1), _.getPr, new Quorum(1)) def testPassedNotFoundOkWrite = testPassedFetchMeta(_.store(testStoreObject, notFoundOk = true), _.getNotFoundOK, true) def testPassedBasicQuorumWrite = testPassedFetchMeta(_.store(testStoreObject, basicQuorum = true), _.getBasicQuorum, 
true) def testPassedReturnVClockWrite = testPassedFetchMeta(_.store(testStoreObject, returnDeletedVClock = false), _.getReturnDeletedVClock, false) def testPassedW = testPassedStoreMeta(_.store(testStoreObject, w = 2), _.getW, new Quorum(2)) def testPassedPW = testPassedStoreMeta(_.store(testStoreObject, pw = 3), _.getPw, new Quorum(3)) def testPassedDW = testPassedStoreMeta(_.store(testStoreObject, dw = 1), _.getDw, new Quorum(1)) def testPassedReturnBody = testPassedStoreMeta(_.store(testStoreObject, returnBody = true), _.getReturnBody, true) def testPassedIfNoneMatch = testPassedStoreMeta(_.store(testStoreObject, ifNoneMatch = true), _.getEtags.headOption, Option(testVTag)) def testPassedIfNotModified = testPassedStoreMeta(_.store(testStoreObject, ifNotModified = true), _.getLastModified, lastModified) def testPassedWPut = testPassedStoreMeta(_.put(testStoreObject, w = 2), _.getW, new Quorum(2)) def testPassedPWPut = testPassedStoreMeta(_.put(testStoreObject, pw = 3), _.getPw, new Quorum(3)) def testPassedDWPut = testPassedStoreMeta(_.put(testStoreObject, dw = 1), _.getDw, new Quorum(1)) def testPassedReturnBodyPut = testPassedStoreMeta(_.put(testStoreObject, returnBody = true), _.getReturnBody, true) val testDate = new Date(System.currentTimeMillis()) val testVClock = mock[VClock] } object conflictedFetch extends context { val rawClient = mock[RawClientWithStreaming] val bucket = createBucket val mock1Bytes = Array[Byte](1, 2) val mock1VClockStr = "a vclock" val mockRiakObj1 = mockRiakObj(testBucket, testKey, mock1Bytes, testContentType, mock1VClockStr) val mock2Bytes = Array[Byte](1, 3) val mock2VClockStr = "a vclock2" val mockRiakObj2 = mockRiakObj(testBucket, testKey, mock2Bytes, testContentType, mock2VClockStr) val multiObjectResponse = mockRiakResponse(Array(mockRiakObj1, mockRiakObj2)) rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns multiObjectResponse def testDefaultConflictRes = { val r = bucket.fetch(testKey).unsafePerformIO() r.toEither must beLeft.like { case e => ((_: Throwable) must beAnInstanceOf[UnresolvedConflictException]).forall(e.list) } } } object nonEmptyMetadataFetch extends context { val rawClient = mock[RawClientWithStreaming] val bucket = createBucket val metadata = Map("m1" -> "v1", "m2" -> "v2") val mockObj = mockRiakObj(testBucket, testKey, "value".getBytes, "text/plain", "vclock", metadata = metadata) val mockResp = mockRiakResponse(Array(mockObj)) rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResp lazy val result: Option[ReadObject] = { val r: ValidationNel[Throwable, Option[ReadObject]] = bucket.fetch(testKey).unsafePerformIO() r.toOption | None } def testHasCorrectMetadata = { result must beSome.which { _.metadata == metadata } } def testHasMetadataIsTrue = { result must beSome.which { _.hasMetadata } } def testContainsMetadataForExistingKey = { result must beSome.which { o => o.containsMetadata(metadata.keys.head) && o.containsMetadata(metadata.keys.tail.head) } } def testContainsMetadataForMissingKey = { result must beSome.which { !_.containsMetadata("dne") } } def testGetMetadataForExistingKey = { result must beSome.like { case o =>((_:Option[String]) must beSome).forall(o.getMetadata(metadata.keys.head) :: o.getMetadata(metadata.keys.tail.head) :: Nil) } } def testGetMetadataForMissingKey = { result must beSome.which { !_.getMetadata("dne").isDefined } } } object nonEmptyIntIndexesFetch extends context { val rawClient = mock[RawClientWithStreaming] val bucket = 
createBucket val indexes = Map("idx1" -> Set(1,2), "idx2" -> Set(3)) val expectedIndexes = (for { (k,v) <- indexes } yield (IntIndex.named(k), v)).toMap val mockObj = mockRiakObj(testBucket, testKey, "value".getBytes, "text/plain", "vlock", intIndexes = indexes) val mockResp = mockRiakResponse(Array(mockObj)) rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResp lazy val result: Option[ReadObject] = { bucket.fetch(testKey) .map(_.toOption | None) .unsafePerformIO() } def testHasAll = { result must beSome.like { case obj => obj.intIndexes must haveTheSameElementsAs(expectedIndexes) } } def testGetIntIndexExists = { result must beSome.like { case obj => obj.intIndex("idx1") must beSome.like { case values => values must haveTheSameElementsAs(expectedIndexes(IntIndex.named("idx1"))) } } } def testGetIntIndexMissing = { result must beSome.like { case obj => obj.intIndex("dne") must beNone } } } object nonEmptyBinIndexesFetch extends context { val rawClient = mock[RawClientWithStreaming] val bucket = createBucket val indexes = Map("idx1" -> Set("a", "b"), "idx2" -> Set("d")) val expectedIndexes = (for { (k,v) <- indexes } yield (BinIndex.named(k), v)).toMap val mockObj = mockRiakObj(testBucket, testKey, "value".getBytes, "text/plain", "vclock", binIndexes = indexes) val mockResp = mockRiakResponse(Array(mockObj)) rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResp lazy val result: Option[ReadObject] = { bucket.fetch(testKey) .map(_.toOption | None) .unsafePerformIO() } def testHasAll = { result must beSome.like { case obj => obj.binIndexes must haveTheSameElementsAs(expectedIndexes) } } def testGetBinIndexExists = { result must beSome.like { case obj => obj.binIndex("idx1") must beSome.like { case values => values must haveTheSameElementsAs(expectedIndexes(BinIndex.named("idx1"))) } } } def testGetBinIndexMissing = { result must beSome.like { case obj => obj.binIndex("dne") must beNone } } } object nonEmptyLinkFetch extends context { val rawClient = mock[RawClientWithStreaming] val bucket = createBucket val links = (new RiakLink(testBucket, "somekey", "tag")) :: (new RiakLink(testBucket, "somekey", "tag")) :: Nil val expectedLinks = links map { l => ScaliakLink(l.getBucket, l.getKey, l.getTag) } val mockObj = mockRiakObj(testBucket, testKey, "value".getBytes, "text/plain", "vclock", links) val mockResp = mockRiakResponse(Array(mockObj)) rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns mockResp lazy val result: Option[ReadObject] = { val r: ValidationNel[Throwable, Option[ReadObject]] = bucket.fetch(testKey).unsafePerformIO() r.toOption | None } def testConvertedLinks = { result must beSome.like { case obj => obj.links must beSome.like { case links => links.list must haveTheSameElementsAs(expectedLinks) } } } def testNumLinks = { result must beSome.which { _.numLinks == expectedLinks.length } } def testHasLinks = { result must beSome.which { _.hasLinks } } def testContainsLinkTrueIfContained = { result must beSome.which { r => r.containsLink(expectedLinks.head) && r.containsLink(expectedLinks.tail.head) } } def testContainsLinkFalseIfNotContained = { result must beSome.which { !_.containsLink(ScaliakLink("who", "cares", "shouldnt-match")) } } } object simpleFetch extends context { val rawClient = mock[RawClientWithStreaming] val bucket = createBucket val mock1Bytes = Array[Byte](1, 2) val mock1VClockStr = "a vclock" val mock1VTag = "vtag" val mock1LastModified = new 
java.util.Date(System.currentTimeMillis) val mockRiakObj1 = mockRiakObj(testBucket, testKey, mock1Bytes, testContentType, mock1VClockStr, vTag = mock1VTag, lastModified = mock1LastModified) val singleObjectResponse = mockRiakResponse(Array(mockRiakObj1)) rawClient.fetch(MM.eq(testBucket), MM.eq(testKey), MM.isA(classOf[FetchMeta])) returns singleObjectResponse def someWKey = { result must beSome.which { _.key == testKey } } def testGetBytes = { result must beSome.which { _.getBytes == mock1Bytes } } def testStringValue = { result must beSome.which { _.stringValue == new String(mock1Bytes) } } def testBucketName = { result must beSome.which { _.bucket == testBucket } } def testVClock = { result must beSome.which { _.vClock.getBytes.toList == mock1VClockStr.getBytes.toList } } def testVClockStr = { result must beSome.which { _.vClockString == mock1VClockStr } } def testVTag = { result must beSome.which { _.vTag == mock1VTag } } def testLastModified = { result must beSome.like { case obj => obj.lastModified must_== mock1LastModified } } def tContentType = { result must beSome.which { _.contentType == testContentType } } def testEmptyLinksIsNone = { result must beSome.like { case obj => obj.links must beNone } } def testEmptyLinkHasLinksIsFalse = { result must beSome.which { !_.hasLinks } } def testEmptyLinksReturnsZeroNumLinks = { result must beSome.which { _.numLinks == 0 } } def testEmptyLinksContainsLinkReturnsFalse = { val link1 = ScaliakLink("who", "cares", "at all") val link2 = ScaliakLink("because", "this-object", "has-no-links") val Some(r) = result val res1 = r.containsLink(link1) val res2 = r.containsLink(link2) List(res1, res2) must haveTheSameElementsAs(false :: false :: Nil) } def testEmptyMetadataMap = { result must beSome.like { case obj => obj.metadata must beEmpty } } def testEmptyMetadataHasMetadataReturnsFalse = { result must beSome.which { !_.hasMetadata } } def testEmptyMetadataContainsMetadataReturnsFalse = { val Some(r) = result val res1 = r.containsMetadata("m1") val res2 = r.containsMetadata("m2") List(res1, res2) must haveTheSameElementsAs(false :: false :: Nil) } def testEmptyMetadataGetReturnsNone = { val Some(r) = result val res1 = r.getMetadata("m1") val res2 = r.getMetadata("m2") List(res1, res2) must haveTheSameElementsAs(None :: None :: Nil) } def testEmptyBinIndexes = { result must beSome.like { case obj => obj.binIndexes must beEmpty } } def testEmptyBinIndexesGetIndexReturnsNone = { result must beSome.like { case obj => obj.binIndex("whocares") must beNone } } def testEmptyIntIndexes = { result must beSome.like { case obj => obj.intIndexes must beEmpty } } def testEmptyIntIndexesGetReturnsNone = { result must beSome.like { case obj => obj.intIndex("whocares") must beNone } } def testConversionExplicit = { // this will fail unti you start explicitly passing a resolver val r = bucket.fetch(testKey)(dummyDomainConverter, ScaliakResolver.DefaultResolver).unsafePerformIO() (r.toOption | None) aka "the optional result discarding the exceptions" must beSome.which { _.someField == testKey } } def testConversionImplicit = { implicit val converter = dummyDomainConverter val r: ValidationNel[Throwable, Option[DummyDomainObject]] = bucket.fetch(testKey).unsafePerformIO() (r.toOption | None) aka "the optional result discarding the exceptions" must beSome.which { _.someField == testKey } } def testDangerous = { val newClient = mock[RawClientWithStreaming] val newBucket = createBucketWithClient(newClient) newClient.fetch(MM.eq(testBucket), MM.eq(testKey), 
MM.isA(classOf[FetchMeta])) throws (new NullPointerException) newBucket.fetchDangerous(testKey).unsafePerformIO() must throwA[NullPointerException] } def testUnsafe = { (bucket.fetchUnsafe(testKey).toOption | None) must beSome } // the result after discarding any possible exceptions lazy val result: Option[ReadObject] = { val r: ValidationNel[Throwable, Option[ReadObject]] = bucket.fetch(testKey).unsafePerformIO() r.toOption | None } } trait context { val testBucket = "test_bucket" val testKey = "somekey" val testContentType = "text/plain" def rawClient: RawClientWithStreaming def createBucketWithClient(r: RawClientWithStreaming) = new ScaliakBucket( rawClientOrClientPool = Left(r), name = testBucket, allowSiblings = false, lastWriteWins = false, nVal = 3, backend = None, smallVClock = 1, bigVClock = 2, youngVClock = 3, oldVClock = 4, precommitHooks = Nil, postcommitHooks = Nil, rVal = new Quorum(2), wVal = new Quorum(2), rwVal = new Quorum(0), dwVal = new Quorum(0), prVal = new Quorum(0), pwVal = new Quorum(0), basicQuorum = false, notFoundOk = false, chashKeyFunction = mock[NamedErlangFunction], linkWalkFunction = mock[NamedErlangFunction], isSearchable = false ) def createBucket = createBucketWithClient(rawClient) } }
stackmob/scaliak
src/test/scala/com/stackmob/scaliak/tests/ScaliakBucketSpecs.scala
Scala
apache-2.0
59,255
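The ScaliakBucketSpecs above lean heavily on a util.MockitoArgumentExtractor helper (passed to MM.argThat) to capture the DeleteMeta, StoreMeta and IndexQuery arguments handed to the mocked RawClientWithStreaming, so they can be inspected after the call. That helper's source is not shown here; as a rough, self-contained illustration of the same capture-then-inspect pattern, Mockito's built-in ArgumentCaptor does the equivalent job (everything below other than the Mockito API itself is illustrative only, not the project's helper):

import org.mockito.{ArgumentCaptor, Mockito}

object CaptureSketch extends App {
  // Mock a collaborator and exercise it.
  val list: java.util.List[String] = Mockito.mock(classOf[java.util.List[String]])
  list.add("riak")

  // Capture the argument the mock received, then assert on it afterwards --
  // the same idea the spec expresses with `MM.argThat(extractor)` + `extractor.argument`.
  val captor: ArgumentCaptor[String] = ArgumentCaptor.forClass(classOf[String])
  Mockito.verify(list).add(captor.capture())
  assert(captor.getValue == "riak")
}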
package mr.merc.ui.common.geom

import org.scalatest.funsuite.AnyFunSuite

class PolygonTest extends AnyFunSuite {

  test("check inside triangle") {
    val polygon = new Polygon((0, 0), (5, 5), (5, 0))
    assert(polygon.isInside(3, 0) === true)
    assert(polygon.isInside(1, 0.5) === true)
    assert(polygon.isInside(10, 10) === false)
    assert(polygon.isInside(1, 2) === false)
  }

  test("check inside rectangle") {
    val polygon = new Polygon((0, 0), (5, 0), (5, 5), (0, 5))
    assert(polygon.isInside(3, 0) === true)
    assert(polygon.isInside(1, 0.5) === true)
    assert(polygon.isInside(10, 10) === false)
    assert(polygon.isInside(1, 2) === true)
  }
}
RenualdMarch/merc
src/test/scala/mr/merc/ui/common/geom/PolygonTest.scala
Scala
gpl-3.0
673
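The Polygon.isInside implementation exercised by PolygonTest above is not part of this snippet. A common way to implement such a check is the even-odd ray-casting rule; the sketch below is a stand-alone reference, not the merc implementation, and points exactly on an edge (such as (3, 0) asserted above) may be classified differently depending on how boundaries are treated:

// Stand-alone even-odd (ray casting) point-in-polygon check.
class RayCastPolygon(vertices: (Double, Double)*) {
  def isInside(x: Double, y: Double): Boolean = {
    var inside = false
    var j = vertices.length - 1
    for (i <- vertices.indices) {
      val (xi, yi) = vertices(i)
      val (xj, yj) = vertices(j)
      // Count edges whose crossing with a horizontal ray from (x, y) lies to the right.
      val crosses = ((yi > y) != (yj > y)) &&
        x < (xj - xi) * (y - yi) / (yj - yi) + xi
      if (crosses) inside = !inside
      j = i
    }
    inside
  }
}

// Example: the rectangle from the second test case above.
// new RayCastPolygon((0.0, 0.0), (5.0, 0.0), (5.0, 5.0), (0.0, 5.0)).isInside(1, 2)  // true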
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.consumer import junit.framework.Assert._ import org.scalatest.junit.JUnitSuite import org.junit.Test import kafka.server.OffsetManager import kafka.coordinator.ConsumerCoordinator class TopicFilterTest extends JUnitSuite { @Test def testWhitelists() { val topicFilter1 = new Whitelist("white1,white2") assertTrue(topicFilter1.isTopicAllowed("white2", excludeInternalTopics = true)) assertTrue(topicFilter1.isTopicAllowed("white2", excludeInternalTopics = false)) assertFalse(topicFilter1.isTopicAllowed("black1", excludeInternalTopics = true)) assertFalse(topicFilter1.isTopicAllowed("black1", excludeInternalTopics = false)) val topicFilter2 = new Whitelist(".+") assertTrue(topicFilter2.isTopicAllowed("alltopics", excludeInternalTopics = true)) assertFalse(topicFilter2.isTopicAllowed(ConsumerCoordinator.OffsetsTopicName, excludeInternalTopics = true)) assertTrue(topicFilter2.isTopicAllowed(ConsumerCoordinator.OffsetsTopicName, excludeInternalTopics = false)) val topicFilter3 = new Whitelist("white_listed-topic.+") assertTrue(topicFilter3.isTopicAllowed("white_listed-topic1", excludeInternalTopics = true)) assertFalse(topicFilter3.isTopicAllowed("black1", excludeInternalTopics = true)) val topicFilter4 = new Whitelist("test-(?!bad\\\\b)[\\\\w]+") assertTrue(topicFilter4.isTopicAllowed("test-good", excludeInternalTopics = true)) assertFalse(topicFilter4.isTopicAllowed("test-bad", excludeInternalTopics = true)) } @Test def testBlacklists() { val topicFilter1 = new Blacklist("black1") assertTrue(topicFilter1.isTopicAllowed("white2", excludeInternalTopics = true)) assertTrue(topicFilter1.isTopicAllowed("white2", excludeInternalTopics = false)) assertFalse(topicFilter1.isTopicAllowed("black1", excludeInternalTopics = true)) assertFalse(topicFilter1.isTopicAllowed("black1", excludeInternalTopics = false)) assertFalse(topicFilter1.isTopicAllowed(ConsumerCoordinator.OffsetsTopicName, excludeInternalTopics = true)) assertTrue(topicFilter1.isTopicAllowed(ConsumerCoordinator.OffsetsTopicName, excludeInternalTopics = false)) } @Test def testWildcardTopicCountGetTopicCountMapEscapeJson() { def getTopicCountMapKey(regex: String): String = { val topicCount = new WildcardTopicCount(null, "consumerId", new Whitelist(regex), 1, true) topicCount.getTopicCountMap.head._1 } //lets make sure that the JSON strings are escaping as we expect //if they are not then when they get saved to zookeeper and read back out they will be broken on parse assertEquals("-\\\\\\"-", getTopicCountMapKey("-\\"-")) assertEquals("-\\\\\\\\-", getTopicCountMapKey("-\\\\-")) assertEquals("-\\\\/-", getTopicCountMapKey("-/-")) assertEquals("-\\\\\\\\b-", getTopicCountMapKey("-\\\\b-")) assertEquals("-\\\\\\\\f-", getTopicCountMapKey("-\\\\f-")) 
assertEquals("-\\\\\\\\n-", getTopicCountMapKey("-\\\\n-")) assertEquals("-\\\\\\\\r-", getTopicCountMapKey("-\\\\r-")) assertEquals("-\\\\\\\\t-", getTopicCountMapKey("-\\\\t-")) assertEquals("-\\\\\\\\u0000-", getTopicCountMapKey("-\\\\u0000-")) assertEquals("-\\\\\\\\u001f-", getTopicCountMapKey("-\\\\u001f-")) assertEquals("-\\\\\\\\u007f-", getTopicCountMapKey("-\\\\u007f-")) assertEquals("-\\\\\\\\u009f-", getTopicCountMapKey("-\\\\u009f-")) } }
tempbottle/kafka
core/src/test/scala/unit/kafka/consumer/TopicFilterTest.scala
Scala
apache-2.0
4,144
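As a quick illustration of the filter semantics exercised in TopicFilterTest above: a Whitelist built from a regex admits matching topics, and internal topics (such as the offsets topic) are only admitted when excludeInternalTopics is false. A minimal sketch, assuming the same kafka.consumer API the test uses:

import kafka.consumer.Whitelist

object TopicFilterSketch extends App {
  val whitelist = new Whitelist("metrics-.+")
  val topics = Seq("metrics-cpu", "metrics-disk", "application-logs")

  // Only the topics matching the whitelist regex survive.
  val allowed = topics.filter(t => whitelist.isTopicAllowed(t, excludeInternalTopics = true))
  println(allowed) // List(metrics-cpu, metrics-disk)
}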
import com.td.utils.{GoogleSearchAPI, TwitterAPI} import java.net.{HttpURLConnection, URL} import scala.io.Source import scalaj.http.Token object Main { def main(args: Array[String]) { val tweetRegex = """(@[a-zA-Z0-9_]+) to (@[a-zA-Z0-9_]+) ([a-zA-Z0-9 .]+)""".r // Tweet a Gif Tokens val consumer = Token("", "") val accessToken = Token("", "") val twitterAPI = new TwitterAPI(consumer, accessToken) val googleSearchAPI = new GoogleSearchAPI() var id = args.headOption match { case Some(head) => head.toLong case None => 479814603560747010L //Magic # to start from... } while (true) { try { println(twitterAPI.getApplicationLimit) println(twitterAPI.getMentionsLimit) while (twitterAPI.getApplicationLimit == 0 && twitterAPI.getMentionsLimit == 0) { // Sleep while at API limit Thread.sleep(30000) } try { // reverse to get them in chronological order val tweets = twitterAPI.getUserMentions(Some(id.toString)).reverse tweets.foreach { tweet => tweetRegex.findFirstMatchIn(tweet._2) match { case Some(t) => t.groupCount match { case 3 => t.group(1).toLowerCase match { case "@tweetagif" => { if (id < tweet._1) { println("In Response to " + tweet._1) val urls = googleSearchAPI.SearchImage(t.group(3)).find { mediaURL => { val connection = new URL(mediaURL).openConnection().asInstanceOf[HttpURLConnection] connection.setRequestMethod("GET") val is = connection.getInputStream val length = Stream.continually(is.read).takeWhile(-1 !=).map(_.toByte).toArray.length is.close() println(mediaURL + " " + length) length < 5000000 } } urls.headOption match { case Some(head) => { val status = t.group(2) + " @" + tweet._3 println(twitterAPI.replyWithMedia(tweet._1, status, head)) } case None => { println("No GIFs found...") twitterAPI.reply(tweet._1, "Sorry, no GIFs found...") } } id = tweet._1 + 1; } } } } case None => } } println(id) println("----------------") Thread.sleep(59000) } catch { case e: Exception => println("Exception!!! " + e); Thread.sleep(10000) } } catch { // This will catch exceptions for getting API limits, usually some connectivity issue // Just sleep for 10S and it should be fine. case e: Exception => println("Catch all..." + e); Thread.sleep(10000); } } } }
tonyd3/TweetAGIF
src/main/scala/com/td/tweetagif/Main.scala
Scala
mit
3,360
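One thing worth noting about the TweetAGIF Main above: it downloads every candidate GIF in full just to measure its size against the ~5 MB media limit. A lighter-weight alternative (a sketch, not part of the original bot) is to issue a HEAD request and read Content-Length, falling back to the full download only when the header is absent:

import java.net.{HttpURLConnection, URL}

object MediaSizeCheck {
  // Returns Some(length) if the server reports Content-Length, None otherwise.
  def contentLength(mediaURL: String): Option[Long] = {
    val connection = new URL(mediaURL).openConnection().asInstanceOf[HttpURLConnection]
    try {
      connection.setRequestMethod("HEAD")
      val length = connection.getContentLengthLong
      if (length >= 0) Some(length) else None
    } finally connection.disconnect()
  }

  // Keep only URLs known to fit under the ~5 MB limit used by the bot above.
  def fitting(urls: Seq[String]): Seq[String] =
    urls.filter(u => contentLength(u).exists(_ < 5000000L))
}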
package scalainvoice

object DocumentItemCalculator {

  val round = Rounding.rounder

  def calculateItemTotals(item: DocumentItem): DocumentItem = {
    val beforeTaxes = round(item.unitPrice * item.quantity)
    val taxes = round(beforeTaxes * item.taxFactor)
    item.copy(
      total = round(beforeTaxes + taxes),
      totalBeforeTaxes = beforeTaxes,
      totalTaxes = taxes
    )
  }
}
donbonifacio/scala-invoice
src/main/scala/scalainvoice/DocumentItemCalculator.scala
Scala
mit
400
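To make the rounding-and-totaling logic of DocumentItemCalculator above concrete, here is a self-contained sketch with a stand-in DocumentItem carrying only the fields the calculator touches, and a plain two-decimal rounder in place of Rounding.rounder (the real scala-invoice model has more fields and its own rounding rules):

object ItemTotalsSketch extends App {
  // Stand-in model: only the fields calculateItemTotals reads and writes.
  case class DocumentItem(unitPrice: Double, quantity: Double, taxFactor: Double,
                          total: Double = 0.0, totalBeforeTaxes: Double = 0.0, totalTaxes: Double = 0.0)

  // Simple two-decimal rounding in place of Rounding.rounder.
  def round(d: Double): Double = math.rint(d * 100) / 100

  def calculateItemTotals(item: DocumentItem): DocumentItem = {
    val beforeTaxes = round(item.unitPrice * item.quantity)
    val taxes = round(beforeTaxes * item.taxFactor)
    item.copy(total = round(beforeTaxes + taxes), totalBeforeTaxes = beforeTaxes, totalTaxes = taxes)
  }

  // 2 units at 100.00 with a 0.23 tax factor => 200.00 net, 46.00 tax, 246.00 gross.
  val line = calculateItemTotals(DocumentItem(unitPrice = 100.0, quantity = 2, taxFactor = 0.23))
  assert(line.totalBeforeTaxes == 200.0 && line.totalTaxes == 46.0 && line.total == 246.0)
}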
package controllers

import scala.Left
import scala.Right

import play.api._
import play.api.mvc._
import play.api.Play.current
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import scala.concurrent.Future
import play.api.mvc.WebSocket
import play.api.libs.iteratee._
import actors.GamerActor
import actors.BoardActor
import play.api.libs.json._
import play.api.mvc.WebSocket.FrameFormatter
import java.util.UUID

object Application extends Controller {

  val NICK = "nick"
  val UID = "uid"
  var counter = 0
  val board = BoardActor()

  def index = Action {
    Ok(views.html.index("Your new application is ready."))
  }

//  def connect(nick: AnyRef) = WebSocket.tryAcceptWithActor[String, String] { implicit request =>
//    Future.successful(nick match {
  def connect = WebSocket.tryAcceptWithActor[Array[Byte], Array[Byte]] { implicit request =>
    val uid: String = request.session.get(UID).getOrElse {
      counter += 1
      val uuid = UUID.randomUUID();
      request.session + (UID -> uuid.toString())
      uuid.toString
    }
    println("uid = " + uid)
    Future.successful(request.getQueryString(NICK) match {
      case None => Left(Forbidden)
      case Some(nick) => Right(GamerActor.props(board, nick, uid))
    })
  }

  /*
  def connect = WebSocket.using[String] { request =>
    request.getQueryString("nick") match {
      case None => Left(Forbidden)
      case Some(nick) => println(nick + " connect!!");
    }
    println(request.getQueryString("nick") + " connect!!");

    // Log events to the console
    val in = Iteratee.foreach[String](println).map { msg =>
      println("Disconnectedaa " + msg)
    }

    // Send a single 'Hello!' message
    val out = Enumerator("Hello!")

    (in, out)
  }*/
}
terdong/CustomRPS7ServerForPlay
custom_rps7_server/app/controllers/Application.scala
Scala
mit
1,770
package org.jetbrains.plugins.scala.lang.refactoring.rename

import java.util

import com.intellij.openapi.editor.Editor
import com.intellij.openapi.util.Pass
import com.intellij.psi.PsiElement
import com.intellij.refactoring.listeners.RefactoringElementListener
import com.intellij.refactoring.rename.RenamePsiElementProcessor
import com.intellij.usageView.UsageInfo
import org.jetbrains.plugins.scala.lang.psi.fake.FakePsiMethod
import org.jetbrains.plugins.scala.lang.psi.light.{PsiTypedDefinitionWrapper, ScFunctionWrapper, StaticPsiMethodWrapper, StaticPsiTypedDefinitionWrapper}

/**
 * User: Alexander Podkhalyuzin
 * Date: 08.09.2009
 */
class RenameLightProcessor extends RenamePsiElementProcessor {

  override def canProcessElement(element: PsiElement): Boolean = {
    element match {
      case _: FakePsiMethod => true
      case _: ScFunctionWrapper => true
      case _: PsiTypedDefinitionWrapper => true
      case _: StaticPsiTypedDefinitionWrapper => true
      case _: StaticPsiMethodWrapper => true
      case _ => false
    }
  }

  override def prepareRenaming(element: PsiElement, newName: String, allRenames: util.Map[PsiElement, String]): Unit = {
    val orig = originalElement(element)
    allRenames.put(orig, newName)
    import scala.jdk.CollectionConverters._
    for (processor <- RenamePsiElementProcessor.allForElement(orig).asScala) {
      processor.prepareRenaming(orig, newName, allRenames)
    }
  }

  override def substituteElementToRename(element: PsiElement, editor: Editor): PsiElement = {
    val orig = originalElement(element)
    if (orig != null) {
      val processor = RenamePsiElementProcessor.forElement(orig)
      processor.substituteElementToRename(orig, editor)
    } else null
  }

  override def substituteElementToRename(element: PsiElement, editor: Editor, renameCallback: Pass[PsiElement]): Unit = {
    val orig = originalElement(element)
    if (orig != null) {
      val processor = RenamePsiElementProcessor.forElement(orig)
      processor.substituteElementToRename(orig, editor, renameCallback)
    }
  }

  private def originalElement(element: PsiElement) = element match {
    case _: FakePsiMethod => null
    case ScFunctionWrapper(delegate) => delegate
    case PsiTypedDefinitionWrapper(delegate) => delegate
    case StaticPsiTypedDefinitionWrapper(delegate) => delegate
    case StaticPsiMethodWrapper(method) => method
    case _ => element
  }

  override def renameElement(element: PsiElement, newName: String, usages: Array[UsageInfo], listener: RefactoringElementListener): Unit = {
    ScalaRenameUtil.doRenameGenericNamedElement(element, newName, usages, listener)
  }
}
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/refactoring/rename/RenameLightProcessor.scala
Scala
apache-2.0
2,655
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author  John Miller
 *  @version 1.3
 *  @date    Wed Jul 31 13:54:40 EDT 2013
 *  @see     LICENSE (MIT style license file).
 */

package scalation.graphalytics

import scala.collection.immutable.{Set => SET}

import scalation.linalgebra.{MatriI, MatrixI, SparseMatrixI}

//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Convert` object is used to convert between the Adjacency Matrix
 *  representation and the Adjacency Sets representation of a graph.
 */
object Convert
{
    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Convert the graph from an Adjacency Matrix representation to an
     *  Adjacency Sets representation.
     *  @param mat  the Adjacency Matrix representation of a graph
     */
    def matrix2Graph (mat: MatriI): Graph =
    {
        val n  = mat.dim1
        val ch = Array.ofDim [SET [Int]] (n)
        for (i <- 0 until n) {
            ch(i) = Set [Int] ()
            for (j <- 0 until n if ! (mat(i, j) == 0)) ch(i) += j
        } // for
        new Graph (ch)
    } // matrix2Graph

    //::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Convert the graph from an Adjacency Sets representation to an
     *  Adjacency Matrix representation.
     *  @param gr      the Adjacency Sets representation of a graph
     *  @param sparse  whether to build a sparse or a dense matrix
     */
    def graph2Matrix (gr: Graph, sparse: Boolean = false): MatriI =
    {
        val n   = gr.ch.length
        val mat = if (sparse) new SparseMatrixI (n, n) else new MatrixI (n, n)
        for (i <- 0 until n; j <- 0 until n if gr.ch(i) contains j) mat(i, j) = 1
        mat
    } // graph2Matrix

} // Convert object

import Convert._

//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `ConvertTest` object is used to test the `Convert` object.
 *  > run-main scalation.graphalytics.ConvertTest
 */
object ConvertTest extends App
{
    val m1 = new MatrixI ((3, 3), 0, 1, 1,
                                  0, 0, 1,
                                  0, 1, 0)
    val g  = matrix2Graph (m1)
    val m2 = graph2Matrix (g)
    println ("m1 = " + m1)
    g.printG ()
    println ("m2 = " + m2)

} // ConvertTest
NBKlepp/fda
scalation_1.3/scalation_modeling/src/main/scala/scalation/graphalytics/Convert.scala
Scala
mit
2,281
/* * La Trobe University - Distributed Deep Learning System * Copyright 2016 Matthias Langer ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package edu.latrobe.blaze.objectives import edu.latrobe._ import edu.latrobe.blaze._ import edu.latrobe.blaze.sinks._ import edu.latrobe.io.FileHandle import edu.latrobe.time._ import scala.util.hashing._ final class OutputRedirection(override val builder: OutputRedirectionBuilder, override val seed: InstanceSeed) extends DependentObjectiveEx[OutputRedirectionBuilder] { require(builder != null && seed != null) val sink : Sink = builder.sink.build(seed) override protected def doClose() : Unit = { sink.close() super.doClose() } override protected def doEvaluate(sink: Sink, optimizer: OptimizerLike, runBeginIterationNo: Long, runBeginTime: Timestamp, runNoSamples: Long, model: Module, batch: Batch, output: Tensor, value: Real) : Option[ObjectiveEvaluationResult] = { super.doEvaluate( this.sink, optimizer, runBeginIterationNo, runBeginTime, runNoSamples, model, batch, output, value ) } } final class OutputRedirectionBuilder extends DependentObjectiveExBuilder[OutputRedirectionBuilder] { override def repr : OutputRedirectionBuilder = this private var _sink : SinkBuilder = StdErrSinkBuilder() def sink : SinkBuilder = _sink def sink_=(value: SinkBuilder) : Unit = { require(value != null) _sink = value } def setSink(value: SinkBuilder) : OutputRedirectionBuilder = { sink_=(value) this } override protected def doToString() : List[Any] = _sink :: super.doToString() override def hashCode() : Int = MurmurHash3.mix(super.hashCode(), _sink.hashCode()) override def canEqual(that: Any) : Boolean = that.isInstanceOf[OutputRedirectionBuilder] override protected def doEquals(other: Equatable) : Boolean = super.doEquals(other) && (other match { case other: OutputRedirectionBuilder => _sink == other._sink case _ => false }) override protected def doCopy() : OutputRedirectionBuilder = OutputRedirectionBuilder() override def copyTo(other: InstanceBuilder) : Unit = { super.copyTo(other) other match { case other: OutputRedirectionBuilder => other._sink = _sink.copy case _ => } } override def build(seed: InstanceSeed) : OutputRedirection = new OutputRedirection(this, seed) override protected def doPermuteSeeds(fn: BuilderSeed => BuilderSeed) : Unit = { super.doPermuteSeeds(fn) _sink.permuteSeeds(fn) } } object OutputRedirectionBuilder { final def apply() : OutputRedirectionBuilder = new OutputRedirectionBuilder final def apply(sink: SinkBuilder) : OutputRedirectionBuilder = apply().setSink(sink) final def apply(sink: SinkBuilder, child0: ObjectiveBuilder) : OutputRedirectionBuilder = apply(sink) += child0 final def apply(sink: SinkBuilder, child0: ObjectiveBuilder, childN: ObjectiveBuilder*) : OutputRedirectionBuilder = apply(sink, child0) ++= childN final def apply(sink: SinkBuilder, childN: TraversableOnce[ObjectiveBuilder]) : OutputRedirectionBuilder = apply(sink) ++= childN }
bashimao/ltudl
blaze/src/main/scala/edu/latrobe/blaze/objectives/OutputRedirection.scala
Scala
apache-2.0
4,227
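For context, OutputRedirectionBuilder above is configured either through its companion's apply overloads or via setSink; a minimal sketch of the wiring, using only the names visible in the snippet:

import edu.latrobe.blaze.objectives._
import edu.latrobe.blaze.sinks._

object RedirectionWiring {
  // Two equivalent ways to build a redirection objective that writes to stderr
  // (stderr is already the default sink, so this mostly documents the wiring).
  val viaApply  = OutputRedirectionBuilder(StdErrSinkBuilder())
  val viaSetter = OutputRedirectionBuilder().setSink(StdErrSinkBuilder())
  // Child objectives whose output should be redirected are attached through the
  // other apply overloads or `+=`; no concrete ObjectiveBuilder is defined here.
}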
package ml.wolfe.nlp.io

import ml.wolfe.nlp.{Sentence, CharOffsets, Token}
import org.json4s.JsonAST.JArray
import org.json4s.reflect.TypeInfo
import org.json4s.{Extraction, MappingException, Formats, Serializer}

/**
 * @author Sebastian Riedel
 */
object JsonIO {

  class IndexedSeqSerializer extends Serializer[IndexedSeq[_]] {
    def deserialize(implicit format: Formats) = {
      case (TypeInfo(clazz, ptype), json) if classOf[IndexedSeq[_]].isAssignableFrom(clazz) =>
        json match {
          case JArray(xs) =>
            val t = ptype.getOrElse(throw new MappingException("parameterized type not known"))
            xs.map(x => Extraction.extract(x, TypeInfo(t.getActualTypeArguments()(0).asInstanceOf[Class[_]], None))).toIndexedSeq
          case x => throw new MappingException("Can't convert " + x + " to IndexedSeq")
        }
    }

    def serialize(implicit format: Formats) = {
      case i: IndexedSeq[_] => JArray(i.map(Extraction.decompose).toList)
    }
  }

  def main(args: Array[String]) {
    import org.json4s._
    import org.json4s.jackson.Serialization
    import org.json4s.jackson.Serialization.{read, write}

    implicit val formats = Serialization.formats(NoTypeHints) + new IndexedSeqSerializer

    val token = Token("the", CharOffsets(0, 3))
    val sentence = Sentence(IndexedSeq(token))
    val ser = write(sentence)
    println(ser)
    val deser = read[Sentence](ser)
    println(deser)
    println(write(sentence))
  }
}
wolfe-pack/wolfe
wolfe-nlp/src/main/scala/ml/wolfe/nlp/io/JsonIO.scala
Scala
apache-2.0
1,457
package eu.timepit.refined.benchmark

import eu.timepit.refined.types.numeric.PosInt
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations.{Benchmark, BenchmarkMode, Mode, OutputTimeUnit}

@BenchmarkMode(Array(Mode.AverageTime))
class PosIntBenchmark {

  @Benchmark
  @OutputTimeUnit(TimeUnit.NANOSECONDS)
  def unsafeFrom_1: Any =
    PosInt.unsafeFrom(1)
}
fthomas/refined
modules/benchmark/src/main/scala/eu/timepit/refined/benchmark/PosIntBenchmark.scala
Scala
mit
378
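The benchmark above measures PosInt.unsafeFrom; as a quick reminder of how refined's PosInt behaves, here is a small sketch (the exact text of the Left message depends on the refined version):

import eu.timepit.refined.types.numeric.PosInt

object PosIntSketch extends App {
  // Validated construction: Right for positive values, Left with an error message otherwise.
  println(PosInt.from(1))   // Right(1)
  println(PosInt.from(0))   // Left(...) -- predicate failure message

  // unsafeFrom skips the Either wrapper and throws on invalid input,
  // which is why it is the method being benchmarked above.
  val one: PosInt = PosInt.unsafeFrom(1)
  println(one.value)        // 1
}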
package org.jetbrains.plugins.scala package lang.scaladoc import com.intellij.codeInsight.generation.surroundWith.SurroundWithHandler import com.intellij.lang.surroundWith.SurroundDescriptor import com.intellij.psi.PsiElement import com.intellij.testFramework.EditorTestUtil.{SELECTION_END_TAG => END, SELECTION_START_TAG => START} import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter import org.jetbrains.plugins.scala.extensions.executeWriteActionCommand import org.jetbrains.plugins.scala.lang.surroundWith.descriptors.ScalaSurroundDescriptors import org.jetbrains.plugins.scala.lang.surroundWith.surrounders.scaladoc._ import org.junit.Assert.{assertFalse, assertTrue} /** * User: Dmitry Naydanov * Date: 3/12/12 */ class SurroundWithWikiSyntaxTest extends ScalaLightCodeInsightFixtureTestAdapter { import ScalaLightCodeInsightFixtureTestAdapter.normalize import SurroundWithWikiSyntaxTest._ private def configureByText(text: String, stripTrailingSpaces: Boolean): Seq[PsiElement] = { val normalizedText = normalize(text, stripTrailingSpaces) getFixture.configureByText("dummy.scala", normalizedText) val selectionModel = getEditor.getSelectionModel descriptor.getElementsToSurround(getFile, selectionModel.getSelectionStart, selectionModel.getSelectionEnd) } private def performTest(text: String, surrounder: ScalaDocWithSyntaxSurrounder): Unit = { val stripTrailingSpaces = false val elements = configureByText(text, stripTrailingSpaces) assertFalse("No elements to be surrounded", elements.isEmpty) executeWriteActionCommand("Surround With Test") { SurroundWithHandler.invoke(getProject, getEditor, getFile, surrounder) }(getProject) val surrounded = surroundWith(text, surrounder) val expected = normalize(surrounded, stripTrailingSpaces) getFixture.checkResult(expected, stripTrailingSpaces) } private def performWithAllSurrounders(text: String): Unit = surrounders.foreach(performTest(text, _)) private def checkCannotBeSurrounded(text: String): Unit = { val elements = configureByText(text, stripTrailingSpaces = false) assertTrue(s"Elements to be surrounded: ${elements.mkString(", ")}", elements.isEmpty) } def testSurroundSimpleData(): Unit = { val text = s""" |/** | * b${START}lah b${END}lah | * blah blah blah | */""".stripMargin performWithAllSurrounders(text) } def testSurroundMultilineData(): Unit = { val text = s""" |/** blah lb${START}lah akfhsdhfsadhf | * skjgh dfsg shdfa hsdaf jhsad fsd | * dfgas dfhgsajdf sad${END}jfjsd | */""".stripMargin performWithAllSurrounders(text) } def testSurroundAnotherSyntax1(): Unit = { val text = s""" |/** | * __blah blah | * dfgasdhgfjk ^ashgdfkjgds| * ''aaaaaa'' sdkfhsadjkh^ ll | * sd${START}hfkhsa${END}dl__ | */""".stripMargin performWithAllSurrounders(text) } def testSurroundAnotherSyntax2(): Unit = { val text = s""" |/** | * __blah blah | * blkjhsd${START}asdhajs ''sdfsddlk'' | * shfg`sad`jhg${END}f__ | */""".stripMargin performWithAllSurrounders(text) } def testSurroundDataWithLeadingWhitespace(): Unit = { val text = s""" |/** | * $START datadatad${END}atadata | */""".stripMargin performWithAllSurrounders(text) } def testSurroundWholeToken(): Unit = { val text = s""" |/** | * ${START}comment_data$END | */""".stripMargin performWithAllSurrounders(text) } def testSurroundInTag1(): Unit = { val text = s""" |/** | * @param a aaa${START}aa | * aaaaa${END}aaa | */""".stripMargin performWithAllSurrounders(text) } def testSurroundInTag2(): Unit = { val text = s""" |/** | * @todo blah ${START}blah b${END}lah | */""".stripMargin 
performWithAllSurrounders(text) } def testSurroundAlreadyMarkedElement1(): Unit = { val text = s""" |/** | * blah $START^blah blah | * jhsdbjbhsafd^$END dajsdgf | */""".stripMargin performWithAllSurrounders(text) } def testSurroundAlreadyMarkedElement2(): Unit = { val text = s""" |/** | * blah ,,${START}blah blha | * blah blah$END,, blah | */""".stripMargin performWithAllSurrounders(text) } def testCannotSurroundCrossTags(): Unit = { val text = s""" |/** | * aa${START}aa__sahdkljahskdhasd | * dajs${END}kjhd__kas | */""".stripMargin checkCannotBeSurrounded(text) } def testCannotSurroundMultilineWhitespace(): Unit = { val text = s""" |/** | * b${START}lah blah | * | * blah blah$END blah | */""".stripMargin checkCannotBeSurrounded(text) } def testCannotSurroundTagName(): Unit = { val text = s""" |/** | * bla${START}h blah blah | * @see some${END}thing | */""".stripMargin checkCannotBeSurrounded(text) } def testCannotSurroundCrossTag2(): Unit = { val text = s""" |/** | * blah${START}__blah${END}blah__ | */""".stripMargin checkCannotBeSurrounded(text) } def testCannotSurroundCrossTagWithWSAndSyntax(): Unit = { val text = s""" |/** | * blah blah ${START}__blah blah | * blah bl${END}ah blah __ | */""".stripMargin checkCannotBeSurrounded(text) } } object SurroundWithWikiSyntaxTest { private val descriptor: SurroundDescriptor = ScalaSurroundDescriptors.getSurroundDescriptors()(1) private val surrounders: Seq[ScalaDocWithSyntaxSurrounder] = descriptor.getSurrounders .collect { case surrounder: ScalaDocWithSyntaxSurrounder => surrounder } private def surroundWith(text: String, surrounder: ScalaDocWithSyntaxSurrounder): String = { val tag = surrounder.getSyntaxTag text.replace(START, tag).replace(END, tag) } }
jastice/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/lang/scaladoc/SurroundWithWikiSyntaxTest.scala
Scala
apache-2.0
6,274
package org.jetbrains.plugins.scala.lang.resolve2

/**
 * Pavel.Fatin, 02.02.2010
 */
class InheritanceOverrideTest extends ResolveTestBase {
  override def folderPath: String = {
    super.folderPath + "inheritance/override/"
  }

  //TODO answer?
//  def testCaseClass = doTest
  def testClass = doTest
  def testClassParameter = doTest
  def testClassParameterValue = doTest
  //TODO classparameter
//  def testClassParameterValueFrom = doTest
  def testClassParameterValueTo = doTest
  def testClassParameterVariable = doTest
  //TODO classparameter
//  def testClassParameterVariableFrom = doTest
  def testClassParameterVariableTo = doTest
  def testFunction = doTest
  //TODO answer?
//  def testObject = doTest
  def testTrait = doTest
  def testValue = doTest
  def testVariable = doTest
}
triggerNZ/intellij-scala
test/org/jetbrains/plugins/scala/lang/resolve2/InheritanceOverrideTest.scala
Scala
apache-2.0
800
package scalaz.stream import Cause._ import java.util.concurrent.atomic.{AtomicBoolean, AtomicReference} import scala.annotation.tailrec import scala.collection.SortedMap import scala.concurrent.duration._ import scala.Function.const import scalaz.stream.async.immutable.Signal import scalaz.{\/-, Catchable, Functor, Monad, Monoid, Nondeterminism, \/, -\/, ~>} import scalaz.\/._ import scalaz.concurrent.{Actor, Future, Strategy, Task} import scalaz.stream.process1.Await1 import scalaz.syntax.monad._ import scala.annotation.unchecked.uncheckedVariance /** * An effectful stream of `O` values. In between emitting values * a `Process` may request evaluation of `F` effects. * A `Process[Nothing,A]` is a pure `Process` with no effects. * A `Process[Task,A]` may have `Task` effects. A `Process` * halts due to some `Cause`, generally `End` (indicating normal * termination) or `Error(t)` for some `t: Throwable` indicating * abnormal termination due to some uncaught error. */ sealed trait Process[+F[_], +O] extends Process1Ops[F,O] with TeeOps[F,O] { import scalaz.stream.Process._ import scalaz.stream.Util._ /** * Generate a `Process` dynamically for each output of this `Process`, and * sequence these processes using `append`. */ final def flatMap[F2[x] >: F[x], O2](f: O => Process[F2, O2]): Process[F2, O2] = { // Util.debug(s"FMAP $this") this match { case Halt(_) => this.asInstanceOf[Process[F2, O2]] case Emit(os) if os.isEmpty => this.asInstanceOf[Process[F2, O2]] case Emit(os) => os.tail.foldLeft(Try(f(os.head)))((p, n) => p ++ Try(f(n))) case aw@Await(_, _, _) => aw.extend(_ flatMap f) case ap@Append(p, n) => ap.extend(_ flatMap f) } } /** Transforms the output values of this `Process` using `f`. */ final def map[O2](f: O => O2): Process[F, O2] = flatMap { o => emit(f(o))} /** * If this process halts due to `Cause.End`, runs `p2` after `this`. * Otherwise halts with whatever caused `this` to `Halt`. */ final def append[F2[x] >: F[x], O2 >: O](p2: => Process[F2, O2]): Process[F2, O2] = { onHalt { case End => p2 case cause => Halt(cause) } } /** Alias for `append` */ final def ++[F2[x] >: F[x], O2 >: O](p2: => Process[F2, O2]): Process[F2, O2] = append(p2) /** Alias for `append` */ final def fby[F2[x] >: F[x], O2 >: O](p2: => Process[F2, O2]): Process[F2, O2] = append(p2) /** * Run one step of an incremental traversal of this `Process`. * This function is mostly intended for internal use. As it allows * a `Process` to be observed and captured during its execution, * users are responsible for ensuring resource safety. */ final def step: HaltOrStep[F, O] = { val empty: Emit[Nothing] = Emit(Nil) @tailrec def go(cur: Process[F,O], stack: Vector[Cause => Trampoline[Process[F,O]]], cnt: Int) : HaltOrStep[F,O] = { if (stack.nonEmpty) cur match { case Halt(End) if cnt <= 0 => Step(empty,Cont(stack)) case Halt(cause) => go(Try(stack.head(cause).run), stack.tail, cnt - 1) case Emit(os) if os.isEmpty => Step(empty,Cont(stack)) case emt@(Emit(os)) => Step(emt,Cont(stack)) case awt@Await(_,_,_) => Step(awt,Cont(stack)) case Append(h,st) => go(h, st fast_++ stack, cnt - 1) } else cur match { case hlt@Halt(cause) => hlt case emt@Emit(os) if os.isEmpty => halt0 case emt@Emit(os) => Step(emt,Cont(Vector.empty)) case awt@Await(_,_,_) => Step(awt,Cont(Vector.empty)) case Append(h,st) => go(h,st, cnt - 1) } } go(this,Vector.empty, 10) // *any* value >= 1 works here. higher values improve throughput but reduce concurrency and fairness. 10 is a totally wild guess } /** * `p.suspendStep` propagates exceptions to `p`. 
*/ final def suspendStep: Process0[HaltOrStep[F, O]] = halt onHalt { case End => emit(step) case early: EarlyCause => emit(injectCause(early).step) } /** * When this `Process` halts, call `f` to produce the next state. * Note that this function may be used to swallow or handle errors. */ final def onHalt[F2[x] >: F[x], O2 >: O](f: Cause => Process[F2, O2]): Process[F2, O2] = { val next = (t: Cause) => Trampoline.delay(Try(f(t))) this match { case (append: Append[F2, O2] @unchecked) => Append(append.head, append.stack :+ next) case emt@Emit(_) => Append(emt, Vector(next)) case awt@Await(_, _, _) => Append(awt, Vector(next)) case hlt@Halt(rsn) => Append(hlt, Vector(next)) } } ////////////////////////////////////////////////////////////////////////////////////// // // Pipe and Tee // ///////////////////////////////////////////////////////////////////////////////////// /** * Feed the output of this `Process` as input of `p1`. The implementation * will fuse the two processes, so this process will only generate * values as they are demanded by `p1`. If `p1` signals termination, `this` * is killed with same reason giving it an opportunity to cleanup. */ final def pipe[O2](p1: Process1[O, O2]): Process[F, O2] = p1.suspendStep.flatMap({ s1 => s1 match { case s@Step(awt1@Await1(rcv1), cont1) => val nextP1 = s.toProcess this.step match { case Step(awt@Await(_, _, _), cont) => awt.extend(p => (p +: cont) pipe nextP1) case Step(Emit(os), cont) => cont.continue pipe process1.feed(os)(nextP1) case hlt@Halt(End) => hlt pipe nextP1.disconnect(Kill).swallowKill case hlt@Halt(rsn: EarlyCause) => hlt pipe nextP1.disconnect(rsn) } case Step(emt@Emit(os), cont) => // When the pipe is killed from the outside it is killed at the beginning or after emit. // This ensures that Kill from the outside is not swallowed. emt onHalt { case End => this.pipe(cont.continue) case early => this.pipe(Halt(early) +: cont).causedBy(early) } case Halt(rsn) => this.kill onHalt { _ => Halt(rsn) } } }) /** Operator alias for `pipe`. */ final def |>[O2](p2: Process1[O, O2]): Process[F, O2] = pipe(p2) /** * Use a `Tee` to interleave or combine the outputs of `this` and * `p2`. This can be used for zipping, interleaving, and so forth. * Nothing requires that the `Tee` read elements from each * `Process` in lockstep. It could read fifty elements from one * side, then two elements from the other, then combine or * interleave these values in some way, etc. * * If at any point the `Tee` awaits on a side that has halted, * we gracefully kill off the other side, then halt. * * If at any point `t` terminates with cause `c`, both sides are killed, and * the resulting `Process` terminates with `c`. 
*/ final def tee[F2[x] >: F[x], O2, O3](p2: Process[F2, O2])(t: Tee[O, O2, O3]): Process[F2, O3] = { import scalaz.stream.tee.{AwaitL, AwaitR, disconnectL, disconnectR, feedL, feedR} t.suspendStep flatMap { ts => ts match { case s@Step(AwaitL(_), contT) => this.step match { case Step(awt@Await(_, _, _), contL) => awt.extend { p => (p +: contL).tee(p2)(s.toProcess) } case Step(Emit(os), contL) => contL.continue.tee(p2)(feedL[O, O2, O3](os)(s.toProcess)) case hlt@Halt(End) => hlt.tee(p2)(disconnectL(Kill)(s.toProcess).swallowKill) case hlt@Halt(rsn: EarlyCause) => hlt.tee(p2)(disconnectL(rsn)(s.toProcess)) } case s@Step(AwaitR(_), contT) => p2.step match { case s2: Step[F2, O2]@unchecked => (s2.head, s2.next) match { case (awt: Await[F2, Any, O2]@unchecked, contR) => awt.extend { (p: Process[F2, O2]) => this.tee(p +: contR)(s.toProcess) } case (Emit(o2s), contR) => this.tee(contR.continue.asInstanceOf[Process[F2,O2]])(feedR[O, O2, O3](o2s)(s.toProcess)) } case hlt@Halt(End) => this.tee(hlt)(disconnectR(Kill)(s.toProcess).swallowKill) case hlt@Halt(rsn : EarlyCause) => this.tee(hlt)(disconnectR(rsn)(s.toProcess)) } case Step(emt@Emit(o3s), contT) => // When the process is killed from the outside it is killed at the beginning or after emit. // This ensures that Kill from the outside isn't swallowed. emt onHalt { case End => this.tee(p2)(contT.continue) case early => this.tee(p2)(Halt(early) +: contT).causedBy(early) } case Halt(rsn) => this.kill onHalt { _ => p2.kill onHalt { _ => Halt(rsn) } } } } } ////////////////////////////////////////////////////////////////////////////////////// // // Alphabetically, Other combinators // ///////////////////////////////////////////////////////////////////////////////////// /** * Catch exceptions produced by this `Process`, not including termination by `Continue`, `End`, `Kill` * and uses `f` to decide whether to resume a second process. */ final def attempt[F2[x] >: F[x], O2]( f: Throwable => Process[F2, O2] = (t: Throwable) => emit(t) ): Process[F2, O2 \/ O] = this.map(right) onHalt { case Error(t) => Try(f(t)).map(left) case rsn => Halt(rsn) } /** * Attached `cause` when this Process terminates. See `Cause.causedBy` for semantics. */ final def causedBy(cause: Cause): Process[F, O] = cause.fold(this)(ec => this.onHalt(c => Halt(c.causedBy(ec)))) /** * Used when a `Process1`, `Tee`, or `Wye` is terminated by awaiting * on a branch that is in the halted state or was killed. Such a process * is given the opportunity to emit any final values. All Awaits are * converted to terminate with `cause` */ final def disconnect(cause: EarlyCause): Process0[O] = this.step match { case Step(emt@Emit(_), cont) => emt +: cont.extend(_.disconnect(cause)) case Step(awt@Await(_, rcv,_), cont) => suspend((Try(rcv(left(cause)).run) +: cont).disconnect(cause)) case hlt@Halt(rsn) => Halt(rsn) } /** Ignore all outputs of this `Process`. */ final def drain: Process[F, Nothing] = flatMap(_ => halt) /** * Map over this `Process` to produce a stream of `F`-actions, * then evaluate these actions. */ def evalMap[F2[x]>:F[x],O2](f: O => F2[O2]): Process[F2,O2] = map(f).eval /** Prepend a sequence of elements to the output of this `Process`. 
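   *
   * For example (a sketch):
   * {{{
   * Process(3, 4).prepend(Seq(1, 2))   // emits 1, 2, 3, 4
   * }}}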
*/ def prepend[O2>:O](os:Seq[O2]) : Process[F,O2] = { if (os.nonEmpty) { emitAll(os) onHalt { case End => this case cause: EarlyCause => this.step match { case Step(Await(_, rcv, _), cont) => Try(rcv(left(cause)).run) +: cont case Step(Emit(_), cont) => Halt(cause) +: cont case Halt(rsn) => Halt(rsn.causedBy(cause)) } } } else this } /** * Map over this `Process` to produce a stream of `F`-actions, * then evaluate these actions in batches of `bufSize`, allowing * for nondeterminism in the evaluation order of each action in the * batch. */ def gatherMap[F2[x]>:F[x],O2](bufSize: Int)(f: O => F2[O2])( implicit F: Nondeterminism[F2]): Process[F2,O2] = map(f).gather(bufSize) /** * Catch some of the exceptions generated by this `Process`, rethrowing any * not handled by the given `PartialFunction` and stripping out any values * emitted before the error. */ def handle[F2[x]>:F[x],O2](f: PartialFunction[Throwable, Process[F2,O2]])(implicit F: Catchable[F2]): Process[F2, O2] = attempt(rsn => f.lift(rsn).getOrElse(fail(rsn))) .dropWhile(_.isRight) .map(_.fold(identity, _ => sys.error("unpossible"))) /** Returns true, if this process is halted */ final def isHalt: Boolean = this match { case Halt(_) => true case _ => false } /** * Skip the first part of the process and pretend that it ended with `early`. * The first part is the first `Halt` or the first `Emit` or request from the first `Await`. */ private[stream] final def injectCause(early: EarlyCause): Process[F, O] = (this match { // Note: We cannot use `step` in the implementation since we want to inject `early` as soon as possible. // Eg. Let `q` be `halt ++ halt ++ ... ++ p`. `step` reduces `q` to `p` so if `injectCause` was implemented // by `step` then `q.injectCause` would be same as `p.injectCause`. But in our current implementation // `q.injectCause` behaves as `Halt(early) ++ halt ++ ... ++ p` which behaves as `Halt(early)` // (by the definition of `++` and the fact `early != End`). case Halt(rsn) => Halt(rsn.causedBy(early)) case Emit(_) => Halt(early) case Await(_, rcv, _) => Try(rcv(left(early)).run) case Append(Halt(rsn), stack) => Append(Halt(rsn.causedBy(early)), stack) case Append(Emit(_), stack) => Append(Halt(early), stack) case Append(Await(_, rcv, _), stack) => Try(rcv(left(early)).run) +: Cont(stack) }) /** * Causes this process to be terminated immediately with `Kill` cause, * giving chance for any cleanup actions to be run */ final def kill: Process[F, Nothing] = injectCause(Kill).drain.causedBy(Kill) /** * Run `p2` after this `Process` completes normally, or in the event of an error. * This behaves almost identically to `append`, except that `p1 append p2` will * not run `p2` if `p1` halts with an `Error` or is killed. Any errors raised by * `this` are reraised after `p2` completes. * * Note that `p2` is made into a finalizer using `asFinalizer`, so we * can be assured it is run even when this `Process` is being killed * by a downstream consumer. */ final def onComplete[F2[x] >: F[x], O2 >: O](p2: => Process[F2, O2]): Process[F2, O2] = this.onHalt { cause => p2.asFinalizer.causedBy(cause) } /** * Mostly internal use function. Ensures this `Process` is run even * when being `kill`-ed. Used to ensure resource safety in various * combinators. 
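   *
   * A rough illustration of the guarantee this provides to `onComplete`
   * (`src` and `release` are hypothetical names):
   * {{{
   * // `release` is still evaluated even though a downstream consumer kills the stream
   * (src onComplete eval_(release)).kill
   * }}}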
*/ final def asFinalizer: Process[F, O] = { def mkAwait[F[_], A, O](req: F[A], cln: A => Trampoline[Process[F,Nothing]])(rcv: EarlyCause \/ A => Trampoline[Process[F, O]]) = Await(req, rcv,cln) step match { case Step(e@Emit(_), cont) => e onHalt { case Kill => (halt +: cont).asFinalizer.causedBy(Kill) case cause => (Halt(cause) +: cont).asFinalizer } case Step(Await(req, rcv, cln), cont) => mkAwait(req, cln) { case -\/(Kill) => Trampoline.delay(Await(req, rcv, cln).asFinalizer.causedBy(Kill)) case x => rcv(x).map(p => (p +: cont).asFinalizer) } case hlt@Halt(_) => hlt } } /** * If this `Process` completes with an error, call `f` to produce * the next state. `f` is responsible for reraising the error if that * is the desired behavior. Since this function is often used for attaching * resource deallocation logic, the result of `f` is made into a finalizer * using `asFinalizer`, so we can be assured it is run even when this `Process` * is being killed by a downstream consumer. */ final def onFailure[F2[x] >: F[x], O2 >: O](f: Throwable => Process[F2, O2]): Process[F2, O2] = this.onHalt { case err@Error(rsn) => f(rsn).asFinalizer case other => Halt(other) } /** * Attach supplied process only if process has been killed. * Since this function is often used for attaching resource * deallocation logic, the result of `f` is made into a finalizer * using `asFinalizer`, so we can be assured it is run even when * this `Process` is being killed by a downstream consumer. */ final def onKill[F2[x] >: F[x], O2 >: O](p: => Process[F2, O2]): Process[F2, O2] = this.onHalt { case Kill => p.asFinalizer case other => Halt(other) } /** * Like `attempt`, but accepts a partial function. Unhandled errors are rethrown. */ def partialAttempt[F2[x]>:F[x],O2](f: PartialFunction[Throwable, Process[F2,O2]]) (implicit F: Catchable[F2]): Process[F2, O2 \/ O] = attempt(err => f.lift(err).getOrElse(fail(err))) /** * Run this process until it halts, then run it again and again, as * long as no errors or `Kill` occur. */ final def repeat: Process[F, O] = this.append(this.repeat) /** * For anly process terminating with `Kill`, this swallows the `Kill` and replaces it with `End` termination */ final def swallowKill: Process[F,O] = this.onHalt { case Kill | End => halt case cause => Halt(cause) } /** Translate the request type from `F` to `G`, using the given polymorphic function. */ def translate[G[_]](f: F ~> G): Process[G,O] = this.suspendStep.flatMap { case Step(Emit(os),cont) => emitAll(os) +: cont.extend(_.translate(f)) case Step(Await(req,rcv,cln),cont) => Await[G,Any,O](f(req), r => { Trampoline.suspend(rcv(r)).map(_ translate f) }, cln.andThen(_.map(_.translate(f)))) +: cont.extend(_.translate(f)) case hlt@Halt(rsn) => hlt } /** * Remove any leading emitted values from this `Process`. */ @tailrec final def trim: Process[F,O] = this.step match { case Step(Emit(_), cont) => cont.continue.trim case _ => this } /** * Removes all emitted elements from the front of this `Process`. * The second argument returned by this method is guaranteed to be * an `Await`, `Halt` or an `Append`-- if there are multiple `Emit'`s at the * front of this process, the sequences are concatenated together. * * If this `Process` does not begin with an `Emit`, returns the empty * sequence along with `this`. 
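   *
   * For example (a sketch; `t` stands for an arbitrary effect):
   * {{{
   * (emit(1) ++ emit(2) ++ eval(t)).unemit   // (Vector(1, 2), <process awaiting t>)
   * halt.unemit                              // (Vector(), halt)
   * }}}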
*/ final def unemit:(Seq[O],Process[F,O]) = { @tailrec def go(cur: Process[F, O], acc: Vector[O]): (Seq[O], Process[F, O]) = { cur.step match { case Step(Emit(os),cont) => go(cont.continue, acc fast_++ os) case Step(awt, cont) => (acc,awt +: cont) case Halt(rsn) => (acc,Halt(rsn)) } } go(this, Vector()) } final def uncons[F2[x] >: F[x], O2 >: O](implicit F: Monad[F2], C: Catchable[F2]): F2[(O2, Process[F2, O2])] = unconsOption(F, C).flatMap(_.map(F.point[(O2, Process[F2, O2])](_)).getOrElse(C.fail(new NoSuchElementException))) final def unconsOption[F2[x] >: F[x], O2 >: O](implicit F: Monad[F2], C: Catchable[F2]): F2[Option[(O2, Process[F2, O2])]] = step match { case Step(head, next) => head match { case Emit(as) => as.headOption.map(x => F.point[Option[(O2, Process[F2, O2])]](Some((x, Process.emitAll[O2](as drop 1) +: next)))) getOrElse next.continue.unconsOption case await: Await[F2, _, O2] => await.evaluate.flatMap(p => (p +: next).unconsOption(F,C)) } case Halt(cause) => cause match { case End | Kill => F.point(None) case _ : EarlyCause => C.fail(cause.asThrowable) } } /////////////////////////////////////////// // // Interpreters, runXXX // /////////////////////////////////////////// /** * Collect the outputs of this `Process[F,O]` into a Monoid `B`, given a `Monad[F]` in * which we can catch exceptions. This function is not tail recursive and * relies on the `Monad[F]` to ensure stack safety. */ final def runFoldMap[F2[x] >: F[x], B](f: O => B)(implicit F: Monad[F2], C: Catchable[F2], B: Monoid[B]): F2[B] = { def go(cur: Process[F2, O], acc: B): F2[B] = { cur.step match { case s: Step[F2,O]@unchecked => (s.head, s.next) match { case (Emit(os), cont) => F.bind(F.point(os.foldLeft(acc)((b, o) => B.append(b, f(o))))) { nacc => go(cont.continue.asInstanceOf[Process[F2,O]], nacc) } case (awt:Await[F2,Any,O]@unchecked, cont) => awt.evaluate.flatMap(p => go(p +: cont, acc)) } case Halt(End) => F.point(acc) case Halt(Kill) => F.point(acc) case Halt(Error(rsn)) => C.fail(rsn) } } go(this, B.zero) } /** * Collect the outputs of this `Process[F,O]`, given a `Monad[F]` in * which we can catch exceptions. This function is not tail recursive and * relies on the `Monad[F]` to ensure stack safety. */ final def runLog[F2[x] >: F[x], O2 >: O](implicit F: Monad[F2], C: Catchable[F2]): F2[Vector[O2]] = { runFoldMap[F2, Vector[O2]](Vector(_))( F, C, // workaround for performance bug in Vector ++ Monoid.instance[Vector[O2]]((a, b) => a fast_++ b, Vector()) ) } /** Run this `Process` solely for its final emitted value, if one exists. */ final def runLast[F2[x] >: F[x], O2 >: O](implicit F: Monad[F2], C: Catchable[F2]): F2[Option[O2]] = { implicit val lastOpt = new Monoid[Option[O2]] { def zero = None def append(left: Option[O2], right: => Option[O2]) = right orElse left // bias toward the end } this.last.runFoldMap[F2, Option[O2]]({ Some(_) }) } /** Run this `Process` solely for its final emitted value, if one exists, using `o2` otherwise. */ final def runLastOr[F2[x] >: F[x], O2 >: O](o2: => O2)(implicit F: Monad[F2], C: Catchable[F2]): F2[O2] = runLast[F2, O2] map { _ getOrElse o2 } /** Run this `Process`, purely for its effects. 
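   *
   * For example (a sketch, using the `Task`-based interpreters):
   * {{{
   * // prints "0", "1", "2" as a side effect; no values are collected
   * Process.range(0, 3).map(_.toString).toSource.to(io.stdOutLines).run.run
   * }}}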
*/ final def run[F2[x] >: F[x]](implicit F: Monad[F2], C: Catchable[F2]): F2[Unit] = F.void(drain.runLog(F, C)) } object Process extends ProcessInstances { import scalaz.stream.Util._ ////////////////////////////////////////////////////////////////////////////////////// // // Algebra // ///////////////////////////////////////////////////////////////////////////////////// type Trampoline[+A] = scalaz.Free.Trampoline[A] @uncheckedVariance val Trampoline = scalaz.Trampoline /** * Tags a state of process that has no appended tail, tha means can be Halt, Emit or Await */ sealed trait HaltEmitOrAwait[+F[_], +O] extends Process[F, O] object HaltEmitOrAwait { def unapply[F[_], O](p: Process[F, O]): Option[HaltEmitOrAwait[F, O]] = p match { case emit: Emit[O@unchecked] => Some(emit) case halt: Halt => Some(halt) case aw: Await[F@unchecked, _, O@unchecked] => Some(aw) case _ => None } } /** * Marker trait representing process in Emit or Await state. * Is useful for more type safety. */ sealed trait EmitOrAwait[+F[_], +O] extends Process[F, O] /** * The `Halt` constructor instructs the driver * that the last evaluation of Process completed with * supplied cause. */ case class Halt(cause: Cause) extends HaltEmitOrAwait[Nothing, Nothing] with HaltOrStep[Nothing, Nothing] /** * The `Emit` constructor instructs the driver to emit * the given sequence of values to the output * and then halt execution with supplied reason. * * Instead calling this constructor directly, please use one * of the following helpers: * * Process.emit * Process.emitAll */ case class Emit[+O](seq: Seq[O]) extends HaltEmitOrAwait[Nothing, O] with EmitOrAwait[Nothing, O] /** * The `Await` constructor instructs the driver to evaluate * `req`. If it returns successfully, `recv` is called with result on right side * to transition to the next state. * * In case the req terminates with failure the `Error(failure)` is passed on left side * giving chance for any fallback action. * * In case the process was killed before the request is evaluated `Kill` is passed on left side. * `Kill` is passed on left side as well as when the request is already in progress, but process was killed. * * The `preempt` parameter is used when constructing resource and preemption safe cleanups. * See `Process.bracket` for more. * * Note that * * Instead of this constructor directly, please use: * * Process.await or Process.bracket * */ case class Await[+F[_], A, +O]( req: F[A] , rcv: (EarlyCause \/ A) => Trampoline[Process[F, O]] @uncheckedVariance , preempt : A => Trampoline[Process[F,Nothing]] @uncheckedVariance = (_:A) => Trampoline.delay(halt:Process[F,Nothing]) ) extends HaltEmitOrAwait[F, O] with EmitOrAwait[F, O] { /** * Helper to modify the result of `rcv` parameter of await stack-safely on trampoline. */ def extend[F2[x] >: F[x], O2](f: Process[F, O] => Process[F2, O2]): Await[F2, A, O2] = Await[F2, A, O2](req, r => Trampoline.suspend(rcv(r)).map(f), preempt) def evaluate[F2[x] >: F[x], O2 >: O](implicit F: Monad[F2], C: Catchable[F2]): F2[Process[F2,O2]] = C.attempt(req).map { e => rcv(EarlyCause.fromTaskResult(e)).run } } /** * The `Append` constructor instructs the driver to continue with * evaluation of first step found in tail Vector. 
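   *
   * For example, `emit(1) ++ emit(2)` is represented (conceptually) as
   * `Append(Emit(Seq(1)), Vector(<thunk producing emit(2)>))`.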
* * Instead of this constructor please use: * * Process.append */ case class Append[+F[_], +O]( head: HaltEmitOrAwait[F, O] , stack: Vector[Cause => Trampoline[Process[F, O]]] @uncheckedVariance ) extends Process[F, O] { /** * Helper to modify the head and appended processes */ def extend[F2[x] >: F[x], O2](f: Process[F, O] => Process[F2, O2]): Process[F2, O2] = { val ms = stack.map(n => (cause: Cause) => Trampoline.suspend(n(cause)).map(f)) f(head) match { case HaltEmitOrAwait(p) => Append(p, ms) case app: Append[F2@unchecked, O2@unchecked] => Append(app.head, app.stack fast_++ ms) } } } /** * Marker trait representing next step of process or terminated process in `Halt` */ sealed trait HaltOrStep[+F[_], +O] /** * Intermediate step of process. * Used to step within the process to define complex combinators. */ case class Step[+F[_], +O](head: EmitOrAwait[F, O], next: Cont[F, O]) extends HaltOrStep[F, O] { def toProcess : Process[F,O] = Append(head.asInstanceOf[HaltEmitOrAwait[F,O]],next.stack) } /** * Continuation of the process. Represents process _stack_. Used in conjunction with `Step`. */ case class Cont[+F[_], +O](stack: Vector[Cause => Trampoline[Process[F, O]]] @uncheckedVariance) { /** * Prepends supplied process to this stack */ def +:[F2[x] >: F[x], O2 >: O](p: Process[F2, O2]): Process[F2, O2] = prepend(p) /** alias for +: */ def prepend[F2[x] >: F[x], O2 >: O](p: Process[F2, O2]): Process[F2, O2] = { if (stack.isEmpty) p else p match { case app: Append[F2@unchecked, O2@unchecked] => Append[F2, O2](app.head, app.stack fast_++ stack) case emt: Emit[O2@unchecked] => Append(emt, stack) case awt: Await[F2@unchecked, _, O2@unchecked] => Append(awt, stack) case hlt@Halt(_) => Append(hlt, stack) } } /** * Converts this stack to process, that is used * when following process with normal termination. */ def continue: Process[F, O] = prepend(halt) /** * Applies transformation function `f` to all frames of this stack. */ def extend[F2[_], O2](f: Process[F, O] => Process[F2, O2]): Cont[F2, O2] = Cont(stack.map(tf => (cause: Cause) => Trampoline.suspend(tf(cause).map(f)))) /** * Returns true, when this continuation is empty, i.e. no more appends to process */ def isEmpty : Boolean = stack.isEmpty } object Cont { /** empty continuation, that means evaluation is at end **/ val empty:Cont[Nothing,Nothing] = Cont(Vector.empty) } /////////////////////////////////////////////////////////////////////////////////////// // // CONSTRUCTORS // ////////////////////////////////////////////////////////////////////////////////////// /** Alias for emitAll */ def apply[O](o: O*): Process0[O] = emitAll(o) /** * Await the given `F` request and use its result. * If you need to specify fallback, use `awaitOr` */ def await[F[_], A, O](req: F[A])(rcv: A => Process[F, O]): Process[F, O] = awaitOr(req)(Halt.apply)(rcv) /** * Await a request, and if it fails, use `fb` to determine the next state. * Otherwise, use `rcv` to determine the next state. */ def awaitOr[F[_], A, O](req: F[A])(fb: EarlyCause => Process[F, O])(rcv: A => Process[F, O]): Process[F, O] = Await(req,(r: EarlyCause \/ A) => Trampoline.delay(Try(r.fold(fb,rcv)))) /** The `Process1` which awaits a single input, emits it, then halts normally. */ def await1[I]: Process1[I, I] = receive1(emit) /** `Writer` based version of `await1`. 
*/ def await1W[A]: Writer1[Nothing, A, A] = writer.liftO(Process.await1[A]) /** Like `await1`, but consults `fb` when await fails to receive an `I` */ def await1Or[I](fb: => Process1[I, I]): Process1[I, I] = receive1Or(fb)(emit) /** The `Wye` which request from both branches concurrently. */ def awaitBoth[I, I2]: Wye[I, I2, ReceiveY[I, I2]] = await(Both[I, I2])(emit) /** `Writer` based version of `awaitBoth`. */ def awaitBothW[I, I2]: WyeW[Nothing, I, I2, ReceiveY[I, I2]] = writer.liftO(Process.awaitBoth[I, I2]) /** The `Tee` which requests from the left branch, emits this value, then halts. */ def awaitL[I]: Tee[I, Any, I] = await(L[I])(emit) /** `Writer` based version of `awaitL`. */ def awaitLW[I]: TeeW[Nothing, I, Any, I] = writer.liftO(Process.awaitL[I]) /** The `Tee` which requests from the right branch, emits this value, then halts. */ def awaitR[I2]: Tee[Any, I2, I2] = await(R[I2])(emit) /** `Writer` based version of `awaitR`. */ def awaitRW[I2]: TeeW[Nothing, Any, I2, I2] = writer.liftO(Process.awaitR[I2]) /** * Resource and preemption safe `await` constructor. * * Use this combinator, when acquiring resources. This build a process that when run * evaluates `req`, and then runs `rcv`. Once `rcv` is completed, fails, or is interrupted, it will run `release` * * When the acquisition (`req`) is interrupted, neither `release` or `rcv` is run, however when the req was interrupted after * resource in `req` was acquired then, the `release` is run. * * If,the acquisition fails, use `bracket(req)(onPreempt)(rcv).onFailure(err => ???)` code to recover from the * failure eventually. * */ def bracket[F[_], A, O](req: F[A])(release: A => Process[F, Nothing])(rcv: A => Process[F, O]): Process[F, O] = { Await(req, { (r: EarlyCause \/ A) => Trampoline.delay(Try(r.fold(Halt.apply, a => rcv(a) onComplete release(a) ))) }, { a: A => Trampoline.delay(release(a)) }) } /** * The infinite `Process`, always emits `a`. * If for performance reasons it is good to emit `a` in chunks, * specify size of chunk by `chunkSize` parameter */ def constant[A](a: A, chunkSize: Int = 1): Process0[A] = { lazy val go: Process0[A] = if (chunkSize.max(1) == 1) emit(a) ++ go else emitAll(List.fill(chunkSize)(a)) ++ go go } /** The `Process` which emits the single value given, then halts. */ def emit[O](o: O): Process0[O] = Emit(Vector(o)) /** The `Process` which emits the given sequence of values, then halts. */ def emitAll[O](os: Seq[O]): Process0[O] = Emit(os) /** A `Writer` which emits one value to the output. */ def emitO[O](o: O): Process0[Nothing \/ O] = emit(right(o)) /** A `Writer` which writes the given value. */ def emitW[W](s: W): Process0[W \/ Nothing] = emit(left(s)) /** The `Process` which emits no values and halts immediately with the given exception. */ def fail(rsn: Throwable): Process0[Nothing] = Halt(Error(rsn)) /** A `Process` which emits `n` repetitions of `a`. */ def fill[A](n: Int)(a: A, chunkSize: Int = 1): Process0[A] = { val chunkN = chunkSize max 1 val chunk = emitAll(List.fill(chunkN)(a)) // we can reuse this for each step def go(m: Int): Process0[A] = if (m >= chunkN) chunk ++ go(m - chunkN) else if (m <= 0) halt else emitAll(List.fill(m)(a)) go(n max 0) } /** * Produce a continuous stream from a discrete stream by using the * most recent value. */ def forwardFill[A](p: Process[Task, A])(implicit S: Strategy): Process[Task, A] = async.toSignal(p).continuous /** `halt` but with precise type. 
*/ private[stream] val halt0: Halt = Halt(End) /** The `Process` which emits no values and signals normal termination. */ val halt: Process0[Nothing] = halt0 /** Alias for `halt`. */ def empty[F[_],O]: Process[F, O] = halt /** * An infinite `Process` that repeatedly applies a given function * to a start value. `start` is the first value emitted, followed * by `f(start)`, then `f(f(start))`, and so on. */ def iterate[A](start: A)(f: A => A): Process0[A] = emit(start) ++ iterate(f(start))(f) /** * Like [[iterate]], but takes an effectful function for producing * the next state. `start` is the first value emitted. */ def iterateEval[F[_], A](start: A)(f: A => F[A]): Process[F, A] = emit(start) ++ await(f(start))(iterateEval(_)(f)) /** Lazily produce the range `[start, stopExclusive)`. If you want to produce the sequence in one chunk, instead of lazily, use `emitAll(start until stopExclusive)`. */ def range(start: Int, stopExclusive: Int, by: Int = 1): Process0[Int] = unfold(start)(i => if (i < stopExclusive) Some((i, i + by)) else None) /** * Lazily produce a sequence of nonoverlapping ranges, where each range * contains `size` integers, assuming the upper bound is exclusive. * Example: `ranges(0, 1000, 10)` results in the pairs * `(0, 10), (10, 20), (20, 30) ... (990, 1000)` * * Note: The last emitted range may be truncated at `stopExclusive`. For * instance, `ranges(0,5,4)` results in `(0,4), (4,5)`. * * @throws IllegalArgumentException if `size` <= 0 */ def ranges(start: Int, stopExclusive: Int, size: Int): Process0[(Int, Int)] = { require(size > 0, "size must be > 0, was: " + size) unfold(start){ lower => if (lower < stopExclusive) Some((lower -> ((lower+size) min stopExclusive), lower+size)) else None } } /** * The `Process1` which awaits a single input and passes it to `rcv` to * determine the next state. */ def receive1[I, O](rcv: I => Process1[I, O]): Process1[I, O] = await(Get[I])(rcv) /** Like `receive1`, but consults `fb` when it fails to receive an input. */ def receive1Or[I, O](fb: => Process1[I, O])(rcv: I => Process1[I, O]): Process1[I, O] = awaitOr(Get[I])((rsn: EarlyCause) => fb.causedBy(rsn))(rcv) /** * Delay running `p` until `awaken` becomes true for the first time. * The `awaken` process may be discrete. */ def sleepUntil[F[_], A](awaken: Process[F, Boolean])(p: Process[F, A]): Process[F, A] = awaken.dropWhile(!_).once.flatMap(_ => p) /** * A supply of `Long` values, starting with `initial`. * Each read is guaranteed to return a value which is unique * across all threads reading from this `supply`. */ def supply(initial: Long): Process[Task, Long] = { import java.util.concurrent.atomic.AtomicLong val l = new AtomicLong(initial) repeatEval { Task.delay { l.getAndIncrement }} } /** A `Writer` which writes the given value; alias for `emitW`. */ def tell[S](s: S): Process0[S \/ Nothing] = emitW(s) /** Produce a (potentially infinite) source from an unfold. */ def unfold[S, A](s0: S)(f: S => Option[(A, S)]): Process0[A] = { def go(s: S): Process0[A] = f(s) match { case Some((a, sn)) => emit(a) ++ go(sn) case None => halt } suspend(go(s0)) } /** Like [[unfold]], but takes an effectful function. 
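   *
   * For example (a sketch; `nextPage` is a hypothetical effectful pager):
   * {{{
   * def nextPage(token: Int): Task[Option[(String, Int)]] = ???
   * val pages: Process[Task, String] = unfoldEval(0)(nextPage)   // halts once nextPage yields None
   * }}}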
*/ def unfoldEval[F[_], S, A](s0: S)(f: S => F[Option[(A, S)]]): Process[F, A] = { def go(s: S): Process[F, A] = await(f(s)) { case Some((a, sn)) => emit(a) ++ go(sn) case None => halt } suspend(go(s0)) } ////////////////////////////////////////////////////////////////////////////////////// // // ENV, Tee, Wye et All // ///////////////////////////////////////////////////////////////////////////////////// case class Env[-I, -I2]() { sealed trait Y[-X] { def tag: Int def fold[R](l: => R, r: => R, both: => R): R } sealed trait T[-X] extends Y[X] sealed trait Is[-X] extends T[X] case object Left extends Is[I] { def tag = 0 def fold[R](l: => R, r: => R, both: => R): R = l } case object Right extends T[I2] { def tag = 1 def fold[R](l: => R, r: => R, both: => R): R = r } case object Both extends Y[ReceiveY[I, I2]] { def tag = 2 def fold[R](l: => R, r: => R, both: => R): R = both } } private val Left_ = Env[Any, Any]().Left private val Right_ = Env[Any, Any]().Right private val Both_ = Env[Any, Any]().Both def Get[I]: Env[I, Any]#Is[I] = Left_ def L[I]: Env[I, Any]#Is[I] = Left_ def R[I2]: Env[Any, I2]#T[I2] = Right_ def Both[I, I2]: Env[I, I2]#Y[ReceiveY[I, I2]] = Both_ ////////////////////////////////////////////////////////////////////////////////////// // // SYNTAX // ///////////////////////////////////////////////////////////////////////////////////// /** Adds syntax for `Channel`. */ implicit def toChannelSyntax[F[_], I, O](self: Channel[F, I, O]): ChannelSyntax[F, I, O] = new ChannelSyntax(self) /** Adds syntax for `Process1`. */ implicit def toProcess1Syntax[I, O](self: Process1[I, O]): Process1Syntax[I, O] = new Process1Syntax(self) /** Adds syntax for `Sink`. */ implicit def toSinkSyntax[F[_], I](self: Sink[F, I]): SinkSyntax[F, I] = new SinkSyntax(self) /** Adds syntax for `Sink` that is specialized for Task. */ implicit def toSinkTaskSyntax[F[_], I](self: Sink[Task, I]): SinkTaskSyntax[I] = new SinkTaskSyntax(self) /** Adds syntax for `Tee`. */ implicit def toTeeSyntax[I, I2, O](self: Tee[I, I2, O]): TeeSyntax[I, I2, O] = new TeeSyntax(self) /** Adds syntax for `Writer`. */ implicit def toWriterSyntax[F[_], W, O](self: Writer[F, W, O]): WriterSyntax[F, W, O] = new WriterSyntax(self) /** Adds syntax for `Writer` that is specialized for Task. */ implicit def toWriterTaskSyntax[W, O](self: Writer[Task, W, O]): WriterTaskSyntax[W, O] = new WriterTaskSyntax(self) /** Adds syntax for `Wye`. */ implicit def toWyeSyntax[I, I2, O](self: Wye[I, I2, O]): WyeSyntax[I, I2, O] = new WyeSyntax(self) implicit class ProcessSyntax[F[_],O](val self: Process[F,O]) extends AnyVal { /** Feed this `Process` through the given effectful `Channel`. */ def through[F2[x]>:F[x],O2](f: Channel[F2,O,O2]): Process[F2,O2] = self.zipWith(f)((o,f) => f(o)).eval onHalt { _.asHalt } // very gross; I don't like this, but not sure what to do /** * Feed this `Process` through the given effectful `Channel`, signaling * termination to `f` via `None`. Useful to allow `f` to flush any * buffered values to the output when it detects termination, see * [[scalaz.stream.io.bufferedChannel]] combinator. */ def throughOption[F2[x]>:F[x],O2](f: Channel[F2,Option[O],O2]): Process[F2,O2] = self.terminated.through(f) /** Attaches `Sink` to this `Process` */ def to[F2[x]>:F[x]](f: Sink[F2,O]): Process[F2,Unit] = through(f) /** Attach a `Sink` to the output of this `Process` but echo the original. 
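   *
   * For example (a sketch; `src` and `someSink` are hypothetical):
   * {{{
   * // log every element to stdout while still passing it downstream
   * src.observe(io.stdOutLines).to(someSink)
   * }}}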
*/ def observe[F2[x]>:F[x]](f: Sink[F2,O]): Process[F2,O] = self.zipWith(f)((o,f) => (o,f(o))) flatMap { case (orig,action) => eval(action).drain ++ emit(orig) } onHalt { _.asHalt } } /** * Provides infix syntax for `eval: Process[F,F[O]] => Process[F,O]` */ implicit class EvalProcess[F[_], O](val self: Process[F, F[O]]) extends AnyVal { /** * Evaluate the stream of `F` actions produced by this `Process`. * This sequences `F` actions strictly--the first `F` action will * be evaluated before work begins on producing the next `F` * action. To allow for concurrent evaluation, use `sequence` * or `gather`. */ def eval: Process[F, O] = self flatMap { await(_)(emit) } /** * Read chunks of `bufSize` from input, then use `Nondeterminism.gatherUnordered` * to run all these actions to completion. */ def gather(bufSize: Int)(implicit F: Nondeterminism[F]): Process[F,O] = self.pipe(process1.chunk(bufSize)).map(F.gatherUnordered).eval.flatMap(emitAll) /** * Read chunks of `bufSize` from input, then use `Nondeterminism.gather` * to run all these actions to completion and return elements in order. */ def sequence(bufSize: Int)(implicit F: Nondeterminism[F]): Process[F,O] = self.pipe(process1.chunk(bufSize)).map(F.gather).eval.flatMap(emitAll) } /** * This class provides infix syntax specific to `Process0`. */ implicit class Process0Syntax[O](val self: Process0[O]) extends AnyVal { /** Converts this `Process0` to a `Vector`. */ def toVector: Vector[O] = self.unemit match { case (_, Halt(Error(rsn))) => throw rsn case (os, _) => os.toVector } /** Converts this `Process0` to an `IndexedSeq`. */ def toIndexedSeq: IndexedSeq[O] = toVector /** Converts this `Process0` to a `List`. */ def toList: List[O] = toVector.toList /** Converts this `Process0` to a `Seq`. */ def toSeq: Seq[O] = toVector /** Converts this `Process0` to a `Stream`. */ def toStream: Stream[O] = { def go(p: Process0[O]): Stream[O] = p.step match { case s: Step[Nothing, O] => s.head match { case Emit(os) => os.toStream #::: go(s.next.continue) case _ => sys.error("impossible") } case Halt(Error(rsn)) => throw rsn case Halt(_) => Stream.empty } go(self) } /** Converts this `Process0` to a `Map`. */ def toMap[K, V](implicit isKV: O <:< (K, V)): Map[K, V] = toVector.toMap(isKV) /** Converts this `Process0` to a `SortedMap`. */ def toSortedMap[K, V](implicit isKV: O <:< (K, V), ord: Ordering[K]): SortedMap[K, V] = SortedMap(toVector.asInstanceOf[Seq[(K, V)]]: _*) def toSource: Process[Task, O] = self @deprecated("liftIO is deprecated in favor of toSource. It will be removed in a future release.", "0.7") def liftIO: Process[Task, O] = self } /** * Syntax for processes that have its effects wrapped in Task */ implicit class SourceSyntax[O](val self: Process[Task, O]) extends WyeOps[O] { /** converts process to signal **/ def toSignal(implicit S:Strategy):Signal[O] = async.toSignal(self) /** * Produce a continuous stream from a discrete stream by using the * most recent value. */ def forwardFill(implicit S: Strategy): Process[Task, O] = self.toSignal.continuous /** * Returns result of channel evaluation tupled with * original value passed to channel. **/ def observeThrough[O2](ch: Channel[Task, O, O2]): Process[Task, (O, O2)] = { val observerCh = ch map { f => o: O => f(o) map { o2 => o -> o2 } } self through observerCh } /** * Asynchronous stepping of this Process. Note that this method is not resource safe unless * callback is called with _left_ side completed. In that case it is guaranteed that all cleanups * has been successfully completed. 
* User of this method is responsible for any cleanup actions to be performed by running the * next Process obtained on right side of callback. * * This method returns a function, that when applied, causes the running computation to be interrupted. * That is useful of process contains any asynchronous code, that may be left with incomplete callbacks. * If the evaluation of the process is interrupted, then the interruption is only active if the callback * was not completed before, otherwise interruption is no-op. * * There is chance, that cleanup code of intermediate `Await` will get called twice on interrupt, but * always at least once. The second cleanup invocation in that case may run on different thread, asynchronously. * * Please note that this method is *not* intended for external use! It is the `Await` analogue of `step`, which * is also an internal-use function. * * @param cb result of the asynchronous evaluation of the process. Note that, the callback is never called * on the right side, if the sequence is empty. * @param S Strategy to use when evaluating the process. Note that `Strategy.Sequential` may cause SOE. * @return Function to interrupt the evaluation */ protected[stream] final def stepAsync(cb: Cause \/ (Seq[O], Cont[Task,O]) => Unit)(implicit S: Strategy): EarlyCause => Unit = { val allSteps = Task delay { /* * Represents the running state of the computation. If we're running, then the interrupt * function *for our current step* will be on the left. If we have been interrupted, then * the cause for that interrupt will be on the right. These state transitions are made * atomically, such that it is *impossible* for a task to be running, never interrupted and * to have this value be a right. If the value is a right, then either no task is running * or the running task has received an interrupt. */ val interrupted = new AtomicReference[(() => Unit) \/ EarlyCause](-\/({ () => () })) /* * Produces the evaluation for a single step. Generally, this function will be * invoked only once and return immediately. In the case of an `Await`, we must * descend recursively into the resultant child. Generally speaking, the recursion * should be extremely shallow, since it is uncommon to have a chain of nested * awaits of any significant length (usually they are punctuated by an `Emit`). */ def go(p: Process[Task, O]): Task[EarlyCause => Unit] = Task delay { p.step match { case Halt(cause) => (Task delay { S { cb(-\/(cause)) } }) >> (Task now { _: EarlyCause => () }) case Step(Emit(os), cont) => (Task delay { S { cb(\/-((os, cont))) } }) >> (Task now { _: EarlyCause => () }) case Step(awt: Await[Task, a, O], cont) => { val Await(req, rcv, cln) = awt case class PreStepAbort(c: EarlyCause) extends RuntimeException def unpack(msg: Option[Throwable \/ a]): Option[Process[Task, O]] = msg map { r => Try(rcv(EarlyCause fromTaskResult r).run) } // throws an exception if we're already interrupted (caught in preStep check) def checkInterrupt(int: => (() => Unit)): Task[Unit] = Task delay { interrupted.get() match { case ptr @ -\/(int2) => { if (interrupted.compareAndSet(ptr, -\/(int))) Task now (()) else checkInterrupt(int) } case \/-(c) => Task fail PreStepAbort(c) } } join Task delay { // will be true when we have "committed" to either a mid-step OR exceptional/completed val barrier = new AtomicBoolean(false) // detects what completion/interrupt case we're in and factors out race conditions def handle( // interrupted before the task started running; task never ran! 
preStep: EarlyCause => Unit, // interrupted *during* the task run; task is probably still running midStep: EarlyCause => Unit, // task finished running, but we were *previously* interrupted postStep: (Process[Task, Nothing], EarlyCause) => Unit, // task finished with an error, but was not interrupted exceptional: Throwable => Unit, // task finished with a value, no errors, no interrupts completed: Process[Task, O] => Unit)(result: Option[Throwable \/ a]): Unit = result match { // interrupted via the `Task.fail` defined in `checkInterrupt` case Some(-\/(PreStepAbort(cause: EarlyCause))) => preStep(cause) case result => { val inter = interrupted.get().toOption assert(!inter.isEmpty || result.isDefined) // interrupted via the callback mechanism, checked in `completeInterruptibly` // always matches to a `None` (we don't have a value yet) inter filter { _ => !result.isDefined } match { case Some(cause) => { if (barrier.compareAndSet(false, true)) { midStep(cause) } else { // task already completed *successfully*, pretend we weren't interrupted at all // our *next* step (which is already running) will get a pre-step interrupt () } } case None => { // completed the task, `interrupted.get()` is defined, and so we were interrupted post-completion // always matches to a `Some` (we always have value) val pc = for { cause <- inter either <- result } yield { either match { case -\/(t) => () // I guess we just swallow the exception here? no idea what to do, since we don't have a handler for this case case \/-(r) => postStep(Try(cln(r).run), cause) // produce the preemption handler, given the resulting resource } } pc match { case Some(back) => back case None => { if (barrier.compareAndSet(false, true)) { result match { // nominally completed the task, but with an exception case Some(-\/(t)) => exceptional(t) case result => { // completed the task, no interrupts, no exceptions, good to go! unpack(result) match { case Some(head) => completed(head +: cont) case None => ??? // didn't match any condition; fail! (probably a double-None bug in completeInterruptibly) } } } } else { result match { case Some(_) => // we detected mid-step interrupt; this needs to transmute to post-step; loop back to the top! handle(preStep = preStep, midStep = midStep, postStep = postStep, exceptional = exceptional, completed = completed)(result) case None => ??? // wtf?! (apparently we were called twice with None; bug in completeInterruptibly) } } } } } } } } /* * Start the task. per the `completeInterruptibly` invariants, the callback will be invoked exactly once * unless interrupted in the final computation step, in which case it will be invoked twice: once with * the interrupt signal and once with the final computed result (this case is detected by the `PostStep`) * extractor. Under all other circumstances, including interrupts, exceptions and natural completion, the * callback will be invoked exactly once. 
*/ lazy val interrupt: () => Unit = completeInterruptibly((checkInterrupt(interrupt) >> req).get)(handle( preStep = { cause => // interrupted; now drain (Try(rcv(-\/(cause)).run) +: cont).drain.run runAsync { case -\/(t) => S { cb(-\/(Error(t) causedBy cause)) } case \/-(_) => S { cb(-\/(cause)) } } }, midStep = { cause => // interrupted; now drain (Try(rcv(-\/(cause)).run) +: cont).drain.run runAsync { case -\/(t) => S { cb(-\/(Error(t) causedBy cause)) } case \/-(_) => S { cb(-\/(cause)) } } }, postStep = { (inner, _) => inner.run runAsync { _ => () } // invoke the cleanup }, exceptional = { t => // we got an exception (not an interrupt!) and we need to drain everything go(Try(rcv(-\/(Error(t))).run) +: cont) runAsync { _ => () } }, completed = { continuation => go(continuation) runAsync { _ => () } } )) interrupt // please don't delete this! highly mutable code within // interrupts the current step (may be a recursive child!) and sets `interrupted` def referencedInterrupt(cause: EarlyCause): Unit = { interrupted.get() match { case ptr @ -\/(int) => { if (interrupted.compareAndSet(ptr, \/-(cause))) { int() } else { referencedInterrupt(cause) } } case \/-(_) => () // interrupted a second (or more) time; discard later causes and keep the first one } } referencedInterrupt _ } } } } join go _ } allSteps flatMap { _(self) } run // hey, we could totally return something sane here! what up? } /** * Analogous to Future#listenInterruptibly, but guarantees listener notification provided that the * body of any given computation step does not block indefinitely. When the interrupt function is * invoked, the callback will be immediately invoked, either with an available completion value or * with None. If the current step of the task ultimately completes with its *final* value (i.e. * the final step of the task is an Async and it starts before the interrupt and completes *afterwards*), * that value will be passed to the callback as a second return. Thus, the callback will always be * invoked at least once, and may be invoked twice. If it is invoked twice, the first callback * will always be None while the second will be Some. * * */ private def completeInterruptibly[A](f: Future[A])(cb: Option[A] => Unit)(implicit S: Strategy): () => Unit = { import Future._ val cancel = new AtomicBoolean(false) // `cb` is run exactly once or twice // Case A) `cb` is run with `None` followed by `Some` if we were cancelled but still obtained a value. // Case B) `cb` is run with just `Some` if it's never cancelled. // Case C) `cb` is run with just `None` if it's cancelled before a value is even attempted. // Case D) the same as case A, but in the opposite order, only in very rare cases lazy val actor: Actor[Option[Future[A]]] = new Actor[Option[Future[A]]]({ // pure cases case Some(Suspend(thunk)) if !cancel.get() => actor ! Some(thunk()) case Some(BindSuspend(thunk, g)) if !cancel.get() => actor ! Some(thunk() flatMap g) case Some(Now(a)) => S { cb(Some(a)) } case Some(Async(onFinish)) if !cancel.get() => { onFinish { a => Trampoline delay { S { cb(Some(a)) } } } } case Some(BindAsync(onFinish, g)) if !cancel.get() => { onFinish { a => if (!cancel.get()) { Trampoline delay { g(a) } map { r => actor ! 
Some(r) } } else { // here we drop `a` on the floor Trampoline done { () } // `cb` already run with `None` } } } // fallthrough case where cancel.get() == true case Some(_) => () // `cb` already run with `None` case None => { cancel.set(true) // the only place where `cancel` is set to `true` S { cb(None) } } }) S { actor ! Some(f) } { () => actor ! None } } } ////////////////////////////////////////////////////////////////////////////////////// // // SYNTAX Functions // ///////////////////////////////////////////////////////////////////////////////////// /** * Alias for await(fo)(emit) */ def eval[F[_], O](fo: F[O]): Process[F, O] = await(fo)(emit) /** * Evaluate an arbitrary effect once, purely for its effects, * ignoring its return value. This `Process` emits no values. */ def eval_[F[_], O](f: F[O]): Process[F, Nothing] = eval(f).drain /** Prefix syntax for `p.repeat`. */ def repeat[F[_], O](p: Process[F, O]): Process[F, O] = p.repeat /** * Evaluate an arbitrary effect in a `Process`. The resulting `Process` will emit values * until an error occurs. * */ def repeatEval[F[_], O](fo: F[O]): Process[F, O] = eval(fo).repeat /** * Produce `p` lazily. Useful if producing the process involves allocation of * some local mutable resource we want to ensure is freshly allocated * for each consumer of `p`. * * Note that this implementation assures that: * {{{ * suspend(p).kill === suspend(p.kill) * suspend(p).kill === p.kill * * suspend(p).repeat === suspend(p.repeat) * suspend(p).repeat === p.repeat * * suspend(p).eval === suspend(p.eval) * suspend(p).eval === p.eval * * Halt(cause) ++ suspend(p) === Halt(cause) ++ p * }}} * */ def suspend[F[_], O](p: => Process[F, O]): Process[F, O] = Append(halt0,Vector({ case End => Trampoline.done(p) case early: EarlyCause => Trampoline.done(p.injectCause(early)) })) }
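
// A minimal usage sketch of `Process.suspend` (illustrative only; `counter` is a
// hypothetical name): each run of `counter` gets its own freshly allocated counter,
// because the body is not evaluated until the process is actually stepped.
//
//   def counter: Process[Task, Int] = Process.suspend {
//     val i = new java.util.concurrent.atomic.AtomicInteger(0)
//     Process.repeatEval(Task.delay(i.incrementAndGet()))
//   }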
shawjef3/scalaz-stream
src/main/scala/scalaz/stream/Process.scala
Scala
mit
59,169
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs // License: http://www.gnu.org/licenses/gpl-3.0.en.html package org.ensime.server import java.io.{ FileOutputStream, PrintStream } import java.net.InetSocketAddress import java.nio.file.Paths import scala.concurrent.Await import scala.concurrent.duration._ import scala.util._ import akka.actor._ import akka.actor.SupervisorStrategy.Stop import com.typesafe.config._ import io.netty.channel.Channel import org.ensime.api._ import org.ensime.config._ import org.ensime.config.richconfig._ import org.ensime.core._ import org.ensime.lsp.ensime.EnsimeLanguageServer import org.ensime.util.Slf4jSetup import org.ensime.api.EnsimeFile.Implicits.DefaultCharset import org.ensime.util.path._ import org.slf4j._ class ServerActor( config: EnsimeConfig, serverConfig: EnsimeServerConfig ) extends Actor with ActorLogging { var channel: Channel = _ override val supervisorStrategy = OneForOneStrategy() { case ex: Exception => log.error(ex, s"Error with monitor actor ${ex.getMessage}") self ! ShutdownRequest( s"Monitor actor failed with ${ex.getClass} - ${ex.toString}", isError = true ) Stop } def initialiseChildren(): Unit = { implicit val config: EnsimeConfig = this.config implicit val serverConfig: EnsimeServerConfig = this.serverConfig val broadcaster = context.actorOf(Broadcaster(), "broadcaster") val project = context.actorOf(Project(broadcaster), "project") // async start the HTTP Server val selfRef = self val preferredHttpPort = PortUtil.port(config.cacheDir.file, "http") val hookHandlers: WebServer.HookHandlers = { outHandler => val delegate = context.actorOf(Props(new Actor { def receive: Receive = { case res: RpcResponseEnvelope => outHandler(res) } })) val inHandler = context.actorOf(ConnectionHandler(project, broadcaster, delegate)) { req => req.fold(err => inHandler ! err, msg => inHandler ! msg) } } val docs = DocJarReading.forConfig(config) WebServer .start(docs, preferredHttpPort.getOrElse(0), hookHandlers) .onComplete { case Failure(ex) => log.error(ex, s"Error binding http endpoint ${ex.getMessage}") selfRef ! ShutdownRequest( s"http endpoint failed to bind ($preferredHttpPort)", isError = true ) case Success(ch) => this.channel = ch log.info(s"ENSIME HTTP on ${ch.localAddress()}") try { val port = ch.localAddress().asInstanceOf[InetSocketAddress].getPort() PortUtil.writePort(config.cacheDir.file, port, "http") } catch { case ex: Throwable => log.error(ex, s"Error initializing http endpoint ${ex.getMessage}") selfRef ! ShutdownRequest( s"http endpoint failed to initialise: ${ex.getMessage}", isError = true ) } }(context.system.dispatcher) Environment.info foreach log.info } override def preStart(): Unit = try { initialiseChildren() } catch { case t: Throwable => log.error(t, s"Error during startup - ${t.getMessage}") self ! ShutdownRequest(t.toString, isError = true) } override def receive: Receive = { case req: ShutdownRequest => triggerShutdown(req) } def triggerShutdown(request: ShutdownRequest): Unit = Server.shutdown(context.system, channel, request, serverConfig.exit) } object ServerActor { def props()(implicit ensimeConfig: EnsimeConfig, serverConfig: EnsimeServerConfig): Props = Props(new ServerActor(ensimeConfig, serverConfig)) } object Server { Slf4jSetup.init() val log = LoggerFactory.getLogger("Server") // Config is loaded in this order: // // 1. system properties // 2. .ensime-server.conf beside .ensime // 3. .ensime-server.conf in the XDG / user.home // 4. 
bundled application.conf def loadConfig(): Config = { val fallback = ConfigFactory.load() val user = List( parseServerConfig(fallback).config.file.getParent, Paths.get( sys.env.get("XDG_CONFIG_HOME").getOrElse(sys.props("user.home")) ) ).map(_ / ".ensime-server.conf") .filter(_.exists()) .map(p => ConfigFactory.parseFile(p.toFile)) (ConfigFactory.systemProperties() :: user ::: fallback :: Nil).reduce { (higher, lower) => higher.withFallback(lower) } } def startRegularServer(): Unit = { val config = loadConfig() implicit val serverConfig: EnsimeServerConfig = parseServerConfig(config) EnsimeConfigProtocol.parse(serverConfig.config.file.readString()) match { case Right(cfg) => implicit val ensimeConfig: EnsimeConfig = cfg ActorSystem .create("ENSIME", config) .actorOf(ServerActor.props(), "ensime-main") case Left(err) => System.err.println(s"Failed to parse .ensime $err") } } def startLspServer(): Unit = { val cwd = Option(System.getProperty("lsp.workspace")).getOrElse(".") val server = new EnsimeLanguageServer(System.in, System.out) // route System.out somewhere else. The presentation compiler may spit out text // and that confuses VScode, since stdout is used for the language server protocol val origOut = System.out try { System.setOut( new PrintStream(new FileOutputStream(s"$cwd/pc.stdout.log")) ) System.setErr( new PrintStream(new FileOutputStream(s"$cwd/pc.stdout.log")) ) log.info("This file contains stdout from the presentation compiler.") log.info(s"Starting server in $cwd") log.info(s"Classpath: ${Properties.javaClassPath}") server.start() } finally { System.setOut(origOut) } // make sure we actually exit System.exit(0) } def main(args: Array[String]): Unit = if (args.contains("--lsp")) { startLspServer() } else { startRegularServer() } def shutdown(system: ActorSystem, channel: Channel, request: ShutdownRequest, exit: Boolean): Unit = { val t = new Thread(new Runnable { def run(): Unit = { if (request.isError) log.error( s"Shutdown requested due to internal error: ${request.reason}" ) else log.info(s"Shutdown requested: ${request.reason}") log.info("Shutting down the ActorSystem") Try(system.terminate()) log.info("Awaiting actor system termination") Try(Await.result(system.whenTerminated, Duration.Inf)) log.info("Shutting down the Netty channel") Try(channel.close().sync()) log.info("Shutdown complete") if (exit) { if (request.isError) System.exit(1) else System.exit(0) } } }) t.setName("Server Shutdown") t.setDaemon(true) t.start() } }
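
// The precedence implemented by `loadConfig` above, spelled out (highest priority first;
// a sketch -- the `<...>` entries are only included when the corresponding file exists):
//
//   ConfigFactory.systemProperties()
//     .withFallback(<.ensime-server.conf next to the .ensime file>)
//     .withFallback(<.ensime-server.conf under XDG_CONFIG_HOME or user.home>)
//     .withFallback(ConfigFactory.load())   // bundled application.conf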
ensime/ensime-server
server/src/main/scala/org/ensime/server/Server.scala
Scala
gpl-3.0
7,213
/**
 * See <a href="https://www.codeeval.com/open_challenges/6/">Longest common sequence</a>
 */
object CommonSequence extends Challenge {
  val lines = scala.io.Source.fromFile(args(0)).getLines().filter(_.length > 0)

  lines.collect {
    case Input(left, right) => eval(left, right, Nil)
  } foreach { result =>
    // the subsequence is accumulated in reverse order, so flip it before printing
    println(result.reverse.mkString)
  }

  // Longest common subsequence by exhaustive search: for each character of `in1`,
  // either match it against its first occurrence in `in2` (and recurse past that
  // match) or skip it, keeping whichever branch yields the longer result.
  def eval(in1: List[Char], in2: List[Char], res: List[Char]): List[Char] = in1 match {
    case Nil => res
    case x :: xs =>
      val i = in2.indexOf(x)
      val s1 = if (i >= 0) eval(xs, in2.drop(i + 1), x :: res) else Nil
      val s2 = eval(xs, in2, res)
      if (s1.size > s2.size) s1 else s2
  }

  object Input {
    // e.g. "XMJYAUZ;MZJAWXU"
    def unapply(line: String) = line.split(";").toList match {
      case left :: right :: Nil => Some((left.toList, right.toList))
      case _ => None
    }
  }
}
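
// Worked example, using the sample pair "XMJYAUZ;MZJAWXU" noted above:
// the longest common subsequence keeps M, J, A, U (skipping X, Y, Z on the left
// and Z, W, X on the right), so the program prints "MJAU".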
zelca/codeeval
src/CommonSequence.scala
Scala
mit
892
/* * Scala.js (https://www.scala-js.org/) * * Copyright EPFL. * * Licensed under Apache License 2.0 * (https://www.apache.org/licenses/LICENSE-2.0). * * See the NOTICE file distributed with this work for * additional information regarding copyright ownership. */ package scala.scalajs object LinkingInfo { import scala.scalajs.runtime.linkingInfo /** Returns true if we are linking for production, false otherwise. * * `productionMode` is always equal to `!developmentMode`. * * This ends up being constant-folded to a constant at link-time. So * constant-folding, inlining, and other local optimizations can be * leveraged with this "constant" to write code that should only be * executed in production mode or development mode. * * A typical usage of this method is: * {{{ * val warningsLogger = * if (productionMode) new NullLogger * else new ConsoleLogger * }}} * * At link-time, `productionMode` will either be a constant true, in which * case the above snippet folds into * {{{ * val warningsLogger = new NullLogger * }}} * or a constant false, in which case it folds into * {{{ * val warningsLogger = new ConsoleLogger. * }}} * * @see [[developmentMode]] */ @inline def productionMode: Boolean = linkingInfo.productionMode /** Returns true if we are linking for development, false otherwise. * * `developmentMode` is always equal to `!productionMode`. * * This ends up being constant-folded to a constant at link-time. So * constant-folding, inlining, and other local optimizations can be * leveraged with this "constant" to write code that should only be * executed in production mode or development mode. * * A typical usage of this method is: * {{{ * if (developmentMode) { * performExpensiveSanityChecks() * } * }}} * * At link-time, `developmentMode` will either be a constant true, in which * case the above snippet folds into * {{{ * performExpensiveSanityChecks() * }}} * or a constant false, in which case it is dead-code-eliminated away, * yielding maximum performance in production. * * @see [[productionMode]] */ @inline def developmentMode: Boolean = !productionMode /** Version (edition) of the ECMAScript Language Specification that is * assumed to be supported by the runtime. * * This is an integer that represents the *edition* of the ECMAScript * Language Specification. For example, ECMAScript 2015 is represented with * the value `6`. * * As an exception, ECMAScript 5.1 is represented with the value `5`. * * This value can be used to: * * - avoid feature tests and dead-code-eliminate polyfills (see below), or * - conditionally offer library features that depend on underlying * ECMAScript support. * * --- * * This ends up being constant-folded to a constant at link-time. So * constant-folding, inlining, and other local optimizations can be * leveraged with this "constant" to write polyfills that can be * dead-code-eliminated. * * A typical usage of this method is: * {{{ * if (esVersion >= ESVersion.ES2018 || featureTest()) * useES2018Feature() * else * usePolyfill() * }}} * * At link-time, `esVersion` will either be a constant less than * `ESVersion.ES2018`, in which case the above snippet folds into * {{{ * if (featureTest()) * useES2018Feature() * else * usePolyfill() * }}} * or a constant greater or equal to `ESVersion.ES2018`, in which case it * folds into * {{{ * useES2018Feature() * }}} */ @inline def esVersion: Int = linkingInfo.esVersion /** Returns true if we are assuming that the target platform supports * ECMAScript 6, false otherwise. 
* * This is `true` if and only if `esVersion >= ESVersion.ES2015`. * * --- * * This ends up being constant-folded to a constant at link-time. So * constant-folding, inlining, and other local optimizations can be * leveraged with this "constant" to write polyfills that can be * dead-code-eliminated. * * A typical usage of this method is: * {{{ * if (assumingES6 || featureTest()) * useES6Feature() * else * usePolyfill() * }}} * * At link-time, `assumingES6` will either be a constant false, in which * case the above snippet folds into * {{{ * if (featureTest()) * useES6Feature() * else * usePolyfill() * }}} * or a constant true, in which case it folds into * {{{ * useES6Feature() * }}} */ @deprecated("use esVersion >= ESVersion.ES2015 instead", "1.6.0") @inline def assumingES6: Boolean = esVersion >= ESVersion.ES2015 /** Whether Scala.js language features use ECMAScript 2015 (edition 6) * semantics or not. * * When `true`, the following semantics apply: * * - JavaScript classes are true `class`'es, therefore a) they can extend * native JavaScript `class`'es and b) they inherit static members from * their parent class. * - Lambdas for `js.Function`s that are not also `js.ThisFunction`s are * JavaScript arrow functions (`=>`). Lambdas for `js.ThisFunction`s are * `function` functions. * - Throwable classes are proper JavaScript error classes, recognized as * such by debuggers. * - In Script (`NoModule`) mode, top-level exports are defined as `let`s. * * When `false`, the following semantics apply: * * - All classes defined in Scala.js are `function`s instead of `class`'es. * Non-native JS classes cannot extend native JS `class`'es and they do * not inherit static members from their parent class. * - All lambdas for `js.Function`s are `function`s. * - Throwable classes have JavaScript's `Error.prototype` in their * prototype chain, but they are not considered proper error classes. * - In Script (`NoModule`) mode, top-level exports are defined as `var`s. * * Prefer reading this value instead of `esVersion` to determine which * semantics apply. * * For example, it can be used in tests whose results depend on which * semantics are used. * * --- * * This ends up being constant-folded to a constant at link-time. So * constant-folding, inlining, and other local optimizations can be * leveraged with this "constant" to write alternatives that can be * dead-code-eliminated. * * A typical usage of this method is: * {{{ * if (useECMAScript2015Semantics) * implementationWithES2015Semantics() * else * implementationWithoutES2015Semantics() * }}} * * At link-time, `useECMAScript2015Semantics` will either be a constant * true, in which case the above snippet folds into * {{{ * implementationWithES2015Semantics() * }}} * or a constant false, in which case it folds into * {{{ * implementationWithoutES2015Semantics() * }}} */ @inline def useECMAScript2015Semantics: Boolean = linkingInfo.assumingES6 // name mismatch for historical reasons /** Constants for the value of `esVersion`. */ object ESVersion { /** ECMAScrîpt 5.1. */ final val ES5_1 = 5 /** ECMAScript 2015 (6th edition). */ final val ES2015 = 6 /** ECMAScript 2016 (7th edition). * * Contains the following notable features: * * - The `**` operator for numbers * - `async`/`await` */ final val ES2016 = 7 /** ECMAScript 2017 (8th edition). 
* * Contains the following notable features: * * - Async functions * - Shared Memory and Atomics (via `SharedArrayBuffer`) * - `Object.values`, `Object.entries`, and `Object.getOwnPropertyDescriptors` */ final val ES2017 = 8 /** ECMAScript 2018 (9th edition). * * Contains the following notable features: * * - Asynchronous iteration via the `AsyncIterator` protocol and async generators * - Regular expression features: the dotAll flag `'s'`, named capture groups, * Unicode property escapes (`\\p{}` and `\\P{}`) and look-behind assertions * - Rest parameter and spread operator support for object properties */ final val ES2018 = 9 /** ECMAScript 2019 (10th edition). * * Contains the following notable features: * * - Minor additions to the built-in library functions */ final val ES2019 = 10 /** ECMAScript 2020 (11th edition). * * Contains the following notable features: * * - Dynamic `import()` calls * - `BigInt` * - `globalThis` * - `export * as ns from 'module'` * - `import.meta` */ final val ES2020 = 11 /** ECMAScript 2021 (12th edition). * * Contains the following notable features: * * - `WeakRef` and `FinalizationRegistry` * - `AggregateError` * - Separators for numeric literals (e.g., `1_000`) */ final val ES2021 = 12 } }
scala-js/scala-js
library/src/main/scala/scala/scalajs/LinkingInfo.scala
Scala
apache-2.0
9,189
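A minimal usage sketch for the LinkingInfo API shown in the file above (not part of the original source). It relies only on the productionMode, esVersion and ESVersion members documented there; the Logger classes are hypothetical application code introduced purely to illustrate the link-time folding pattern.

import scala.scalajs.LinkingInfo
import scala.scalajs.LinkingInfo.ESVersion

// Hypothetical application-level loggers, used only to illustrate the pattern.
trait Logger { def warn(msg: String): Unit }
object NullLogger extends Logger { def warn(msg: String): Unit = () }
object ConsoleLogger extends Logger { def warn(msg: String): Unit = println(s"[warn] $msg") }

object LinkTimeConfig {
  // Constant-folded at link time: the branch that is not taken is removed entirely.
  val warningsLogger: Logger =
    if (LinkingInfo.productionMode) NullLogger else ConsoleLogger

  // Same idea for feature gating: under an ES2018+ link this folds to `true`,
  // so any polyfill guarded by it can be dead-code-eliminated.
  def canUseRegexLookBehind: Boolean =
    LinkingInfo.esVersion >= ESVersion.ES2018
}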
package utils import javax.inject.Inject import play.api.http.HttpFilters import play.api.mvc.EssentialFilter /** * Provides filters. */ //class Filters @Inject() (csrfFilter: play.filters.csrf.CSRFFilter, securityHeadersFilter: SecurityHeadersFilter) extends HttpFilters { class Filters @Inject() (csrfFilter: play.filters.csrf.CSRFFilter, noCacheFilter: NoCacheFilter) extends HttpFilters { override def filters = Seq(csrfFilter, noCacheFilter) //, securityHeadersFilter }
scify/DemocracIT-Web
app/utils/Filters.scala
Scala
apache-2.0
483
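For context, a NoCacheFilter like the one injected above can be written against the EssentialFilter API the file already imports. The sketch below is an assumption, not the project's actual implementation, and exact signatures vary slightly between Play versions.

package utils

import javax.inject.Inject
import play.api.mvc.{EssentialAction, EssentialFilter}
import scala.concurrent.ExecutionContext

// Hypothetical reconstruction of the NoCacheFilter referenced by Filters above.
class NoCacheFilter @Inject() (implicit ec: ExecutionContext) extends EssentialFilter {
  override def apply(next: EssentialAction): EssentialAction = EssentialAction { request =>
    // Run the downstream action, then mark the response as non-cacheable.
    next(request).map(_.withHeaders("Cache-Control" -> "no-cache, no-store, must-revalidate"))
  }
}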
/* ==================================================================== Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================================================================== */ package org.orbeon.oxf.fr.excel import java.{lang => jl} /** * Excel converts numbers to text with different rules to those of java, so * `Double.toString(value)` won't do. * * - No more than 15 significant figures are output (java does 18). * - The sign char for the exponent is included even if positive * - Special values (`NaN` and `Infinity`) get rendered like the ordinary * number that the bit pattern represents. * - Denormalized values (between ±2<sup>-1074</sup> and ±2<sup>-1022</sup> * are displayed as "0" * * See tests for IEEE 64-bit Double Rendering Comparison. * * *Note*: * * Excel has inconsistent rules for the following numeric operations: * * - Conversion to string (as handled here) * - Rendering numerical quantities in the cell grid. * - Conversion from text * - General arithmetic * * Excel's text to number conversion is not a true *inverse* of this operation. The * allowable ranges are different. Some numbers that don't correctly convert to text actually * __do__ get handled properly when used in arithmetic evaluations. */ object NumberToTextConverter { import Private._ /** * Converts the supplied <tt>value</tt> to the text representation that Excel would give if * the value were to appear in an unformatted cell, or as a literal number in a formula.<br> * Note - the results from this method differ slightly from those of <tt>Double.toString()</tt> * In some special cases Excel behaves quite differently. This function attempts to reproduce * those results. */ def toText(value: Double): String = rawDoubleBitsToText(java.lang.Double.doubleToLongBits(value)) def rawDoubleBitsToText(pRawBits: Long): String = { var rawBits = pRawBits var isNegative = rawBits < 0 // sign bit is in the same place for long and double if (isNegative) rawBits &= 0x7FFFFFFFFFFFFFFFL if (rawBits == 0) return if (isNegative) "-0" else "0" val ed = ExpandedDouble(rawBits) if (ed.binaryExponent < -1022) { // value is 'denormalised' which means it is less than 2^-1022 // excel displays all these numbers as zero, even though calculations work OK return if (isNegative) "-0" else "0" } if (ed.binaryExponent == 1024) { // Special number NaN /Infinity // Normally one would not create HybridDecimal objects from these values // except in these cases Excel really tries to render them as if they were normal numbers if (rawBits == ExcelNaNBits) return "3.484840871308E+308" // This is where excel really gets it wrong // Special numbers like Infinity and NaN are interpreted according to // the standard rules below. 
isNegative = false // except that the sign bit is ignored } val nd = ed.normaliseBaseTen val sb = new jl.StringBuilder(MaxTextLen + 1) if (isNegative) sb.append('-') convertToText(sb, nd) sb.toString } private object Private { val ExcelNaNBits = 0xFFFF0420003C0000L val MaxTextLen = 20 def convertToText(sb: jl.StringBuilder, pnd: NormalisedDecimal): Unit = { val rnd = pnd.roundUnits var decExponent = rnd.getDecimalExponent val decimalDigits = if (Math.abs(decExponent) > 98) { val r = rnd.getSignificantDecimalDigitsLastDigitRounded if (r.length == 16) { // rounding caused carry decExponent += 1 } r } else rnd.getSignificantDecimalDigits val countSigDigits = countSignificantDigits(decimalDigits) if (decExponent < 0) formatLessThanOne(sb, decimalDigits, decExponent, countSigDigits) else formatGreaterThanOne(sb, decimalDigits, decExponent, countSigDigits) } def formatLessThanOne(sb: jl.StringBuilder, decimalDigits: String, decExponent: Int, countSigDigits: Int): Unit = { val nLeadingZeros = -decExponent - 1 val normalLength = 2 + nLeadingZeros + countSigDigits // 2 == "0.".length() if (needsScientificNotation(normalLength)) { sb.append(decimalDigits.charAt(0)) if (countSigDigits > 1) { sb.append('.') sb.append(decimalDigits.subSequence(1, countSigDigits)) } sb.append("E-") appendExp(sb, -decExponent) return } sb.append("0.") for (_ <- nLeadingZeros until 0 by -1) sb.append('0') sb.append(decimalDigits.subSequence(0, countSigDigits)) } def formatGreaterThanOne(sb: jl.StringBuilder, decimalDigits: String, decExponent: Int, countSigDigits: Int): Unit = { if (decExponent > 19) { // scientific notation sb.append(decimalDigits.charAt(0)) if (countSigDigits > 1) { sb.append('.') sb.append(decimalDigits.subSequence(1, countSigDigits)) } sb.append("E+") appendExp(sb, decExponent) return } val nFractionalDigits = countSigDigits - decExponent - 1 if (nFractionalDigits > 0) { sb.append(decimalDigits.subSequence(0, decExponent + 1)) sb.append('.') sb.append(decimalDigits.subSequence(decExponent + 1, countSigDigits)) return } sb.append(decimalDigits.subSequence(0, countSigDigits)) for (_ <- -nFractionalDigits until 0 by -1) sb.append('0') } def needsScientificNotation(nDigits: Int): Boolean = nDigits > MaxTextLen def countSignificantDigits(sb: String): Int = { var result = sb.length - 1 while (sb.charAt(result) == '0') { result -= 1 if (result < 0) throw new RuntimeException("No non-zero digits found") } result + 1 } def appendExp(sb: jl.StringBuilder, v: Int): Unit = if (v < 10) { sb.append('0') sb.append(('0' + v).toChar) } else { sb.append(v) } } }
orbeon/orbeon-forms
form-runner/jvm/src/main/scala/org/orbeon/oxf/fr/excel/NumberToTextConverter.scala
Scala
lgpl-2.1
6,861
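A small throwaway driver (not part of Orbeon) that contrasts Java's default rendering with the Excel-style rendering implemented above. No particular outputs are asserted; the point is only where the two differ, namely the 15-significant-figure cap, the always-signed exponent, and the treatment of NaN/Infinity bit patterns.

import org.orbeon.oxf.fr.excel.NumberToTextConverter

object NumberToTextDemo {
  def main(args: Array[String]): Unit = {
    val samples = List(1.0 / 3.0, 1234567890123456789.0, 1e-10, Double.NaN, Double.PositiveInfinity)
    samples.foreach { d =>
      // Left: java.lang.Double.toString. Right: the Excel-style text produced by
      // the converter above, which renders special values from their raw bits
      // instead of "NaN"/"Infinity".
      println(f"${d.toString}%-24s -> ${NumberToTextConverter.toText(d)}")
    }
  }
}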
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package spark.mllib.clustering import scala.collection.mutable.ArrayBuffer import scala.util.Random import spark.{SparkContext, RDD} import spark.SparkContext._ import spark.Logging import spark.mllib.util.MLUtils import org.jblas.DoubleMatrix /** * K-means clustering with support for multiple parallel runs and a k-means++ like initialization * mode (the k-means|| algorithm by Bahmani et al). When multiple concurrent runs are requested, * they are executed together with joint passes over the data for efficiency. * * This is an iterative algorithm that will make multiple passes over the data, so any RDDs given * to it should be cached by the user. */ class KMeans private ( var k: Int, var maxIterations: Int, var runs: Int, var initializationMode: String, var initializationSteps: Int, var epsilon: Double) extends Serializable with Logging { private type ClusterCenters = Array[Array[Double]] def this() = this(2, 20, 1, KMeans.K_MEANS_PARALLEL, 5, 1e-4) /** Set the number of clusters to create (k). Default: 2. */ def setK(k: Int): KMeans = { this.k = k this } /** Set maximum number of iterations to run. Default: 20. */ def setMaxIterations(maxIterations: Int): KMeans = { this.maxIterations = maxIterations this } /** * Set the initialization algorithm. This can be either "random" to choose random points as * initial cluster centers, or "k-means||" to use a parallel variant of k-means++ * (Bahmani et al., Scalable K-Means++, VLDB 2012). Default: k-means||. */ def setInitializationMode(initializationMode: String): KMeans = { if (initializationMode != KMeans.RANDOM && initializationMode != KMeans.K_MEANS_PARALLEL) { throw new IllegalArgumentException("Invalid initialization mode: " + initializationMode) } this.initializationMode = initializationMode this } /** * Set the number of runs of the algorithm to execute in parallel. We initialize the algorithm * this many times with random starting conditions (configured by the initialization mode), then * return the best clustering found over any run. Default: 1. */ def setRuns(runs: Int): KMeans = { if (runs <= 0) { throw new IllegalArgumentException("Number of runs must be positive") } this.runs = runs this } /** * Set the number of steps for the k-means|| initialization mode. This is an advanced * setting -- the default of 5 is almost always enough. Default: 5. */ def setInitializationSteps(initializationSteps: Int): KMeans = { if (initializationSteps <= 0) { throw new IllegalArgumentException("Number of initialization steps must be positive") } this.initializationSteps = initializationSteps this } /** * Set the distance threshold within which we've consider centers to have converged. * If all centers move less than this Euclidean distance, we stop iterating one run. 
*/ def setEpsilon(epsilon: Double): KMeans = { this.epsilon = epsilon this } /** * Train a K-means model on the given set of points; `data` should be cached for high * performance, because this is an iterative algorithm. */ def train(data: RDD[Array[Double]]): KMeansModel = { // TODO: check whether data is persistent; this needs RDD.storageLevel to be publicly readable val sc = data.sparkContext val centers = if (initializationMode == KMeans.RANDOM) { initRandom(data) } else { initKMeansParallel(data) } val active = Array.fill(runs)(true) val costs = Array.fill(runs)(0.0) var activeRuns = new ArrayBuffer[Int] ++ (0 until runs) var iteration = 0 // Execute iterations of Lloyd's algorithm until all runs have converged while (iteration < maxIterations && !activeRuns.isEmpty) { type WeightedPoint = (DoubleMatrix, Long) def mergeContribs(p1: WeightedPoint, p2: WeightedPoint): WeightedPoint = { (p1._1.addi(p2._1), p1._2 + p2._2) } val activeCenters = activeRuns.map(r => centers(r)).toArray val costAccums = activeRuns.map(_ => sc.accumulator(0.0)) // Find the sum and count of points mapping to each center val totalContribs = data.mapPartitions { points => val runs = activeCenters.length val k = activeCenters(0).length val dims = activeCenters(0)(0).length val sums = Array.fill(runs, k)(new DoubleMatrix(dims)) val counts = Array.fill(runs, k)(0L) for (point <- points; (centers, runIndex) <- activeCenters.zipWithIndex) { val (bestCenter, cost) = KMeans.findClosest(centers, point) costAccums(runIndex) += cost sums(runIndex)(bestCenter).addi(new DoubleMatrix(point)) counts(runIndex)(bestCenter) += 1 } val contribs = for (i <- 0 until runs; j <- 0 until k) yield { ((i, j), (sums(i)(j), counts(i)(j))) } contribs.iterator }.reduceByKey(mergeContribs).collectAsMap() // Update the cluster centers and costs for each active run for ((run, i) <- activeRuns.zipWithIndex) { var changed = false for (j <- 0 until k) { val (sum, count) = totalContribs((i, j)) if (count != 0) { val newCenter = sum.divi(count).data if (MLUtils.squaredDistance(newCenter, centers(run)(j)) > epsilon * epsilon) { changed = true } centers(run)(j) = newCenter } } if (!changed) { active(run) = false logInfo("Run " + run + " finished in " + (iteration + 1) + " iterations") } costs(run) = costAccums(i).value } activeRuns = activeRuns.filter(active(_)) iteration += 1 } val bestRun = costs.zipWithIndex.min._2 new KMeansModel(centers(bestRun)) } /** * Initialize `runs` sets of cluster centers at random. */ private def initRandom(data: RDD[Array[Double]]): Array[ClusterCenters] = { // Sample all the cluster centers in one pass to avoid repeated scans val sample = data.takeSample(true, runs * k, new Random().nextInt()) Array.tabulate(runs)(r => sample.slice(r * k, (r + 1) * k)) } /** * Initialize `runs` sets of cluster centers using the k-means|| algorithm by Bahmani et al. * (Bahmani et al., Scalable K-Means++, VLDB 2012). This is a variant of k-means++ that tries * to find with dissimilar cluster centers by starting with a random center and then doing * passes where more centers are chosen with probability proportional to their squared distance * to the current cluster set. It results in a provable approximation to an optimal clustering. * * The original paper can be found at http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf. 
*/ private def initKMeansParallel(data: RDD[Array[Double]]): Array[ClusterCenters] = { // Initialize each run's center to a random point val seed = new Random().nextInt() val sample = data.takeSample(true, runs, seed) val centers = Array.tabulate(runs)(r => ArrayBuffer(sample(r))) // On each step, sample 2 * k points on average for each run with probability proportional // to their squared distance from that run's current centers for (step <- 0 until initializationSteps) { val centerArrays = centers.map(_.toArray) val sumCosts = data.flatMap { point => for (r <- 0 until runs) yield (r, KMeans.pointCost(centerArrays(r), point)) }.reduceByKey(_ + _).collectAsMap() val chosen = data.mapPartitionsWithIndex { (index, points) => val rand = new Random(seed ^ (step << 16) ^ index) for { p <- points r <- 0 until runs if rand.nextDouble() < KMeans.pointCost(centerArrays(r), p) * 2 * k / sumCosts(r) } yield (r, p) }.collect() for ((r, p) <- chosen) { centers(r) += p } } // Finally, we might have a set of more than k candidate centers for each run; weigh each // candidate by the number of points in the dataset mapping to it and run a local k-means++ // on the weighted centers to pick just k of them val centerArrays = centers.map(_.toArray) val weightMap = data.flatMap { p => for (r <- 0 until runs) yield ((r, KMeans.findClosest(centerArrays(r), p)._1), 1.0) }.reduceByKey(_ + _).collectAsMap() val finalCenters = (0 until runs).map { r => val myCenters = centers(r).toArray val myWeights = (0 until myCenters.length).map(i => weightMap.getOrElse((r, i), 0.0)).toArray LocalKMeans.kMeansPlusPlus(r, myCenters, myWeights, k, 30) } finalCenters.toArray } } /** * Top-level methods for calling K-means clustering. */ object KMeans { // Initialization mode names val RANDOM = "random" val K_MEANS_PARALLEL = "k-means||" def train( data: RDD[Array[Double]], k: Int, maxIterations: Int, runs: Int, initializationMode: String) : KMeansModel = { new KMeans().setK(k) .setMaxIterations(maxIterations) .setRuns(runs) .setInitializationMode(initializationMode) .train(data) } def train(data: RDD[Array[Double]], k: Int, maxIterations: Int, runs: Int): KMeansModel = { train(data, k, maxIterations, runs, K_MEANS_PARALLEL) } def train(data: RDD[Array[Double]], k: Int, maxIterations: Int): KMeansModel = { train(data, k, maxIterations, 1, K_MEANS_PARALLEL) } /** * Return the index of the closest point in `centers` to `point`, as well as its distance. */ private[mllib] def findClosest(centers: Array[Array[Double]], point: Array[Double]) : (Int, Double) = { var bestDistance = Double.PositiveInfinity var bestIndex = 0 for (i <- 0 until centers.length) { val distance = MLUtils.squaredDistance(point, centers(i)) if (distance < bestDistance) { bestDistance = distance bestIndex = i } } (bestIndex, bestDistance) } /** * Return the K-means cost of a given point against the given cluster centers. 
*/ private[mllib] def pointCost(centers: Array[Array[Double]], point: Array[Double]): Double = { var bestDistance = Double.PositiveInfinity for (i <- 0 until centers.length) { val distance = MLUtils.squaredDistance(point, centers(i)) if (distance < bestDistance) { bestDistance = distance } } bestDistance } def main(args: Array[String]) { if (args.length < 4) { println("Usage: KMeans <master> <input_file> <k> <max_iterations> [<runs>]") System.exit(1) } val (master, inputFile, k, iters) = (args(0), args(1), args(2).toInt, args(3).toInt) val runs = if (args.length >= 5) args(4).toInt else 1 val sc = new SparkContext(master, "KMeans") val data = sc.textFile(inputFile).map(line => line.split(' ').map(_.toDouble)).cache() val model = KMeans.train(data, k, iters, runs) val cost = model.computeCost(data) println("Cluster centers:") for (c <- model.clusterCenters) { println(" " + c.mkString(" ")) } println("Cost: " + cost) System.exit(0) } }
bavardage/spark
mllib/src/main/scala/spark/mllib/clustering/KMeans.scala
Scala
apache-2.0
12,036
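A hedged driver for the trainer above (pre-Apache-incubation `spark` package). The master URL and input path are placeholders, and the space-separated point format mirrors the `main` method already present in the file.

import spark.SparkContext
import spark.mllib.clustering.KMeans

object KMeansDriver {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext("local[4]", "KMeansDriver")
    // Cache the parsed points: training makes repeated passes over the RDD.
    val points = sc.textFile("data/points.txt")        // placeholder path
      .map(_.split(' ').map(_.toDouble))
      .cache()
    // k-means|| initialization, 2 parallel runs, up to 20 Lloyd iterations each.
    val model = KMeans.train(points, 3, 20, 2, KMeans.K_MEANS_PARALLEL)
    println("cost = " + model.computeCost(points))
    model.clusterCenters.foreach(c => println(c.mkString(" ")))
    sc.stop()
  }
}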
/* * Licensed to Elasticsearch under one or more contributor * license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright * ownership. Elasticsearch licenses this file to you under * the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.elasticsearch.spark.integration; import java.{ util => ju, lang => jl } import java.sql.Timestamp import java.math.BigInteger import java.util.concurrent.TimeUnit import scala.collection.JavaConversions.propertiesAsScalaMap import org.apache.spark.SparkConf import org.apache.spark.SparkException import org.apache.spark.SparkContext import org.apache.spark.sql.IntegerType import org.apache.spark.sql.Row import org.apache.spark.sql.SQLContext import org.apache.spark.sql.StringType import org.apache.spark.sql.StructField import org.apache.spark.sql.StructType import org.apache.spark.sql.TimestampType import org.apache.spark.sql.DecimalType import org.elasticsearch.hadoop.mr.RestUtils import org.elasticsearch.hadoop.util.TestSettings import org.elasticsearch.hadoop.util.TestUtils import org.elasticsearch.spark._ import org.elasticsearch.spark.rdd.EsSpark import org.elasticsearch.spark.sql._ import org.elasticsearch.spark.sql.sqlContextFunctions import org.hamcrest.Matchers.containsString import org.hamcrest.Matchers.is import org.hamcrest.Matchers.not import org.junit.AfterClass import org.junit.Assert._ import org.junit.Assume._ import org.junit.BeforeClass import org.junit.FixMethodOrder import org.junit.runner.RunWith import org.junit.runners.MethodSorters import org.junit.runners.Parameterized import org.junit.runners.Parameterized.Parameters import org.elasticsearch.hadoop.cfg.ConfigurationOptions._ import org.junit.Test import javax.xml.bind.DatatypeConverter import org.apache.spark.sql.catalyst.expressions.GenericRow import java.util.Arrays import java.nio.file.Path import java.io.File import org.apache.commons.io.FileUtils import org.apache.hadoop.mapred.JobContext import org.elasticsearch.hadoop.util.StringUtils import scala.collection.JavaConverters.asScalaBufferConverter import com.esotericsoftware.kryo.io.{ Output => KryoOutput } import com.esotericsoftware.kryo.io.{ Input => KryoInput } import org.elasticsearch.hadoop.EsHadoopIllegalArgumentException import org.apache.spark.sql.catalyst.types.ArrayType case class KeyValue(key: Int, value: String) object AbstractScalaEsScalaSparkSQL { @transient val conf = new SparkConf().setAll(TestSettings.TESTING_PROPS).setMaster("local").setAppName("estest"); @transient var cfg: SparkConf = null @transient var sc: SparkContext = null @transient var sqc: SQLContext = null @BeforeClass def setup() { conf.setAll(TestSettings.TESTING_PROPS); sc = new SparkContext(conf) sqc = new SQLContext(sc) } @AfterClass def cleanup() { if (sc != null) { sc.stop // give jetty time to clean its act up Thread.sleep(TimeUnit.SECONDS.toMillis(3)) } } @Parameters def testParams(): ju.Collection[Array[jl.Object]] = { val list = new ju.ArrayList[Array[jl.Object]]() list.add(Array("default_", 
jl.Boolean.FALSE)) list.add(Array("with_meta_", jl.Boolean.TRUE)) list } } @FixMethodOrder(MethodSorters.NAME_ASCENDING) @RunWith(classOf[Parameterized]) class AbstractScalaEsScalaSparkSQL(prefix: String, readMetadata: jl.Boolean) extends Serializable { val sc = AbstractScalaEsScalaSparkSQL.sc val sqc = AbstractScalaEsScalaSparkSQL.sqc val cfg = Map(ES_READ_METADATA -> readMetadata.toString()) @Test def test1KryoScalaEsRow() { val kryo = SparkUtils.sparkSerializer(sc.getConf) val row = new ScalaEsRow(StringUtils.tokenize("foo,bar,tar").asScala) val storage = Array.ofDim[Byte](512) val output = new KryoOutput(storage) val input = new KryoInput(storage) kryo.writeClassAndObject(output, row) val serialized = kryo.readClassAndObject(input).asInstanceOf[ScalaEsRow] println(serialized.rowOrder) } @Test def testBasicRead() { val schemaRDD = artistsAsSchemaRDD assertTrue(schemaRDD.count > 300) schemaRDD.registerTempTable("datfile") println(schemaRDD.schemaString) //schemaRDD.take(5).foreach(println) val results = sqc.sql("SELECT name FROM datfile WHERE id >=1 AND id <=10") //results.take(5).foreach(println) } @Test def testEsSchemaRDD1Write() { val schemaRDD = artistsAsSchemaRDD val target = wrapIndex("sparksql-test/scala-basic-write") schemaRDD.saveToEs(target, cfg) assertTrue(RestUtils.exists(target)) assertThat(RestUtils.get(target + "/_search?"), containsString("345")) } @Test def testEsSchemaRDD1WriteWithMapping() { val schemaRDD = artistsAsSchemaRDD val target = wrapIndex("sparksql-test/scala-basic-write-id-mapping") val newCfg = collection.mutable.Map(cfg.toSeq: _*) += (ES_MAPPING_ID -> "id", ES_MAPPING_EXCLUDE -> "url") schemaRDD.saveToEs(target, newCfg) assertTrue(RestUtils.exists(target)) assertThat(RestUtils.get(target + "/_search?"), containsString("345")) assertThat(RestUtils.exists(target + "/1"), is(true)) assertThat(RestUtils.get(target + "/_search?"), not(containsString("url"))) } //@Test def testEsSchemaRDDWarning() { val schemaRDD = artistsAsSchemaRDD val target = "sparksql-test/scala-basic-write-id-mapping" EsSpark.saveToEs(schemaRDD, target, cfg) } @Test def testEsSchemaRDD2Read() { val target = wrapIndex("sparksql-test/scala-basic-write") val schemaRDD = sqc.esRDD(target) assertTrue(schemaRDD.count > 300) val schema = schemaRDD.schemaString assertTrue(schema.contains("id: long")) assertTrue(schema.contains("name: string")) assertTrue(schema.contains("pictures: string")) assertTrue(schema.contains("time: long")) assertTrue(schema.contains("url: string")) //schemaRDD.take(5).foreach(println) val tempTable = wrapIndex("basicRead") schemaRDD.registerTempTable(tempTable) val nameRDD = sqc.sql("SELECT name FROM " + tempTable + " WHERE id >= 1 AND id <=10") nameRDD.take(7).foreach(println) assertEquals(10, nameRDD.count) } @Test def testEsSchemaRDD3WriteWithRichMapping() { val input = TestUtils.sampleArtistsDat() val data = sc.textFile(input) val schema = StructType(Seq(StructField("id", IntegerType, false), StructField("name", StringType, false), StructField("url", StringType, true), StructField("pictures", StringType, true), StructField("time", TimestampType, true), StructField("nested", StructType(Seq(StructField("id", IntegerType, false), StructField("name", StringType, false), StructField("url", StringType, true), StructField("pictures", StringType, true), StructField("time", TimestampType, true))), true))) val rowRDD = data.map(_.split("\\t")).map(r => Row(r(0).toInt, r(1), r(2), r(3), new Timestamp(DatatypeConverter.parseDateTime(r(4)).getTimeInMillis()), Row(r(0).toInt, 
r(1), r(2), r(3), new Timestamp(DatatypeConverter.parseDateTime(r(4)).getTimeInMillis())))) val schemaRDD = sqc.applySchema(rowRDD, schema) val target = "sparksql-test/scala-basic-write-rich-mapping-id-mapping" schemaRDD.saveToEs(target, Map(ES_MAPPING_ID -> "id")) assertTrue(RestUtils.exists(target)) assertThat(RestUtils.get(target + "/_search?"), containsString("345")) assertThat(RestUtils.exists(target + "/1"), is(true)) } @Test(expected = classOf[SparkException]) def testEsDataFrame3WriteDecimalType() { val schema = StructType(Seq(StructField("decimal", DecimalType(), false))) val rowRDD = sc.makeRDD(Seq(Row(new BigInteger("10")))) val dataFrame = sqc.applySchema(rowRDD, schema) val target = wrapIndex("sparksql-test/decimal-exception") dataFrame.saveToEs(target) } @Test def testEsSchemaRDD4ReadRichMapping() { val target = "sparksql-test/scala-basic-write-rich-mapping-id-mapping" val schemaRDD = sqc.esRDD(target, cfg) assertTrue(schemaRDD.count > 300) println(schemaRDD.schemaString) } private def artistsAsSchemaRDD = { val input = TestUtils.sampleArtistsDat() val data = sc.textFile(input) val schema = StructType(Seq(StructField("id", IntegerType, false), StructField("name", StringType, false), StructField("url", StringType, true), StructField("pictures", StringType, true), StructField("time", TimestampType, true))) val rowRDD = data.map(_.split("\\t")).map(r => Row(r(0).toInt, r(1), r(2), r(3), new Timestamp(DatatypeConverter.parseDateTime(r(4)).getTimeInMillis()))) val schemaRDD = sqc.applySchema(rowRDD, schema) schemaRDD } private def artistsAsBasicSchemaRDD = { val input = TestUtils.sampleArtistsDat() val data = sc.textFile(input) val schema = StructType(Seq(StructField("id", IntegerType, false), StructField("name", StringType, false), StructField("url", StringType, true), StructField("pictures", StringType, true))) val rowRDD = data.map(_.split("\\t")).map(r => Row(r(0).toInt, r(1), r(2), r(3))) val schemaRDD = sqc.applySchema(rowRDD, schema) schemaRDD } @Test def testEsSchemaRDD50ReadAsDataSource() { val target = wrapIndex("sparksql-test/scala-basic-write") var options = "resource \\"" + target + "\\"" if (readMetadata) { options = options + " ,read_metadata \\"true\\"" } val table = wrapIndex("sqlbasicread1") val schemaRDD = sqc.sql("CREATE TEMPORARY TABLE " + table + " USING org.elasticsearch.spark.sql " + " OPTIONS (" + options + ")") val dsCfg = collection.mutable.Map(cfg.toSeq: _*) += ("path" -> target) val allRDD = sqc.sql("SELECT * FROM " + table + " WHERE id >= 1 AND id <=10") println(allRDD.schemaString) val nameRDD = sqc.sql("SELECT name FROM " + table + " WHERE id >= 1 AND id <=10") println(nameRDD.schemaString) assertEquals(10, nameRDD.count) nameRDD.take(7).foreach(println) } @Test def testEsSchemaFromDocsWithDifferentProperties() { val target = wrapIndex("spark-test/scala-sql-varcols") val trip1 = Map("reason" -> "business", "airport" -> "SFO") val trip2 = Map("participants" -> 5, "airport" -> "OTP") sc.makeRDD(Seq(trip1, trip2)).saveToEs(target, cfg) val table = wrapIndex("sqlvarcol") val schemaRDD = sqc.sql("CREATE TEMPORARY TABLE " + table + " USING org.elasticsearch.spark.sql " + " OPTIONS (resource '" + target + "')"); val allResults = sqc.sql("SELECT * FROM " + table) assertEquals(2, allResults.count()) println(allResults.schemaString) val filter = sqc.sql("SELECT * FROM " + table + " WHERE airport = 'OTP'") assertEquals(1, filter.count()) val nullColumns = sqc.sql("SELECT reason, airport FROM " + table + " ORDER BY airport") val rows = nullColumns.take(2) 
assertEquals("[null,OTP]", rows(0).toString()) assertEquals("[business,SFO]", rows(1).toString()) } @Test def testJsonLoadAndSavedToEs() { val input = sqc.jsonFile(this.getClass.getResource("/simple.json").toURI().toString()) input.printSchema println(input.schema) input.saveToEs("spark-test/json-file") val basic = sqc.jsonFile(this.getClass.getResource("/basic.json").toURI().toString()) basic.printSchema println(basic.schema) basic.saveToEs("spark-test/json-file") } @Test def testJsonLoadAndSavedToEsSchema() { assumeFalse(readMetadata) val input = sqc.jsonFile(this.getClass.getResource("/multi-level-doc.json").toURI().toString()) println(input.schema.treeString) println(input.schema) val table = wrapIndex("json_file_schema") val target = wrapIndex("spark-test/json-file-schema") input.saveToEs(target, cfg) val sample = input.take(1)(0).toString() val schemaRDD = sqc.sql("CREATE TEMPORARY TABLE " + table + " USING org.elasticsearch.spark.sql " + " OPTIONS (resource '" + target + "')"); println("Reading information from Elastic") val allResults = sqc.sql("SELECT * FROM " + table) println("JSON schema") println(input.schema.treeString) println("Elasticsearch schema") println(allResults.schema.treeString) val dfload = allResults.take(1)(0).toString() assertEquals(input.schema.treeString.replace(" integer ", " long "), allResults.schema.treeString) assertEquals(sample, dfload) } @Test def testKeyValueParquetFile() { // Parquet is compiled against Hadoop 2 so check whether that's the case or not assumeTrue(classOf[JobContext].isInterface()) //val items = 128 //val rdd = sc.parallelize(1 to items).flatMap(i => Seq.fill(items)(KeyValue(i, i.toString))) val outputParquet = "bin/keyvaluerdd.parquet" FileUtils.deleteDirectory(new File(outputParquet)) // running into https://issues.apache.org/jira/browse/SPARK-5281 //sqc.createSchemaRDD(rdd).saveAsParquetFile(outputParquet) val schemaRDD = artistsAsBasicSchemaRDD schemaRDD.saveAsParquetFile(outputParquet) sqc.parquetFile(outputParquet).registerTempTable("testparquet") val select = sqc.sql("SELECT * FROM testparquet") println(select.schema) select.saveToEs("test/parquet") } @Test def testNested() { val mapping = """{ "nested": { | "properties": { | "name": { "type": "string" }, | "employees": { | "type": "nested", | "properties": { | "name": {"type": "string"}, | "salary": {"type": "long"} | } | } | } | } | } """.stripMargin val index = wrapIndex("sparksql-test-nested-simple") val indexAndType = s"$index/nested" RestUtils.touch(index) RestUtils.putMapping(indexAndType, mapping.getBytes(StringUtils.UTF_8)) val data = """{"name":"nested-simple","employees":[{"name":"anne","salary":6},{"name":"bob","salary":100}, {"name":"charlie","salary":15}] }""".stripMargin sc.makeRDD(Seq(data)).saveJsonToEs(indexAndType) val df = sqc.esRDD(index) val dataType = df.schema("employees").dataType assertEquals("array", dataType.typeName) val array = dataType.asInstanceOf[ArrayType] assertEquals("struct", array.elementType.typeName) val struct = array.elementType.asInstanceOf[StructType] assertEquals("string", struct("name").dataType.typeName) assertEquals("long", struct("salary").dataType.typeName) val head = df.first() val nested = head.getAs[Seq[Row]](0); assertThat(nested.size, is(3)) println(nested) assertEquals(nested(0).getString(0), "anne") assertEquals(nested(0).getLong(1), 6) } @Test def testNestedEmptyArray() { val json = """{"foo" : 5, "nested": { "bar" : [], "what": "now" } }""" val index = wrapIndex("sparksql-test/empty-nested-array") 
sc.makeRDD(Seq(json)).saveJsonToEs(index) val df = sqc.esRDD(index) println(df.schema) println(df.first()) } @Test def testDoubleNestedArray() { val json = """{"foo" : [5,6], "nested": { "bar" : [{"date":"2015-01-01", "scores":[1,2]},{"date":"2015-01-01", "scores":[3,4]}], "what": "now" } }""" val index = wrapIndex("sparksql-test/double-nested-array") sc.makeRDD(Seq(json)).saveJsonToEs(index) val df = sqc.esRDD(index, Map(ES_READ_FIELD_AS_ARRAY_INCLUDE -> "nested.bar,foo,nested.bar.scores")) assertEquals("array", df.schema("foo").dataType.typeName) val bar = df.schema("nested").dataType.asInstanceOf[StructType]("bar") assertEquals("array", bar.dataType.typeName) val scores = bar.dataType.asInstanceOf[ArrayType].elementType.asInstanceOf[StructType]("scores") assertEquals("array", scores.dataType.typeName) df.printSchema() println(df.first()) } @Test def testArrayExcludes() { val json = """{"foo" : 6, "nested": { "bar" : [{"date":"2015-01-01", "scores":[1,2]},{"date":"2015-01-01", "scores":[3,4]}], "what": "now" } }""" val index = wrapIndex("sparksql-test/nested-array-exclude") sc.makeRDD(Seq(json)).saveJsonToEs(index) val df = sqc.esRDD(index, Map(ES_READ_FIELD_EXCLUDE -> "nested.bar")) df.printSchema() println(df.first()) } @Test def testMultiDepthArray() { val json = """{"rect":{"type":"multipoint","coordinates":[[[50,32],[69,32],[69,50],[50,50],[50,32]]]}}""" val index = wrapIndex("sparksql-test/geo") sc.makeRDD(Seq(json)).saveJsonToEs(index) val df = sqc.esRDD(index, Map(ES_READ_FIELD_AS_ARRAY_INCLUDE -> "rect.coordinates:2")) val coords = df.schema("rect").dataType.asInstanceOf[StructType]("coordinates") assertEquals("array", coords.dataType.typeName) val nested = coords.dataType.asInstanceOf[ArrayType].elementType assertEquals("array", nested.typeName) assertEquals("long", nested.asInstanceOf[ArrayType].elementType.typeName) val head = df.first() println(head) } def wrapIndex(index: String) = { prefix + index } }
costin/elasticsearch-hadoop
spark/sql-12/src/itest/scala/org/elasticsearch/spark/integration/AbstractScalaEsSparkSQL.scala
Scala
apache-2.0
17,551
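Outside the JUnit harness, the round trip the tests above exercise looks roughly like the sketch below (Spark 1.2-era SchemaRDD API). The Elasticsearch address and index names are placeholders, and the calls mirror what the test methods already do.

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
import org.elasticsearch.spark._          // saveToEs on plain RDDs
import org.elasticsearch.spark.sql._      // esRDD on SQLContext, saveToEs on SchemaRDDs

object EsSparkSqlDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("es-demo")
      .set("es.nodes", "localhost:9200")  // assumption: a local Elasticsearch node
    val sc  = new SparkContext(conf)
    val sqc = new SQLContext(sc)

    // Index two documents with different properties, as the varcols test does.
    val trip1 = Map("reason" -> "business", "airport" -> "SFO")
    val trip2 = Map("participants" -> 5, "airport" -> "OTP")
    sc.makeRDD(Seq(trip1, trip2)).saveToEs("spark-demo/trips")

    // Read the index back as a SchemaRDD and query it with Spark SQL.
    val df = sqc.esRDD("spark-demo/trips")
    println(df.schemaString)
    df.registerTempTable("trips")
    sqc.sql("SELECT airport FROM trips WHERE airport = 'OTP'").collect().foreach(println)
  }
}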
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.scheduler.cluster.mesos import org.apache.mesos.Protos.{ContainerInfo, Image, NetworkInfo, Parameter, Volume} import org.apache.mesos.Protos.ContainerInfo.{DockerInfo, MesosInfo} import org.apache.spark.{SparkConf, SparkException} import org.apache.spark.deploy.mesos.config.{NETWORK_LABELS, NETWORK_NAME} import org.apache.spark.internal.Logging /** * A collection of utility functions which can be used by both the * MesosSchedulerBackend and the [[MesosFineGrainedSchedulerBackend]]. */ private[mesos] object MesosSchedulerBackendUtil extends Logging { /** * Parse a comma-delimited list of volume specs, each of which * takes the form [host-dir:]container-dir[:rw|:ro]. */ def parseVolumesSpec(volumes: String): List[Volume] = { volumes.split(",").map(_.split(":")).flatMap { spec => val vol: Volume.Builder = Volume .newBuilder() .setMode(Volume.Mode.RW) spec match { case Array(container_path) => Some(vol.setContainerPath(container_path)) case Array(container_path, "rw") => Some(vol.setContainerPath(container_path)) case Array(container_path, "ro") => Some(vol.setContainerPath(container_path) .setMode(Volume.Mode.RO)) case Array(host_path, container_path) => Some(vol.setContainerPath(container_path) .setHostPath(host_path)) case Array(host_path, container_path, "rw") => Some(vol.setContainerPath(container_path) .setHostPath(host_path)) case Array(host_path, container_path, "ro") => Some(vol.setContainerPath(container_path) .setHostPath(host_path) .setMode(Volume.Mode.RO)) case spec => logWarning(s"Unable to parse volume specs: $volumes. " + "Expected form: \\"[host-dir:]container-dir[:rw|:ro](, ...)\\"") None } } .map { _.build() } .toList } /** * Parse a comma-delimited list of port mapping specs, each of which * takes the form host_port:container_port[:udp|:tcp] * * Note: * the docker form is [ip:]host_port:container_port, but the DockerInfo * message has no field for 'ip', and instead has a 'protocol' field. 
* Docker itself only appears to support TCP, so this alternative form * anticipates the expansion of the docker form to allow for a protocol * and leaves open the chance for mesos to begin to accept an 'ip' field */ def parsePortMappingsSpec(portmaps: String): List[DockerInfo.PortMapping] = { portmaps.split(",").map(_.split(":")).flatMap { spec: Array[String] => val portmap: DockerInfo.PortMapping.Builder = DockerInfo.PortMapping .newBuilder() .setProtocol("tcp") spec match { case Array(host_port, container_port) => Some(portmap.setHostPort(host_port.toInt) .setContainerPort(container_port.toInt)) case Array(host_port, container_port, protocol) => Some(portmap.setHostPort(host_port.toInt) .setContainerPort(container_port.toInt) .setProtocol(protocol)) case spec => logWarning(s"Unable to parse port mapping specs: $portmaps. " + "Expected form: \\"host_port:container_port[:udp|:tcp](, ...)\\"") None } } .map { _.build() } .toList } /** * Parse a list of docker parameters, each of which * takes the form key=value */ private def parseParamsSpec(params: String): List[Parameter] = { // split with limit of 2 to avoid parsing error when '=' // exists in the parameter value params.split(",").map(_.split("=", 2)).flatMap { spec: Array[String] => val param: Parameter.Builder = Parameter.newBuilder() spec match { case Array(key, value) => Some(param.setKey(key).setValue(value)) case spec => logWarning(s"Unable to parse arbitary parameters: $params. " + "Expected form: \\"key=value(, ...)\\"") None } } .map { _.build() } .toList } def containerInfo(conf: SparkConf): ContainerInfo = { val containerType = if (conf.contains("spark.mesos.executor.docker.image") && conf.get("spark.mesos.containerizer", "docker") == "docker") { ContainerInfo.Type.DOCKER } else { ContainerInfo.Type.MESOS } val containerInfo = ContainerInfo.newBuilder() .setType(containerType) conf.getOption("spark.mesos.executor.docker.image").map { image => val forcePullImage = conf .getOption("spark.mesos.executor.docker.forcePullImage") .exists(_.equals("true")) val portMaps = conf .getOption("spark.mesos.executor.docker.portmaps") .map(parsePortMappingsSpec) .getOrElse(List.empty) val params = conf .getOption("spark.mesos.executor.docker.parameters") .map(parseParamsSpec) .getOrElse(List.empty) if (containerType == ContainerInfo.Type.DOCKER) { containerInfo .setDocker(dockerInfo(image, forcePullImage, portMaps, params)) } else { containerInfo.setMesos(mesosInfo(image, forcePullImage)) } val volumes = conf .getOption("spark.mesos.executor.docker.volumes") .map(parseVolumesSpec) volumes.foreach(_.foreach(containerInfo.addVolumes(_))) } conf.get(NETWORK_NAME).map { name => val networkLabels = MesosProtoUtils.mesosLabels(conf.get(NETWORK_LABELS).getOrElse("")) val info = NetworkInfo.newBuilder() .setName(name) .setLabels(networkLabels) .build() containerInfo.addNetworkInfos(info) } containerInfo.build() } private def dockerInfo( image: String, forcePullImage: Boolean, portMaps: List[ContainerInfo.DockerInfo.PortMapping], params: List[Parameter]): DockerInfo = { val dockerBuilder = ContainerInfo.DockerInfo.newBuilder() .setImage(image) .setForcePullImage(forcePullImage) portMaps.foreach(dockerBuilder.addPortMappings(_)) params.foreach(dockerBuilder.addParameters(_)) dockerBuilder.build } private def mesosInfo(image: String, forcePullImage: Boolean): MesosInfo = { val imageProto = Image.newBuilder() .setType(Image.Type.DOCKER) .setDocker(Image.Docker.newBuilder().setName(image)) .setCached(!forcePullImage) ContainerInfo.MesosInfo.newBuilder() 
.setImage(imageProto) .build } }
stanzhai/spark
resource-managers/mesos/src/main/scala/org/apache/spark/scheduler/cluster/mesos/MesosSchedulerBackendUtil.scala
Scala
apache-2.0
7,360
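A hedged sketch of how the spec strings documented above arrive through SparkConf and what the volume parser produces. It sits in the same package only because the helper object is `private[mesos]`; all image names, paths and ports are placeholders.

package org.apache.spark.scheduler.cluster.mesos

import org.apache.spark.SparkConf

object DockerSpecDemo {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .set("spark.mesos.executor.docker.image", "example/spark-executor:latest")
      .set("spark.mesos.executor.docker.volumes", "/etc/passwd:/etc/passwd:ro,/srv/data:/data:rw")
      .set("spark.mesos.executor.docker.portmaps", "8080:80:tcp")

    // Each [host-dir:]container-dir[:rw|:ro] entry becomes a Mesos Volume proto.
    val volumes = MesosSchedulerBackendUtil.parseVolumesSpec(
      conf.get("spark.mesos.executor.docker.volumes"))
    volumes.foreach(v => println(s"${v.getHostPath} -> ${v.getContainerPath} [${v.getMode}]"))

    // containerInfo() assembles the DockerInfo (or MesosInfo) from the same settings.
    println(MesosSchedulerBackendUtil.containerInfo(conf))
  }
}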
package org.eichelberger.sfc.utils import java.awt.Color import java.io.{PrintStream, PrintWriter} import org.eichelberger.sfc.SpaceFillingCurve._ trait RenderTarget { def qw(s: String) = "\\"" + s + "\\"" def beforeRendering(sfc: RenderSource): Unit = {} def beforeSlice(sfc: RenderSource, slice: OrdinalVector): Unit = {} def beforeRow(sfc: RenderSource, row: OrdinalNumber): Unit = {} def beforeCol(sfc: RenderSource, col: OrdinalNumber): Unit = {} def renderCell(sfc: RenderSource, index: OrdinalNumber, point: OrdinalVector): Unit = {} def afterCol(sfc: RenderSource, col: OrdinalNumber): Unit = {} def afterRow(sfc: RenderSource, row: OrdinalNumber): Unit = {} def afterSlice(sfc: RenderSource, slice: OrdinalVector): Unit = {} def afterRendering(sfc: RenderSource): Unit = {} } trait RenderSource { this: SpaceFillingCurve => def getCurveName: String def numCells: Long = size def useSlices: Boolean = true def indexBounds: Seq[OrdinalPair] = precisions.toSeq.map(p => OrdinalPair(0L, (1L << p) - 1L)) def nonTerminalIndexBounds: Seq[OrdinalPair] = if (precisions.size > 2) indexBounds.dropRight(2) else Seq(OrdinalPair(0, 0)) def terminalIndexBounds: Seq[OrdinalPair] = indexBounds.takeRight(Math.min(2, precisions.size)) def renderSlices(target: RenderTarget) = { // loop over all dimensions higher than 2 val slices = combinationsIterator(nonTerminalIndexBounds) while (slices.hasNext) { // identify the context (combination of dimensions > 2) val slice: OrdinalVector = slices.next() target.beforeSlice(this, slice) // dump this 1- or 2-d slice var row = -1L val cellItr = combinationsIterator(terminalIndexBounds) while (cellItr.hasNext) { val cell: OrdinalVector = cellItr.next() val fullVec = indexBounds.size match { case 1 | 2 => cell case _ => slice + cell } val idx = index(fullVec) // switch rows if (row != cell.toSeq.head) { if (row != -1L) target.afterRow(this, row) row = cell.toSeq.head target.beforeRow(this, row) } // print cell target.renderCell(this, idx, cell) } target.afterRow(this, row) // finish this slice target.afterSlice(this, slice) } } def renderWhole(target: RenderTarget) = { // dump this 1- or 2-d slice var row = -1L val cellItr = combinationsIterator(indexBounds) while (cellItr.hasNext) { val cell: OrdinalVector = cellItr.next() val idx = index(cell) // switch rows if (row != cell.toSeq.head) { if (row != -1L) target.afterRow(this, row) row = cell.toSeq.head target.beforeRow(this, row) } // print cell target.renderCell(this, idx, cell) } target.afterRow(this, row) } def render(target: RenderTarget) = { target.beforeRendering(this) if (useSlices) renderSlices(target) else renderWhole(target) target.afterRendering(this) } } // dump the layers of the SFC cell-indexes to STDOUT class ScreenRenderTarget extends RenderTarget { val pw: PrintStream = System.out var numRows: Long = 0L var numCols: Long = 0L override def beforeRendering(sfc: RenderSource): Unit = {} override def beforeSlice(sfc: RenderSource, slice: OrdinalVector): Unit = { pw.println(s"\\n[${sfc.getCurveName}: SLICE $slice ]") val (nr: Long, nc: Long) = sfc.terminalIndexBounds.size match { case 0 => throw new Exception("Cannot print an empty SFC") case 1 => (1, sfc.terminalIndexBounds(0).max + 1L) case 2 => (sfc.terminalIndexBounds(0).max + 1L, sfc.terminalIndexBounds(1).max + 1L) } numRows = nr; numCols = nc } override def beforeRow(sfc: RenderSource, row: OrdinalNumber): Unit = { pw.print(s" ${format(row)} | ") } override def renderCell(sfc: RenderSource, index: OrdinalNumber, point: OrdinalVector): Unit = { 
pw.print(s"${format(index)} | ") } override def afterRow(sfc: RenderSource, row: OrdinalNumber): Unit = { pw.println() } override def afterSlice(sfc: RenderSource, slice: OrdinalVector): Unit = { // print the X axis separate(pw, numCols.toInt) pw.println() pw.print(" | ") for (x <- 0 until numCols.toInt) { pw.print(s"${format(x)} | ") } pw.println() } def format(x: Long): String = x.formatted("%4d") def separate(pw: PrintStream, numCols: Int): Unit = { val line = "------+"*numCols pw.print(s" +$line") } } object ShadeRampEndpoint { def apply(index: OrdinalNumber, color: Color): ShadeRampEndpoint = { val (hue, saturation, brightness) = { var arr: Array[Float] = null Color.RGBtoHSB(color.getRed, color.getGreen, color.getBlue, arr) (arr(0), arr(1), arr(2)) } new ShadeRampEndpoint(index, hue, saturation, brightness) } } import ShadeRampEndpoint._ case class ShadeRampEndpoint(index: OrdinalNumber, hue: Float, saturation: Float, brightness: Float) case class ShadeRamp(endLow: ShadeRampEndpoint, endHigh: ShadeRampEndpoint) { val indexSpan = endHigh.index - endLow.index val slopeHue = (endHigh.hue - endLow.hue) / indexSpan.toDouble val slopeSaturation = (endHigh.saturation - endLow.saturation) / indexSpan.toDouble val slopeBrightness = (endHigh.brightness - endLow.brightness) / indexSpan.toDouble val InvisibleColor = new Color(0, 0, 0, 0) def getColor(index: OrdinalNumber): Color = { if (index < endLow.index) return InvisibleColor if (index > endHigh.index) return InvisibleColor val dist = index - endLow.index val h = endLow.hue + dist * slopeHue val s = endLow.saturation + dist * slopeSaturation val b = endLow.brightness + dist * slopeBrightness Color.getHSBColor(h.toFloat, s.toFloat, b.toFloat) } def toHexByte(i: Int): String = (if (i < 16) "0" else "") + java.lang.Integer.toHexString(i) def getColorHex(index: OrdinalNumber): String = { val color = getColor(index) toHexByte(color.getRed) + toHexByte(color.getGreen) + toHexByte(color.getBlue) + toHexByte(color.getAlpha) } } // dump the layers of the SFC cell-indexes to STDOUT suitable for Graphviz rendering // // to render correctly: // neato -n input.dot -Tpng -o output.png class GraphvizRenderTarget extends RenderTarget { val pw: PrintStream = System.out val ptsSpacing = 75 var numRows: Long = 0L var numCols: Long = 0L var numSlice = 0 val drawNumbers = true val drawArrows = true val cellShadingRamp: Option[ShadeRamp] = None override def beforeRendering(sfc: RenderSource): Unit = { pw.println("// to render correctly:\\n// neato -n input.dot -Tpng -o output.png") pw.println("digraph G {") pw.println("\\toutputorder=\\"nodesfirst\\"") pw.println("\\tnode [ shape=\\"square\\" width=\\"1.058\\" labelloc=\\"c\\" fontsize=\\"30\\" color=\\"#000000\\"]") } override def beforeSlice(sfc: RenderSource, slice: OrdinalVector): Unit = { pw.println(s"\\n\\tsubgraph {") pw.println("\\tedge [ constraint=\\"false\\" tailclip=\\"false\\" headclip=\\"false\\" color=\\"#000000FF\\" ]") //pw.println(s"<h1>${sfc.getCurveName}: SLICE $slice</h1>") //@TODO resolve how to print slice titles val (nr: Long, nc: Long) = sfc.terminalIndexBounds.size match { case 0 => throw new Exception("Cannot print an empty SFC") case 1 => (1, sfc.terminalIndexBounds(0).max + 1L) case 2 => (sfc.terminalIndexBounds(0).max + 1L, sfc.terminalIndexBounds(1).max + 1L) } numRows = nr; numCols = nc } override def renderCell(sfc: RenderSource, index: OrdinalNumber, point: OrdinalVector): Unit = { if (drawArrows) if (index >= 1L) pw.println(s"\\t\\tnode_${index - 1L} -> node_$index") val (x: 
Long, y: Long) = point.size match { case 1 => (numSlice * (numRows + 1) * ptsSpacing + point(0) * ptsSpacing, 0L) case 2 => (numSlice * (numRows + 1) * ptsSpacing + point(0) * ptsSpacing, point(1) * ptsSpacing) } val shading = cellShadingRamp.map(ramp => "style=\\"filled\\" fillcolor=\\"#" + ramp.getColorHex(index) + "\\"").getOrElse("") val label = "label=\\"" + (if (drawNumbers) index.toString else "") + "\\"" pw.println(s"\\t\\tnode_$index [ $label pos=${qw(x.toString + "," + y.toString)} $shading ]") } override def afterSlice(sfc: RenderSource, slice: OrdinalVector): Unit = { pw.println(s"\\t}") // end subgraph numSlice = numSlice + 1 } override def afterRendering(sfc: RenderSource): Unit = { pw.println("}") // end graph } } // write an include file suitable for use in a larger // Persistence of Vision Raytracer scene // // only position information is written; no styling class PovrayRenderTarget extends RenderTarget { val pw: PrintStream = System.out var povCurveName: String = "UNKNOWN" override def beforeRendering(sfc: RenderSource): Unit = { povCurveName = sfc.getCurveName.replaceAll("[^a-zA-Z0-9]", "_").toUpperCase pw.println("// include file for POV-Ray") pw.println(s"// curve name: ${sfc.getCurveName}") pw.println("\\n// curve_cells[CellNo][0]: X coord") pw.println("// curve_cells[CellNo][1]: Y coord") pw.println("// curve_cells[CellNo][2]: Z coord") pw.println(s"\\n#declare num_curve_cells = ${sfc.numCells};") pw.println(s"\\n#declare curve_cells = array[${sfc.numCells}][3];") } override def renderCell(sfc: RenderSource, index: OrdinalNumber, point: OrdinalVector): Unit = { val items = (0 to 2).map { i => s"#declare curve_cells[$index][$i] = ${point(i)};" } pw.println(items.mkString(" ")) } } // this is one of the simplest output formats, being a // plain CSV that dumps the index and the coordinates class CSVRenderTarget extends RenderTarget { val pw: PrintStream = System.out override def renderCell(sfc: RenderSource, index: OrdinalNumber, point: OrdinalVector): Unit = { val items = index +: point.x pw.println(items.mkString(",")) } } // brute-force-and-ignorance JSON output class JSONRenderTarget extends RenderTarget { val pw: PrintStream = System.out override def beforeRendering(sfc: RenderSource): Unit = { val jsCurveName = sfc.getCurveName.replaceAll("[^a-zA-Z0-9]", "_").toUpperCase pw.println(s"var sfc__$jsCurveName = {") pw.println(s"\\tname: ${qw(sfc.getCurveName)},") pw.println(s"\\tnum_dimensions: ${sfc.indexBounds.size},") pw.println(s"\\tbounds: [") pw.println(s"\\t\\t${sfc.indexBounds.map(pair => s"{ min: ${pair.min}, max: ${pair.max} }").mkString(",\\n\\t\\t")}") pw.println(s"\\t],") pw.println(s"\\tnodes: [") } override def renderCell(sfc: RenderSource, index: OrdinalNumber, point: OrdinalVector): Unit = { pw.println(s"\\t\\t{") pw.println(s"\\t\\t\\tindex: $index,") pw.println(s"\\t\\t\\tpoint: [${point.x.mkString(", ")}]") pw.println(s"\\t\\t},") } override def afterRendering(sfc: RenderSource): Unit = { pw.println("\\t]\\n};") } }
cne1x/sfseize
src/main/scala/org/eichelberger/sfc/utils/RenderSource.scala
Scala
apache-2.0
11,013
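To drive the render targets above, RenderSource is mixed into a concrete curve. The sketch below assumes the project exposes a ZCurve whose constructor takes per-dimension precisions as an OrdinalVector; treat that constructor, and the import path, as assumptions rather than confirmed API.

import org.eichelberger.sfc.SpaceFillingCurve._
import org.eichelberger.sfc.ZCurve
import org.eichelberger.sfc.utils.{CSVRenderTarget, RenderSource}

object RenderDemo extends App {
  // Assumption: ZCurve(precisions) extends SpaceFillingCurve; 2 bits per
  // dimension gives a 4x4 grid of 16 cells.
  val curve = new ZCurve(OrdinalVector(2, 2)) with RenderSource {
    def getCurveName: String = "Z-order 2x2"
  }
  // Emits one "index,x,y" row per cell on STDOUT.
  curve.render(new CSVRenderTarget)
}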
package mesosphere.util.state import mesosphere.marathon.state.{ Timestamp, EntityStore, MarathonState } import org.apache.mesos.Protos import org.apache.mesos.Protos.FrameworkID import org.slf4j.LoggerFactory import scala.concurrent.duration.Duration import scala.concurrent.{ Await, Future } /** * Utility class for keeping track of a framework ID */ class FrameworkIdUtil(mStore: EntityStore[FrameworkId], timeout: Duration, key: String = "id") { private[this] val log = LoggerFactory.getLogger(getClass) def fetch(): Option[FrameworkID] = { Await.result(mStore.fetch(key), timeout).map(_.toProto) } def store(proto: FrameworkID): FrameworkId = { log.info(s"Store framework id: $proto") val frameworkId = FrameworkId(proto.getValue) Await.result(mStore.modify(key) { _ => frameworkId }, timeout) } def expunge(): Future[Boolean] = { log.info(s"Expunge framework id!") mStore.expunge(key) } } //TODO: move logic from FrameworkID to FrameworkId (which also implies moving this class) case class FrameworkId(id: String) extends MarathonState[Protos.FrameworkID, FrameworkId] { override def mergeFromProto(message: FrameworkID): FrameworkId = { FrameworkId(message.getValue) } override def mergeFromProto(bytes: Array[Byte]): FrameworkId = { mergeFromProto(Protos.FrameworkID.parseFrom(bytes)) } override def toProto: FrameworkID = { Protos.FrameworkID.newBuilder().setValue(id).build() } override def version: Timestamp = Timestamp.zero }
ss75710541/marathon
src/main/scala/mesosphere/util/state/FrameworkIdUtil.scala
Scala
apache-2.0
1,514
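A sketch of the round trip the utility above supports. Obtaining an EntityStore[FrameworkId] is Marathon-specific wiring and is left as a parameter here; the framework id value is a placeholder.

import org.apache.mesos.Protos.FrameworkID
import scala.concurrent.duration._
import mesosphere.marathon.state.EntityStore
import mesosphere.util.state.{FrameworkId, FrameworkIdUtil}

object FrameworkIdDemo {
  def run(store: EntityStore[FrameworkId]): Unit = {
    val util = new FrameworkIdUtil(store, timeout = 5.seconds)

    // Persist the ID Mesos handed us on registration...
    val registered = FrameworkID.newBuilder().setValue("marathon-framework-001").build()
    util.store(registered)

    // ...and read it back on the next start so we re-register as the same framework.
    util.fetch() match {
      case Some(id) => println(s"Re-using framework id ${id.getValue}")
      case None     => println("No framework id stored yet")
    }
  }
}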
package fpinscala.structuringprograms case class Box(height: Double, width: Double) object Answers { def greaterBy(x: Box, y: Box, f: Box => Double): Box = if (f(x) > f(y)) x else y type Pred[A] = A => Boolean def wider(x: Box, y: Box): Box = greaterBy(x, y, _.width) def taller(x: Box, y: Box) = greaterBy(x, y, _.height) object Absolute1 { def absolute(f: Int => Int): Int => Int = n => f(n).abs // This uses the built-in `abs` method on `Int` } def absolute[A](f: A => Int): A => Int = a => f(a).abs def divisibleBy(k: Int): Pred[Int] = _ % k == 0 val even = divisibleBy(2) object Predicates { val _divisibleBy3And5: Pred[Int] = n => divisibleBy(3)(n) && divisibleBy(5)(n) val _divisibleBy3Or5: Pred[Int] = n => divisibleBy(3)(n) || divisibleBy(5)(n) def lift[A](f: (Boolean, Boolean) => Boolean, g: Pred[A], h: Pred[A]): Pred[A] = a => f(g(a), h(a)) val divisibleBy3And5: Pred[Int] = lift(_ && _, divisibleBy(3), divisibleBy(5)) val divisibleBy3Or5: Pred[Int] = lift(_ || _, divisibleBy(3), divisibleBy(5)) } /* Calling `divisibleBy(0)` results in an error. But we get different results for these two expressions: lift(_ || _, divisibleBy(2), divisibleBy(0)) (n: Int) => divisibleBy(2)(n) || divisibleBy(0)(n) Try them with different inputs. Why do you think one of them fails with an error for even numbers and the other one just returns `true` without failing? Do you think this has any implications for referential transparency? Make a note of your thoughts and revisit this question after reading the chapter on "strictness and laziness". */ def curry[A,B,C](f: (A, B) => C): A => B => C = a => b => f(a, b) /* The `Function2` trait has a `curried` method already. */ def uncurry[A,B,C](f: A => B => C): (A, B) => C = (a, b) => f(a)(b) /* There is a method on the `Function` object in the standard library, `Function.uncurried` that you can use for uncurrying. Note that we can go back and forth between the two forms. We can curry and uncurry and the two forms are in some sense "the same". In FP jargon, we say that they are _isomorphic_ ("iso" = same; "morphe" = shape, form), a term we inherit from category theory. */ def compose[A,B,C](f: B => C, g: A => B): A => C = a => f(g(a)) def lift3[A,B,C,D,E](f: (B, C, D) => E)(g: A => B, h: A => C, i: A => D): A => E = a => f(g(a), h(a), i(a)) object Lift3ReusingLift { def lift3[A,B,C,D,E](f: (B, C, D) => E)(g: A => B, h: A => C, i: A => D): A => E = a => lift[A,C,D,E](f(g(a), _, _))(h, i)(a) } def fibRec(n: Int): Int = if (n < 2) n else fib(n - 1) + fib(n - 2) // The recursive definition is very slow, and uses binary recursion (calls itself twice). // Here is one example of a tail-recursive definition: def fib(n: Int): Int = { def loop(n: Int, x: Int, y: Int): Int = if (n == 0) y else loop(n - 1, y, x + y) loop(n, 0, 1) } // 0 and 1 are the first two numbers in the sequence, so we start the accumulators with those. // At every iteration, we add the two numbers to get the next one. def iterateWhile[A](a: A)(f: A => A, p: Pred[A]): A = if (p(a)) iterateWhile(f(a))(f, p) else a def lift[A,B,C,D](f: (B, C) => D)(g: A => B, h: A => C): A => D = a => f(g(a), h(a)) }
ryo-murai/fpinscala-exercises
answers/src/main/scala/fpinscala/structuringprograms/Answers.scala
Scala
mit
3,373
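A few concrete uses of the combinators defined above (illustrative only; not part of the exercise answers).

import fpinscala.structuringprograms.Box
import fpinscala.structuringprograms.Answers._

object AnswersDemo extends App {
  val a = Box(height = 2.0, width = 5.0)
  val b = Box(height = 4.0, width = 3.0)
  println(taller(a, b))                       // Box(4.0,3.0): greaterBy keyed on height
  println(wider(a, b))                        // Box(2.0,5.0): greaterBy keyed on width

  // A predicate built by lifting && over two divisibility checks.
  val fizzbuzz: Pred[Int] = Predicates.divisibleBy3And5
  println((1 to 30).filter(fizzbuzz))         // Vector(15, 30)

  // curry and compose working together.
  val add: (Int, Int) => Int = _ + _
  val addCurried: Int => Int => Int = curry(add)
  val plusTenThenDouble: Int => Int = compose((_: Int) * 2, addCurried(10))
  println(plusTenThenDouble(5))               // (5 + 10) * 2 = 30

  // iterateWhile: keep doubling until the predicate fails.
  println(iterateWhile(1)(_ * 2, _ < 100))    // 128, the first power of two >= 100
}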
package colossus //where should this go... import service._ import metrics._ import MetricAddress._ import akka.actor._ import akka.util.Timeout import scala.concurrent.duration._ import protocols.http._ import java.net.InetSocketAddress import net.liftweb.json._ class JsonMetricSenderActor(io: IOSystem, host: String, port: Int, path: String) extends Actor with ActorLogging { import MetricSender._ val config = ClientConfig( address = new InetSocketAddress(host, port), name = "json-sender", requestTimeout = 750.milliseconds ) implicit val _io = io implicit val timeout = Timeout(1.seconds) import context.dispatcher val client = AsyncServiceClient(config, new HttpClientCodec) def receive = { case Send(map, gtags, timestamp) => { val body = compact(render(map.addTags(gtags).toJson)) val request = HttpRequest(HttpMethod.Post, path, Some(body)) client.send(request).map{response => response.head.code match { case HttpCodes.OK => {} case other => log.warning(s"got error from aggregator: $response") }} } } } case class JsonMetricSender(host: String, port: Int, path: String, sys: IOSystem = IOSystem("json-sender")) extends MetricSender { def name = "json" def props = Props(classOf[JsonMetricSenderActor], sys, host, port, path) }
noikiy/colossus
colossus/src/main/scala/colossus/metrics/JsonMetricSender.scala
Scala
apache-2.0
1,339
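For reference, a hedged sketch of just the JSON body construction that JsonMetricSenderActor performs before posting. The metric names and tag values are made up, and only `compact`, `render`, and the JsonDSL conversions from lift-json are assumed; the colossus-specific `addTags`/`toJson` calls are not used here:

import net.liftweb.json._
import net.liftweb.json.JsonDSL._

object MetricBodySketch {
  def main(args: Array[String]): Unit = {
    val tags    = Map("host" -> "web-1", "env" -> "prod")
    val metrics = Map("/requests" -> 128L, "/errors" -> 3L)

    val json: JValue = ("tags" -> tags) ~ ("metrics" -> metrics)

    // Analogous to the compact(render(...)) call used as the HTTP POST body above, e.g.
    // {"tags":{"host":"web-1","env":"prod"},"metrics":{"/requests":128,"/errors":3}}
    println(compact(render(json)))
  }
}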
/* * Licensed to Intel Corporation under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * Intel Corporation licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.nn import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule import com.intel.analytics.bigdl.tensor.Tensor import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric import com.intel.analytics.bigdl.utils.Table import scala.reflect._ /** * Region of interest pooling * The RoIPooling uses max pooling to convert the features inside any valid region of interest * into a small feature map with a fixed spatial extent of pooledH × pooledW (e.g., 7 × 7) * an RoI is a rectangular window into a conv feature map. * Each RoI is defined by a four-tuple (x1, y1, x2, y2) that specifies its * top-left corner (x1, y1) and its bottom-right corner (x2, y2). * RoI max pooling works by dividing the h × w RoI window into an pooledH × pooledW grid of * sub-windows of approximate size h/H × w/W and then max-pooling the values in each sub-window * into the corresponding output grid cell. * Pooling is applied independently to each feature map channel * @param pooledW spatial extent in width * @param pooledH spatial extent in height * @param spatialScale spatial scale * @tparam T Numeric type. 
Only support float/double now */ class RoiPooling[T: ClassTag](val pooledW: Int, val pooledH: Int, val spatialScale: T) (implicit ev: TensorNumeric[T]) extends AbstractModule[Table, Tensor[T], T] { private val argmax: Tensor[T] = Tensor[T] private val gradInputTensor: Tensor[T] = Tensor[T] gradInput.insert(gradInputTensor) override def updateOutput(input: Table): Tensor[T] = { require(input.length() == 2, "there must have two tensors in the table") val data = input[Tensor[T]](1) // Input data to ROIPooling val rois = input[Tensor[T]](2) // Input label to ROIPooling require(rois.size().length > 1 && rois.size(2) == 5, "roi input shape should be (R, 5)") output.resize(rois.size(1), data.size(2), pooledH, pooledW) .fill(ev.fromType[Double](Double.MinValue)) argmax.resizeAs(output).fill(ev.fromType(-1)) val dataOffset = offset(0, 1, sizes = data.size()) val argmaxOffset = offset(0, 1, sizes = argmax.size()) val outputOffset = offset(0, 1, sizes = output.size()) val roisOffset = offset(1, sizes = rois.size()) var n = 0 val dataSize = data.size() if (classTag[T] == classTag[Double]) { val inputData = data.storage().array().asInstanceOf[Array[Double]] val argmaxData = argmax.storage().array().asInstanceOf[Array[Double]] val outputData = output.storage().array().asInstanceOf[Array[Double]] val roisDouble = rois.asInstanceOf[Tensor[Double]] while (n < rois.size(1)) { poolOneRoiDouble(n, roisDouble(n + 1), inputData, dataSize, dataOffset, argmaxData, argmaxOffset, outputData, outputOffset, roisOffset, ev.toType[Double](spatialScale)) n += 1 } } else if (classTag[T] == classTag[Float]) { val inputData = data.storage().array().asInstanceOf[Array[Float]] val argmaxData = argmax.storage().array().asInstanceOf[Array[Float]] val outputData = output.storage().array().asInstanceOf[Array[Float]] val roisFloat = rois.asInstanceOf[Tensor[Float]] while (n < rois.size(1)) { poolOneRoiFloat(n, roisFloat(n + 1), inputData, dataSize, dataOffset, argmaxData, argmaxOffset, outputData, outputOffset, roisOffset, ev.toType[Float](spatialScale)) n += 1 } } else { throw new IllegalArgumentException("currently only Double and Float types are supported") } output } private def scaleRoiFloat(roi: Tensor[Float], ind: Int, spatialScale: Float): Int = { Math.round(roi.valueAt(ind) * spatialScale) } private def poolOneRoiFloat(n: Int, roi: Tensor[Float], inputData: Array[Float], dataSize: Array[Int], dataOffset: Int, argmaxData: Array[Float], argmaxOffset: Int, outputData: Array[Float], outputOffset: Int, roisOffset: Int, spatialScale: Float): Unit = { val roiBatchInd = roi.valueAt(1).toInt val roiStartW = scaleRoiFloat(roi, 2, spatialScale) val roiStartH = scaleRoiFloat(roi, 3, spatialScale) require(roiBatchInd >= 0 && dataSize(0) > roiBatchInd) val binSizeH = Math.max(scaleRoiFloat(roi, 5, spatialScale) - roiStartH + 1, 1f) / pooledH val binSizeW = Math.max(scaleRoiFloat(roi, 4, spatialScale) - roiStartW + 1, 1f) / pooledW var batchDataIndex = offset(roiBatchInd, sizes = dataSize) var c = 0 while (c < dataSize(1)) { var ph = 0 val outputDataIndex = outputOffset * (n * dataSize(1) + c) val argmaxIndex = argmaxOffset * (n * dataSize(1) + c) while (ph < pooledH) { var pw = 0 while (pw < pooledW) { // Compute pooling region for this output unit: // start (included) = floor(ph * roi_height / pooled_height_) // end (excluded) = ceil((ph + 1) * roi_height / pooled_height_) val hstart = Math.min(Math.max(Math.floor(ph * binSizeH).toInt + roiStartH, 0), dataSize(2)) val hend = Math.min(Math.max(Math.ceil((ph + 1) * binSizeH).toInt + 
roiStartH, 0), dataSize(2)) val wstart = Math.min(Math.max(Math.floor(pw * binSizeW).toInt + roiStartW, 0), dataSize(3)) val wend = Math.min(Math.max(Math.ceil((pw + 1) * binSizeW).toInt + roiStartW, 0), dataSize(3)) val poolIndex = ph * pooledW + pw if ((hend <= hstart) || (wend <= wstart)) { outputData(outputDataIndex + poolIndex) = 0 argmaxData(argmaxIndex + poolIndex) = -1 } else { var h = hstart while (h < hend) { var w = wstart val hi = h * dataSize(3) while (w < wend) { val index = hi + w if (inputData(batchDataIndex + index) > outputData(outputDataIndex + poolIndex)) { outputData(outputDataIndex + poolIndex) = inputData(batchDataIndex + index) argmaxData(argmaxIndex + poolIndex) = index } w += 1 } h += 1 } } pw += 1 } ph += 1 } // Increment all data pointers by one channel c += 1 batchDataIndex += dataOffset } } private def scaleRoiDouble(roi: Tensor[Double], ind: Int, spatialScale: Double): Int = { Math.round(roi.valueAt(ind) * spatialScale).toInt } private def poolOneRoiDouble(n: Int, roi: Tensor[Double], inputData: Array[Double], dataSize: Array[Int], dataOffset: Int, argmaxData: Array[Double], argmaxOffset: Int, outputData: Array[Double], outputOffset: Int, roisOffset: Int, spatialScale: Double): Unit = { val roiBatchInd = roi.valueAt(1).toInt val roiStartW = scaleRoiDouble(roi, 2, spatialScale) val roiStartH = scaleRoiDouble(roi, 3, spatialScale) require(roiBatchInd >= 0 && dataSize(0) > roiBatchInd) val binSizeH = Math.max(scaleRoiDouble(roi, 5, spatialScale) - roiStartH + 1, 1f) / pooledH val binSizeW = Math.max(scaleRoiDouble(roi, 4, spatialScale) - roiStartW + 1, 1f) / pooledW var batchDataIndex = offset(roiBatchInd, sizes = dataSize) var c = 0 while (c < dataSize(1)) { var ph = 0 val outputDataIndex = outputOffset * (n * dataSize(1) + c) val argmaxIndex = argmaxOffset * (n * dataSize(1) + c) while (ph < pooledH) { var pw = 0 while (pw < pooledW) { // Compute pooling region for this output unit: // start (included) = floor(ph * roi_height / pooled_height_) // end (excluded) = ceil((ph + 1) * roi_height / pooled_height_) val hstart = Math.min(Math.max(Math.floor(ph * binSizeH).toInt + roiStartH, 0), dataSize(2)) val hend = Math.min(Math.max(Math.ceil((ph + 1) * binSizeH).toInt + roiStartH, 0), dataSize(2)) val wstart = Math.min(Math.max(Math.floor(pw * binSizeW).toInt + roiStartW, 0), dataSize(3)) val wend = Math.min(Math.max(Math.ceil((pw + 1) * binSizeW).toInt + roiStartW, 0), dataSize(3)) val poolIndex = ph * pooledW + pw if ((hend <= hstart) || (wend <= wstart)) { outputData(outputDataIndex + poolIndex) = 0 argmaxData(argmaxIndex + poolIndex) = -1 } else { var h = hstart while (h < hend) { var w = wstart val hi = h * dataSize(3) while (w < wend) { val index = hi + w if (inputData(batchDataIndex + index) > outputData(outputDataIndex + poolIndex)) { outputData(outputDataIndex + poolIndex) = inputData(batchDataIndex + index) argmaxData(argmaxIndex + poolIndex) = index } w += 1 } h += 1 } } pw += 1 } ph += 1 } // Increment all data pointers by one channel c += 1 batchDataIndex += dataOffset } } /** * get the data offset given n, c, h, w * @param n batch indice * @param c channel indice * @param h height indice * @param w width indice * @param sizes tensor size * @return array offset */ private def offset(n: Int, c: Int = 0, h: Int = 0, w: Int = 0, sizes: Array[Int]): Int = { require(sizes.length == 2 || sizes.length >= 4) if (sizes.length == 2) ((n * sizes(1) + c) + h) + w else ((n * sizes(1) + c) * sizes(2) + h) * sizes(3) + w } override def updateGradInput(input: Table, 
gradOutput: Tensor[T]): Table = { val numRois = output.size(1) if (classTag[T] == classTag[Double]) { val data = input[Tensor[Double]](1) val roisData = input[Tensor[Double]](2).storage().array() val argmaxData = argmax.storage().array().asInstanceOf[Array[Double]] val gradInputData = gradInputTensor.resizeAs(data).zero() .storage().array().asInstanceOf[Array[Double]] val gradOutputData = gradOutput.storage().array().asInstanceOf[Array[Double]] roiPoolingBackwardDouble(roisData, numRois, data, argmaxData, gradInputData, gradOutputData) } else if (classTag[T] == classTag[Float]) { val data = input[Tensor[Float]](1) val roisData = input[Tensor[Float]](2).storage().array() val argmaxData = argmax.storage().array().asInstanceOf[Array[Float]] val gradInputData = gradInputTensor.resizeAs(data).zero() .storage().array().asInstanceOf[Array[Float]] val gradOutputData = gradOutput.storage().array().asInstanceOf[Array[Float]] roiPoolingBackwardFloat(roisData, numRois, data, argmaxData, gradInputData, gradOutputData) } else { throw new IllegalArgumentException("currently only Double and Float types are supported") } gradInput } private def roiPoolingBackwardFloat(roisData: Array[Float], numRois: Int, data: Tensor[Float], argmaxData: Array[Float], gradInputData: Array[Float], gradOutputData: Array[Float]): Unit = { var roiN = 0 var c = 0 var ph = 0 var pw = 0 // Accumulate gradient over all ROIs while (roiN < numRois) { val roiBatchInd = roisData(roiN * 5).toInt // Accumulate gradients over each bin in this ROI c = 0 while (c < data.size(2)) { ph = 0 while (ph < pooledH) { pw = 0 while (pw < pooledW) { val outputOffset = ((roiN * data.size(2) + c) * pooledH + ph) * pooledW + pw val argmaxIndex = argmaxData(outputOffset) if (argmaxIndex >= 0) { val inputOffset = (roiBatchInd * data.size(2) + c) * data.size(3) * data.size(4) + argmaxIndex.toInt gradInputData(inputOffset) = gradInputData(inputOffset) + gradOutputData(outputOffset) } pw += 1 } ph += 1 } c += 1 } roiN += 1 } } private def roiPoolingBackwardDouble(roisData: Array[Double], numRois: Int, data: Tensor[Double], argmaxData: Array[Double], gradInputData: Array[Double], gradOutputData: Array[Double]): Unit = { var roiN = 0 var c = 0 var ph = 0 var pw = 0 // Accumulate gradient over all ROIs while (roiN < numRois) { val roiBatchInd = roisData(roiN * 5).toInt // Accumulate gradients over each bin in this ROI c = 0 while (c < data.size(2)) { ph = 0 while (ph < pooledH) { pw = 0 while (pw < pooledW) { val outputOffset = ((roiN * data.size(2) + c) * pooledH + ph) * pooledW + pw val argmaxIndex = argmaxData(outputOffset) if (argmaxIndex >= 0) { val inputOffset = (roiBatchInd * data.size(2) + c) * data.size(3) * data.size(4) + argmaxIndex.toInt gradInputData(inputOffset) = gradInputData(inputOffset) + gradOutputData(outputOffset) } pw += 1 } ph += 1 } c += 1 } roiN += 1 } } override def toString: String = "nn.RoiPooling" override def clearState(): this.type = { super.clearState() argmax.set() gradInputTensor.set() this } } object RoiPooling { def apply[@specialized(Float, Double) T: ClassTag]( pooled_w: Int, pooled_h: Int, spatial_scale: T)(implicit ev: TensorNumeric[T]): RoiPooling[T] = new RoiPooling[T](pooled_w, pooled_h, spatial_scale) }
SeaOfOcean/BigDL
dl/src/main/scala/com/intel/analytics/bigdl/nn/RoiPooling.scala
Scala
apache-2.0
14,465
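The class comment above spells out the RoI max-pooling arithmetic: divide the RoI window into a pooledH x pooledW grid and take the max in each sub-window. Below is a small, self-contained sketch of that arithmetic on a single-channel feature map, using plain Scala arrays rather than BigDL tensors; all names and sizes are illustrative only:

object RoiPoolSketch {
  def roiMaxPool(fm: Array[Array[Float]],
                 x1: Int, y1: Int, x2: Int, y2: Int,
                 pooledH: Int, pooledW: Int): Array[Array[Float]] = {
    val roiH = math.max(y2 - y1 + 1, 1)
    val roiW = math.max(x2 - x1 + 1, 1)
    val binH = roiH.toFloat / pooledH
    val binW = roiW.toFloat / pooledW
    Array.tabulate(pooledH, pooledW) { (ph, pw) =>
      // Sub-window [hstart, hend) x [wstart, wend), clipped to the feature map bounds.
      val hstart = math.min(math.max(math.floor(ph * binH).toInt + y1, 0), fm.length)
      val hend   = math.min(math.max(math.ceil((ph + 1) * binH).toInt + y1, 0), fm.length)
      val wstart = math.min(math.max(math.floor(pw * binW).toInt + x1, 0), fm(0).length)
      val wend   = math.min(math.max(math.ceil((pw + 1) * binW).toInt + x1, 0), fm(0).length)
      if (hend <= hstart || wend <= wstart) 0f
      else (hstart until hend).flatMap(h => (wstart until wend).map(w => fm(h)(w))).max
    }
  }

  def main(args: Array[String]): Unit = {
    val fm = Array.tabulate(4, 6)((h, w) => (h * 6 + w).toFloat) // 4 x 6 toy feature map
    // RoI covering rows 0..3 and columns 1..4, pooled to a 2 x 2 grid.
    roiMaxPool(fm, x1 = 1, y1 = 0, x2 = 4, y2 = 3, pooledH = 2, pooledW = 2)
      .foreach(row => println(row.mkString(" ")))
    // Each output cell is the max of its sub-window, e.g. bottom-right = fm(3)(4) = 22.0
  }
}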
/* * Distributed as part of Scalala, a linear algebra library. * * Copyright (C) 2008- Daniel Ramage * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 USA */ package scalala; package tensor; package dense; import org.scalacheck._ import org.scalatest._; import org.scalatest.junit._; import org.scalatest.prop._; import org.junit.runner.RunWith @RunWith(classOf[JUnitRunner]) class DenseMatrixTest extends FunSuite with Checkers { test("Slicing") { val m = DenseMatrix((0,1,2), (3,4,5)); // slice sub-matrix val s1 : mutable.Matrix[Int] = m(0 to 1, 1 to 2); assert(s1 === DenseMatrix((1,2),(4,5))); s1 += 1; assert(m === DenseMatrix((0,2,3),(3,5,6))); // slice row val s2 : DenseVectorRow[Int] = m(0, ::); assert(s2 === DenseVector(0,2,3)); s2 *= 2; assert(m === DenseMatrix((0,4,6),(3,5,6))); // slice column val s3 : DenseVectorCol[Int] = m(::, 1); assert(s3 === DenseVector(4,5)); s3 -= 1; assert(m === DenseMatrix((0,3,6),(3,4,6))); // slice rows val s4 : mutable.Matrix[Int] = m(1 to 1, ::); assert(s4 === DenseMatrix((3,4,6))); // slice columns val s5 : mutable.Matrix[Int] = m(::, 1 to 2); assert(s5 === DenseMatrix((3,6),(4,6))); // slice part of a row val s6a : DenseVectorRow[Int] = m(0, 1 to 2); s6a += 2; val s6b : mutable.VectorRow[Int] = m(0,IndexedSeq(2,1)); s6b -= 1; assert(m === DenseMatrix((0,4,7),(3,4,6))); // slice part of a column val s7a : DenseVectorCol[Int] = m(0 to 1, 0); s7a += 2; val s7b : mutable.VectorCol[Int] = m(IndexedSeq(1,0),0); s7b += 1; assert(m === DenseMatrix((3,4,7),(6,4,6))); } test("Transpose") { val m = DenseMatrix((1,2,3),(4,5,6)); // check that the double transpose gives us back the original assert(m.t.t eq m); // check static type and write-through val t : mutable.Matrix[Int] = m.t; assert(t === DenseMatrix((1,4),(2,5),(3,6))); t(0,1) = 0; assert(m === DenseMatrix((1,2,3),(0,5,6))); } test("Sliced Transpose") { val m = DenseMatrix((0, 1, 2), (3, 4, 5)); // slice sub-matrix val s1: mutable.Matrix[Int] = m(0 to 1, 1 to 2); assert(s1 === DenseMatrix((1, 2), (4, 5))); val t1: mutable.Matrix[Int] = s1.t assert(t1 === DenseMatrix((1, 4), (2, 5))); } test("Min/Max") { val m = DenseMatrix((1,0,0),(2,3,-1)); assert(m.argmin === (1,2)); assert(m.argmax === (1,1)); assert(m.min === -1); assert(m.max === 3); } test("MapValues") { val a : DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1)); val b1 : DenseMatrix[Int] = a.mapValues(_ + 1); assert(b1 === DenseMatrix((2,1,1),(3,4,0))); val b2 : DenseMatrix[Double] = a.mapValues(_ + 1.0); assert(b2 === DenseMatrix((2.0,1.0,1.0),(3.0,4.0,0.0))); } test("Map") { val a : DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1)); val b1 : DenseMatrix[Int] = a.mapTriples((i,j,v) => i + v); assert(b1 === DenseMatrix((1,0,0),(3,4,0))); val b2 : DenseMatrix[Double] = a.mapTriples((i,j,v) => j + v.toDouble); assert(b2 === DenseMatrix((1.0,1.0,2.0),(2.0,4.0,1.0))); } test("Triples") { val a : 
DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1)); var s = 0; // foreach s = 0; for ((i,j,v) <- a.triples) s += v; assert(s === a.sum); // filter s = 0; for ((i,j,v) <- a.triples; if i % 2 == 0 || j % 2 == 0) s += v; assert(s === 1+2-1); // // map // val b1 : DenseMatrix[Double] = for ((i,j,v) <- a) yield v * 2.0; // assert(b1 === DenseMatrix((2.0,0.0,0.0),(4.0,6.0,-2.0))); // // // map with filter // val b2 : DenseMatrix[Int] = for ((i,j,v) <- a; if j == 0) yield v * 2; // assert(b2 === DenseMatrix((2,0,0),(4,0,0))); } test("horzcat") { val a : DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1)); val result: DenseMatrix[Int] = DenseMatrix((1,0,0,1,0, 0),(2,3,-1,2,3,-1)); assert(DenseMatrix.horzcat(a,a) === result); } test("vertcat") { val a : DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1)); val result: DenseMatrix[Int] = DenseMatrix((1,0,0),(2,3,-1),(1,0,0),(2,3,-1)); assert(DenseMatrix.vertcat(a,a) === result); } test("Multiply") { val a = DenseMatrix((1, 2, 3),(4, 5, 6)); val b = DenseMatrix((7, -2, 8),(-3, -3, 1),(12, 0, 5)); val c = DenseVector(6,2,3); assert(a * b === DenseMatrix((37, -8, 25), (85, -23, 67))); assert(a * c === DenseVector(19,52)); // should be dense val x : DenseMatrix[Int] = a * a.t; assert(x === DenseMatrix((14,32),(32,77))); // should be dense val y : DenseMatrix[Int] = a.t * a; assert(y === DenseMatrix((17,22,27),(22,29,36),(27,36,45))); // should promote val z : DenseMatrix[Double] = b * (b + 1.0); assert(z === DenseMatrix((164,5,107),(-5,10,-27),(161,-7,138))); } test("Trace") { assert(DenseMatrix((1,2,3),(4,5,6)).trace === 1 + 5); assert(DenseMatrix((1,2),(3,4),(5,6)).trace == 1 + 4); assert(DenseMatrix((1,2,3),(4,5,6),(7,8,9)).trace === 1 + 5 + 9); } test("Reshape") { val m : DenseMatrix[Int] = DenseMatrix((1,2,3),(4,5,6)); val r : DenseMatrix[Int] = m.reshape(3,2); assert(m.data eq r.data); assert(r.numRows === 3); assert(r.numCols === 2); assert(r === DenseMatrix((1,5),(4,3),(2,6))); } test("Solve") { // square solve val r1 : DenseMatrix[Double] = DenseMatrix((1.0,3.0),(2.0,0.0)) \ DenseMatrix((1.0,2.0),(3.0,4.0)); assert(r1 === DenseMatrix((1.5, 2.0), (-1.0/6, 0.0))); // matrix-vector solve val r2 : DenseVector[Double] = DenseMatrix((1.0,3.0,4.0),(2.0,0.0,6.0)) \ DenseVector(1.0,3.0); assert(r2 === DenseVector(0.1813186813186811, -0.3131868131868131, 0.43956043956043944)); // wide matrix solve val r3 : DenseMatrix[Double] = DenseMatrix((1.0,3.0,4.0),(2.0,0.0,6.0)) \ DenseMatrix((1.0,2.0),(3.0,4.0)); assert(r3 === DenseMatrix((0.1813186813186811, 0.2197802197802196), (-0.3131868131868131, -0.1978021978021977), (0.43956043956043944, 0.5934065934065933))); // tall matrix solve val r4 : DenseMatrix[Double] = DenseMatrix((1.0,3.0),(2.0,0.0),(4.0,6.0)) \ DenseMatrix((1.0,4.0),(2.0,5.0),(3.0,6.0)); assert(r4 === DenseMatrix((0.9166666666666667, 1.9166666666666672), (-0.08333333333333352, -0.08333333333333436))); } }
scalala/Scalala
src/test/scala/scalala/tensor/dense/DenseMatrixTest.scala
Scala
lgpl-2.1
7,232
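The Reshape test above asserts that `m.data eq r.data`, i.e. reshape reinterprets the same backing array, and the expected values imply column-major storage. A tiny standalone sketch of that indexing with plain Scala arrays (no Scalala types; names are made up):

object ColumnMajorSketch {
  // Element (r, c) of an m x n column-major matrix lives at offset c * m + r.
  def index(r: Int, c: Int, numRows: Int): Int = c * numRows + r

  def main(args: Array[String]): Unit = {
    // DenseMatrix((1,2,3),(4,5,6)) stored column-major: 1, 4, 2, 5, 3, 6
    val data = Array(1, 4, 2, 5, 3, 6)
    // Read the same array back as a 3 x 2 matrix: rows (1,5), (4,3), (2,6), matching the test.
    for (r <- 0 until 3)
      println((0 until 2).map(c => data(index(r, c, numRows = 3))).mkString(" "))
  }
}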
/**
 * Licensed to Big Data Genomics (BDG) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The BDG licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.bdgenomics.pacmin.graph

/**
 * An observation inside of a quasitig. This is used for calling the sequence of
 * the finalized multig, and is generated via the transitive reduction of reads.
 *
 * @param alleles The allele strings observed at this site.
 * @param likelihoods The likelihoods that each allele was observed.
 */
private[graph] case class ObservedSite(alleles: Array[String], likelihoods: Array[Double]) {
  assert(likelihoods.length == alleles.length, "Likelihood:allele mapping must be 1:1.")
}
bigdatagenomics/PacMin
pacmin-core/src/main/scala/org/bdgenomics/pacmin/graph/ObservedSite.scala
Scala
apache-2.0
1,353
package com.codahale.jerkson.tests

import com.codahale.jerkson.Json._
import java.io.ByteArrayInputStream
import com.codahale.simplespec.Spec
import org.junit.Test

class StreamingSpec extends Spec {
  class `Parsing a stream of objects` {
    val json = """[
      {"id":1, "name": "Coda"},
      {"id":2, "name": "Niki"},
      {"id":3, "name": "Biscuit"},
      {"id":4, "name": "Louie"}
    ]"""

    @Test def `returns an iterator of stream elements` = {
      stream[CaseClass](new ByteArrayInputStream(json.getBytes)).toList
        .must(be(CaseClass(1, "Coda") :: CaseClass(2, "Niki") ::
                 CaseClass(3, "Biscuit") :: CaseClass(4, "Louie") :: Nil))
    }
  }
}
cphylabs/jerkson-old
src/test/scala/com/codahale/jerkson/tests/StreamingSpec.scala
Scala
mit
686
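For context, a hedged usage sketch of the `stream` helper the test exercises. `Event` and the file name are hypothetical; the only API assumed is the `stream[A](InputStream)` call used above, which the `.toList` in the test suggests yields a lazily consumed Iterator:

import com.codahale.jerkson.Json._
import java.io.FileInputStream

case class Event(id: Long, name: String)

object StreamUsageSketch {
  def main(args: Array[String]): Unit = {
    val in = new FileInputStream("events.json") // a large JSON array of objects (hypothetical file)
    try stream[Event](in).take(10).foreach(println) // consume incrementally, not all at once
    finally in.close()
  }
}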
package com.github.opengrabeso.mixtio
package frontend.routing

import io.udash._
import common.model._

import scala.scalajs.js.URIUtils

class RoutingRegistryDef extends RoutingRegistry[RoutingState] {
  def matchUrl(url: Url): RoutingState =
    url2State("/" + url.value.stripPrefix("/").stripSuffix("/"))

  def matchState(state: RoutingState): Url =
    Url(state2Url(state))

  object URIEncoded {
    def apply(s: String): String = URIUtils.encodeURIComponent(s)
    def unapply(s: String): Option[String] = Some(URIUtils.decodeURIComponent(s))
  }

  object ? {
    def apply(prefix: String, s: Seq[FileId]) = {
      prefix + s.map(f => URIEncoded(f.toString)).mkString("?", "&", "")
    }

    def unapply(s: String): Option[(String, Seq[FileId])] = {
      val prefix = s.takeWhile(_ != '?')
      if (prefix.nonEmpty) {
        val rest = s.drop(prefix.length + 1)
        val parts = rest.split("&")
        Some((prefix, parts.flatMap(URIEncoded.unapply(_).map(FileId.parse))))
      } else {
        None
      }
    }
  }

  private val (url2State, state2Url) = bidirectional {
    case "/" => SelectPageState
    case "/settings" => SettingsPageState
    case "/push" / session => PushPageState(session)
    case "/edit" ? s => EditPageState(s)
  }
}
OndrejSpanel/Stravamat
frontend/src/main/scala/com/github/opengrabeso/mixtio/frontend/routing/RoutingRegistryDef.scala
Scala
gpl-2.0
1,261
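The `?` extractor above packs a sequence of ids into the query string and unpacks it again. A standalone sketch of that round trip using plain Strings instead of the project's FileId; java.net.URLEncoder/URLDecoder stand in for the Scala.js URIUtils, which is an assumption (they differ slightly, e.g. spaces become '+'):

import java.net.{URLDecoder, URLEncoder}

object QueryRoundTripSketch {
  def encode(prefix: String, ids: Seq[String]): String =
    prefix + ids.map(URLEncoder.encode(_, "UTF-8")).mkString("?", "&", "")

  def decode(s: String): Option[(String, Seq[String])] = {
    val prefix = s.takeWhile(_ != '?')
    if (prefix.nonEmpty)
      Some((prefix, s.drop(prefix.length + 1).split("&").toSeq.map(URLDecoder.decode(_, "UTF-8"))))
    else None
  }

  def main(args: Array[String]): Unit = {
    val url = encode("/edit", Seq("strava/123", "upload 42"))
    println(url)         // /edit?strava%2F123&upload+42
    println(decode(url)) // Some((/edit,List(strava/123, upload 42)))
  }
}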
def alpha[A]: (A => ?) ~> F
hmemcpy/milewski-ctfp-pdf
src/content/2.5/code/scala/snippet04.scala
Scala
gpl-3.0
27
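The one-line snippet above declares a natural transformation from the reader functor `A => ?` to an abstract functor F. A self-contained sketch of that shape with F fixed to List purely for illustration, and a hand-rolled `~>` plus an explicit type lambda since the snippet assumes kind-projector's `?` syntax:

object YonedaSketch {
  trait ~>[G[_], H[_]] { def apply[X](gx: G[X]): H[X] }

  // By the Yoneda lemma, a natural transformation from A => ? to List is determined by a List[A].
  def alpha[A](fa: List[A]): ~>[({ type R[X] = A => X })#R, List] =
    new ~>[({ type R[X] = A => X })#R, List] {
      def apply[X](f: A => X): List[X] = fa.map(f)
    }

  def main(args: Array[String]): Unit = {
    val nat = alpha(List(1, 2, 3))
    println(nat((n: Int) => n.toString)) // List(1, 2, 3)
    println(nat((n: Int) => n * n))      // List(1, 4, 9)
  }
}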
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.util.concurrent.{Semaphore, TimeUnit} import java.util.concurrent.atomic.AtomicInteger import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future import scala.concurrent.duration._ import org.scalatest.BeforeAndAfter import org.scalatest.matchers.must.Matchers import org.apache.spark.internal.config._ import org.apache.spark.internal.config.Deploy._ import org.apache.spark.scheduler.{SparkListener, SparkListenerStageCompleted, SparkListenerTaskEnd, SparkListenerTaskStart} import org.apache.spark.util.ThreadUtils /** * Test suite for cancelling running jobs. We run the cancellation tasks for single job action * (e.g. count) as well as multi-job action (e.g. take). We test the local and cluster schedulers * in both FIFO and fair scheduling modes. */ class JobCancellationSuite extends SparkFunSuite with Matchers with BeforeAndAfter with LocalSparkContext { override def afterEach(): Unit = { try { resetSparkContext() JobCancellationSuite.taskStartedSemaphore.drainPermits() JobCancellationSuite.taskCancelledSemaphore.drainPermits() JobCancellationSuite.twoJobsSharingStageSemaphore.drainPermits() JobCancellationSuite.executionOfInterruptibleCounter.set(0) } finally { super.afterEach() } } test("local mode, FIFO scheduler") { val conf = new SparkConf().set(SCHEDULER_MODE, "FIFO") sc = new SparkContext("local[2]", "test", conf) testCount() testTake() // Make sure we can still launch tasks. assert(sc.parallelize(1 to 10, 2).count === 10) } test("local mode, fair scheduler") { val conf = new SparkConf().set(SCHEDULER_MODE, "FAIR") val xmlPath = getClass.getClassLoader.getResource("fairscheduler.xml").getFile() conf.set(SCHEDULER_ALLOCATION_FILE, xmlPath) sc = new SparkContext("local[2]", "test", conf) testCount() testTake() // Make sure we can still launch tasks. assert(sc.parallelize(1 to 10, 2).count === 10) } test("cluster mode, FIFO scheduler") { val conf = new SparkConf().set(SCHEDULER_MODE, "FIFO") sc = new SparkContext("local-cluster[2,1,1024]", "test", conf) testCount() testTake() // Make sure we can still launch tasks. assert(sc.parallelize(1 to 10, 2).count === 10) } test("cluster mode, fair scheduler") { val conf = new SparkConf().set(SCHEDULER_MODE, "FAIR") val xmlPath = getClass.getClassLoader.getResource("fairscheduler.xml").getFile() conf.set(SCHEDULER_ALLOCATION_FILE, xmlPath) sc = new SparkContext("local-cluster[2,1,1024]", "test", conf) testCount() testTake() // Make sure we can still launch tasks. 
assert(sc.parallelize(1 to 10, 2).count === 10) } test("do not put partially executed partitions into cache") { // In this test case, we create a scenario in which a partition is only partially executed, // and make sure CacheManager does not put that partially executed partition into the // BlockManager. import JobCancellationSuite._ sc = new SparkContext("local", "test") // Run from 1 to 10, and then block and wait for the task to be killed. val rdd = sc.parallelize(1 to 1000, 2).map { x => if (x > 10) { taskStartedSemaphore.release() taskCancelledSemaphore.acquire() } x }.cache() val rdd1 = rdd.map(x => x) Future { taskStartedSemaphore.acquire() sc.cancelAllJobs() taskCancelledSemaphore.release(100000) } intercept[SparkException] { rdd1.count() } // If the partial block is put into cache, rdd.count() would return a number less than 1000. assert(rdd.count() === 1000) } test("job group") { sc = new SparkContext("local[2]", "test") // Add a listener to release the semaphore once any tasks are launched. val sem = new Semaphore(0) sc.addSparkListener(new SparkListener { override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { sem.release() } }) // jobA is the one to be cancelled. val jobA = Future { sc.setJobGroup("jobA", "this is a job to be cancelled") sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() } // Block until both tasks of job A have started and cancel job A. sem.acquire(2) sc.clearJobGroup() val jobB = sc.parallelize(1 to 100, 2).countAsync() sc.cancelJobGroup("jobA") val e = intercept[SparkException] { ThreadUtils.awaitResult(jobA, Duration.Inf) }.getCause assert(e.getMessage contains "cancel") // Once A is cancelled, job B should finish fairly quickly. assert(jobB.get() === 100) } test("inherited job group (SPARK-6629)") { sc = new SparkContext("local[2]", "test") // Add a listener to release the semaphore once any tasks are launched. val sem = new Semaphore(0) sc.addSparkListener(new SparkListener { override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { sem.release() } }) sc.setJobGroup("jobA", "this is a job to be cancelled") @volatile var exception: Exception = null val jobA = new Thread() { // The job group should be inherited by this thread override def run(): Unit = { exception = intercept[SparkException] { sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count() } } } jobA.start() // Block until both tasks of job A have started and cancel job A. sem.acquire(2) sc.cancelJobGroup("jobA") jobA.join(10000) assert(!jobA.isAlive) assert(exception.getMessage contains "cancel") // Once A is cancelled, job B should finish fairly quickly. val jobB = sc.parallelize(1 to 100, 2).countAsync() assert(jobB.get() === 100) } test("job group with interruption") { sc = new SparkContext("local[2]", "test") // Add a listener to release the semaphore once any tasks are launched. val sem = new Semaphore(0) sc.addSparkListener(new SparkListener { override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { sem.release() } }) // jobA is the one to be cancelled. val jobA = Future { sc.setJobGroup("jobA", "this is a job to be cancelled", interruptOnCancel = true) sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(100000); i }.count() } // Block until both tasks of job A have started and cancel job A. 
sem.acquire(2) sc.clearJobGroup() val jobB = sc.parallelize(1 to 100, 2).countAsync() sc.cancelJobGroup("jobA") val e = intercept[SparkException] { ThreadUtils.awaitResult(jobA, 5.seconds) }.getCause assert(e.getMessage contains "cancel") // Once A is cancelled, job B should finish fairly quickly. assert(jobB.get() === 100) } test("task reaper kills JVM if killed tasks keep running for too long") { val conf = new SparkConf() .set(TASK_REAPER_ENABLED, true) .set(TASK_REAPER_KILL_TIMEOUT.key, "5s") sc = new SparkContext("local-cluster[2,1,1024]", "test", conf) // Add a listener to release the semaphore once any tasks are launched. val sem = new Semaphore(0) sc.addSparkListener(new SparkListener { override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { sem.release() } }) // jobA is the one to be cancelled. val jobA = Future { sc.setJobGroup("jobA", "this is a job to be cancelled", interruptOnCancel = true) sc.parallelize(1 to 10000, 2).map { i => while (true) { } }.count() } // Block until both tasks of job A have started and cancel job A. sem.acquire(2) // Small delay to ensure tasks actually start executing the task body Thread.sleep(1000) sc.clearJobGroup() val jobB = sc.parallelize(1 to 100, 2).countAsync() sc.cancelJobGroup("jobA") val e = intercept[SparkException] { ThreadUtils.awaitResult(jobA, 15.seconds) }.getCause assert(e.getMessage contains "cancel") // Once A is cancelled, job B should finish fairly quickly. assert(ThreadUtils.awaitResult(jobB, 1.minute) === 100) } test("task reaper will not kill JVM if spark.task.killTimeout == -1") { val conf = new SparkConf() .set(TASK_REAPER_ENABLED, true) .set(TASK_REAPER_KILL_TIMEOUT.key, "-1") .set(TASK_REAPER_POLLING_INTERVAL.key, "1s") .set(MAX_EXECUTOR_RETRIES, 1) sc = new SparkContext("local-cluster[2,1,1024]", "test", conf) // Add a listener to release the semaphore once any tasks are launched. val sem = new Semaphore(0) sc.addSparkListener(new SparkListener { override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { sem.release() } }) // jobA is the one to be cancelled. val jobA = Future { sc.setJobGroup("jobA", "this is a job to be cancelled", interruptOnCancel = true) sc.parallelize(1 to 2, 2).map { i => val startTimeNs = System.nanoTime() while (System.nanoTime() < startTimeNs + TimeUnit.SECONDS.toNanos(10)) { } }.count() } // Block until both tasks of job A have started and cancel job A. sem.acquire(2) // Small delay to ensure tasks actually start executing the task body Thread.sleep(1000) sc.clearJobGroup() val jobB = sc.parallelize(1 to 100, 2).countAsync() sc.cancelJobGroup("jobA") val e = intercept[SparkException] { ThreadUtils.awaitResult(jobA, 15.seconds) }.getCause assert(e.getMessage contains "cancel") // Once A is cancelled, job B should finish fairly quickly. assert(ThreadUtils.awaitResult(jobB, 1.minute) === 100) } test("two jobs sharing the same stage") { // sem1: make sure cancel is issued after some tasks are launched // twoJobsSharingStageSemaphore: // make sure the first stage is not finished until cancel is issued val sem1 = new Semaphore(0) sc = new SparkContext("local[2]", "test") sc.addSparkListener(new SparkListener { override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { sem1.release() } }) // Create two actions that would share the some stages. val rdd = sc.parallelize(1 to 10, 2).map { i => JobCancellationSuite.twoJobsSharingStageSemaphore.acquire() (i, i) }.reduceByKey(_ + _) val f1 = rdd.collectAsync() val f2 = rdd.countAsync() // Kill one of the action. 
Future { sem1.acquire() f1.cancel() JobCancellationSuite.twoJobsSharingStageSemaphore.release(10) } // Expect f1 to fail due to cancellation, intercept[SparkException] { f1.get() } // but f2 should not be affected f2.get() } test("interruptible iterator of shuffle reader") { // In this test case, we create a Spark job of two stages. The second stage is cancelled during // execution and a counter is used to make sure that the corresponding tasks are indeed // cancelled. import JobCancellationSuite._ sc = new SparkContext("local[2]", "test interruptible iterator") // Increase the number of elements to be proceeded to avoid this test being flaky. val numElements = 10000 val taskCompletedSem = new Semaphore(0) sc.addSparkListener(new SparkListener { override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = { // release taskCancelledSemaphore when cancelTasks event has been posted if (stageCompleted.stageInfo.stageId == 1) { taskCancelledSemaphore.release(numElements) } } override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = { if (taskEnd.stageId == 1) { // make sure tasks are completed taskCompletedSem.release() } } }) // Explicitly disable interrupt task thread on cancelling tasks, so the task thread can only be // interrupted by `InterruptibleIterator`. sc.setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, "false") val f = sc.parallelize(1 to numElements).map { i => (i, i) } .repartitionAndSortWithinPartitions(new HashPartitioner(1)) .mapPartitions { iter => taskStartedSemaphore.release() iter }.foreachAsync { x => // Block this code from being executed, until the job get cancelled. In this case, if the // source iterator is interruptible, the max number of increment should be under // `numElements`. We sleep a little to make sure that we leave enough time for the // "kill" message to be delivered to the executor (10000 * 10ms = 100s allowance for // delivery, which should be more than enough). Thread.sleep(10) taskCancelledSemaphore.acquire() executionOfInterruptibleCounter.getAndIncrement() } taskStartedSemaphore.acquire() // Job is cancelled when: // 1. task in reduce stage has been started, guaranteed by previous line. // 2. task in reduce stage is blocked as taskCancelledSemaphore is not released until // JobCancelled event is posted. // After job being cancelled, task in reduce stage will be cancelled asynchronously, thus // partial of the inputs should not get processed (It's very unlikely that Spark can process // 10000 elements between JobCancelled is posted and task is really killed). f.cancel() val e = intercept[SparkException](f.get()).getCause assert(e.getMessage.contains("cancelled") || e.getMessage.contains("killed")) // Make sure tasks are indeed completed. taskCompletedSem.acquire() assert(executionOfInterruptibleCounter.get() < numElements) } def testCount(): Unit = { // Cancel before launching any tasks { val f = sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.countAsync() Future { f.cancel() } val e = intercept[SparkException] { f.get() }.getCause assert(e.getMessage.contains("cancelled") || e.getMessage.contains("killed")) } // Cancel after some tasks have been launched { // Add a listener to release the semaphore once any tasks are launched. 
val sem = new Semaphore(0) sc.addSparkListener(new SparkListener { override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { sem.release() } }) val f = sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.countAsync() Future { // Wait until some tasks were launched before we cancel the job. sem.acquire() f.cancel() } val e = intercept[SparkException] { f.get() }.getCause assert(e.getMessage.contains("cancelled") || e.getMessage.contains("killed")) } } def testTake(): Unit = { // Cancel before launching any tasks { val f = sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.takeAsync(5000) Future { f.cancel() } val e = intercept[SparkException] { f.get() }.getCause assert(e.getMessage.contains("cancelled") || e.getMessage.contains("killed")) } // Cancel after some tasks have been launched { // Add a listener to release the semaphore once any tasks are launched. val sem = new Semaphore(0) sc.addSparkListener(new SparkListener { override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { sem.release() } }) val f = sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.takeAsync(5000) Future { sem.acquire() f.cancel() } val e = intercept[SparkException] { f.get() }.getCause assert(e.getMessage.contains("cancelled") || e.getMessage.contains("killed")) } } } object JobCancellationSuite { // To avoid any headaches, reset these global variables in the companion class's afterEach block val taskStartedSemaphore = new Semaphore(0) val taskCancelledSemaphore = new Semaphore(0) val twoJobsSharingStageSemaphore = new Semaphore(0) val executionOfInterruptibleCounter = new AtomicInteger(0) }
maropu/spark
core/src/test/scala/org/apache/spark/JobCancellationSuite.scala
Scala
apache-2.0
16,892
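The suite above exercises job-group cancellation with listeners and semaphores to control timing precisely. A minimal standalone sketch of the underlying pattern (not part of the suite; the master URL, sleep-based timing, and names are illustrative only): tag work submitted from a thread with a group id, then cancel the whole group from elsewhere.

import org.apache.spark.{SparkConf, SparkContext}

object CancelByGroupSketch {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("cancel-demo"))

    val runner = new Thread(() => {
      // Every job submitted from this thread is tagged with the group id.
      sc.setJobGroup("slow-group", "work we may abandon", interruptOnCancel = true)
      try sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
      catch { case e: Exception => println(s"job was cancelled: ${e.getMessage}") }
    })
    runner.start()

    Thread.sleep(500)               // give some tasks time to launch (illustrative only;
                                    // the suite above uses listeners and semaphores instead)
    sc.cancelJobGroup("slow-group") // cancels every job tagged with the group
    runner.join()
    sc.stop()
  }
}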