Dataset columns:
  code        string       (length 5 to 1M)
  repo_name   string       (length 5 to 109)
  path        string       (length 6 to 208)
  language    categorical  (1 distinct value)
  license     categorical  (15 distinct values)
  size        int64        (5 to 1M)
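Each record pairs a flattened source file (`code`) with its repository metadata. A minimal Scala sketch of that record shape — the case class is illustrative only, not part of any dataset tooling; the example values are taken from the first record below:

case class CodeRecord(
  code: String,      // full source text, 5 characters to 1M
  repoName: String,  // e.g. "gtache/intellij-lsp"
  path: String,      // file path within the repository
  language: String,  // always "Scala" in this dump
  license: String,   // one of 15 license identifiers, e.g. "apache-2.0"
  size: Long         // file size in bytes
)

// Example: the first record, code field elided.
val example = CodeRecord(
  code = "...",
  repoName = "gtache/intellij-lsp",
  path = "intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/parser/parsing/Program.scala",
  language = "Scala",
  license = "apache-2.0",
  size = 752
)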
package org.jetbrains.plugins.scala.lang.parser.parsing

import org.jetbrains.plugins.scala.lang.parser.ErrMsg
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder

/*
 * Program parses all content in scala source file
 */
object Program extends Program {
  override protected def compilationUnit = CompilationUnit
}

trait Program {
  protected def compilationUnit: CompilationUnit

  def parse(builder: ScalaPsiBuilder): Int = {
    var parseState = 0
    if (!builder.eof()) {
      parseState = compilationUnit.parse(builder)
    }
    if (!builder.eof()) {
      while (!builder.eof()) {
        builder error ErrMsg("out.of.compilation.unit")
        builder.advanceLexer()
      }
    }
    return parseState
  }
}
gtache/intellij-lsp
intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/parser/parsing/Program.scala
Scala
apache-2.0
752
// Webside Source Code Zauberstuhl.de
// Copyright (C) 2016-2019 Lukas Matt <[email protected]>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
//
package objects

object Provider {
  trait Provider {
    def name: String = getClass.getName
  }

  case class StripeProvider() extends Provider
  case class BlockChainProvider() extends Provider
  case class EmailProvider() extends Provider
}
Zauberstuhl/zauberstuhl
app/objects/Provider.scala
Scala
gpl-3.0
972
/*
 * Created on 2010/11/07
 * Copyright (c) 2010-2011, Wei-ju Wu.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * Neither the name of Wei-ju Wu nor the names of its contributors may
 * be used to endorse or promote products derived from this software without
 * specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
package org.zmpp.tads3

import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.BeforeAndAfterEach

@RunWith(classOf[JUnitRunner])
class TadsObjectIdSpec extends FlatSpec with ShouldMatchers {

  "T3ObjectId" should "be equal" in {
    val objId42 = T3ObjectId(42)
    val objId43 = T3ObjectId(43)
    val objId42too = T3ObjectId(42)
    val objId42three = T3Value.create(TypeIds.VmObj, 42)
    val int42 = T3Value.create(TypeIds.VmInt, 42)

    objId42 should equal (objId42)
    objId42 should equal (objId42too)
    objId42too should equal (objId42)
    objId42 should equal (objId42three)
    objId42three should equal (objId42)
    objId42 should not equal (objId43)
    objId43 should not equal (objId42)
    int42 should not equal (objId42)
    objId42 should not equal (int42)
  }
}

@RunWith(classOf[JUnitRunner])
class ObjectSystemSpec extends FlatSpec with ShouldMatchers {

  "ObjectSystem" should "be initialized" in {
    val objectSystem = new ObjectSystem
    val id1 = objectSystem.newObjectId
    id1.value should equal (1)
    id1.valueType should equal (TypeIds.VmObj)
  }
}

@RunWith(classOf[JUnitRunner])
class TadsObjectSpec extends FlatSpec with ShouldMatchers with BeforeAndAfterEach {

  var objectSystem: ObjectSystem = null
  var functionSetMapper: IntrinsicFunctionSetMapper = null
  var vmState: TadsVMState = null

  override def beforeEach {
    objectSystem = new ObjectSystem
    functionSetMapper = new IntrinsicFunctionSetMapper
    vmState = new TadsVMState(objectSystem, functionSetMapper)
  }

  "TadsObject" should "be created" in {
    val obj = new TadsObject(T3ObjectId(1), vmState, false, 0, 0, false)
    obj.metaClass.name should equal ("tads-object")
  }

  it should "get non-existing" in {
    val obj = new TadsObject(T3ObjectId(1), vmState, false, 0, 0, false)
    obj.getProperty(2831, 0) should equal (InvalidProperty)
    obj.numProperties should equal (0)
  }

  it should "set non-existing" in {
    val obj = new TadsObject(T3ObjectId(1), vmState, false, 0, 0, false)
    val testVal = T3Integer(4711)
    obj.setProperty(2831, testVal)
    obj.numProperties should equal (1)
    obj.getProperty(2831, 0).tadsValue should equal (testVal)
  }

  it should "overwrite existing" in {
    val obj = new TadsObject(T3ObjectId(1), vmState, false, 0, 0, false)
    val testVal1 = T3Integer(4711)
    val testVal2 = T3Integer(4712)
    obj.setProperty(2831, testVal1)
    obj.setProperty(2831, testVal2)
    obj.numProperties should equal (1)
    obj.getProperty(2831, 0).tadsValue should equal (testVal2)
  }
}
logicmoo/zmpp2
zmpp-tads3/src/test/scala/org/zmpp/tads3/ObjectModelTest.scala
Scala
bsd-3-clause
4,355
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.streams.scala.kstream

import org.apache.kafka.common.serialization.Serde
import org.apache.kafka.streams.kstream.{StreamJoined => StreamJoinedJ}
import org.apache.kafka.streams.state.WindowBytesStoreSupplier

object StreamJoined {

  /**
   * Create an instance of [[StreamJoined]] with key, value, and otherValue [[Serde]]
   * instances.
   * `null` values are accepted and will be replaced by the default serdes as defined in config.
   *
   * @tparam K key type
   * @tparam V value type
   * @tparam VO other value type
   * @param keySerde the key serde to use.
   * @param valueSerde the value serde to use.
   * @param otherValueSerde the otherValue serde to use. If `null` the default value serde from config will be used
   * @return new [[StreamJoined]] instance with the provided serdes
   */
  def `with`[K, V, VO](implicit keySerde: Serde[K],
                       valueSerde: Serde[V],
                       otherValueSerde: Serde[VO]): StreamJoinedJ[K, V, VO] =
    StreamJoinedJ.`with`(keySerde, valueSerde, otherValueSerde)

  /**
   * Create an instance of [[StreamJoined]] with store suppliers for the calling stream
   * and the other stream. Also adds the key, value, and otherValue [[Serde]]
   * instances.
   * `null` values are accepted and will be replaced by the default serdes as defined in config.
   *
   * @tparam K key type
   * @tparam V value type
   * @tparam VO other value type
   * @param supplier store supplier to use
   * @param otherSupplier other store supplier to use
   * @param keySerde the key serde to use.
   * @param valueSerde the value serde to use.
   * @param otherValueSerde the otherValue serde to use. If `null` the default value serde from config will be used
   * @return new [[StreamJoined]] instance with the provided store suppliers and serdes
   */
  def `with`[K, V, VO](
    supplier: WindowBytesStoreSupplier,
    otherSupplier: WindowBytesStoreSupplier
  )(implicit keySerde: Serde[K], valueSerde: Serde[V], otherValueSerde: Serde[VO]): StreamJoinedJ[K, V, VO] =
    StreamJoinedJ
      .`with`(supplier, otherSupplier)
      .withKeySerde(keySerde)
      .withValueSerde(valueSerde)
      .withOtherValueSerde(otherValueSerde)

  /**
   * Create an instance of [[StreamJoined]] with the name used for naming
   * the state stores involved in the join. Also adds the key, value, and otherValue [[Serde]]
   * instances.
   * `null` values are accepted and will be replaced by the default serdes as defined in config.
   *
   * @tparam K key type
   * @tparam V value type
   * @tparam VO other value type
   * @param storeName the name to use as a base name for the state stores of the join
   * @param keySerde the key serde to use.
   * @param valueSerde the value serde to use.
   * @param otherValueSerde the otherValue serde to use. If `null` the default value serde from config will be used
   * @return new [[StreamJoined]] instance with the provided store suppliers and serdes
   */
  def as[K, V, VO](
    storeName: String
  )(implicit keySerde: Serde[K], valueSerde: Serde[V], otherValueSerde: Serde[VO]): StreamJoinedJ[K, V, VO] =
    StreamJoinedJ.as(storeName).withKeySerde(keySerde).withValueSerde(valueSerde).withOtherValueSerde(otherValueSerde)
}
TiVo/kafka
streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/kstream/StreamJoined.scala
Scala
apache-2.0
4,112
import sbt._ object Dependencies { // Compile dependencies // format: OFF private def scalaReflect(version: String) = "org.scala-lang" % "scala-reflect" % version private def scalaCompiler(version: String) = ("org.scala-lang" % "scala-compiler" % version) .exclude("org.jline", "jline") private val scalaSwing = "org.scala-lang.modules" %% "scala-swing" % "3.0.0" private val scalaParserCombinators = "org.scala-lang.modules" %% "scala-parser-combinators" % "2.1.1" private val netty = "io.netty" % "netty-codec-http" % "4.1.74.Final" private val nettyBuffer = netty.organization % "netty-buffer" % netty.revision private val nettyHandler = netty.organization % "netty-handler" % netty.revision private val nettyMqtt = netty.organization % "netty-codec-mqtt" % netty.revision private val nettyProxy = netty.organization % "netty-handler-proxy" % netty.revision private val nettyDns = netty.organization % "netty-resolver-dns" % netty.revision private val nettyEpollLinuxX86 = netty.organization % "netty-transport-native-epoll" % netty.revision classifier "linux-x86_64" private val nettyEpollLinuxArm = netty.organization % "netty-transport-native-epoll" % netty.revision classifier "linux-aarch_64" private val nettyHttp2 = netty.organization % "netty-codec-http2" % netty.revision private val nettyTcNative = netty.organization % "netty-tcnative-classes" % "2.0.50.Final" private val nettyTcNativeBoringSsl = nettyTcNative.withName("netty-tcnative-boringssl-static") private val brotli4j = "com.aayushatharva.brotli4j" % "brotli4j" % "1.6.0" private val brotli4jMacOs = brotli4j.withName("native-osx-x86_64") private val brotli4jLinuxX86 = brotli4j.withName("native-linux-x86_64") private val brotli4jLinuxArm = brotli4j.withName("native-linux-aarch64") private val brotli4jWindows = brotli4j.withName("native-windows-x86_64") private val akka = "com.typesafe.akka" %% "akka-actor" % "2.6.18" private val akkaSlf4j = akka.organization %% "akka-slf4j" % akka.revision private val config = "com.typesafe" % "config" % "1.4.2" private val saxon = "net.sf.saxon" % "Saxon-HE" % "10.6" private val slf4jApi = "org.slf4j" % "slf4j-api" % "1.7.36" private val spire = ("org.typelevel" %% "spire-macros" % "0.17.0") .exclude("org.typelevel", "machinist_2.13") .exclude("org.typelevel", "algebra_2.13") .exclude("org.scala-lang.modules", "scala-collection-compat_2.13") private val scopt = "com.github.scopt" %% "scopt" % "3.7.1" private val scalaLogging = "com.typesafe.scala-logging" %% "scala-logging" % "3.9.4" private val jackson = "com.fasterxml.jackson.core" % "jackson-databind" % "2.13.2" private val sfm = ("org.simpleflatmapper" % "lightning-csv" % "8.2.3") .exclude("org.simpleflatmapper", "ow2-asm") private val lagarto = "org.jodd" % "jodd-lagarto" % "6.0.5" private val jmespath = "io.burt" % "jmespath-jackson" % "0.5.1" private val boopickle = "io.suzaku" %% "boopickle" % "1.3.3" private val redisClient = "net.debasishg" %% "redisclient" % "3.42" private val zinc = ("org.scala-sbt" %% "zinc" % "1.6.1") .exclude("org.jline", "jline") .exclude("org.scala-sbt.jline3", "jline-terminal") .exclude("org.jline", "jline-terminal-jna") .exclude("org.jline", "jline-terminal-jansi") .exclude("org.scala-sbt.jline", "jline") .exclude("org.scala-lang.modules", "scala-parser-combinators_2.13") .exclude("org.scala-lang.modules", "scala-xml_2.13") .exclude("org.scala-sbt", "launcher-interface") .exclude("org.scala-sbt", "sbinary_2.13") .exclude("org.scala-sbt", "zinc-ivy-integration_2.13") .exclude("com.eed3si9n", "sjson-new-core_2.13") 
.exclude("com.eed3si9n", "sjson-new-scalajson_2.13") .exclude("com.lihaoyi", "fastparse_2.13") .exclude("com.lmax", "disruptor") .exclude("org.apache.logging.log4j", "log4j-api") .exclude("org.apache.logging.log4j", "log4j-core") private val compilerBridge = zinc.organization %% "compiler-bridge" % zinc.revision private val testInterface = zinc.organization % "test-interface" % "1.0" private val jmsApi = "javax.jms" % "javax.jms-api" % "2.0.1" private val logback = "ch.qos.logback" % "logback-classic" % "1.2.11" private val tdigest = "com.tdunning" % "t-digest" % "3.1" private val hdrHistogram = "org.hdrhistogram" % "HdrHistogram" % "2.1.12" private val caffeine = "com.github.ben-manes.caffeine" % "caffeine" % "2.9.3" private val bouncyCastle = "io.gatling" % "gatling-recorder-bc-shaded" % "1.70" private val quicklens = "com.softwaremill.quicklens" %% "quicklens" % "1.8.3" private val fastUuid = "com.eatthepath" % "fast-uuid" % "0.2.0" private val pebble = "io.pebbletemplates" % "pebble" % "3.1.5" private val jsr305 = "com.google.code.findbugs" % "jsr305" % "3.0.2" private val typetools = "net.jodah" % "typetools" % "0.6.3" // Test dependencies private val scalaTest = "org.scalatest" %% "scalatest" % "3.2.11" % Test private val scalaTestScalacheck = "org.scalatestplus" %% "scalacheck-1-15" % "3.2.11.0" % Test private val scalaTestMockito = scalaTestScalacheck.organization %% "mockito-3-4" % "3.2.10.0" % Test private val scalaCheck = "org.scalacheck" %% "scalacheck" % "1.15.4" % Test private val akkaTestKit = akka.organization %% "akka-testkit" % akka.revision % Test private val mockitoCore = "org.mockito" % "mockito-core" % "4.3.1" % Test private val activemqBroker = ("org.apache.activemq" % "activemq-broker" % "5.16.4" % Test) .exclude("org.apache.geronimo.specs", "geronimo-jms_1.1_spec") private val h2 = "com.h2database" % "h2" % "2.1.210" % Test private val jmh = "org.openjdk.jmh" % "jmh-core" % "1.27" private val junit = "org.junit.jupiter" % "junit-jupiter-api" % "5.8.2" % Test private val junitEngine = junit.organization % "junit-jupiter-engine" % junit.revision % Test private val jupiterInterface = "net.aichler" % "jupiter-interface" % "0.9.1" % Test private val jetty = "org.eclipse.jetty" % "jetty-server" % "9.4.45.v20220203" % Test private val jettyProxy = jetty.organization % "jetty-proxy" % jetty.revision % Test // Docs dependencies private val commonsIo = "commons-io" % "commons-io" % "2.11.0" private val commonsLang = "org.apache.commons" % "commons-lang3" % "3.12.0" private val commonsCodec = "commons-codec" % "commons-codec" % "1.15" // format: ON private val loggingDeps = Seq(slf4jApi, scalaLogging, logback) private val testDeps = Seq( scalaTest, scalaTestScalacheck, scalaTestMockito, scalaCheck, akkaTestKit, mockitoCore ) private val parserDeps = Seq(jackson, saxon, lagarto, jmespath) // Dependencies by module val docSamplesDependencies = Seq(commonsIo, commonsLang, commonsCodec) val nettyUtilDependencies = Seq(nettyBuffer, nettyEpollLinuxX86, nettyEpollLinuxArm, junit, junitEngine, jupiterInterface) def commonsSharedDependencies(scalaVersion: String) = Seq(scalaReflect(scalaVersion), boopickle) ++ testDeps val commonsSharedUnstableDependencies = testDeps val commonsDependencies = Seq(config, spire) ++ loggingDeps ++ testDeps val jsonpathDependencies = Seq(scalaParserCombinators, jackson) ++ testDeps val coreDependencies = Seq( akka, akkaSlf4j, sfm, caffeine, pebble, scalaParserCombinators, scopt, nettyHandler, nettyTcNative, quicklens ) ++ parserDeps ++ testDeps val 
defaultJavaDependencies = Seq(jsr305, junit, junitEngine, jupiterInterface) ++ testDeps val coreJavaDependencies = Seq(typetools) ++ defaultJavaDependencies val redisDependencies = redisClient +: testDeps val httpClientDependencies = Seq( netty, nettyBuffer, nettyHandler, nettyProxy, nettyDns, nettyEpollLinuxX86, nettyEpollLinuxArm, nettyHttp2, nettyTcNative, nettyTcNativeBoringSsl, brotli4j, brotli4jLinuxX86, brotli4jLinuxArm, brotli4jMacOs, brotli4jWindows, junit, junitEngine, jupiterInterface, jetty, jettyProxy ) ++ loggingDeps val httpDependencies = Seq(saxon) ++ testDeps val jmsDependencies = Seq(jmsApi, fastUuid, activemqBroker) ++ testDeps val jdbcDependencies = h2 +: testDeps val mqttDependencies = Seq(nettyHandler, nettyTcNative, nettyMqtt, nettyEpollLinuxX86, nettyEpollLinuxArm) val chartsDependencies = tdigest +: testDeps val graphiteDependencies = hdrHistogram +: testDeps val benchmarkDependencies = Seq(jmh) def compilerDependencies(scalaVersion: String) = Seq( scalaCompiler(scalaVersion), scalaReflect(scalaVersion), config, slf4jApi, logback, zinc, compilerBridge, scopt ) val recorderDependencies = Seq(scalaSwing, jackson, bouncyCastle, netty, akka) ++ testDeps val testFrameworkDependencies = Seq(testInterface) val docDependencies = Seq(activemqBroker) }
gatling/gatling
project/Dependencies.scala
Scala
apache-2.0
12,608
/*
 * Copyright 2015 the original author or authors.
 * @https://github.com/scouter-project/scouter
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package scouter.server.tagcnt.next;

import java.io.IOException;
import scouter.util.IClose;

class WorkDB(file: String) extends IClose {
  var lastActive = 0L
  var table: IndexFile = null
  var logDate = ""
  var objType = ""

  def open() {
    this.table = IndexFile.open(file + "/next")
  }

  def close() {
    this.table.close();
  }
}
yuyupapa/OpenSource
scouter.server/src/scouter/server/tagcnt/next/WorkDB.scala
Scala
apache-2.0
1,083
/*
 * Copyright 2022 Typelevel
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.typelevel.sbt.kernel

import scala.util.Try

private[sbt] final case class V(
    major: Int,
    minor: Int,
    patch: Option[Int],
    prerelease: Option[String]
) extends Ordered[V] {

  override def toString: String =
    s"$major.$minor${patch.fold("")(p => s".$p")}${prerelease.fold("")(p => s"-$p")}"

  def isPrerelease: Boolean = prerelease.nonEmpty

  def isSameSeries(that: V): Boolean =
    this.major == that.major && this.minor == that.minor

  def mustBeBinCompatWith(that: V): Boolean =
    this >= that && !that.isPrerelease && this.major == that.major && (major > 0 || this.minor == that.minor)

  def compare(that: V): Int = {
    val x = this.major.compare(that.major)
    if (x != 0) return x
    val y = this.minor.compare(that.minor)
    if (y != 0) return y
    (this.patch, that.patch) match {
      case (None, None) => 0
      case (None, Some(patch)) => 1
      case (Some(patch), None) => -1
      case (Some(thisPatch), Some(thatPatch)) =>
        val z = thisPatch.compare(thatPatch)
        if (z != 0) return z
        (this.prerelease, that.prerelease) match {
          case (None, None) => 0
          case (Some(_), None) => 1
          case (None, Some(_)) => -1
          case (Some(thisPrerelease), Some(thatPrerelease)) =>
            // TODO not great, but not everyone uses Ms and RCs
            thisPrerelease.compare(thatPrerelease)
        }
    }
  }
}

private[sbt] object V {
  val version = """^(0|[1-9]\d*)\.(0|[1-9]\d*)(?:\.(0|[1-9]\d*))?(?:-(.+))?$""".r

  def apply(v: String): Option[V] = V.unapply(v)

  def unapply(v: String): Option[V] = v match {
    case version(major, minor, patch, prerelease) =>
      Try(V(major.toInt, minor.toInt, Option(patch).map(_.toInt), Option(prerelease))).toOption
    case _ => None
  }

  object Tag {
    def unapply(v: String): Option[V] =
      if (v.startsWith("v")) V.unapply(v.substring(1)) else None
  }
}
typelevel/sbt-typelevel
kernel/src/main/scala/org/typelevel/sbt/kernel/V.scala
Scala
apache-2.0
2,514
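A short usage sketch of the version parser above (illustrative only, ignoring the `private[sbt]` scoping; the expected values follow from the regex and `compare` as written):

// Parsing and ordering with the V helper defined in the record above.
val v1 = V("1.2.3")      // Some(V(1, 2, Some(3), None))
val v2 = V("1.3.0-RC1")  // Some(V(1, 3, Some(0), Some("RC1")))
val tagged = "v1.2.3" match {
  case V.Tag(v) => Some(v) // strips the leading "v" and reuses V.unapply
  case _        => None
}
assert(v1.get < v2.get)    // minor 2 sorts before minor 3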
package com.karasiq.mapdb.index

import com.karasiq.mapdb.MapDbConversions._
import org.mapdb.{Bind, DB}

import scala.collection.JavaConversions._
import scala.collection.immutable.AbstractMap

object MapDbIndex {
  type JavaMap[K, V] = java.util.Map[K, V]

  object IndexMaps {
    def heapHashMap[K, V](): JavaMap[K, V] = {
      new java.util.concurrent.ConcurrentHashMap[K, V]()
    }

    def heapTreeMap[K, V](): JavaMap[K, V] = {
      new java.util.concurrent.ConcurrentSkipListMap[K, V]()
    }

    def mapDbHashMap[K, V](db: DB, name: String): JavaMap[K, V] = {
      db.hashMap(name)
    }

    def mapDbTreeMap[K, V](db: DB, name: String): JavaMap[K, V] = {
      db.treeMap(name)
    }
  }

  private final class WrappedIndexMap[K, V](secondaryMap: JavaMap[K, V]) extends AbstractMap[K, V] {
    override def get(key: K): Option[V] = {
      Option(secondaryMap.get(key))
    }

    override def iterator: Iterator[(K, V)] = {
      secondaryMap.keySet().toIterator.collect {
        case key if secondaryMap.containsKey(key) ⇒ key → secondaryMap.get(key)
      }
    }

    private def exception(): Nothing = throw new IllegalArgumentException("Couldn't modify index map")

    override def +[B1 >: V](kv: (K, B1)): Map[K, B1] = exception()

    override def -(key: K): Map[K, V] = exception()
  }

  def secondaryKey[K, V, SK](map: Bind.MapWithModificationListener[K, V], function: (K, V) ⇒ SK, secondaryMap: JavaMap[SK, K] = IndexMaps.heapHashMap()): Map[SK, K] = {
    Bind.secondaryKey(map, secondaryMap, function)
    new WrappedIndexMap(secondaryMap)
  }

  def secondaryValue[K, V, SV](map: Bind.MapWithModificationListener[K, V], function: (K, V) ⇒ SV, secondaryMap: JavaMap[K, SV] = IndexMaps.heapHashMap()): Map[K, SV] = {
    Bind.secondaryValue(map, secondaryMap, function)
    new WrappedIndexMap(secondaryMap)
  }
}
Karasiq/mapdbutils
src/main/scala/com/karasiq/mapdb/index/MapDbIndex.scala
Scala
mit
1,866
package colang.ast.parsed

import colang.SourceCode
import colang.ast.raw
import colang.issues.Issue

/**
 * Represents a compiler component that performs semantic analysis of the code, establishes symbol references and
 * does the last issue check.
 */
trait Analyzer {

  /**
   * Performs the code analysis.
   * @param symbolDefs raw top-level symbol definitions
   * @param eof source code fragment pointing to the end of source CO file
   * @return (populated root namespace, found issues)
   */
  def analyze(symbolDefs: Seq[raw.GlobalSymbolDefinition], eof: SourceCode): (RootNamespace, Seq[Issue])
}

/**
 * Actual analyzer implementation.
 * Analysis is performed step-by-step, and so is mutable by nature.
 * The usual pattern is to first "register" a new symbol in its container, without actually analyzing anything besides
 * its name and type. This allows to have non-trivial forward references, and in some cases circular dependencies
 * between symbols. After registering all symbols, full analysis is performed, possibly in several stages for the
 * same reason as above.
 * Sometimes a symbol can't be registered because of a name conflict. In this case, the created symbol is kept and
 * further analysis is done as usual, but the container won't be aware of the symbol: other symbols won't be able to
 * reference it. Symbol in this state is called "detached".
 */
class AnalyzerImpl extends Analyzer {

  def analyze(symbolDefs: Seq[raw.GlobalSymbolDefinition], eof: SourceCode): (RootNamespace, Seq[Issue]) = {
    val typeDefs = (symbolDefs filter { _.isInstanceOf[raw.TypeDefinition] }).
      asInstanceOf[Seq[raw.TypeDefinition]]

    val funcDefs = (symbolDefs filter { _.isInstanceOf[raw.FunctionDefinition] }).
      asInstanceOf[Seq[raw.FunctionDefinition]]

    val varDefs = (symbolDefs filter { _.isInstanceOf[raw.statement.VariablesDefinition] }).
      asInstanceOf[Seq[raw.statement.VariablesDefinition]]

    val rootNamespace = new RootNamespace()

    val (types, typesIssues) = routines.registerTypes(rootNamespace, typeDefs)
    val (functions, functionsIssues) = routines.registerFunctions(rootNamespace, funcDefs)

    val emptyTypeIssues = routines.checkTypesWithoutBody(types)

    val (methods, methodsIssues) = routines.registerMethods(types)
    val (variables, globalVarInitStatements, varIssues) = routines.registerGlobalVariables(rootNamespace, varDefs)

    val funcBodiesIssues = routines.analyzeFunctionBodies(functions)
    val mainFuncIssues = routines.processMainFunction(rootNamespace, globalVarInitStatements, eof)
    val returnIssues = routines.checkReturnStatements(functions)

    val issues = typesIssues ++ functionsIssues ++ emptyTypeIssues ++ methodsIssues ++ varIssues ++
      funcBodiesIssues ++ mainFuncIssues ++ returnIssues

    (rootNamespace, issues)
  }
}
merkispavel/colang
src/main/scala/colang/ast/parsed/Analyzer.scala
Scala
mit
2,844
import scala.util.matching.Regex

object Problema1 {
  def main(args: Array[String]) {
    val source = scala.io.Source.fromFile("A-large-practice.in")
    val lines = source.getLines.filter(_.length > 0)

    val ldn = lines.next.split(" ")
    val D = ldn(1).toInt
    val N = ldn(2).toInt

    var palabras = new Array[String](D)
    var patterns = new Array[String](N)

    for (i <- 0 until D) {
      palabras(i) = lines.next
    }

    for (i <- 0 until N) {
      patterns(i) = lines.next
    }

    patterns = patterns.map(_.replace("(", "[")).map(_.replace(")", "]"))

    for (i <- 0 until N) {
      val cont = Stream.iterate(0)(_ + 1).iterator
      for (j <- 0 until D) {
        if (palabras(j).matches(patterns(i))) {
          cont.next
        }
      }
      println("Case #" + (i + 1) + ": " + (cont.next))
    }
  }
}
Nearsoft/google-code-jam
solutions/alien-language/scala/approach4.scala
Scala
mit
916
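The solution above rewrites the alien-language pattern groups such as (ab)d(dc) into regex character classes [ab]d[dc] and counts the dictionary words that match each pattern; the Stream-based iterator is only an incrementing counter. A minimal equivalent of that counting step, using the same arrays (illustrative rewrite, not the repository's code):

// Count matching words directly instead of driving an iterator.
val matches = palabras.count(word => word.matches(patterns(i)))
println("Case #" + (i + 1) + ": " + matches)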
package com.sksamuel.elastic4s.locks

trait LocksApi {
  def acquireGlobalLock() = AcquireGlobalLockDefinition()
  def releaseGlobalLock() = ReleaseGlobalLockDefinition()
}

case class AcquireGlobalLockDefinition()
case class ReleaseGlobalLockDefinition()
FabienPennequin/elastic4s
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/locks/LocksApi.scala
Scala
apache-2.0
256
/***
 * Excerpted from "Seven Concurrency Models in Seven Weeks",
 * published by The Pragmatic Bookshelf.
 * Copyrights apply to this code. It may not be used to create training material,
 * courses, books, articles, and the like. Contact us if you are in doubt.
 * We make no guarantees that this code is fit for any purpose.
 * Visit http://www.pragmaticprogrammer.com/titles/pb7con for more book information.
***/
package com.paulbutcher

import akka.actor._
import collection.mutable.HashMap

case class ParserAvailable(parser: ActorRef)
case class Batch(id: Int, pages: Seq[Page], accumulator: ActorRef)

class Counter extends Actor {
  def receive = {
    case ParserAvailable(parser) =>
      parser ! RequestBatch

    case Batch(id, pages, accumulator) =>
      sender ! RequestBatch
      val counts = HashMap[String, Int]().withDefaultValue(0)
      for (page <- pages)
        for (word <- Words(page.text))
          counts(word) += 1
      accumulator ! Counts(id, counts)
  }
}
XBOOS/concurrency
code/ActorsScala/WordCountFaultTolerant/src/main/scala/com/paulbutcher/Counter.scala
Scala
gpl-2.0
991
/** * Copyright (c) 2012-2013, Tomasz Kaczmarzyk. * * This file is part of BeanDiff. * * BeanDiff is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 3 of the License, or * (at your option) any later version. * * BeanDiff is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with BeanDiff; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ package org.beandiff.core import org.beandiff.core.model.DeepDiff import org.beandiff.core.model.Diff import org.beandiff.core.model.Diff import org.beandiff.core.model.FlatDiff import org.beandiff.core.model.IndexProperty import org.beandiff.core.model.IndexProperty import org.beandiff.core.model.IndexProperty import org.beandiff.core.model.IndexProperty import org.beandiff.core.model.IndexProperty import org.beandiff.core.model.IndexProperty import org.beandiff.core.model.Path import org.beandiff.core.model.Path.EmptyPath import org.beandiff.core.model.PathChangeOrdering import org.beandiff.core.model.Property import org.beandiff.core.model.change.Change import org.beandiff.core.model.change.Deletion import org.beandiff.core.model.change.Deletion import org.beandiff.core.model.change.Insertion import org.beandiff.core.model.change.Insertion import org.beandiff.core.model.change.Insertion import org.beandiff.core.model.change.Insertion import org.beandiff.core.model.change.NewValue import org.beandiff.core.model.change.NewValue import org.beandiff.core.model.change.Shift import org.beandiff.core.model.change.Deletion import org.beandiff.core.model.change.Insertion import org.beandiff.core.model.change.Insertion import org.beandiff.core.model.change.Deletion import org.beandiff.core.model.change.Insertion import org.beandiff.core.model.change.Insertion import org.beandiff.core.model.change.Deletion import org.beandiff.core.model.change.Deletion import org.beandiff.core.model.IndexProperty import org.beandiff.core.model.IndexProperty import org.beandiff.core.model.Self class LcsResultOptimizer( parent: DiffEngineCoordinator, lcsEngine: LcsDiffEngine) extends DiffEngine { def calculateDiff(o1: Any, o2: Any) = { val diff = lcsEngine.calculateDiff(o1, o2) diff.changes(EmptyPath) match { case None => diff case Some(_) => optimize(diff) } } private def optimize(diff: Diff): Diff = { var result = diff var skip = List[Change]() val selfChanges = diff.changes(EmptyPath).get.leafChanges.sorted(PathChangeOrdering) for { // FIXME it changed into scary crap, refactor! (path, change1) <- selfChanges (path, change2) <- selfChanges if !(skip.contains(change1) || skip.contains(change2)) } { (change1, change2) match { case (del @ Deletion(x, idx), ins @ Insertion(y, idx2)) if idx == idx2 && isInPlace(path, del, ins)(selfChanges) => { // TODO high complexity, factor out some stuff result = result.without(path, change1).without(path, change2) skip = change1 :: change2 :: skip if (lcsEngine.objTypes(x, y).allowedToDiff(x, y)) { // FIXME what if y is entity here? 
result = parent.calculateDiff(result, change1.targetProperty, change1.oldValue.get, change2.newValue.get) } else { result = result.withChange(path, NewValue(IndexProperty(idx), x, y)) } } case (Deletion(x, idx), Insertion(y, idx2)) if lcsEngine.objTypes(x, y).areEqual(x, y) && !selfChanges.exists(shiftBlocker(selfChanges)((path, change1), (path, change2))) => { result = result.without(path, change1).without(path, change2) // TODO add without(path, changes*) result = result.withChange(path, Shift(x, idx, idx2)) result = parent.calculateDiff(result, change1.targetProperty, x, y) skip = change1 :: change2 :: skip } case _ => {} } } result } // FIXME all of the order-checking should be simplified (computational and code wise) private def isInPlace(path: Path, del: Deletion, ins: Insertion)(allChanges: List[(Path, Change)]):Boolean = { val changesBeforeIns = allChanges.filter(isBefore((path, ins))) val withoutTheDel = changesBeforeIns.filter(_ != (path, del)) val numInserts = withoutTheDel.filter(_._2.isInstanceOf[Insertion]).size val numDels = withoutTheDel.filter(_._2.isInstanceOf[Deletion]).size numInserts == numDels } private def shiftBlocker(allChanges: List[(Path, Change)])(left: (Path, Change), right: (Path, Change))(target: (Path, Change)): Boolean = { isBetween(left, right)(target) } private def isBefore(right: (Path, Change))(target: (Path, Change)): Boolean = { PathChangeOrdering.compare(right, target) > 0 && ((right._2.targetProperty, target._2.targetProperty) match { case (IndexProperty(rightIdx), IndexProperty(targetIdx)) => rightIdx > targetIdx case _ => false }) } private def isBetween(left: (Path, Change), right: (Path, Change))(target: (Path, Change)): Boolean = { PathChangeOrdering.compare(left, target) < 0 && PathChangeOrdering.compare(target, right) < 0 } }
tkaczmarzyk/beandiff
src/main/scala/org/beandiff/core/LcsResultOptimizer.scala
Scala
lgpl-3.0
5,579
package lib.query

import org.scalatestplus.play.PlaySpec
import org.scalatestplus.play.guice.GuiceOneAppPerSuite

class QuerySpec extends PlaySpec with GuiceOneAppPerSuite {

  def validateQuery(q: String, words: Seq[String], orgKeys: Seq[String]): Unit = {
    QueryParser(q) match {
      case None => fail(s"Query[$q] failed to parse")
      case Some(query) => {
        query.words must be(words)
        query.orgKeys must be(orgKeys)
      }
    }
  }

  "QueryParser" in {
    validateQuery("foo", Seq("foo"), Nil)
    validateQuery("FOO", Seq("FOO"), Nil)
    validateQuery(" foo ", Seq("foo"), Nil)
    validateQuery("foo bar", Seq("foo", "bar"), Nil)
    validateQuery("org:gilt", Nil, Seq("gilt"))
    validateQuery("org:gilt foo bar", Seq("foo", "bar"), Seq("gilt"))
    validateQuery("baz org:gilt foo bar", Seq("baz", "foo", "bar"), Seq("gilt"))
    validateQuery("baz org:gilt org:bryzek foo bar", Seq("baz", "foo", "bar"), Seq("gilt", "bryzek"))

    QueryParser("") must be(None)
    QueryParser(" ") must be(None)
  }

  "Part" must {

    "parses text" in {
      Part("foo") must be(Seq(Part.Text("foo")))
      Part("foo bar") must be(Seq(Part.Text("foo"), Part.Text("bar")))
    }

    "parses org key" in {
      Part("org:foo") must be(Seq(Part.OrgKey("foo")))
    }

    "case insensitive on org label" in {
      Part("ORG:FOO") must be(Seq(Part.OrgKey("FOO")))
    }

    "parses org key w/ nested colon" in {
      Part("org:foo:bar") must be(Seq(Part.OrgKey("foo:bar")))
    }

    "leaves unknown keys as text" in {
      Part("foo:bar") must be(Seq(Part.Text("foo:bar")))
    }

    "empty string raises error" in {
      intercept[AssertionError] {
        Part(" ")
      }.getMessage must be("assertion failed: Value must be trimmed")
    }
  }
}
mbryzek/apidoc
api/test/lib/query/QuerySpec.scala
Scala
mit
1,790
package io.toolsplus.atlassian.connect.play.ws

import java.net.URI

import io.lemonlabs.uri.Url
import io.toolsplus.atlassian.connect.play.api.models.AtlassianHost
import io.toolsplus.atlassian.connect.play.auth.jwt.symmetric.JwtGenerator
import io.toolsplus.atlassian.connect.play.ws.jwt.JwtSignatureCalculator
import javax.inject.Inject
import play.api.libs.ws.{WSClient, WSRequest, WSSignatureCalculator}

/**
  * A helper class for obtaining pre-configured WSRequests to make authenticated requests to Atlassian hosts.
  *
  * To make requests using JWT, the app must specify the authentication type `jwt` in its app descriptor.
  *
  * To obtain a WSRequest using JWT authentication, use authenticatedAsAddon():
  * {{{
  * class MyRestClient @Inject()(httpClient: AtlassianConnectHttpClient) {
  *
  *   def fetchIssue(issueKey: String): Future[WSResponse] = {
  *     httpClient.authenticatedAsAddon(s"/rest/api/2/issue/{issueKey}").get
  *   }
  * }
  * }}}
  */
class AtlassianConnectHttpClient @Inject()(ws: WSClient, jwtGenerator: JwtGenerator) {

  def authenticatedAsAddon(url: String)(implicit host: AtlassianHost): WSRequest =
    request(url, JwtSignatureCalculator(jwtGenerator))

  private def request(url: String, signatureCalculator: WSSignatureCalculator)(implicit host: AtlassianHost) = {
    val requestUri = URI.create(url)
    val absoluteUrl =
      if (!requestUri.isAbsolute) absoluteRequestUrl(requestUri, host).toString
      else url
    ws.url(absoluteUrl).sign(signatureCalculator)
  }

  private def absoluteRequestUrl(requestUri: URI, host: AtlassianHost): URI = {
    val baseUrl = Url.parse(host.baseUrl)
    val requestUrl = Url.parse(requestUri.toString)
    URI.create(
      baseUrl
        .withPath(baseUrl.path.addParts(requestUrl.path.parts))
        .withQueryString(requestUrl.query)
        .withFragment(requestUrl.fragment)
        .toString)
  }
}
toolsplus/atlassian-connect-play
modules/core/app/io/toolsplus/atlassian/connect/play/ws/AtlassianConnectHttpClient.scala
Scala
apache-2.0
1,958
package scwebapp.util

import scutil.core.implicits.*

import scwebapp.data.*

object AcceptanceUtil {
  @SuppressWarnings(Array("org.wartremover.warts.TraversableOps"))
  def acceptance[T](matches:Seq[T])(extract:T=>Option[(Int,QValue)]):Option[QValue] =
    matches
    .mapFilter(extract)
    // get the highest rank
    .groupBy { case (level, qvalue) => level }
    .toVector
    .sortBy { case (level, pairs) => level }
    .lastOption
    .map(
      // find maximum QValue in the highest rank
      _._2.map(_._2).max
    )
}
ritschwumm/scwebapp
modules/core/src/main/scala/scwebapp/util/AcceptanceUtil.scala
Scala
bsd-2-clause
510
/**
 * Copyright (c) 2013-2015  Patrick Nicolas - Scala for Machine Learning - All rights reserved
 *
 * The source code in this file is provided by the author for the sole purpose of illustrating the
 * concepts and algorithms presented in "Scala for Machine Learning". It should not be used to
 * build commercial applications.
 * ISBN: 978-1-783355-874-2 Packt Publishing.
 * Unless required by applicable law or agreed to in writing, software is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *
 * Version 0.98
 */
package org.scalaml.supervised

/**
 * This package object contains the classes implementing the conditional random fields
 * for modeling sequential data using the Indian Institute of Technology, Bombay (IITB):<br>
 * - Iterator to generate sequential data for the condition random field <b>CrfSeqIter</b><br>
 * - Configuration of the conditional random fields with L2 regularization <b>CrfConfig</b><br>
 * - Definition of data sequence used in training of Conditional Random fields
 * <b>CrfTrainingSet</b><br>
 * - Implementation of the Conditional Random fields using the iitb CRF library <b>Crf</b><br>
 * - Definition of a CRF model <b>CrfModel</b>
 * @see iitb CRF library http://sourceforge.net/projects/crf/
 * @note Scala for Machine Learning Chapter 7 Sequential data models / Conditional random fields
 */
package object crf { }

// --------------------------------------- EOF -----------------------------------------
batermj/algorithm-challenger
books/cs/machine-learning/scala-for-machine-learning/1rst-edition/original-src-from-the-book/src/main/scala/org/scalaml/supervised/crf/package.scala
Scala
apache-2.0
1,544
package com.signalcollect.triplerush

import org.scalatest.FlatSpec
import org.scalatest.prop.Checkers
import org.scalacheck.Arbitrary
import com.signalcollect.util.TestAnnouncements

class IgnoredBindingsSpec extends FlatSpec with Checkers with TestAnnouncements {

  val s1 = 1
  val s2 = 2
  val s3 = 3

  val o1 = 101
  val o2 = 102
  val o3 = 103
  val o4 = 104
  val o5 = 105
  val o6 = 106
  val o7 = 107
  val o8 = 108
  val o9 = 109
  val o10 = 110
  val o11 = 111

  val p1 = 1001
  val p2 = 1002
  val p3 = 1003
  val p4 = 1004
  val p5 = 1005

  "ChildIdsForPattern" should "correctly return all the predicates from the root vertex" in {
    val tr = TripleRush()
    try {
      tr.addEncodedTriple(s1, p1, o1)
      tr.addEncodedTriple(s2, p1, o2)
      tr.addEncodedTriple(s1, p2, o3)
      tr.addEncodedTriple(s1, p2, o4)
      tr.addEncodedTriple(s3, p2, o10)
      tr.addEncodedTriple(s2, p3, o5)
      tr.addEncodedTriple(o5, p4, o6)
      tr.addEncodedTriple(o4, p4, o7)
      tr.addEncodedTriple(o3, p4, o8)
      tr.addEncodedTriple(o10, p4, o11)
      tr.addEncodedTriple(o3, p5, o9)
      tr.addEncodedTriple(o10, p5, o9)
      tr.prepareExecution
      val predicates = tr.childIdsForPattern(EfficientIndexPattern(0, 0, 0)).toSet
      assert(predicates === Set(p1, p2, p3, p4, p5))
    } finally {
      tr.shutdown
    }
  }

  "An index query" should "be able to retrieve all predicates" in {
    val tr = TripleRush()
    try {
      tr.addEncodedTriple(s1, p1, o1)
      tr.addEncodedTriple(s2, p1, o2)
      tr.addEncodedTriple(s1, p2, o3)
      tr.addEncodedTriple(s1, p2, o4)
      tr.addEncodedTriple(s3, p2, o10)
      tr.addEncodedTriple(s2, p3, o5)
      tr.addEncodedTriple(o5, p4, o6)
      tr.addEncodedTriple(o4, p4, o7)
      tr.addEncodedTriple(o3, p4, o8)
      tr.addEncodedTriple(o10, p4, o11)
      tr.addEncodedTriple(o3, p5, o9)
      tr.addEncodedTriple(o10, p5, o9)
      tr.prepareExecution
      val predicates = tr.childIdsForPattern(EfficientIndexPattern(0, 0, 0))
      assert(predicates.toSet === Set(p1, p2, p3, p4, p5))
    } finally {
      tr.shutdown
    }
  }

  def getBindingsFor(variable: Int, bindings: Traversable[Array[Int]]): Set[Int] = {
    val allBindings: List[Map[Int, Int]] =
      bindings.toList.map(bindingsToMap(_).map(entry => (entry._1, entry._2)))
    val listOfSetsOfKeysWithVar: List[Set[Int]] = allBindings.map {
      bindings: Map[Int, Int] => bindings.filterKeys(_ == variable).values.toSet
    }
    listOfSetsOfKeysWithVar.foldLeft(Set[Int]())(_ union _)
  }

  def bindingsToMap(bindings: Array[Int]): Map[Int, Int] = {
    (((-1 to -bindings.length by -1).zip(bindings))).toMap
  }
}
hicolour/triplerush
src/test/scala/com/signalcollect/triplerush/IngoredBindingsSpec.scala
Scala
apache-2.0
2,678
/*
 * Copyright (c) 2018 OVO Energy
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
package com.ovoenergy.comms.model

import java.time.ZonedDateTime
import java.time.format.DateTimeFormatter

import org.scalacheck.Prop._
import org.scalacheck._

import scala.util.Try

object ArbitrariesSpec extends Properties("Arbitraries") with Arbitraries {

  property("Metadata.createdAt should be parsable as ZonedDateTime") = forAll { metadata: Metadata =>
    Try(ZonedDateTime.parse(metadata.createdAt, DateTimeFormatter.ISO_ZONED_DATE_TIME)).isSuccess
  }

  property("ContactDetails should have at least one of the contact detail") = forAll {
    contactDetails: ContactDetails =>
      contactDetails.emailAddress.isDefined || contactDetails.phoneNumber.isDefined || contactDetails.postalAddress.isDefined
  }

  property("Metadata.hierarchy should have the same traceToken") = forAll { metadata: Metadata =>
    metadata.hierarchy.map(_.traceToken).toSet.size == 1
  }

  property("MetadataV2.sourceMetadata should have the same traceToken") = forAll { metadata: MetadataV2 =>
    metadata.hierarchy.map(_.traceToken).toSet.size == 1
  }

  property("MetadataV2 root parent should be a triggered one") = forAll { metadata: MetadataV2 =>
    metadata.hierarchy.lastOption.exists(x => x.source == x.triggerSource)
  }

  property("MetadataV3.sourceMetadata should have the same traceToken") = forAll { metadata: MetadataV3 =>
    metadata.hierarchy.map(_.traceToken).toSet.size == 1
  }

  property("MetadataV3 root parent should be a triggered one") = forAll { metadata: MetadataV3 =>
    metadata.hierarchy.lastOption.exists(x => x.source == x.triggerSource)
  }

  property("TriggeredV3 should have only one metadata") = forAll { triggered: TriggeredV3 =>
    triggered.metadata.hierarchy.lengthCompare(1) == 0
  }

  property("TriggeredV3.metadata should have source equal to triggeredSource") = forAll { triggered: TriggeredV3 =>
    triggered.metadata.source == triggered.metadata.triggerSource
  }

  property("TriggeredV3.deliverAt if present should be after createdAt") = forAll { triggered: TriggeredV3 =>
    triggered.deliverAt.forall(_.isAfter(triggered.metadata.createdAt))
  }

  property("TriggeredV3.expireAt if present should be after createdAt") = forAll { triggered: TriggeredV3 =>
    triggered.expireAt.forall(_.isAfter(triggered.metadata.createdAt))
  }

  property("TriggeredV4 should have only one metadata") = forAll { triggered: TriggeredV4 =>
    triggered.metadata.hierarchy.lengthCompare(1) == 0
  }

  property("TriggeredV4.metadata should have source equal to triggeredSource") = forAll { triggered: TriggeredV4 =>
    triggered.metadata.source == triggered.metadata.triggerSource
  }

  property("TriggeredV4.deliverAt if present should be after createdAt") = forAll { triggered: TriggeredV4 =>
    triggered.deliverAt.forall(_.isAfter(triggered.metadata.createdAt))
  }

  property("TriggeredV4.expireAt if present should be after createdAt") = forAll { triggered: TriggeredV4 =>
    triggered.expireAt.forall(_.isAfter(triggered.metadata.createdAt))
  }
}
ovotech/comms-kafka-messages
modules/core/src/test/scala/com/ovoenergy/comms/model/ArbitrariesSpec.scala
Scala
mit
4,170
package org.apache.spark.core.utils

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{Path, FileSystem}

/**
 * Created by raduchilom on 22/03/15.
 */
object HdfsUtils {

  def copyJarFromHdfs(hdfsPath: String, outputFolder: String) = {

//    if(!config.hasPath("hdfs.namenode")){
//      println("ERROR: HDFS NameNode is not set in application.conf!")
//      throw new Exception("HDFS NameNode is not set in application.conf!")
//    }

    val conf = new Configuration()
//    conf.set("fs.defaultFS", getValueFromConfig(config, "hdfs.namenode", ""))
    conf.set("fs.defaultFS", hdfsPath)
    val hdfsFileSystem = FileSystem.get(conf)

    hdfsFileSystem.copyToLocalFile(new Path(hdfsPath), new Path(outputFolder))
  }
}
linzhe/matrix
src/main/scala/org/apache/spark/core/utils/HdfsUtils.scala
Scala
apache-2.0
771
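A hypothetical call site for the helper above (the namenode URI and local folder are placeholder values, not taken from the repository):

// Copies a jar from HDFS to a local folder; fs.defaultFS is derived from the same path.
HdfsUtils.copyJarFromHdfs("hdfs://namenode:8020/jars/app.jar", "/tmp/jars")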
package nibbler

import nibbler.evaluation.NumericalDifferentiator
import org.apache.spark.rdd.RDD
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.mock.MockitoSugar

@RunWith(classOf[JUnitRunner])
class NumericalDifferentiatorTest extends FunSuite with ShouldMatchers with MockitoSugar with SparkContextAware {

  test("accepts 'backward' as differentiator strategy") {
    try {
      NumericalDifferentiator("backward", 0, 1)
    } catch {
      case _: Exception => fail("shouldn't throw an exception!")
    }
  }

  test("accepts 'central' as differentiator strategy") {
    try {
      NumericalDifferentiator("central", 0, 1)
    } catch {
      case _: Exception => fail("shouldn't throw an exception!")
    }
  }

  test("indexes of variables cannot be the same") {
    intercept[IllegalArgumentException] {
      NumericalDifferentiator("central", 1, 1)
    }
  }

  test("validates input data set has at least two variables") {
    // Given
    val differentiator = NumericalDifferentiator("backward", 0, 1)
    val input: RDD[Seq[Double]] = sparkContext.parallelize(List(List(10.0, 20.0)))

    // Then
    try {
      differentiator.partialDerivative(input)
    } catch {
      case _: Exception => fail("no exception should be thrown")
    }
  }

  test("accepts data set with two variables") {
    // Given
    val differentiator = NumericalDifferentiator("backward", 0, 1)
    val input: RDD[Seq[Double]] = sparkContext.parallelize(List(List(10.0)))

    // Then
    intercept[IllegalArgumentException] {
      differentiator.partialDerivative(input)
    }
  }

  test("backward: differentiates the rdd according to formula") {
    // Given
    val differentiator = NumericalDifferentiator("backward", 0, 1)
    val input: RDD[Seq[Double]] = sparkContext.parallelize(List(List(10.0, 1.0), List(20.0, 3.0)))

    // When
    val result = differentiator.partialDerivative(input).collect()

    // Then
    result should equal(Array(5.0))
  }

  test("central: differentiates the rdd according to formula") {
    // Given
    val differentiator = NumericalDifferentiator("central", 0, 1)
    val input: RDD[Seq[Double]] = sparkContext.parallelize(List(List(10.0, 1.0), List(100.0, 100.0), List(20.0, 5.0)))

    // When
    val result = differentiator.partialDerivative(input).collect()

    // Then
    result should equal(Array(2.5))
  }
}
pkoperek/nibbler
src/test/scala/nibbler/NumericalDifferentiatorTest.scala
Scala
gpl-3.0
2,474
package com.phasmid.laScala.values // We really do need the following: import com.phasmid.laScala.values.Rational.RationalHelper import com.phasmid.laScala.values.Rational.RationalHelper import org.scalatest.{FlatSpec, Matchers} import scala.language.postfixOps /** * @author scalaprof */ class RationalSpec extends FlatSpec with Matchers { import com.phasmid.laScala.values.FiniteIntegral.LongIsFiniteIntegral "0" should "be OK" in { Rational(0) should not be null } it should "use implicit conversion" in { val r: Rational[Long] = 0 r shouldBe Rational.zero } it should "be zero" in { Rational(0) shouldBe Rational.zero } it should "be whole" in { Rational.zero shouldBe 'whole } it should "equal 0" in { Rational.zero.toInt should be(0) } it should "equal infinity when inverted" in { Rational.zero.reciprocal shouldBe 'infinity } it should "equal BigDecimal.zero" in { Rational.zero.toBigDecimal.isSuccess shouldBe true Rational.zero.toBigDecimal.get shouldBe BigDecimal(0) } it should "equal r when added to r" in { val r = Rational[Long](22, 7) // we could choose anything here (Rational.zero + r) should be(r) } it should "equal infinity when r-interpolator has 0 denominator" in { r"1/0" shouldBe 'infinity } "1/2" should "be OK" in { Rational.half * 2 shouldBe Rational.one } it should "equal half" in { Rational("1/2") shouldBe Rational.half } it should "be half of one" in { Rational.half * 2 shouldBe Rational.one } it should "be OK using r-interpolator" in { r"1/2" * 2 shouldBe Rational.one } it should "be OK using r-interpolator with variable" in { val denominator = 2 r"1/$denominator" * denominator shouldBe Rational.one } it should "yield 0 for floor" in { Rational.half.floor shouldBe 0L } "1" should "be OK" in { Rational(1) } it should "be one" in { Rational(1) shouldBe Rational.one } it should "be one as a String" in { Rational("1") shouldBe Rational.one } it should "be positive" in { Rational.one.signum shouldBe 1 } it should "be whole" in { Rational.one shouldBe 'whole } it should "be unity" in { Rational.one shouldBe 'unity } it should "equal 1" in { Rational.one.toInt should be(1) } it should "not equal infinity when inverted" in { Rational.one.reciprocal should not be 'infinity } it should "equal itself when inverted" in { Rational.one.reciprocal should be(Rational.one) } it should "equal BigDecimal.one" in { Rational.one.toBigDecimal.isSuccess shouldBe true Rational.one.toBigDecimal.get shouldBe BigDecimal(1) } it should "equal r when multiplied by r" in { val r = Rational[Long](22, 7) // we could choose anything here (Rational.one * r) should be(r) } it should "be -1 when negated" in { val r = Rational.one -r shouldBe (Rational.one * -1) r.signum shouldBe 1 } "power" should "work" in { val ten = Rational.ten ten.power(2) should equal(Rational(100)) import FiniteIntegral.LongIsFiniteIntegral ten.power(10) should equal(Rational(10000000000L)) } "10" should "be OK" in { Rational(10) } it should "be ten" in { Rational(10) shouldBe Rational.ten } it should "be whole" in { Rational.ten shouldBe 'whole } it should "not be zero" in { Rational.ten should not be 'zero } it should "equal 10" in { Rational.ten.toInt should be(10) } it should "equal 5*2" in { (Rational.ten / 2) should be(Rational(5)) } it should "equal 10*1" in { (Rational.ten / 10) should be(Rational.one) } it should "equal BigDecimal(10)" in { Rational.ten.toBigDecimal.isSuccess shouldBe true Rational.ten.toBigDecimal.get shouldBe BigDecimal(10) } it should "equal a million when raised to 6th power" in { (Rational.ten ^ 6) should 
be(Rational(1000000)) } it should "barf when raised to 10th power" in { val thrown = the[FiniteIntegralException] thrownBy Rational.ten.power(10).toInt thrown.getMessage should equal("10000000000 is out of range for class scala.Int$") } "2/3" should "be OK" in { Rational(2, 3) } it should "equal -1/3 when added to -1" in { Rational(2, 3) - Rational.one should be(Rational(-1, 3)) } it should "be less than 1" in { Rational(2, 3).compare(Rational.one) should be(-1) } it should "not be whole" in { Rational(2, 3) should not be 'whole } it should "equal 2 when multiplied by 3" in { (Rational(2, 3) * 3 toInt) should be(2) } it should "equal 3/2 when inverted" in { Rational(2, 3).reciprocal should be(Rational(3, 2)) } it should "equal 5/3 when added to 1" in { (Rational.one + Rational(2, 3)) should be(Rational(5, 3)) } it should "equal 4/9 when multiplied by itself" in { val r = Rational(2, 3) (r * r) should be(Rational(4, 9)) } it should "equal 4/9 when squared" in { (Rational(2, 3) ^ 2) should be(Rational(4, 9)) } // XXX: this fails with 2.10 // it should "barf when toInt invoked" in { // an[RationalException] should be thrownBy Rational(2, 3).toInt // val thrown = the[Exception] thrownBy Rational(2, 3).toInt // thrown.getMessage should equal("2/3 is not Whole") // } "2/4" should "not be OK" in { val thrown = the[IllegalArgumentException] thrownBy new Rational(2, 4) thrown.getMessage should equal("requirement failed: Rational(2,4): arguments have common factor: 2") } it should "be OK via normalize" in { Rational.normalize(2, 4) } "Floating Point Problem" should "be OK" in { val x = Rational(1, 10) + Rational.normalize(2, 10) val y = x * 10 / 3 y shouldBe 'unity } "BigDecimal" should "convert to Rational" in { val pi = BigDecimal(math.Pi) val r = Rational(pi) r.toDouble should be(math.Pi) } it should "have a floor of 3" in { Rational(BigDecimal(math.Pi)).floor shouldBe 3 } "toString" should "be decimal when not exact: pi" in { val pi = Rational(BigDecimal(math.Pi)) pi.toString() should be("3.141592653589793") } "Rational(String)" should "work for 0.1" in { val r = Rational("0.1") r should be(Rational(1, 10)) } it should "work for 1.0e6" in { Rational("1.0e6") should be(Rational(10).power(6)) Rational("1.0E6") should be(Rational(10).power(6)) Rational("-1.0E6") should be(Rational(10).power(6).negate) } "sorting" should "work" in { val r = List(Rational(1, 2), Rational(2, 3), Rational(1, 3)) val x = r.sorted x.head shouldBe Rational(1, 3) x.tail.head shouldBe Rational(1, 2) x.tail.tail.head shouldBe Rational(2, 3) } "r-interpolator" should "work for 1/-2147483648 and -1/-2147483648" in { r"1/-2147483648".signum shouldBe -1 r"-1/-2147483648".signum shouldBe 1 r"2147483647/0" shouldBe Rational.infinity } behavior of "Rational as Fractional" val f: Fractional[Rational[Long]] = implicitly[Fractional[Rational[Long]]] it should "support zero" in { f.zero shouldBe Rational.zero } it should "support one" in { f.one shouldBe Rational.one } it should "support fromInt" in { f.fromInt(0) shouldBe Rational.zero f.fromInt(1) shouldBe Rational.one f.fromInt(-1) shouldBe Rational.one.negate } it should "support plus" in { f.plus(Rational.one, Rational.one) shouldBe Rational(2) f.plus(Rational.zero, Rational.one) shouldBe Rational.one f.plus(Rational.zero, Rational.zero) shouldBe Rational.zero } it should "support times" in { f.times(Rational.one, Rational.one) shouldBe Rational.one f.times(Rational.one, Rational.zero) shouldBe Rational.zero f.times(Rational.zero, Rational.zero) shouldBe Rational.zero // an 
[RationalException] should be thrownBy f.times(Rational(Long.MaxValue),2) } it should "support div" in { f.div(Rational.one, Rational.one) shouldBe Rational.one f.div(Rational.one, Rational.zero) shouldBe Rational.infinity f.div(Rational.zero, Rational.one) shouldBe Rational.zero f.div(Rational.zero, Rational.zero).isNaN shouldBe true } it should "support compare" in { f.compare(Rational.one, Rational.one) shouldBe 0 f.compare(Rational.one, Rational.zero) shouldBe 1 f.compare(Rational.zero, Rational.one) shouldBe -1 f.compare(Rational.zero, Rational.zero) shouldBe 0 } it should "support toLong" in { f.toLong(Rational.one) shouldBe 1L val half: Rational[Long] = Rational.half an[RationalException] should be thrownBy f.toLong(half) } it should "support toInt" in { f.toInt(Rational.one) shouldBe 1L an[RationalException] should be thrownBy f.toInt(Rational.half) an[FiniteIntegralException] should be thrownBy f.toInt(Rational(Long.MaxValue)) } }
rchillyard/LaScala
src/test/scala/com/phasmid/laScala/values/RationalSpec.scala
Scala
lgpl-2.1
8,819
package org.jetbrains.plugins.scala.macroAnnotations import scala.annotation.StaticAnnotation import scala.language.experimental.macros import scala.reflect.macros.whitebox /** * This annotation generates code that caches result of the function. Caches are invalidated on `dependencyItem` change. * * Computation is also guarded against recursive calls on the same data. * See * `org.jetbrains.plugins.scala.caches.RecursionManager` * `org.jetbrains.plugins.scala.caches.CachesUtil.handleRecursiveCall` * `org.jetbrains.plugins.scala.macroAnnotations.CachedMacroUtil.handleProbablyRecursiveException` * * Author: Svyatoslav Ilinskiy, Nikolay.Tropin * Date: 9/28/15. */ class CachedWithRecursionGuard(element: Any, defaultValue: => Any, dependecyItem: Object) extends StaticAnnotation { def macroTransform(annottees: Any*): Any = macro CachedWithRecursionGuard.cachedWithRecursionGuardImpl } object CachedWithRecursionGuard { import CachedMacroUtil._ def cachedWithRecursionGuardImpl(c: whitebox.Context)(annottees: c.Tree*): c.Expr[Any] = { import c.universe._ implicit val x: c.type = c def parameters: (Tree, Tree, Tree) = c.prefix.tree match { case q"new CachedWithRecursionGuard(..$params)" if params.length == 3 => (params.head, params(1), modCountParamToModTracker(c)(params(2), params.head)) case _ => abort("Wrong annotation parameters!") } val (element, defaultValue, modTracker) = parameters annottees.toList match { case DefDef(mods, name, tpParams, paramss, retTp, rhs) :: Nil => if (retTp.isEmpty) { abort("You must specify return type") } //function parameters val flatParams = paramss.flatten val parameterTypes = flatParams.map(_.tpt) val parameterNames: List[c.universe.TermName] = flatParams.map(_.name) val hasParams = flatParams.nonEmpty //generated types val dataType = if (hasParams) tq"(..$parameterTypes)" else tq"Unit" val resultType = box(c)(retTp) val elementType = psiElementType //generated names val keyId = c.freshName(name.toString + "cacheKey") val cacheStatsName = TermName(c.freshName("cacheStats")) val analyzeCaches = CachedMacroUtil.analyzeCachesEnabled(c) val defdefFQN = q"""getClass.getName ++ "." 
++ ${name.toString}""" val computedValue = generateTermName("computedValue") val guard = generateTermName("guard") val defValueName = generateTermName("defaultValue") val elemName = generateTermName("element") val dataName = generateTermName("data") val keyVarName = generateTermName("key") val holderName = generateTermName("holder") val resultName = generateTermName("result") val dataForGuardName = generateTermName("dataForGuard") val dataValue = if (hasParams) q"(..$parameterNames)" else q"()" val getOrCreateCachedHolder = if (hasParams) q"$cachesUtilFQN.getOrCreateCachedMap[$elementType, $dataType, $resultType]($elemName, $keyVarName, () => $modTracker)" else q"$cachesUtilFQN.getOrCreateCachedRef[$elementType, $resultType]($elemName, $keyVarName, () => $modTracker)" val getFromHolder = if (hasParams) q"$holderName.get($dataName)" else q"$holderName.get()" val updateHolder = if (hasParams) q"$holderName.putIfAbsent($dataName, $resultName)" else q"$holderName.compareAndSet(null, $resultName)" val dataForGuardType = if (hasParams) tq"($elementType, $dataType)" else tq"$elementType" val dataForRecursionGuard = if (hasParams) q"($elemName, $dataName)" else q"$elemName" val actualCalculation = transformRhsToAnalyzeCaches(c)(cacheStatsName, retTp, rhs) val guardedCalculation = withUIFreezingGuard(c)(actualCalculation) val withProbablyRecursiveException = handleProbablyRecursiveException(c)(elemName, dataName, keyVarName, guardedCalculation) val calculationWithAllTheChecks = doPreventingRecursion(c)(withProbablyRecursiveException, guard, dataForGuardName, retTp) val updatedRhs = q""" ${if (analyzeCaches) q"$cacheStatsName.aboutToEnterCachedArea()" else EmptyTree} val $elemName = $element val $dataName = $dataValue val $dataForGuardName = $dataForRecursionGuard val $keyVarName = ${getOrCreateKey(c, hasParams)(q"$keyId", dataType, resultType)} def $defValueName: $resultType = $defaultValue val $holderName = $getOrCreateCachedHolder val fromCachedHolder = $getFromHolder if (fromCachedHolder != null) return fromCachedHolder val $guard = $recursionGuardFQN[$dataForGuardType, $cachedValueProviderResultTypeFQN[$resultType]]($keyVarName.toString) if ($guard.checkReentrancy($dataForGuardName)) return $cachesUtilFQN.handleRecursiveCall($elemName, $dataName, $keyVarName, $defValueName) val stackStamp = $recursionManagerFQN.markStack() val $resultName: $resultType = $calculationWithAllTheChecks if (stackStamp.mayCacheNow()) { $updateHolder $getFromHolder } else $resultName """ val updatedDef = DefDef(mods, name, tpParams, paramss, retTp, updatedRhs) val res = q""" ${if (analyzeCaches) q"private val $cacheStatsName = $cacheStatisticsFQN($keyId, $defdefFQN)" else EmptyTree} ..$updatedDef """ println(res) c.Expr(res) case _ => abort("You can only annotate one function!") } } }
jastice/intellij-scala
scala/macros/src/org/jetbrains/plugins/scala/macroAnnotations/CachedWithRecursionGuard.scala
Scala
apache-2.0
5,749
package com.seanshubin.detangler.report

import com.seanshubin.detangler.model.Standalone

trait GraphTemplateRules {
  def generate(graphTemplate: HtmlElement, standalone: Standalone, graphRenderResult: GraphRenderResult): HtmlElement
}
SeanShubin/detangler
report/src/main/scala/com/seanshubin/detangler/report/GraphTemplateRules.scala
Scala
unlicense
238
/* * Copyright 2001-2013 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest.matchers // prevents unnecessary recompilation class ShouldAndMustSpec {} // prevents unnecessary recompilation
dotty-staging/scalatest
scalatest-test/src/test/scala/org/scalatest/matchers/ShouldAndMustSpec.scala
Scala
apache-2.0
742
package elea import java.io.File import elea.Parser.{DataDef, StatementHandler, TermDef} import elea.rewrite.Simplifier import scopt.OptionParser /** * Command-line interface main method */ object CLI { case class Config( recordStats: Boolean = false, fromFile: Option[File] = None) val programVersion = getClass.getPackage.getImplementationVersion val manifesto = s"Elea v$programVersion - a supercompiler for theorem provers" val configParser = new OptionParser[Config]("elea") { head(manifesto) opt[Unit]('s', "stats") .action((_, config) => config.copy(recordStats = true)) .text("print out performance statistics in comments") opt[File]('f', "input-file") .action((file, config) => config.copy(fromFile = Some(file))) } val supercompiler = Simplifier.supercompilation class CLIStatementHandler(config: Config) extends StatementHandler { override def dataDef(dataDef: DataDef): DataDef = { println(dataDef.toLisp()) dataDef } override def termDef(termDef: TermDef): TermDef = { val startTime = System.nanoTime() val simplifiedDef = termDef.modifyTerm(supercompiler.run) val finishTime = System.nanoTime() println(simplifiedDef.toLisp()) if (config.recordStats) { val timeTakenMillis = (finishTime - startTime) / (1000 * 1000) println(s";; took ${timeTakenMillis}ms") } simplifiedDef } } def main(args: Array[String]): Unit = { val config = configParser .parse(args, Config()) .getOrElse(throw new IllegalArgumentException("Bad command line parameters")) require(config.fromFile.isDefined, "Please provide an input file") val input = io.Source.fromFile(config.fromFile.get).mkString run(config, input) } def run(config: Config, inputProgram: String): Unit = { implicit val startingProgram = Program.empty val handler = new CLIStatementHandler(config) println(s";; $manifesto") val startTime = System.currentTimeMillis() Parser.parseAll(inputProgram, handler) val timeTakenMillis = System.currentTimeMillis() - startTime println(s";; finished (took ${timeTakenMillis}ms)") } }
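// Illustrative only (not part of the repo): exercising the scopt parser defined above
// without running the supercompiler; the file name "defs.elea" is a made-up placeholder.
object CLIConfigExample extends App {
  val parsed = elea.CLI.configParser.parse(Seq("--input-file", "defs.elea", "--stats"), elea.CLI.Config())
  // Expected: Some(Config(recordStats = true, fromFile = Some(new java.io.File("defs.elea"))))
  println(parsed)
}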
wsonnex/elea
src/main/scala/elea/CLI.scala
Scala
mit
2,281
/* * * * Copyright 2014 websudos ltd. * * * * Licensed under the Apache License, Version 2.0 (the "License"); * * you may not use this file except in compliance with the License. * * You may obtain a copy of the License at * * * * http://www.apache.org/licenses/LICENSE-2.0 * * * * Unless required by applicable law or agreed to in writing, software * * distributed under the License is distributed on an "AS IS" BASIS, * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * * limitations under the License. * */ package com.websudos.phantom.zookeeper import java.net.InetSocketAddress import org.slf4j.{Logger, LoggerFactory} import com.datastax.driver.core.{Cluster, Session} import com.twitter.conversions.time._ import com.twitter.finagle.exp.zookeeper.ZooKeeper import com.twitter.util.{Duration, Await, Try} trait ZookeeperManager extends CassandraManager { /** * Interestingly enough binding to a port with a simple java.net.Socket or java.net.ServerSocket to check if a local ZooKeeper exists is not enough in this * day and age. We take a slightly different approach, by performing a single check when the default address is initialised. We spawn an actual ZooKeeper * Client using the finagle-zookeeper integration and attempt to connect. If the initial ping is successful, we conclude a ZooKeeper is found. Otherwise, * we conclude it doesn't exist. * * At present times the Phantom connectors are not capable of monitoring for state change system wide, e.g a move from a local ZooKeeper to an embedded and * so on, therefore this check can be done a single time, as any major state change in the system with regards to ZooKeeper going down would not affect * existing Cassandra connections and any failure in a Cassandra node is handled by the Datastax driver. */ protected[this] lazy val isLocalZooKeeperRunning: Boolean = { Try { val richClient = ZooKeeper.newRichClient(s"${defaultAddress.getHostName}:${defaultAddress.getPort}") Await.result(richClient.connect(), 2.seconds) }.toOption.nonEmpty } protected[this] val store: ClusterStore implicit val timeout: Duration def cluster: Cluster = store.cluster def session: Session = store.session val logger: Logger protected[zookeeper] val envString = "TEST_ZOOKEEPER_CONNECTOR" protected[this] val defaultAddress = new InetSocketAddress("0.0.0.0", 2181) } class DefaultZookeeperManager extends ZookeeperManager { val livePort = 9042 val embeddedPort = 9042 implicit val timeout: Duration = 2.seconds /** * This is the default way a ZooKeeper connector will obtain the HOST:IP port of the ZooKeeper coordinator(master) node. * The phantom testing utilities are capable of auto-generating a ZooKeeper instance if none is found running. * * A test instance is ephemeral with zero persistence, it will get created, populated and deleted once per test run. * Upon creation, the test instance will propagate the IP:PORT combo it found available to an environment variable. * By convention that variable is TEST_ZOOKEEPER_CONNECTOR. * * This method will try to read that variable and parse an {@link java.net.InetSocketAddress} from it. * If the environment variable is null or an InetSocketAddress cannot be parsed from it, the ZooKeeper default, localhost:2181 will be used. * @return The InetSocketAddress of the ZooKeeper master node. 
*/ def defaultZkAddress: InetSocketAddress = if (isLocalZooKeeperRunning) { defaultAddress } else { if (System.getProperty(envString) != null) { val inetPair: String = System.getProperty(envString) val split = inetPair.split(":") Try { logger.info(s"Using ZooKeeper settings from the $envString environment variable") logger.info(s"Connecting to ZooKeeper address: ${split(0)}:${split(1)}") new InetSocketAddress(split(0), split(1).toInt) } getOrElse { logger.warn(s"Failed to parse address from $envString environment variable with value: $inetPair") defaultAddress } } else { logger.info(s"No custom settings for Zookeeper found in $envString. Using localhost:2181 as default.") defaultAddress } } lazy val logger = LoggerFactory.getLogger("com.websudos.phantom.zookeeper") val store = DefaultClusterStore /** * This will initialise the Cassandra cluster connection based on the ZooKeeper connector settings. * It will connect to ZooKeeper, fetch the Cassandra sequence of HOST:IP pairs, and create a cluster + session for the mix. */ def initIfNotInited(keySpace: String) = store.initStore(keySpace, defaultZkAddress) } object DefaultZookeeperManagers { lazy val defaultManager = new DefaultZookeeperManager }
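// Minimal sketch (not from the repo) of overriding the address the manager resolves: the
// code above reads TEST_ZOOKEEPER_CONNECTOR as a JVM system property, so a HOST:PORT pair
// set before initialisation wins over the localhost:2181 fallback, provided no local
// ZooKeeper is already answering on the default address.
object ZookeeperAddressExample extends App {
  System.setProperty("TEST_ZOOKEEPER_CONNECTOR", "10.0.0.5:2181")
  // DefaultZookeeperManagers.defaultManager.initIfNotInited("my_keyspace") would now
  // fetch the Cassandra hosts via 10.0.0.5:2181 instead of the default address.
}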
nosheenzaza/phantom-data-centric
phantom-zookeeper/src/main/scala/com/websudos/phantom/zookeeper/ZookeeperManager.scala
Scala
gpl-2.0
4,893
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.example import org.apache.spark.util.{LocalSparkContext, SampleData} import org.scalatest.FunSuite class WikipediaTextSuite extends FunSuite with LocalSparkContext with SampleData { test("extract") { val filepath = getSampleDataFilePath() val rdd = sc.textFile(filepath) val text = WikipediaText.format(rdd) assert(text.count() === 100) assert(text.collect().apply(0).length === 71222) assert(text.collect().apply(1).length === 207) assert(text.collect().apply(98).length === 20662) assert(text.collect().apply(99).length === 7039) } test("format") { val filepath = getSampleDataFilePath() val rdd = sc.textFile(filepath) // check the number of sentences in the sample data val text = WikipediaText.format(rdd) val tokenizedSentences = WikipediaText.extract(text) assert(tokenizedSentences.count() === 10746) val collected = tokenizedSentences.collect() assert(collected(0).length === 17) assert(collected(1).length === 23) } test("extractNouns") { val filepath = getSampleDataFilePath() val rdd = sc.textFile(filepath) val text = WikipediaText.format(rdd) val nouns = WikipediaText.extractNoun(text) val collected = nouns.collect() assert(collected.length === 3300) } } class KuromojiTokenizerSuite extends FunSuite { test("tokenize010") { val text = "リーグ戦が採用されているATPツアー・ファイナルは大会規定で補欠2人が用意される。" + "原則的には年間獲得ポイントで大会出場権を得た上位8選手に次ぐ2選手が選ばれ、" + "規定で開幕前の公式会見への出席、1次リーグ終了まで欠場者が出た場合に備えることが義務付けられる。" val tokenizer = new KuromojiTokenizer val result = tokenizer.tokenize(text) assert(result.length === 78) assert(result.filter(_.getAllFeaturesArray.apply(0) == "名詞").length === 39) assert(result.filter(_.getAllFeaturesArray.apply(0) == "動詞").length === 13) } test("tokenize020") { val text = """{{出典の明記|date=2014年9月11日 (木) 10:37 (UTC)}}\\n{{WikipediaPage|ウィキペディアにおける著作権に" + "ついては、[[Wikipedia:著作権]]、[[Wikipedia:著作権問題]]、[[Wikipedia:ガイドブック 著作権に注意]]を" + "ご覧ください。}}\\n'''著作権'''(ちょさくけん)は、[[言語]]、[[音楽]]、[[絵画]]、[[建築]]、[[図形]]、" + "[[映画]]、[[写真]]、[[プログラム (コンピュータ)|コンピュータプログラム]]などの表現形式によって自らの'''" + "思想・感情を創作的に表現した[[著作物]]'''を排他的に支配する財産的な[[権利]]である。著作権は[[特許権]や" + "[[商標権]]にならぶ[[知的財産権]]の一つとして位置づけられている。\\n\\n[[著作者]]の権利には、人格的な権利" + "である[[著作者人格権]]と、財産的な権利である(狭義の)著作権とがある。両者を合わせて(広義の)著作権と" + "呼ぶ場合があるが、日本の[[著作権法]]では「著作権」という用語は狭義の財産的な権利を指して用いられており" + "(著作権法第17条第1項)、本項においても、狭義の意味で用いる。\\n\\n著作権の保護については、『[[文学的及び" + "美術的著作物の保護に関するベルヌ条約]]』(ベルヌ条約)、『[[万国著作権条約]]』、『[[著作権に関する" + "世界知的所有権機関条約]]』(WIPO著作権条約)、『[[知的所有権の貿易関連の側面に関する協定]]』(TRIPS協定)" + "などの条約が、保護の最低要件などを定めており、これらの条約の締約国が、条約上の要件を満たす形で、国内の" + "著作権保護法令を定めている。\\n\\n[[著作権者]]を表すコピーライトマーク「©」は、現在では、方式主義をとる" + "[[カンボジア]]以外では著作権の発生要件としての法的な意味はないが、著作権者をわかりやすく表すなどのために" + "広く使われている。\\n{{Main|#コピーライトマーク}}\\n\\n==権利としての特徴==\\n著作権は[[著作者]]に対して" + "付与される[[財産権]]の一種であり、著作者に対して、著作権の対象である著作物を排他的に利用する権利を認める" + 
"ものである。例えば、小説の作者は、その小説を排他的に出版、映画化、翻訳する権利を有しており、他人が著作者の" + "許諾なしに無断で出版、映画化、翻訳した場合には、著作権を侵害することになる。著作権は、多くの[[#支分権|" + "支分権]]から成り立っており、しばしば「権利の束」と呼ばれる。\\n\\n著作権は[[無体財産権]]であって、著作者が" + "作品の媒体たる有体物の[[所有権]]を他人に譲渡した場合でも、その行為によって著作権が消滅したり、移転したり" + "することはない。一方、無体物である著作権自体についても、その全部又は一部を譲渡することが可能である。" + "例えば、小説家は執筆原稿を出版者に譲渡しても、依然として著作者としての諸権利を有しているが、契約により" + "著作権自体を譲渡することもできる。なお、著作権は、譲渡のほかに、利用許諾によって他者に利用させることも" + "できる。\\n\\n著作権は'''相対的独占権'''あるいは排他権である。特許権や意匠権のような絶対的独占権ではない。""" val tokenizer = new KuromojiTokenizer val result = tokenizer.tokenize(text) assert(result.length === 888) assert(result.filter(_.getAllFeaturesArray.apply(0) == "名詞").length === 500) assert(result.filter(_.getAllFeaturesArray.apply(0) == "動詞").length === 56) val pronouns = result.map(_.getAllFeaturesArray.apply(0)).distinct assert(pronouns.length === 10) val first = result(0) assert(first.getSurfaceForm === "{{") assert(first.getAllFeaturesArray.apply(0) === "名詞") val surfaces = tokenizer.tokenizeWithFilter(text) val hoge = surfaces.filter(token => token.getAllFeaturesArray.apply(1) == "サ変接続") assert(surfaces.length === 643) } test("hasReading") { val text = "{{" val tokenizer = new KuromojiTokenizer val result = tokenizer.tokenize(text) assert(tokenizer.hasReading(result.apply(0)) === false) } }
yu-iskw/spark-word2vec-app
src/test/scala/org/apache/spark/example/WikipediaTextSuite.scala
Scala
apache-2.0
7,686
package io.buoyant.linkerd.protocol.http import com.twitter.finagle.Path import com.twitter.finagle.buoyant.Dst import com.twitter.finagle.http.Request import com.twitter.finagle.util.LoadService import io.buoyant.config.Parser import io.buoyant.linkerd.IdentifierInitializer import io.buoyant.linkerd.protocol.HttpIdentifierConfig import io.buoyant.router.RoutingFactory.IdentifiedRequest import io.buoyant.test.Awaits import org.scalatest.FunSuite class HeaderIdentifierConfigTest extends FunSuite with Awaits { test("sanity") { // ensure it doesn't totally blow up val _ = new HeaderIdentifierConfig().newIdentifier(Path.empty) } test("service registration") { assert(LoadService[IdentifierInitializer].exists(_.isInstanceOf[HeaderIdentifierInitializer])) } test("parse config") { val yaml = s"""|kind: io.l5d.header |header: my-header |""".stripMargin val mapper = Parser.objectMapper(yaml, Iterable(Seq(HeaderIdentifierInitializer))) val config = mapper.readValue[HttpIdentifierConfig](yaml).asInstanceOf[HeaderIdentifierConfig] val identifier = config.newIdentifier(Path.empty) val req = Request() req.headerMap.set("my-header", "/one/two/three") assert( await(identifier(req)).asInstanceOf[IdentifiedRequest[Request]].dst == Dst.Path(Path.read("/one/two/three")) ) } test("default header") { val yaml = s""" |kind: io.l5d.header """.stripMargin val mapper = Parser.objectMapper(yaml, Iterable(Seq(HeaderIdentifierInitializer))) val config = mapper.readValue[HttpIdentifierConfig](yaml).asInstanceOf[HeaderIdentifierConfig] val identifier = config.newIdentifier(Path.empty) val req = Request() req.headerMap.set("l5d-name", "/one/two/three") assert( await(identifier(req)).asInstanceOf[IdentifiedRequest[Request]].dst == Dst.Path(Path.read("/one/two/three")) ) } }
denverwilliams/linkerd
linkerd/protocol/http/src/test/scala/io/buoyant/linkerd/protocol/http/HeaderIdentifierConfigTest.scala
Scala
apache-2.0
1,948
/** ____ __ ____ ____ ____,,___ ____ __ __ ____ * ( _ \\ /__\\ (_ )(_ _)( ___)/ __) ( _ \\( )( )( _ \\ Read * ) / /(__)\\ / /_ _)(_ )__) \\__ \\ )___/ )(__)( ) _ < README.txt * (_)\\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt **/ package razie.tconf /** uniquely identifies a piece of specification, an object or an asset * * for identifying sections inside a larger document, the wpath will include like a section pointer (...#section) * * source can be domain plugin and connection ID inside the target realm, "plugin:conn" * wpath can be at minimum "class:id" or qualified "realm.class:id" * wpath could include a section Id, like "realm.class:id#section" * * @param source - the source system: inventory understands and delegates to (preferably URL) * @param wpath - unique long id of the spec * @param key - unique short id of the spec (within realm/cat) * @param realm - optionally identify a realm within the source (multi-tenancy) * @param ver - optionally identify a version of the spec * @param draft - optionally identify a certain temporary variant (i.e. autosaved by username) */ trait TSpecRef { def source: String def wpath: String def key: String def realm: String def category: String def ver: Option[String] def draft: Option[String] def ahref: Option[String] def toJson: Map[String, Any] = { Map( "source" -> source, "wpath" -> wpath, "realm" -> realm, "key" -> wpath, "class" -> category ) ++ ver.map(x => Map("ver" -> x) ).getOrElse(Map.empty) ++ draft.map(x => Map("draft" -> x) ).getOrElse(Map.empty) ++ ahref.map(x => Map("ahref" -> x) ).getOrElse(Map.empty) } } /** basic implmentation */ case class SpecRef( realm: String, wpath: String, key: String, source: String = "", ver: Option[String] = None, draft: Option[String] = None) extends TSpecRef { def ahref: Option[String] = None def category: String = wpath.replaceFirst(":.*", "") } /** * generic complete implementation for an asset ref * * (realm,plugin,conn) uniquely identify the domain and source * (cls,id,section,ver,draft) uniquely identify the asset inside there */ case class FullSpecRef( inventory: String, conn: String, cls: String, key: String, section: String, realm: String, ver: Option[String] = None, draft: Option[String] = None) extends TSpecRef { override def category: String = cls override def source: String = inventory + ":" + conn override def wpath: String = cls + ":" + key override def ahref: Option[String] = None override def toJson: Map[String, Any] = { Map( "inventory" -> inventory, "conn" -> conn, "source" -> source, "wpath" -> wpath, "realm" -> realm, "key" -> key, "class" -> category ) ++ ver.map(x => Map("ver" -> x) ).getOrElse(Map.empty) ++ draft.map(x => Map("draft" -> x) ).getOrElse(Map.empty) ++ ahref.map(x => Map("ahref" -> x) ).getOrElse(Map.empty) } } /** utilities */ object SpecRef { /** make a generic specref */ def make(realm: String, inventory: String, conn: String, cls: String, id: String, section: String = "") = { new FullSpecRef( inventory, conn, cls, id, section, realm ) } /** make a generic specref */ def fromJson(j: Map[String, Any]) = { new FullSpecRef( j.getOrElse("inventory", "").toString, j.getOrElse("conn", "").toString, j.getOrElse("class", "").toString, j.getOrElse("key", "").toString, j.getOrElse("section", "").toString, j.getOrElse("realm", "").toString ) } /** decompose into a full spec */ def full(ref: TSpecRef) = { val s1 = ref.source.split(":") val s2 = ref.wpath.split(":") val inv = if (s1.length > 1) s1(0) else "" val conn = if (s1.length > 1) s1(1) else s1(0) var 
cls = if (s2.length > 1) s2(0) else "" var id = if (s2.length > 1) s2(1) else s2(0) var sec = "" if (cls.contains(".")) { cls = cls.split('.')(1) } if (id.contains("#")) { sec = id.split('#')(1) id = id.split('#')(0) } new FullSpecRef( inv, conn, cls, id, sec, ref.realm, ref.ver, ref.draft ) } }
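// Round-trip sketch using the helpers above; the realm, inventory, connection and id
// values are invented for illustration.
import razie.tconf.SpecRef

object SpecRefExample extends App {
  val ref = SpecRef.make(realm = "rk", inventory = "diesel", conn = "default", cls = "Order", id = "o-42")
  println(ref.wpath)   // Order:o-42
  println(ref.source)  // diesel:default
  val parsed = SpecRef.fromJson(ref.toJson)
  println(parsed.key)  // o-42
}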
razie/diesel-rx
diesel/src/main/scala/razie/tconf/SpecRef.scala
Scala
apache-2.0
4,397
package views.html import play.templates._ import play.templates.TemplateMagic._ import play.api.templates._ import play.api.templates.PlayMagic._ import models._ import controllers._ import java.lang._ import java.util._ import scala.collection.JavaConversions._ import scala.collection.JavaConverters._ import play.api.i18n._ import play.core.j.PlayMagicForJava._ import play.mvc._ import play.data._ import play.api.data.Field import play.mvc.Http.Context.Implicit._ import views.html._ /**/ object stref extends BaseScalaTemplate[play.api.templates.HtmlFormat.Appendable,Format[play.api.templates.HtmlFormat.Appendable]](play.api.templates.HtmlFormat) with play.api.templates.Template1[Streference,play.api.templates.HtmlFormat.Appendable] { /**/ def apply/*1.2*/(s: Streference):play.api.templates.HtmlFormat.Appendable = { _display_ {import helper._ Seq[Any](format.raw/*1.18*/(""" """),format.raw/*3.1*/(""" """),_display_(Seq[Any](/*5.2*/main/*5.6*/ {_display_(Seq[Any](format.raw/*5.8*/(""" <h1>test: """),_display_(Seq[Any](/*7.16*/s/*7.17*/.structure.id)),format.raw/*7.30*/(""" </h1> """)))})),format.raw/*12.2*/(""" """))} } def render(s:Streference): play.api.templates.HtmlFormat.Appendable = apply(s) def f:((Streference) => play.api.templates.HtmlFormat.Appendable) = (s) => apply(s) def ref: this.type = this } /* -- GENERATED -- DATE: Mon Jun 02 10:52:57 EST 2014 SOURCE: /Users/matthew/git/glycomics_working/app/views/stref.scala.html HASH: 382d523b7ce19a5c42ab2b57b3ed89a352c717c7 MATRIX: 779->1|905->17|932->35|969->38|980->42|1018->44|1074->65|1083->66|1117->79|1176->107 LINES: 26->1|30->1|31->3|33->5|33->5|33->5|35->7|35->7|35->7|40->12 -- GENERATED -- */
alternativeTime/unicarb_static
target/scala-2.10/src_managed/main/views/html/stref.template.scala
Scala
gpl-3.0
1,964
/*
 * Copyright 2022 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package utils

import java.util.Base64

import javax.inject.Singleton

@Singleton
class Base64Util {

  def encodeString(inStr: String): String = Base64.getEncoder.encodeToString(inStr.getBytes)

  def decodeString(inStr: String): String = new String(Base64.getDecoder.decode(inStr))
}
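// Round-trip sketch for the helper above ("Man" encodes to "TWFu", the classic Base64 example).
import utils.Base64Util

object Base64UtilExample extends App {
  val util = new Base64Util
  val encoded = util.encodeString("Man")
  println(encoded)                     // TWFu
  println(util.decodeString(encoded))  // Man
}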
hmrc/vat-registration-frontend
app/utils/Base64Util.scala
Scala
apache-2.0
900
package org.flagz import scala.reflect.ClassTag /** FlagField that supports Scala {@link Set}. */ class SetFlagField[E](defaultValue: Set[E])(implicit tag: ClassTag[E]) extends ContainerFlagField[Set[E]](defaultValue, () => Set.empty[E]) { val elementClazz = tag.runtimeClass.asInstanceOf[Class[E]] override protected def addItem(existing: Set[E], value: String): Set[E] = { existing + ContainerFlagField.itemFromString(value, elementClazz, this) } override def valueString(value: Set[E]): String = { value.mkString(",") } } /** FlagField that supports Scala {@link List}. */ class ListFlagField[E](defaultValue: List[E])(implicit tag: ClassTag[E]) extends ContainerFlagField[List[E]](defaultValue, () => Nil) { val elementClazz = tag.runtimeClass.asInstanceOf[Class[E]] override protected def addItem(existing: List[E], value: String): List[E] = { existing ::: List(ContainerFlagField.itemFromString(value, elementClazz, this)) } override def valueString(value: List[E]): String = { value.mkString(",") } } /** FlagField that supports Scala {@link Map}. */ class MapFlagField[K, V](defaultValue: Map[K, V])(implicit keyTag: ClassTag[K], valueTag: ClassTag[V]) extends ContainerFlagField[Map[K, V]](defaultValue, () => Map.empty[K, V]) { val keyClazz = keyTag.runtimeClass.asInstanceOf[Class[K]] val valueClazz = valueTag.runtimeClass.asInstanceOf[Class[V]] override protected def addItem(existing: Map[K, V], itemString: String): Map[K, V] = { val components: Array[String] = itemString.split(":") if (components.length != 2) { throw new FlagException.IllegalFormat(this, itemString, null) } val key: K = ContainerFlagField.itemFromString(components(0), keyClazz, this) val value: V = ContainerFlagField.itemFromString(components(1), valueClazz, this) existing + (key -> value) } override def valueString(value: Map[K, V]): String = { value.map { case (k, v) => (k.toString + ":" + v.toString) }.mkString(",") } }
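// Simplified, standalone illustration of the "key:value,key:value" wire format that
// MapFlagField.addItem consumes one item at a time; the real code types each side via
// ContainerFlagField.itemFromString, and a missing ':' raises FlagException.IllegalFormat.
object MapFlagFormatExample extends App {
  val parsed = "en:hello,fr:bonjour".split(",").map { item =>
    val parts = item.split(":")
    require(parts.length == 2, s"bad item: $item")
    parts(0) -> parts(1)
  }.toMap
  println(parsed) // Map(en -> hello, fr -> bonjour)
}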
mwitkow/java-flagz
flagz-scala/src/main/scala/org/flagz/ScalaCollectionsFlagField.scala
Scala
mit
2,018
package club.diybio.bank import club.diybio.bank.views.{MenuView, SidebarView} import org.denigma.binding.binders.{GeneralBinder, NavigationBinding} import org.denigma.binding.extensions._ import org.denigma.binding.views.BindableView import org.denigma.controls.login.{AjaxSession, LoginView} import org.querki.jquery._ import org.scalajs.dom import org.scalajs.dom.raw.HTMLElement import org.semantic.SidebarConfig import org.semantic.ui._ import scala.collection.immutable.Map import scala.scalajs.js.annotation.JSExport @JSExport("FrontEnd") object FrontEnd extends BindableView with scalajs.js.JSApp { override def name = "main" lazy val elem: HTMLElement = dom.document.body override val params: Map[String, Any] = Map.empty val sidebarParams = SidebarConfig.exclusive(false).dimPage(false).closable(false).useLegacy(false) val session = new AjaxSession() /** * Register views */ override lazy val injector = defaultInjector .register("menu"){ case (el, args) => new MenuView(el,args).withBinders(menu=>List(new GeneralBinder(menu),new NavigationBinding(menu))) } .register("sidebar"){ case (el, args) => new SidebarView(el,args).withBinder(new GeneralBinder(_)) } .register("login"){ case (el, args) => new LoginView(el,session,args).withBinder(new GeneralBinder(_)) } this.withBinder(new GeneralBinder(_)) @JSExport def main(): Unit = { this.bindView(this.viewElement) this.login("guest") //TODO: change it when session mechanism will work well } @JSExport def login(username:String): Unit = session.setUsername(username) @JSExport def showLeftSidebar() = { $(".left.sidebar").sidebar(sidebarParams).show() } @JSExport def load(content: String, into: String): Unit = { dom.document.getElementById(into).innerHTML = content } @JSExport def moveInto(from: String, into: String): Unit = { for { ins <- sq.byId(from) intoElement <- sq.byId(into) } { this.loadElementInto(intoElement, ins.innerHTML) ins.parentNode.removeChild(ins) } } }
antonkulaga/plasmid-bank
frontend/src/main/scala/club/diybio/bank/FrontEnd.scala
Scala
mpl-2.0
2,090
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.ibm.cds.spark.samples import org.apache.spark._ object HelloSpark { //main method invoked when running as a standalone Spark Application def main(args: Array[String]) { val conf = new SparkConf().setAppName("Hello Spark") val spark = new SparkContext(conf) println("Hello Spark Demo. Compute the mean and variance of a collection") val stats = computeStatsForCollection(spark); println(">>> Results: ") println(">>>>>>>Mean: " + stats._1 ); println(">>>>>>>Variance: " + stats._2); spark.stop() } //Library method that can be invoked from Jupyter Notebook def computeStatsForCollection( spark: SparkContext, countPerPartitions: Int = 100000, partitions: Int=5): (Double, Double) = { val totalNumber = math.min( countPerPartitions * partitions, Long.MaxValue).toInt; val rdd = spark.parallelize( 1 until totalNumber,partitions); (rdd.mean(), rdd.variance()) } }
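// Sketch (not part of the sample) of driving the library method above from a locally
// created SparkContext instead of spark-submit; master and app name are illustrative.
import org.apache.spark.{SparkConf, SparkContext}
import com.ibm.cds.spark.samples.HelloSpark

object HelloSparkLocalExample extends App {
  val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("hello-spark-local"))
  val (mean, variance) = HelloSpark.computeStatsForCollection(sc, countPerPartitions = 1000, partitions = 4)
  println(s"mean=$mean variance=$variance")
  sc.stop()
}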
cattoire/sparksamples
helloSpark/src/main/scala/com/ibm/cds/spark/samples/HelloSpark.scala
Scala
apache-2.0
1,744
object Return {
  def foo(x: Int): Int = return 3
}
lampepfl/dotty
tests/pos/tailcall/return.scala
Scala
apache-2.0
54
/* * Copyright 2021 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package controllers.renewal import connectors.DataCacheConnector import controllers.actions.SuccessfulAuthAction import models.businessmatching._ import models.renewal.{FXTransactionsInLast12Months, Renewal} import org.mockito.Matchers._ import org.mockito.Mockito._ import org.scalatestplus.mockito.MockitoSugar import play.api.i18n.Messages import play.api.test.Helpers._ import services.RenewalService import uk.gov.hmrc.http.cache.client.CacheMap import utils.AmlsSpec import views.html.renewal.fx_transaction_in_last_12_months import scala.concurrent.Future class FXTransactionsInLast12MonthsControllerSpec extends AmlsSpec with MockitoSugar { trait Fixture { self => val request = addToken(authRequest) lazy val mockDataCacheConnector = mock[DataCacheConnector] lazy val mockRenewalService = mock[RenewalService] lazy val view = app.injector.instanceOf[fx_transaction_in_last_12_months] val controller = new FXTransactionsInLast12MonthsController ( dataCacheConnector = mockDataCacheConnector, authAction = SuccessfulAuthAction, ds = commonDependencies, renewalService = mockRenewalService, cc = mockMcc, fx_transaction_in_last_12_months = view ) val cacheMap = mock[CacheMap] when(mockDataCacheConnector.fetchAll(any())(any())) .thenReturn(Future.successful(Some(cacheMap))) def setupBusinessMatching(activities: Set[BusinessActivity]) = when { cacheMap.getEntry[BusinessMatching](BusinessMatching.key) } thenReturn Some(BusinessMatching(activities = Some(BusinessActivities(activities)))) } val emptyCache = CacheMap("", Map.empty) "RenewalForeignExchangeTransactionsController" must { "load the page 'How many foreign exchange transactions'" in new Fixture { when(controller.dataCacheConnector.fetch[Renewal](any(), any()) (any(), any())).thenReturn(Future.successful(None)) val result = controller.get()(request) status(result) must be(OK) contentAsString(result) must include(Messages("renewal.msb.fx.transactions.expected.title")) } "load the page 'How many foreign exchange transactions' with pre populated data" in new Fixture { when(controller.dataCacheConnector.fetch[Renewal](any(), any()) (any(), any())).thenReturn(Future.successful(Some(Renewal( fxTransactionsInLast12Months = Some(FXTransactionsInLast12Months("12345678963")))))) val result = controller.get()(request) status(result) must be(OK) contentAsString(result) must include("12345678963") } "Show error message when user has not filled the mandatory fields" in new Fixture { val newRequest = requestWithUrlEncodedBody( "fxTransaction" -> "" ) when { cacheMap.getEntry[Renewal](Renewal.key) } thenReturn Some(Renewal()) when(mockRenewalService.updateRenewal(any(), any())(any(), any())) .thenReturn(Future.successful(emptyCache)) val result = controller.post()(newRequest) status(result) must be(BAD_REQUEST) contentAsString(result) must include (Messages("error.required.renewal.fx.transactions.in.12months")) } trait FlowFixture extends Fixture { val newRequest = 
requestWithUrlEncodedBody( "fxTransaction" -> "12345678963" ) when { cacheMap.getEntry[Renewal](Renewal.key) } thenReturn Some(Renewal()) when(mockRenewalService.updateRenewal(any(), any())(any(), any())) .thenReturn(Future.successful(mock[CacheMap])) } "Successfully save data in mongoCache and navigate to Next page" when { "business activities does not contain HVD or ASP" in new FlowFixture { setupBusinessMatching(activities = Set(MoneyServiceBusiness)) val result = controller.post()(newRequest) status(result) must be(SEE_OTHER) redirectLocation(result) must be(Some(controllers.renewal.routes.SummaryController.get.url)) } "business activities contains HVD" in new FlowFixture { setupBusinessMatching(activities = Set(MoneyServiceBusiness, HighValueDealing)) val result = controller.post()(newRequest) status(result) must be(SEE_OTHER) redirectLocation(result) must be(Some(controllers.renewal.routes.CustomersOutsideIsUKController.get().url)) } "business activities contains ASP" in new FlowFixture { setupBusinessMatching(activities = Set(MoneyServiceBusiness, AccountancyServices)) val result = controller.post()(newRequest) status(result) must be(SEE_OTHER) redirectLocation(result) must be(Some(controllers.renewal.routes.CustomersOutsideIsUKController.get().url)) } "business activities contains HVD and ASP" in new FlowFixture { setupBusinessMatching(activities = Set(MoneyServiceBusiness, HighValueDealing, AccountancyServices)) val result = controller.post()(newRequest) status(result) must be(SEE_OTHER) redirectLocation(result) must be(Some(controllers.renewal.routes.CustomersOutsideIsUKController.get().url)) } } "Successfully save data in mongoCache and navigate to Summary page in edit mode" when { "business activities does not contain HVD or ASP" in new FlowFixture { setupBusinessMatching(activities = Set(MoneyServiceBusiness)) val result = controller.post(true)(newRequest) status(result) must be(SEE_OTHER) redirectLocation(result) must be(Some(controllers.renewal.routes.SummaryController.get.url)) } "business activities contains HVD" in new FlowFixture { setupBusinessMatching(activities = Set(MoneyServiceBusiness, HighValueDealing)) val result = controller.post(true)(newRequest) status(result) must be(SEE_OTHER) redirectLocation(result) must be(Some(controllers.renewal.routes.SummaryController.get.url)) } "business activities contains ASP" in new FlowFixture { setupBusinessMatching(activities = Set(MoneyServiceBusiness, AccountancyServices)) val result = controller.post(true)(newRequest) status(result) must be(SEE_OTHER) redirectLocation(result) must be(Some(controllers.renewal.routes.SummaryController.get.url)) } "business activities contains HVD and ASP" in new FlowFixture { setupBusinessMatching(activities = Set(MoneyServiceBusiness, HighValueDealing, AccountancyServices)) val result = controller.post(true)(newRequest) status(result) must be(SEE_OTHER) redirectLocation(result) must be(Some(controllers.renewal.routes.SummaryController.get.url)) } } } }
hmrc/amls-frontend
test/controllers/renewal/FXTransactionsInLast12MonthsControllerSpec.scala
Scala
apache-2.0
7,289
package fi.allacca import android.app.{Activity, LoaderManager} import android.database.Cursor import android.widget._ import scala.Array import android.os.Bundle import android.content.{CursorLoader, ContentUris, Loader} import android.provider.CalendarContract import android.util.Log import android.view.ViewGroup.LayoutParams import org.joda.time.{DateTime, LocalDate} import scala.annotation.tailrec import org.joda.time.format.DateTimeFormat import android.graphics.Color class AgendaCreator(activity: Activity, parent: RelativeLayout) extends LoaderManager.LoaderCallbacks[Cursor] { private val ids = new IdGenerator(parent.getId + 1) private lazy val dimensions = new ScreenParameters(activity.getResources.getDisplayMetrics) private var displayRange: (LocalDate, LocalDate) = (new LocalDate(), new LocalDate().plusDays(20)) activity.getLoaderManager.initLoader(0, null, this) activity.getLoaderManager.getLoader(0).forceLoad() override def onCreateLoader(id: Int, args: Bundle): Loader[Cursor] = { val builder = CalendarContract.Instances.CONTENT_URI.buildUpon ContentUris.appendId(builder, displayRange._1) ContentUris.appendId(builder, displayRange._2) new CursorLoader(activity, builder.build, Array("_id", "title", "begin", "end"), "", null, "begin asc") } override def onLoadFinished(loader: Loader[Cursor], data: Cursor) { val dataLoaded = data.moveToFirst() val events = if (dataLoaded) readEvents(data) else Nil val eventsByDays: Map[LocalDate, Seq[CalendarEvent]] = events.groupBy { e => new DateTime(e.startTime).toLocalDate } val daysInOrder = eventsByDays.keys.toSeq.sortBy(_.toDateTimeAtCurrentTime.getMillis) Log.d(TAG, "daysInOrder == " + daysInOrder) daysInOrder.foreach { day => val dayNameView = new TextView(activity) dayNameView.setId(ids.nextId) val dayNameParams = new RelativeLayout.LayoutParams(LayoutParams.MATCH_PARENT, LayoutParams.WRAP_CONTENT) dayNameParams.addRule(RelativeLayout.BELOW, dayNameView.getId - 1) dayNameView.setLayoutParams(dayNameParams) dayNameView.setBackgroundColor(dimensions.pavlova) dayNameView.setTextColor(Color.BLACK) dayNameView.setTextSize(dimensions.overviewContentTextSize) val dateFormat = DateTimeFormat.forPattern("d.M.yyyy") dayNameView.setText(dateFormat.print(day)) parent.addView(dayNameView) events.filter { _.isDuring(day.toDateTimeAtStartOfDay) } sortBy { _.startTime } foreach { event => Log.d(TAG, "Rendering " + event) val titleView = new TextView(activity) titleView.setId(ids.nextId) val params = new RelativeLayout.LayoutParams(LayoutParams.WRAP_CONTENT, LayoutParams.WRAP_CONTENT) params.addRule(RelativeLayout.BELOW, titleView.getId - 1) titleView.setLayoutParams(params) titleView.setTextSize(dimensions.overviewContentTextSize) titleView.setText(event.title) parent.addView(titleView) } } } def onTopReached() { Log.d(TAG, "We have scrolled to top and need to load more things of past") displayRange = (displayRange._1.minusDays(20), displayRange._2.minusDays(20)) parent.removeAllViews() activity.getLoaderManager.restartLoader(0, null, this) } def onBottomReached() { Log.d(TAG, "We have scrolled to bottom and need to load more things of future") displayRange = (displayRange._1.plusDays(10), displayRange._2.plusDays(10)) activity.getLoaderManager.restartLoader(0, null, this) } @tailrec private def readEvents(cursor: Cursor, events: Seq[CalendarEvent] = Nil): Seq[CalendarEvent] = { val newEvents = events :+ readEventFrom(cursor) if (!cursor.moveToNext()) { newEvents } else readEvents(cursor, newEvents) } private def readEventFrom(cursor: Cursor): 
CalendarEvent = { new CalendarEvent(title = cursor.getString(1), startTime = cursor.getLong(2), endTime = cursor.getLong(3)) } override def onLoaderReset(loader: Loader[Cursor]) {} }
timorantalaiho/allacca
src/main/scala/fi/allacca/AgendaCreator.scala
Scala
gpl-3.0
4,020
package me.snov.sns.actor import java.util.UUID import akka.actor.Status.{Failure, Success} import akka.actor.{Actor, ActorLogging, ActorRef, Props} import me.snov.sns.actor.DbActor.CmdGetConfiguration import me.snov.sns.model._ import akka.actor.PoisonPill import akka.actor.Status object SubscribeActor { def props(dbActor: ActorRef) = Props(classOf[SubscribeActor], dbActor) case class CmdSubscribe(topicArn: String, protocol: String, endpoint: String) case class CmdUnsubscribe(subscriptionArn: String) case class CmdListSubscriptions() case class CmdListSubscriptionsByTopic(topicArn: String) case class CmdFanOut(topicArn: String, message: Message) case class CmdCreateTopic(name: String) case class CmdDeleteTopic(arn: String) case class CmdListTopics() case class CmdSetSubscriptionAttributes(subscriptionArn: String, attributeName: String, attributeValue: String) case class CmdGetSubscriptionAttributes(subscriptionArn: String) } class SubscribeActor(dbActor: ActorRef) extends Actor with ActorLogging { import me.snov.sns.actor.SubscribeActor._ type TopicArn = String type SubscriptionArn = String var topics = Map[TopicArn, Topic]() var subscriptions = Map[TopicArn, List[Subscription]]() var actorPool = Map[SubscriptionArn, ActorRef]() dbActor ! CmdGetConfiguration private def fanOut(topicArn: TopicArn, message: Message) = { try { if (topics.isDefinedAt(topicArn) && subscriptions.isDefinedAt(topicArn)) { subscriptions(topicArn).foreach((s: Subscription) => { if (actorPool.isDefinedAt(s.arn)) { log.debug(s"Sending message ${message.uuid} to ${s.endpoint}") actorPool(s.arn) ! message } else { throw new RuntimeException(s"No actor for subscription ${s.endpoint}") } }) } else { throw new TopicNotFoundException(s"Topic not found: $topicArn") } Success } catch { case e: Throwable => Failure(e) } } def findSubscription(subscriptionArn: String): Option[Subscription] = { subscriptions.values.flatten.find{_.arn == subscriptionArn} } def producerFor(subscription: Subscription) = { val producer = if(subscription.isRawMessageDelivery) { context.system.actorOf(RawProducerActor.props(subscription.endpoint, subscription.arn, subscription.topicArn)) } else { context.system.actorOf(ProducerActor.props(subscription.endpoint, subscription.arn, subscription.topicArn)) } producer } def updateSubscription(subscription: Subscription) = { val updatedSubs = subscription :: subscriptions(subscription.topicArn).filter(_.arn != subscription.arn) subscriptions += (subscription.topicArn -> updatedSubs) val producer = producerFor(subscription) //update the producer actorPool += (subscription.arn -> producer) } def setAttribute(subscriptionArn: String, attributeName: String, attributeValue: String) = { log.info(s"Setting ${attributeName} to ${attributeValue} for ${subscriptionArn}") findSubscription(subscriptionArn).map { sub => val updated = sub.copy(subscriptionAttributes = Some(sub.subscriptionAttributes.getOrElse(Map.empty[String,String]) + (attributeName -> attributeValue))) updateSubscription(updated) Success } getOrElse { log.error(s"No subscription found for ${subscriptionArn}") Failure(new Exception("Not Found")) } } def getAttributes(subscriptionArn: String): Option[Map[String,String]] = { findSubscription(subscriptionArn).flatMap { sub => sub.subscriptionAttributes.map { attrs => attrs ++ Map( "SubscriptionArn" -> sub.arn, "TopicArn" -> sub.topicArn, "Owner" -> sub.owner ) } } } def subscribe(topicArn: TopicArn, protocol: String, endpoint: String): Subscription = { val subscription = 
Subscription(s"${topicArn}:${UUID.randomUUID}", "", topicArn, protocol, endpoint) initSubscription(subscription) save() subscription } def initSubscription(subscription: Subscription) = { val producer = producerFor(subscription) val listByTopic = subscription :: subscriptions.getOrElse(subscription.topicArn, List()) actorPool += (subscription.arn -> producer) subscriptions += (subscription.topicArn -> listByTopic) } def unsubscribe(subscriptionArn: String) = { subscriptions = subscriptions.map { case (key, topicSubscriptions) => (key, topicSubscriptions.filter((s: Subscription) => s.arn != subscriptionArn)) }.toMap save() Success } def listSubscriptionsByTopic(topicArn: TopicArn): List[Subscription] = { subscriptions.getOrElse(topicArn, List()) } def listSubscriptions(): List[Subscription] = { subscriptions.values.flatten.toList } def findOrCreateTopic(name: String): Topic = { topics.values.find(_.name == name) match { case Some(topic) => topic case None => val topic = Topic(s"arn:aws:sns:us-east-1:123456789012:$name", name) topics += (topic.arn -> topic) save() topic } } def delete(arn: TopicArn) = { if (topics.isDefinedAt(arn)) { topics -= arn if (subscriptions.isDefinedAt(arn)) { subscriptions -= arn } save() Success } else { Failure } } def load(configuration: Configuration) = { configuration.topics.foreach { topic => topics += (topic.arn -> topic) } configuration.subscriptions.foreach { initSubscription } log.info("Loaded configuration") } def save() = { dbActor ! new Configuration(subscriptions = listSubscriptions(), topics = topics.values.toList) } override def receive = { case CmdCreateTopic(name) => sender ! findOrCreateTopic(name) case CmdDeleteTopic(arn) => sender ! delete(arn) case CmdListTopics => sender ! topics.values case CmdSubscribe(topicArn, protocol, endpoint) => sender ! subscribe(topicArn, protocol, endpoint) case CmdUnsubscribe(subscriptionArn) => sender ! unsubscribe(subscriptionArn) case CmdListSubscriptionsByTopic(topicArn) => sender ! listSubscriptionsByTopic(topicArn) case CmdListSubscriptions() => sender ! listSubscriptions() case CmdFanOut(topicArn, message) => sender ! fanOut(topicArn, message) case CmdSetSubscriptionAttributes(subscriptionArn, attributeName, attributeValue) => sender ! setAttribute(subscriptionArn, attributeName, attributeValue) case CmdGetSubscriptionAttributes(subscriptionArn) => sender ! getAttributes(subscriptionArn) case configuration: Configuration => load(configuration) } }
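// Hedged wiring sketch (not part of the project): the actor system name, the dbActor
// placeholder and the HTTP endpoint are invented; the messages follow the protocol above
// (CmdCreateTopic replies with a Topic, CmdSubscribe registers an endpoint for fan-out).
import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import me.snov.sns.model.Topic

object SubscribeActorExample extends App {
  implicit val system: ActorSystem = ActorSystem("sns-example")
  implicit val timeout: Timeout = Timeout(5.seconds)
  import system.dispatcher

  val dbActor: ActorRef = ???  // placeholder: however the application wires its DbActor
  val subscriptions = system.actorOf(SubscribeActor.props(dbActor))

  (subscriptions ? SubscribeActor.CmdCreateTopic("orders")).mapTo[Topic].foreach { topic =>
    subscriptions ! SubscribeActor.CmdSubscribe(topic.arn, "http", "http://localhost:9000/hook")
  }
}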
s12v/sns
src/main/scala/me/snov/sns/actor/SubscribeActor.scala
Scala
apache-2.0
6,682
/*
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package wvlet.airframe.http.client

import wvlet.airframe.http.HttpMessage.{Request, Response}
import wvlet.airframe.http.{HttpClientBackend, HttpClientConfig, HttpSyncClient, ServerAddress}

/**
  */
object URLConnectionClientBackend extends HttpClientBackend {
  def newSyncClient(serverAddress: String, clientConfig: HttpClientConfig): HttpSyncClient[Request, Response] = {
    new URLConnectionClient(
      ServerAddress(serverAddress),
      URLConnectionClientConfig(
        requestFilter = clientConfig.requestFilter,
        retryContext = clientConfig.retryContext,
        codecFactory = clientConfig.codecFactory
      )
    )
  }
}
wvlet/airframe
airframe-http/.jvm/src/main/scala/wvlet/airframe/http/client/URLConnectionClientBackend.scala
Scala
apache-2.0
1,206
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package controllers.registration.returns import config.{AuthClientConnector, BaseControllerComponents, FrontendAppConfig} import controllers.BaseController import forms.AnnualStaggerForm import models.api.returns.AnnualStagger import play.api.mvc.{Action, AnyContent} import services.{ReturnsService, SessionProfile, SessionService} import views.html.returns.last_month_of_accounting_year import javax.inject.{Inject, Singleton} import scala.concurrent.{ExecutionContext, Future} @Singleton class LastMonthOfAccountingYearController @Inject()(view: last_month_of_accounting_year, val authConnector: AuthClientConnector, val sessionService: SessionService, returnsService: ReturnsService )(implicit appConfig: FrontendAppConfig, val executionContext: ExecutionContext, baseControllerComponents: BaseControllerComponents) extends BaseController with SessionProfile { val show: Action[AnyContent] = isAuthenticatedWithProfile() { implicit request => implicit profile => returnsService.getReturns.map { returns => returns.staggerStart match { case Some(stagger: AnnualStagger) => Ok(view(AnnualStaggerForm.form.fill(stagger))) case _ => Ok(view(AnnualStaggerForm.form)) } } } val submit: Action[AnyContent] = isAuthenticatedWithProfile() { implicit request => implicit profile => AnnualStaggerForm.form.bindFromRequest().fold( formWithErrors => Future.successful(BadRequest(view(formWithErrors))), annualStagger => returnsService.saveStaggerStart(annualStagger).map { _ => Redirect(routes.PaymentFrequencyController.show) } ) } }
hmrc/vat-registration-frontend
app/controllers/registration/returns/LastMonthOfAccountingYearController.scala
Scala
apache-2.0
2,597
/* * Copyright (c) 2016 Snowplow Analytics Ltd. All rights reserved. * * This program is licensed to you under the Apache License Version 2.0, * and you may not use this file except in compliance with the Apache License Version 2.0. * You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, * software distributed under the Apache License Version 2.0 is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the Apache License Version 2.0 for the specific language governing permissions and limitations there under. */ package com.snowplowanalytics package snowplow package enrich package common package adapters package registry // Java import java.net.URI import org.apache.http.client.utils.URLEncodedUtils // Scala import scala.collection.JavaConversions._ // ScalaZ import scalaz._ import Scalaz._ // Jackson import com.fasterxml.jackson.core.JsonParseException // json4s import org.json4s._ import org.json4s.jackson.JsonMethods._ // Iglu import iglu.client.{ SchemaKey, Resolver } // This project import loaders.CollectorPayload import utils.{ JsonUtils => JU } /* * Transforms a collector payload which conforms to * a known version of the Mixpanel webhook into * raw events. */ object MixpanelAdapter extends Adapter { // Vendor name for failure message private val VendorName = "Mixpanel" // Tracker Version for Mixpanel webhook private val TrackerVersion = "com.mixpanel-v1" // Expected content-type for a request body private val ContentType = "application/x-www-form-urlencoded" // Schemas for reverse-engineering a Snowplow unstructured event private val EventSchemaMap = Map( "users" -> SchemaKey("com.mixpanel", "users", "jsonschema", "1-0-0").toSchemaUri ) /** * Returns a list of events from the request * body of a Mixpanel Event. Each event will * be formatted as an individual JSON of type * JValue. * * * @param body The urlencoded string * from the Mixpanel POST request * @return a list of validated raw events */ private def payloadBodyToEvents(body: String, payload: CollectorPayload): List[Validated[RawEvent]] = { try { val parsed = toMap(URLEncodedUtils.parse(URI.create("http://localhost/?" + body), "UTF-8").toList) val users = parsed.get("users") users match { case Some(list) => val events = parse(list) for { (event, index) <- events.children.zipWithIndex } yield { for { schema <- lookupSchema(Some("users"), VendorName, index, EventSchemaMap) } yield { val qsParams = toMap(payload.querystring) val formattedEvent = event map { j => j transformField { case (key, value) => (key.replaceAll(" ", ""), value) } } RawEvent( api = payload.api, parameters = toUnstructEventParams(TrackerVersion, qsParams, schema, formattedEvent, "srv"), contentType = payload.contentType, source = payload.source, context = payload.context ) } } case _ => List(s"${VendorName} request body does not have 'users' as a key: no event to process".failNel) } } catch { case e: JsonParseException => { val exception = JU.stripInstanceEtc(e.toString).orNull List(s"$VendorName event failed to parse into JSON: [$exception]".failNel) } } } /* Converts a CollectorPayload instance into raw events. 
* * @param payload The CollectorPayload containing one or more * raw events as collected by a Snowplow collector * @param resolver (implicit) The Iglu resolver used for * schema lookup and validation * @return a Validation boxing either a NEL of RawEvents on * Success, or a NEL of Failure Strings */ def toRawEvents(payload: CollectorPayload)(implicit resolver: Resolver): ValidatedRawEvents = { (payload.body, payload.contentType) match { case (None, _) => s"Request body is empty: no ${VendorName} events to process".failNel case (_, None) => s"Request body provided but content type empty, expected ${ContentType} for ${VendorName}".failNel case (_, Some(ct)) if ct != ContentType => s"Content type of ${ct} provided, expected ${ContentType} for ${VendorName}".failNel case (Some(body), _) => { val events = payloadBodyToEvents(body, payload) rawEventsListProcessor(events) } } } }
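// Shape of a body the adapter above accepts: an application/x-www-form-urlencoded payload
// whose "users" parameter holds a JSON array, one element per event (field values invented).
object MixpanelBodyExample extends App {
  val users = """[{"distinct_id":"123","$email":"user@example.com"}]"""
  val body = "users=" + java.net.URLEncoder.encode(users, "UTF-8")
  println(body)
  // payloadBodyToEvents(body, payload) would yield one RawEvent per array element, wrapped
  // in the com.mixpanel/users/jsonschema/1-0-0 schema from EventSchemaMap.
}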
nakulgan/snowplow
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/adapters/registry/MixpanelAdapter.scala
Scala
apache-2.0
4,791
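A minimal, self-contained sketch of the parsing step that payloadBodyToEvents performs above: the webhook body is URL-encoded, and the "users" parameter carries a JSON array with one element per event. The sample body, object name and printed output below are invented for illustration; this is not Snowplow code.

import java.net.URI
import org.apache.http.client.utils.URLEncodedUtils
import org.json4s.jackson.JsonMethods.{compact, parse, render}
import scala.collection.JavaConversions._

object MixpanelBodySketch extends App {
  // hypothetical body: users=[{"$distinct_id":"abc"}] (URL-encoded)
  val body = "users=%5B%7B%22%24distinct_id%22%3A%22abc%22%7D%5D"
  // same trick as payloadBodyToEvents: lean on URLEncodedUtils by faking a query string
  val params = URLEncodedUtils.parse(URI.create("http://localhost/?" + body), "UTF-8").toList
  val paramMap = params.map(p => p.getName -> p.getValue).toMap

  paramMap.get("users").foreach { json =>
    // each child of the "users" array becomes one raw event
    parse(json).children.zipWithIndex.foreach { case (event, index) =>
      println(s"event $index: ${compact(render(event))}")
    }
  }
}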
package com.blinkbox.books.quartermaster.key.public import akka.actor.ActorRefFactory import com.blinkbox.books.logging.DiagnosticExecutionContext import com.blinkbox.books.spray.{Directives => CommonDirectives, _} import org.slf4j.LoggerFactory import spray.http.HttpHeaders._ import spray.http.StatusCodes._ import spray.routing.{ExceptionHandler, HttpService, Route} import spray.util.LoggingContext class PublicApi(config: AppConfig) (implicit val actorRefFactory: ActorRefFactory) extends HttpService with CommonDirectives { implicit val executionContext = DiagnosticExecutionContext(actorRefFactory.dispatcher) implicit val timeout = config.api.timeout val log = LoggerFactory.getLogger(classOf[PublicApi]) def retrieveKey = pathEndOrSingleSlash { post { uncacheable("Hello World") } } val routes = rootPath(config.api.localUrl.path / "keys") { monitor(log) { respondWithHeader(RawHeader("Vary", "Accept, Accept-Encoding")) { retrieveKey } } } }
blinkboxbooks/key-service
public/src/main/scala/com/blinkbox/books/quartermaster/key/public/PublicApi.scala
Scala
mit
1,028
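A hedged spray-testkit sketch of how a route like retrieveKey could be exercised in a test. The spec class, the stand-in route (plain complete instead of the project's uncacheable directive) and the expected body are assumptions for illustration only.

import org.scalatest.{FlatSpecLike, Matchers}
import spray.routing.HttpService
import spray.testkit.ScalatestRouteTest

class RetrieveKeySketch extends FlatSpecLike with Matchers with ScalatestRouteTest with HttpService {
  def actorRefFactory = system // provided by ScalatestRouteTest

  // stand-in for retrieveKey: accepts a POST on the collection root
  val route = pathEndOrSingleSlash {
    post {
      complete("Hello World")
    }
  }

  "the key route" should "answer a POST to the root path" in {
    Post("/") ~> route ~> check {
      responseAs[String] shouldEqual "Hello World"
    }
  }
}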
package com.crockeo.clasp import org.scalatest._ // Testing the built-in functionality of the language. // * "=" -> builtin_eq, // * "not" -> builtin_not, // * "|" -> builtin_or, // * "&" -> builtin_and, // * "^" -> builtin_xor, // * "def" -> builtin_def, // * "defn" -> builtin_defn, // * "+" -> builtin_add, // * "-" -> builtin_sub, // * "*" -> builtin_mul, // * "/" -> builtin_div, // * "[]" -> builtin_index, // * "len" -> builtin_len, // * "head" -> builtin_head, // * "tail" -> builtin_tail, // * "exec" -> builtin_exec, // * "tostr" -> builtin_tostr, // * "print" -> builtin_print, // * "if" -> builtin_if class BuiltinTests extends FunSuite { import File.runStr import Language._ import Result._ // Checking if an Either is an error. def isErr[A, B](e: Either[A, B]): Boolean = e match { case Left(_) => true case _ => false } val empt = new Context() test("builtin_eq") { assert(runStr("(= 1 2)", empt) == Right(TBool(false), empt)) assert(runStr("(= \\"a\\" \\"a\\")", empt) == Right(TBool(true), empt)) } test("builtin_gt") { assert(runStr("(> 1 2)", empt) == Right(TBool(false), empt)) assert(runStr("(> 2 2)", empt) == Right(TBool(false), empt)) assert(runStr("(> 3 2)", empt) == Right(TBool(true), empt)) } test("builtin_lt") { assert(runStr("(< 1 2)", empt) == Right(TBool(true), empt)) assert(runStr("(< 2 2)", empt) == Right(TBool(false), empt)) assert(runStr("(< 3 2)", empt) == Right(TBool(false), empt)) } test("builtin_not") { assert(runStr("(! #t)", empt) == Right(TBool(false), empt)) assert(runStr("(! #f)", empt) == Right(TBool(true), empt)) } test("builtin_or") { assert(runStr("(| #f #f)", empt) == Right(TBool(false), empt)) assert(runStr("(| #f #t)", empt) == Right(TBool(true), empt)) assert(runStr("(| #t #f)", empt) == Right(TBool(true), empt)) assert(runStr("(| #t #t)", empt) == Right(TBool(true), empt)) } test("builtin_and") { assert(runStr("(& #f #f)", empt) == Right(TBool(false), empt)) assert(runStr("(& #f #t)", empt) == Right(TBool(false), empt)) assert(runStr("(& #t #f)", empt) == Right(TBool(false), empt)) assert(runStr("(& #t #t)", empt) == Right(TBool(true), empt)) } test("builtin_xor") { assert(runStr("(^ #f #f)", empt) == Right(TBool(false), empt)) assert(runStr("(^ #f #t)", empt) == Right(TBool(true), empt)) assert(runStr("(^ #t #f)", empt) == Right(TBool(true), empt)) assert(runStr("(^ #t #t)", empt) == Right(TBool(false), empt)) } test("builtin_def") { assert(runStr("(def a b)", empt) == Right(TAtom("b"), empt + ("a" -> TAtom("b")))) } test("builtin_defn") { val fn = TFunction(List(TAtom("a")), TAtom("a")) assert(runStr("(defn test (a) a)", empt) == Right(fn, empt + ("test" -> fn))) } test("builtin_add") { assert(runStr("(+ 1 1)", empt) == Right(TInt(2), empt)) assert(runStr("(+ 1 1.0)", empt) == Right(TFloat(2.0f), empt)) assert(runStr("(+ 1 \\" \\" 2)", empt) == Right(TString("1 2"), empt)) } test("builtin_sub") { assert(runStr("(- 2 1)", empt) == Right(TInt(1), empt)) assert(runStr("(- 2.0 1)", empt) == Right(TFloat(1.0f), empt)) } test("builtin_mul") { // TODO } test("builtin_div") { // TODO } test("builtin_index") { assert(runStr("([] 0 (1 2 3))", empt) == Right(TInt(1), empt)) assert(runStr("([] 3 (1 2 3))", empt) match { case Left(_) => true case _ => false }) } test("builtin_len") { assert(Eval(parse("(len (a b c))"), empt) == Right(TInt(3), empt)) assert(isErr(File.runStr("(len 5)", empt))) } test("builtin_head") { assert(File.runStr("(head (1 2 3))", empt) == Right(TInt(1), empt)) assert(isErr(File.runStr("(head ())", empt))) } test("builtin_tail") { 
assert(File.runStr("(tail (1 2 3))", empt) == Right(TList(List(TInt(2), TInt(3))), empt)) assert(isErr(File.runStr("(tail ())", empt))) } test("builtin_exec") { assert(Eval(parse("(exec '(+ 1 1))"), empt) == Right(TInt(2), empt)) assert(Eval(parse("(exec '(if #t (+ 1 1) 0))"), empt) == Right(TInt(2), empt)) } test("builtin_tostr") { assert(Eval(parse("(tostr 1)"), empt) == Right(TString("1"), empt)) assert(Eval(parse("(tostr a)"), empt) == Right(TString("a"), empt)) } test("builtin_print") { // TODO: I can't actually test this properly. } test("builtin_if") { assert(Eval(parse("(if #t a b)"), empt) == Right(TAtom("a"), empt)) assert(Eval(parse("(if (= 0 0) a b)"), empt) == Right(TAtom("a"), empt)) assert(Eval(parse("(if #f a b)"), empt) == Right(TAtom("b"), empt)) assert(Eval(parse("(if (= 0 1) a b)"), empt) == Right(TAtom("b"), empt)) assert(Eval(TList(List(TAtom("if"), TBool(true), TString("asdf"), TString("fdsa"))), empt) == Right(TString("asdf"), empt)) assert(File.runStr("(if (= 0 0) \\"asdf\\" \\"fdsa\\")", empt) == Right(TString("asdf"), empt)) } }
crockeo/clasp
src/test/scala/BuiltinTests.scala
Scala
mit
5,028
package cromwell.webservice.metadata import cats.{Monoid, Semigroup} import cats.instances.map._ import cats.instances.list._ import cats.syntax.foldable._ import cromwell.core.{ExecutionStatus, WorkflowMetadataKeys, WorkflowState} import cromwell.services.metadata._ import spray.json.{JsArray, _} import scala.collection.immutable.TreeMap import scala.language.postfixOps import scala.util.{Random, Try} object MetadataComponent { implicit val MetadataComponentMonoid: Monoid[MetadataComponent] = new Monoid[MetadataComponent] { private lazy val stringKeyMapSg = implicitly[Semigroup[Map[String, MetadataComponent]]] private lazy val intKeyMapSg = implicitly[Semigroup[Map[Int, MetadataComponent]]] def combine(f1: MetadataComponent, f2: MetadataComponent): MetadataComponent = { (f1, f2) match { case (MetadataObject(v1), MetadataObject(v2)) => MetadataObject(stringKeyMapSg.combine(v1, v2)) case (MetadataList(v1), MetadataList(v2)) => MetadataList(intKeyMapSg.combine(v1, v2)) // If there's a custom ordering, use it case (v1 @ MetadataPrimitive(_, Some(o1)), v2 @ MetadataPrimitive(_, Some(o2))) if o1 == o2 => o1.max(v1, v2) // Otherwise assume it's ordered by default and take the new one case (_, o2) => o2 } } override def empty: MetadataComponent = MetadataObject.empty } val metadataPrimitiveJsonWriter: JsonWriter[MetadataPrimitive] = JsonWriter.func2Writer[MetadataPrimitive] { case MetadataPrimitive(MetadataValue(value, MetadataInt), _) => Try(value.toInt) map JsNumber.apply getOrElse JsString(value) case MetadataPrimitive(MetadataValue(value, MetadataNumber), _) => Try(value.toDouble) map JsNumber.apply getOrElse JsString(value) case MetadataPrimitive(MetadataValue(value, MetadataBoolean), _) => Try(value.toBoolean) map JsBoolean.apply getOrElse JsString(value) case MetadataPrimitive(MetadataValue(value, MetadataString), _) => JsString(value) case MetadataPrimitive(MetadataValue(_, MetadataNull), _) => JsNull } implicit val metadataComponentJsonWriter: JsonWriter[MetadataComponent] = JsonWriter.func2Writer[MetadataComponent] { case MetadataList(values) => JsArray(values.values.toVector map { _.toJson(this.metadataComponentJsonWriter) }) case MetadataObject(values) => JsObject(values.mapValues(_.toJson(this.metadataComponentJsonWriter))) case primitive: MetadataPrimitive => metadataPrimitiveJsonWriter.write(primitive) case MetadataEmptyComponent => JsObject.empty case MetadataNullComponent => JsNull case MetadataJsonComponent(jsValue) => jsValue } /* ******************************* */ /* *** Metadata Events Parsing *** */ /* ******************************* */ private val KeySeparator = MetadataKey.KeySeparator // Split on every unescaped KeySeparator val KeySplitter = s"(?<!\\\\\\\\)$KeySeparator" private val bracketMatcher = """\\[(\\d*)\\]""".r private def parseKeyChunk(chunk: String, innerValue: MetadataComponent): MetadataComponent = { chunk.indexOf('[') match { // If there's no bracket, it's an object. e.g.: "calls" case -1 => MetadataObject(Map(chunk -> innerValue)) // If there's a bracket it's a named list. 
e.g.: "executionEvents[0][1]" case bracketIndex => // Name: "executionEvents" val objectName = chunk.substring(0, bracketIndex) // Empty value means empty list if (innerValue == MetadataEmptyComponent) MetadataObject(Map(objectName -> MetadataList.empty)) else { // Brackets: "[0][1]" val brackets = chunk.substring(bracketIndex) // Indices as a list: List(0, 1) val listIndices = for { m <- bracketMatcher.findAllMatchIn(brackets) // It's possible for a bracket pair to be empty, in which case we just give it a random number asInt = if (m.group(1).isEmpty) Random.nextInt() else m.group(1).toInt } yield asInt // Fold into a MetadataList: MetadataList(0 -> MetadataList(1 -> innerValue)) val metadataList = listIndices.toList.foldRight(innerValue)((index, acc) => MetadataList(TreeMap(index -> acc))) MetadataObject(Map(objectName -> metadataList)) } } } private def customOrdering(event: MetadataEvent): Option[Ordering[MetadataPrimitive]] = event match { case MetadataEvent(MetadataKey(_, Some(_), key), _, _) if key == CallMetadataKeys.ExecutionStatus => Option(MetadataPrimitive.ExecutionStatusOrdering) case MetadataEvent(MetadataKey(_, None, key), _, _) if key == WorkflowMetadataKeys.Status => Option(MetadataPrimitive.WorkflowStateOrdering) case _ => None } private def toMetadataComponent(subWorkflowMetadata: Map[String, JsValue])(event: MetadataEvent) = { lazy val primitive = event.value map { MetadataPrimitive(_, customOrdering(event)) } getOrElse MetadataEmptyComponent lazy val originalKeyAndPrimitive = (event.key.key, primitive) val keyAndPrimitive: (String, MetadataComponent) = if (event.key.key.endsWith(CallMetadataKeys.SubWorkflowId)) { (for { metadataValue <- event.value subWorkflowMetadata <- subWorkflowMetadata.get(metadataValue.value) keyWithSubWorkflowMetadata = event.key.key.replace(CallMetadataKeys.SubWorkflowId, CallMetadataKeys.SubWorkflowMetadata) subWorkflowComponent = MetadataJsonComponent(subWorkflowMetadata) } yield (keyWithSubWorkflowMetadata, subWorkflowComponent)) getOrElse originalKeyAndPrimitive } else originalKeyAndPrimitive fromMetadataKeyAndPrimitive(keyAndPrimitive._1, keyAndPrimitive._2) } /** Sort events by timestamp, transform them into MetadataComponent, and merge them together. */ def apply(events: Seq[MetadataEvent], subWorkflowMetadata: Map[String, JsValue] = Map.empty): MetadataComponent = { // The `List` has a `Foldable` instance defined in scope, and because the `List`'s elements have a `Monoid` instance // defined in scope, `combineAll` can derive a sane `TimestampedJsValue` value even if the `List` of events is empty. 
events.toList map toMetadataComponent(subWorkflowMetadata) combineAll } def fromMetadataKeyAndPrimitive(metadataKey: String, innerComponent: MetadataComponent) = { import MetadataKey._ metadataKey.split(KeySplitter).map(_.unescapeMeta).foldRight(innerComponent)(parseKeyChunk) } } sealed trait MetadataComponent case object MetadataEmptyComponent extends MetadataComponent case object MetadataNullComponent extends MetadataComponent // Metadata Object object MetadataObject { def empty = new MetadataObject(Map.empty) def apply(kvPair: (String, MetadataComponent)*) = { new MetadataObject(kvPair.toMap) } } case class MetadataObject(v: Map[String, MetadataComponent]) extends MetadataComponent // Metadata List object MetadataList { def empty = new MetadataList(Map.empty) def apply(components: List[MetadataComponent]) = new MetadataList(components.zipWithIndex.map({case (c, i) => i -> c}).toMap) } case class MetadataList(v: Map[Int, MetadataComponent]) extends MetadataComponent // Metadata Primitive object MetadataPrimitive { val ExecutionStatusOrdering: Ordering[MetadataPrimitive] = Ordering.by { primitive: MetadataPrimitive => ExecutionStatus.withName(primitive.v.value) } val WorkflowStateOrdering: Ordering[MetadataPrimitive] = Ordering.by { primitive: MetadataPrimitive => WorkflowState.withName(primitive.v.value) } } case class MetadataPrimitive(v: MetadataValue, customOrdering: Option[Ordering[MetadataPrimitive]] = None) extends MetadataComponent // Metadata Component that owns an already computed JsValue case class MetadataJsonComponent(jsValue: JsValue) extends MetadataComponent
ohsu-comp-bio/cromwell
engine/src/main/scala/cromwell/webservice/metadata/MetadataComponent.scala
Scala
bsd-3-clause
7,782
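A standalone sketch of the key-folding idea behind parseKeyChunk and fromMetadataKeyAndPrimitive above: a flat metadata key such as "calls:executionEvents[0][1]" folds into nested objects and index-keyed lists. The simplified Component types and the sample key/value are stand-ins, not Cromwell's real classes (no key unescaping, no random index for empty brackets).

import scala.collection.immutable.TreeMap

object KeyChunkSketch extends App {
  sealed trait Component
  case class Leaf(value: String)                 extends Component
  case class Obj(fields: Map[String, Component]) extends Component
  case class Lst(items: TreeMap[Int, Component]) extends Component

  private val bracket = """\[(\d*)\]""".r

  def parseChunk(chunk: String, inner: Component): Component =
    chunk.indexOf('[') match {
      // no bracket: plain object field, e.g. "calls"
      case -1 => Obj(Map(chunk -> inner))
      // bracket(s): named list, e.g. "executionEvents[0][1]"
      case i =>
        val name    = chunk.substring(0, i)
        val indices = bracket.findAllMatchIn(chunk.substring(i)).map(_.group(1).toInt).toList
        Obj(Map(name -> indices.foldRight(inner)((idx, acc) => Lst(TreeMap(idx -> acc)))))
    }

  def fromKey(key: String, value: String): Component =
    key.split(":").foldRight(Leaf(value): Component)(parseChunk)

  // prints (roughly): Obj(Map(calls -> Obj(Map(executionEvents -> Lst(TreeMap(0 -> Lst(TreeMap(1 -> Leaf(running)))))))))
  println(fromKey("calls:executionEvents[0][1]", "running"))
}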
package com.lvxingpai.model.marketplace.misc import java.util.Date import javax.validation.constraints.{ NotNull, Min } import com.lvxingpai.model.mixin.ObjectIdEnabled import org.mongodb.morphia.annotations.{ Entity, Indexed } /** * Created by zephyre on 2/18/16. */ @Entity class Coupon extends ObjectIdEnabled { /** * Which user this coupon belongs to */ @Min(value = 0) @Indexed(unique = true) var userId: Long = _ /** * Coupon title */ @NotNull var title: String = _ /** * Coupon description */ @NotNull var desc: String = _ /** * Discount amount (how much is deducted) */ @Min(value = 0) var discount: Int = _ /** * When the coupon expires */ @NotNull var expire: Date = _ /** * Whether the coupon is still usable */ var available: Boolean = true }
Lvxingpai/core-model
src/main/scala/com/lvxingpai/model/marketplace/misc/Coupon.scala
Scala
apache-2.0
775
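A short usage sketch for the bean above; the field values and the 30-day expiry are invented, and persisting the instance through Morphia is assumed to happen elsewhere.

import java.util.Date
import com.lvxingpai.model.marketplace.misc.Coupon

object CouponSketch extends App {
  val thirtyDaysMs = 30L * 24 * 60 * 60 * 1000
  val coupon = new Coupon
  coupon.userId = 42L                      // hypothetical user id
  coupon.title = "New-user voucher"
  coupon.desc = "50 off your first order"
  coupon.discount = 50                     // amount deducted, in the shop's currency unit
  coupon.expire = new Date(System.currentTimeMillis() + thirtyDaysMs)
  println(s"coupon for user ${coupon.userId}, usable: ${coupon.available}")
}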
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @author Vamsi Thummala {[email protected]}, Copyright (C) 2013-2015 * */ package safe.safesets package client import akka.actor.{Actor, ActorRef, Props} import scala.concurrent.duration.FiniteDuration object Worker { def props( masterRef: ActorRef , recontactInterval: FiniteDuration , receiveTimeout: FiniteDuration , storeURI: String ): Props = Props(classOf[Worker], masterRef, recontactInterval, receiveTimeout, storeURI) def name: String = "SafeSetsWorker" } class Worker( val masterRef: ActorRef , val recontactInterval: FiniteDuration , val receiveTimeout: FiniteDuration , val storeURI: String ) extends WorkerLike with WorkerLikeConfig { val workExecutor: ActorRef = context.actorOf(WorkExecutor.props(storeURI), WorkExecutor.name) registerWorker() // registerWorker with a master context.watch(workExecutor) // watch for workExecutor termination }
wowmsi/safe
safe-lang/src/main/scala/safe/safesets/client/Worker.scala
Scala
apache-2.0
1,725
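A minimal wiring sketch for the worker above. The master actor, the intervals and the store URI are placeholders, not values taken from this project.

import akka.actor.{Actor, ActorSystem, Props}
import scala.concurrent.duration._
import safe.safesets.client.Worker

// placeholder master that just ignores messages (the real master is part of the project)
class DummyMaster extends Actor {
  def receive = { case _ => () }
}

object WorkerWiringSketch extends App {
  val system = ActorSystem("safe-sets-sketch")
  val master = system.actorOf(Props[DummyMaster], "master")
  val worker = system.actorOf(
    Worker.props(
      masterRef = master,
      recontactInterval = 5.seconds,        // assumed value
      receiveTimeout = 30.seconds,          // assumed value
      storeURI = "http://localhost:8098"),  // assumed store endpoint
    Worker.name)
}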
package collins.models import shared.PageParams import org.specs2._ import specification._ import play.api.test.WithApplication class AssetSpec extends mutable.Specification { "Asset Model Specification".title args(sequential = true) "The Asset Model" should { "Support CRUD Operations" in new WithApplication { "CREATE" in new mockasset { val result = Asset.create(newAsset) result.getId must beGreaterThan(1L) } "UPDATE" in new mockasset { val maybeAsset = Asset.findByTag(assetTag) maybeAsset must beSome[Asset] val realAsset = maybeAsset.get Asset.update(realAsset.copy(status = Status.New.get.id)) Asset.findByTag(assetTag).map { a => a.getStatus().getId mustEqual(Status.New.get.id) }.getOrElse(failure("Couldn't find asset but expected to")) } "DELETE" in new mockasset { Asset.findByTag(assetTag).map { a => Asset.delete(a) mustEqual 1 Asset.findById(a.getId) must beNone }.getOrElse(failure("Couldn't find asset but expected to")) } } "Support nodeclass" in new WithApplication { "nodeClass" in new mocknodeclass { val nodeclass = Asset.create(Asset(nodeclassTag, nodeclassStatus,nodeclassType)) val testAsset = Asset.create(Asset(assetTag, assetStatus, assetType)) val nodeclassMetas = createAssetMetas(nodeclass, (nodeclassMetaTags + nodeclassIdentifierTag)) val assetMetas = createAssetMetas(testAsset, nodeclassMetaTags) testAsset.nodeClass must_== Some(nodeclass) } "findSimilar" in new mocknodeclass { val assets = similarAssetData.map{case (tag, status, metatags) => { val asset = Asset.create(Asset(tag, status, AssetType.ServerNode.get)) createAssetMetas(asset, metatags) asset }} val finder = AssetFinder.empty.copy( status = Status.Unallocated, assetType = Some(AssetType.ServerNode.get) ) val expected = assets.filter{_.tag == similarAssetTag} val page = PageParams(0,10, "", "tag") Asset.findByTag(assetTag).map{asset => Asset.findSimilar(asset, page, finder, AssetSort.Distribution).items must_== expected }.getOrElse(failure("Couldn't find asset but expected to")) } } //support nodeclass "Support getters/finders" in new WithApplication { "findByTag" in new concreteasset { Asset.findByTag(assetTag) must beSome[Asset] Asset.findByTag(assetTag).get.tag mustEqual assetTag } "getAllAttributes" in new concreteasset { val maybeAsset = Asset.findByTag(assetTag) maybeAsset must beSome[Asset] val asset = maybeAsset.get val attributes = asset.getAllAttributes attributes.ipmi must beSome.which { ipmi => ipmi.dottedAddress mustEqual "10.0.0.2" ipmi.dottedGateway mustEqual "10.0.0.1" } } } // support getters/finders } // Asset should trait mockasset extends Scope { val assetTag = "tumblrtag2" val assetStatus = Status.Incomplete.get val assetType = AssetType.ServerNode.get val newAsset = Asset(assetTag, assetStatus, assetType) } trait concreteasset extends Scope { val assetTag = "tumblrtag1" val assetStatus = Status.Incomplete.get val assetType = AssetType.ServerNode.get val assetId = 1 } trait mocknodeclass extends Scope { def createAssetMetas(asset: Asset, metamap: Map[String, String]) = metamap .map{ case (k,v) => AssetMetaValue.create(AssetMetaValue(asset.id, AssetMeta.findOrCreateFromName(k).id, 0, v)) } val nodeclassTag = "test_nodeclass" val nodeclassStatus = Status.Allocated.get val nodeclassType = AssetType.Configuration.get val nodeclassIdentifierTag = ("IS_NODECLASS" -> "true") val nodeclassMetaTags = Map("FOOT1" -> "BAR", "BAZT1" -> "BAAAAZ") val assetTag = "nodeclasstest" val assetStatus = Status.Allocated.get val assetType = AssetType.ServerNode.get val similarAssetTag = "similar_asset" val 
similarAssetData = List[(String, Status, Map[String,String])]( (similarAssetTag,Status.Unallocated.get,nodeclassMetaTags), ("not_similar",Status.Unallocated.get,Map[String,String]()), ("similar_not_unallocated", Status.Provisioned.get,nodeclassMetaTags) ) } }
funzoneq/collins
test/collins/models/AssetSpec.scala
Scala
apache-2.0
4,449
package ca.uwo.eng.sel.cepsim.query import ca.uwo.eng.sel.cepsim.event.EventSet import ca.uwo.eng.sel.cepsim.history.{Produced, WindowAccumulated} import ca.uwo.eng.sel.cepsim.util.SimEventBaseTest import org.junit.runner.RunWith import org.mockito.Mockito._ import org.scalatest.junit.JUnitRunner import org.scalatest.mock.MockitoSugar import org.scalatest.{FlatSpec, Matchers} import scala.concurrent.duration._ /** * Created by virso on 14-12-03. */ @RunWith(classOf[JUnitRunner]) class WindowedOperatorTest extends FlatSpec with Matchers with MockitoSugar with SimEventBaseTest { trait Fixture { val prod1 = mock[EventProducer]("prod1") val f1 = mock[Operator]("f1") val f2 = mock[Operator]("f2") val f3 = mock[Operator]("f3") doReturn("f1").when(f1).id doReturn("f2").when(f2).id doReturn("f3").when(f3).id def setup(op: WindowedOperator) = { op addInputQueue(f1) op addInputQueue(f2) op addOutputQueue(f3) } } "A WindowedOperator" should "generate an output only after the windows has elapsed" in new Fixture { val op = new WindowedOperator("w1", 10, 1 second, 1 second, WindowedOperator.identity(), false, 1000) setup(op) op.init(0.0, 500) // first run - process the events and accumulate op enqueueIntoInput (f1, EventSet(10, 5.0, 1.0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10, 8.0, 2.0, prod1 -> 10.0)) val simEvent = op run (200, 10.0, 1000.0) simEvent should be (List(WindowAccumulated(op, 10, 1000, 0, EventSet(20.0, 6.5, 1.5, prod1 -> 20.0)))) op.inputQueues (f1) should be (0.0 +- 0.0001) op.inputQueues (f2) should be (0.0 +- 0.0001) op.outputQueues(f3) should be (0.0 +- 0.0001) // second run - end of the first window - these events shouldn't be considered by the function op enqueueIntoInput (f1, EventSet(10, 1000.0, 5.0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10, 1000.0, 5.0, prod1 -> 10.0)) val simEvent2 = op run (200, 1010, 2000) simEvent2 should be (List(Produced (op, 1010, 2000, EventSet(20.0, 2000.0, 1995.0, prod1 -> 20.0)), WindowAccumulated(op, 1010, 2000, 0, EventSet(20.0, 1000.0, 5.0, prod1 -> 20.0)))) op.inputQueues (f1) should be (0.0 +- 0.0001) op.inputQueues (f2) should be (0.0 +- 0.0001) op.outputQueues(f3) should be (20.0 +- 0.0001) op.accumulatedSlot should be (0) } it should "accumulate events from consecutive runs" in new Fixture { val op = new WindowedOperator("w1", 10, 1 second, 1 second, WindowedOperator.identity(), false, 1000) setup(op) op.init(0.0, 500) // first run - process the events and accumulate op enqueueIntoInput (f1, EventSet(10, 5.0, 1.0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10, 8.0, 2.0, prod1 -> 10.0)) var simEvent = op run (200, 10.0, 200.0) simEvent should be (List(WindowAccumulated(op, 10, 200.0, 0, EventSet(20.0, 6.5, 1.5, prod1 -> 20.0)))) op enqueueIntoInput (f1, EventSet(10, 200.0, 1.0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10, 100.0, 0.0, prod1 -> 10.0)) simEvent = op run (200, 210.0, 500.0) simEvent should be (List(WindowAccumulated(op, 210, 500.0, 0, EventSet(20.0, 150.0, 0.5, prod1 -> 20.0)))) op enqueueIntoInput (f1, EventSet(10, 500.0, 1.0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10, 500.0, 1.0, prod1 -> 10.0)) simEvent = op run (200, 510.0, 900.0) simEvent should be (List(WindowAccumulated(op, 510, 900.0, 0, EventSet(20.0, 500.0, 1.0, prod1 -> 20.0)))) // finally generate output val produced = op.run(200, 1010.0, 1100.0)(0).asInstanceOf[Produced] produced should equal (Produced(op, 1010.0, 1100.0, EventSet(60.0, 1100.0, 882.16666, prod1 -> 60.0))) } it should "generate output at each advance 
period" in new Fixture { def accTs(i: Int): Double = { if (i == 0) 0 else { // i = 1 -> ((1000 * 20) + (20 * 0)) / 40 // i = 2 -> ((2000 * 20) + (40 * 500) / 60 // .... ((i * 1000 * 20) + (i * 20 * accTs(i - 1))) / ((i + 1) * 20) } } val op = new WindowedOperator("w1", 10, 10 seconds, 1 second, WindowedOperator.constant(2), false, 1000) setup(op) op.init(0.0, 1000) // run 10 times until it reaches the 10 second window (0 until 10).foreach((i) => { val startTime = (i * 1000) val endTime = (i * 1000) + 200 op enqueueIntoInput (f1, EventSet(10.0, startTime, 0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10.0, startTime, 0, prod1 -> 10.0)) val simEvent = op.run(200, startTime, endTime) op.inputQueues (f1) should be (0.0 +- 0.0001) op.inputQueues (f2) should be (0.0 +- 0.0001) op.outputQueues(f3) should be (i * 2.0 +- 0.0001) op.accumulatedSlot should be (i) if (i == 0) { simEvent should be (List(WindowAccumulated(op, startTime, endTime, i, EventSet(20.0, startTime, 0.0, prod1 -> 20.0)))) } else { simEvent should be (List(Produced (op, startTime, endTime, EventSet(2.0, endTime, endTime - accTs(i - 1), prod1 -> 20.0)), WindowAccumulated(op, startTime, endTime, i, EventSet(20.0, startTime, 0.0, prod1 -> 20.0)))) } }) val simEvent = op.run(200, 10000, 11000) simEvent should be (List(Produced(op, 10000, 11000, EventSet(2, 11000, 11000 - accTs(9), prod1 -> 20.0)))) op.inputQueues (f1) should be ( 0.0 +- 0.0001) op.inputQueues (f2) should be ( 0.0 +- 0.0001) op.outputQueues(f3) should be (20.0 +- 0.0001) op.accumulatedSlot should be (0) } it should "correctly accumulate events on window slots" in new Fixture { def accTs(i: Int): Double = { if (i == 0) 0 else { // i = 1 -> ((1000 * 20) + (20 * 0)) / 40 // i = 2 -> ((2000 * 20) + (40 * 500) / 60 // .... ((i * 100 * 20) + (i * 20 * accTs(i - 1))) / ((i + 1) * 20) } } val op = new WindowedOperator("w1", 10, 1 second, 100 milliseconds, WindowedOperator.identity(), false, 1000) setup(op) op.init(0.0, 100) // run 10 times (0 until 10).foreach((i) => { val startTime = (i * 100) val endTime = (i * 100) + 100 op enqueueIntoInput (f1, EventSet(10.0, startTime, 0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10.0, startTime, 0, prod1 -> 10.0)) val simEvent = op.run(200, startTime, endTime) op.inputQueues (f1) should be ( 0.0 +- 0.0001) op.inputQueues (f2) should be ( 0.0 +- 0.0001) op.outputQueues(f3) should be (i * 20.0 +- 0.0001) op.dequeueFromOutput(f3, i * 20.00) if (i == 0) { simEvent should be (List(WindowAccumulated(op, startTime, endTime, i, EventSet(20.0, startTime, 0.0, prod1 -> 20.0)))) } else { simEvent should be (List(Produced (op, startTime, endTime, EventSet(i * 20, endTime, endTime - accTs(i - 1), prod1 -> 20.0)), WindowAccumulated(op, startTime, endTime, i, EventSet(20.0, startTime, 0.0, prod1 -> 20.0)))) } }) // these enqueued events are accumulated in the [1000, 1100[ window op enqueueIntoInput (f1, EventSet(5.0, 1000.0, 0, prod1 -> 5.0)) op enqueueIntoInput (f2, EventSet(5.0, 1000.0, 0, prod1 -> 5.0)) val simEvent = op.run(200, 1005, 1100) simEvent should be (List(Produced (op, 1005, 1100, EventSet(200, 1100, 1100 - accTs(9), prod1 -> 20.0)), WindowAccumulated(op, 1005, 1100, 0, EventSet(10, 1000, 0, prod1 -> 10.0)))) op.inputQueues (f1) should be ( 0.0 +- 0.0001) op.inputQueues (f2) should be ( 0.0 +- 0.0001) op.outputQueues(f3) should be (200.0 +- 0.0001) } it should "take the start time into consideration" in new Fixture { val op = new WindowedOperator("w1", 10, 1 second, 1 second, WindowedOperator.constant(1), false, 1000) 
setup(op) op.init(200, 500) // first run - process the events and accumulate op enqueueIntoInput (f1, EventSet(10.0, 500.0, 0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10.0, 500.0, 0, prod1 -> 10.0)) val simEvent = op run (200, 500, 1000) simEvent should be (List(WindowAccumulated(op, 500, 1000, 0, EventSet(20.0, 500.0, 0.0, prod1 -> 20.0)))) op.outputQueues(f3) should be (0.0 +- 0.0001) // second run - 1 second hasn't elapsed yet because the operator started its processing at time 200ms val simEvent2 = op run (200, 1000, 1500) simEvent2 should have size (0) op.outputQueues(f3) should be (0.0 +- 0.0001) // third run - now it does val simEvent3 = op run (200, 1500, 2000) simEvent3 should be (List(Produced (op, 1500, 2000, EventSet(1.0, 2000.0, 1500.0, prod1 -> 20.0)))) op.outputQueues(f3) should be (1.0 +- 0.0001) } it should "not emit anything if there is no event accumulated" in new Fixture { val op = new WindowedOperator("w1", 10, 1 second, 100 milliseconds, WindowedOperator.identity(), false, 1000) setup(op) op.init(0.0, 100) val simEvent = op.run(200, 1000, 1100) simEvent should have size (0) op.outputQueues(f3) should be (0.0) // these events are accumulated into a new window op enqueueIntoInput (f1, EventSet(10, 1100.0, 5.0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10, 1100.0, 5.0, prod1 -> 10.0)) val simEvent2 = op.run(200, 1101, 1200) simEvent2 should be (List(WindowAccumulated(op, 1101, 1200, 1, EventSet(20.0, 1100.0, 5.0, prod1 -> 20.0)))) op.outputQueues(f3) should be (0.0) val simEvent3 = op.run(200, 1200, 1300) simEvent3 should be (List(Produced(op, 1200, 1300, EventSet(20.0, 1300.0, 205.0, prod1 -> 20.0)))) op.outputQueues(f3) should be (20.0 +- 0.001) } it should "skip more than one slot if needed" in new Fixture { val op = new WindowedOperator("w1", 10, 1 second, 100 milliseconds, WindowedOperator.identity(), false, 1000) setup(op) op.init(0.0, 100) // run 10 times until it reaches the 1 second window (0 until 10).foreach((i) => { val startTime = i * 100 val endTime = (i + 1) * 100 op enqueueIntoInput (f1, EventSet(10.0, startTime, 0.0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10.0, startTime, 0.0, prod1 -> 10.0)) op.run(200, i * 100, (i + 1) * 100) }) // 180 + 160 + 140 + 120 + 100 + 80 + 60 + 40 + 20 op.outputQueues(f3) should be (900.0 +- 0.0001) op.dequeueFromOutput(f3, 900.00) val simEvent = op.run(200, 1450, 1550) // it should execute 5 windows - // (0 -> 1000) = 200, (100 -> 1100) = 180, (200 -> 1200) = 160, (300 -> 1300) = 140, (400 -> 1400) = 120 // totals is zero from the second Produced because all events had already been taken into account simEvent should be (List(Produced(op, 1450, 1550, EventSet(200, 1550, 1100.0, prod1 -> 20.0)), Produced(op, 1450, 1550, EventSet(180, 1550, 1050.0, prod1 -> 0.0)), Produced(op, 1450, 1550, EventSet(160, 1550, 1000.0, prod1 -> 0.0)), Produced(op, 1450, 1550, EventSet(140, 1550, 950.0, prod1 -> 0.0)), Produced(op, 1450, 1550, EventSet(120, 1550, 900.0, prod1 -> 0.0)))) op.outputQueues(f3) should be (800.0 +- 0.0001) op.accumulatedSlot should be (4) } it should "respect the bounds of the successor buffer" in new Fixture { val op = new WindowedOperator("w1", 10, 1 second, 1 second, WindowedOperator.identity(), false, 1024) setup(op) op.init(0.0, 1000) // first run - process the events and accumulate op enqueueIntoInput (f1, EventSet(1000, 0.0, 0.0, prod1 -> 1000.0)) op enqueueIntoInput (f2, EventSet(1000, 0.0, 0.0, prod1 -> 1000.0)) var simEvent = op run (20000, 10.0, 1000.0) op setLimit (f3, 1000) // 
second run - it can generate only 1000 outputs, because f3 has a full buffer // although, the predecessors still need to be processed and accumulated op enqueueIntoInput (f1, EventSet(1000, 1000.0, 0.0, prod1 -> 1000.0)) op enqueueIntoInput (f2, EventSet(1000, 1000.0, 0.0, prod1 -> 1000.0)) simEvent = op.run(20000, 1000, 1100) simEvent should be (List(Produced (op, 1000, 1100, EventSet(1000, 1100.0, 1100.0, prod1 -> 1000.0)), WindowAccumulated(op, 1000, 1100, 0, EventSet(2000, 1000.0, 0.0, prod1 -> 2000.0)))) op.outputQueues(f3) should be (1000.0 +- 0.0001) op.dequeueFromOutput(f3, 1000.0) // third run - still generate output (1000 events from the previous window) simEvent = op.run(20000, 1100, 1200) simEvent should be (List(Produced(op, 1100, 1200, EventSet(1000, 1200.0, 1200.0, prod1 -> 1000.0)))) op.outputQueues(f3) should be (1000.0 +- 0.0001) op.dequeueFromOutput(f3, 1000.0) } it should "respect the bounds of the successor buffer when the operator is scheduled more than once" in new Fixture { val op = new WindowedOperator("w1", 10, 1 second, 1 second, WindowedOperator.identity(), false, 1024) setup(op) op.init(0.0, 1000) // first run - process the events and accumulate op enqueueIntoInput (f1, EventSet(1000, 0.0, 0.0, prod1 -> 1000.0)) op enqueueIntoInput (f2, EventSet(1000, 0.0, 0.0, prod1 -> 1000.0)) var simEvent = op run (20000, 10.0, 1000.0) op setLimit (f3, 1000) // second run - it can generate only 1000 outputs, because f3 has a full buffer // although, the predecessors still need to be processed and accumulated op enqueueIntoInput (f1, EventSet(1000, 1000.0, 0.0, prod1 -> 1000.0)) op enqueueIntoInput (f2, EventSet(1000, 1000.0, 0.0, prod1 -> 1000.0)) simEvent = op.run(20000, 1000, 1100) op.outputQueues(f3) should be (1000.0 +- 0.0001) // third run - events haven't been dequeued, so the operator cannot generate more simEvent = op.run(20000, 1100, 1200) simEvent should be (List.empty) op.outputQueues(f3) should be (1000.0 +- 0.0001) } it should "respect the bounds of the successor buffer when there are remaining events " + "and the window is closing" in new Fixture { val op = new WindowedOperator("w1", 10, 1 second, 1 second, WindowedOperator.identity(), false, 1024) setup(op) op.init(0.0, 1000) // first run - process the events and accumulate op enqueueIntoInput (f1, EventSet(750.0, 0.0, 0.0, prod1 -> 750.0)) op enqueueIntoInput (f2, EventSet(750.0, 0.0, 0.0, prod1 -> 750.0)) var simEvent = op run (20000, 10.0, 1000.0) op setLimit (f3, 1000) // second run - it can generate only 1000 outputs, because f3 has a full buffer // therefore, 500 events from the first window will not be sent to the successor op enqueueIntoInput (f1, EventSet(500.0, 1000.0, 0.0, prod1 -> 500.0)) op enqueueIntoInput (f2, EventSet(500.0, 1000.0, 0.0, prod1 -> 500.0)) simEvent = op.run(20000, 1000, 2000) simEvent should be (List(Produced (op, 1000, 2000, EventSet(1000, 2000.0, 2000.0, prod1 -> 1000.0)), WindowAccumulated(op, 1000, 2000, 0, EventSet(1000.0, 1000.0, 0.0, prod1 -> 1000.0)))) op.outputQueues(f3) should be (1000.0 +- 0.0001) op.dequeueFromOutput(f3, 1000.0) // third run - needs to send the remaining 500 events from the first window // plus 500 from the second window simEvent = op.run(20000, 2000, 3000) simEvent should be (List(Produced(op, 2000, 3000, EventSet(500, 3000.0, 3000.0, prod1 -> 500.0)), Produced(op, 2000, 3000, EventSet(500, 3000.0, 2000.0, prod1 -> 500.0)))) op.outputQueues(f3) should be (1000.0 +- 0.0001) op.dequeueFromOutput(f3, 1000.0) } it should "hold events from many slots if 
the sucessor buffer is full" in new Fixture { val op = new WindowedOperator("w1", 10, 1 second, 100 milliseconds, WindowedOperator.identity(), false, 1000) setup(op) op.init(0.0, 100) // run 10 times until it reaches the 1 second window (0 until 10).foreach((i) => { val startTime = i * 100 val endTime = i * 100 + 100 op enqueueIntoInput (f1, EventSet(10, startTime, 0.0, prod1 -> 10.0)) op enqueueIntoInput (f2, EventSet(10, startTime, 0.0, prod1 -> 10.0)) op.run(200, startTime, endTime) }) op.dequeueFromOutput(f3, 900.00) op.setLimit(f3, 300) var simEvent = op.run(200, 1450, 1550) // it should execute 5 windows - // (0 -> 1000) = 200, (100 -> 1100) = 180, (200 -> 1200) = 160, (300 -> 1300) = 140, (400 -> 1400) = 120 // however, only the first window and part of the second are output simEvent should be (List(Produced(op, 1450, 1550, EventSet(200, 1550, 1100, prod1 -> 20.0)), Produced(op, 1450, 1550, EventSet(100, 1550, 1050, prod1 -> 0.0)))) op.outputQueues(f3) should be (300.0 +- 0.0001) op.dequeueFromOutput(f3, 300.0) // in the toBeSentQueue right now is // EventSet( 80, 500.0, 0.0, prod1 -> 0.0)) // EventSet(160, 550.0, 0.0, prod1 -> 0.0)) // EventSet(140, 600.0, 0.0, prod1 -> 0.0)) // EventSet(120, 650.0, 0.0, prod1 -> 0.0)) // this will produce 100 more events because the (500 -> 1500) window is closing, but they // are not sent to the successors because the operator is still catching up with late events simEvent = op.run(200, 1550, 1650) simEvent(0).asInstanceOf[Produced] should equal (Produced(op, 1550.0, 1650.0, EventSet(300.0, 1650.0, 1103.33333, prod1 -> 0.0))) op.outputQueues(f3) should be (300.0 +- 0.0001) op.dequeueFromOutput(f3, 300.0) // in the toBeSentQueue right now is // EventSet( 80, 600.0, 0.0, prod1 -> 0.0)) // EventSet(120, 650.0, 0.0, prod1 -> 0.0)) // EventSet(100, 700.0, 0.0, prod1 -> 0.0)) // 80 more events because the (600 -> 1600) window is closing simEvent = op.run(200, 1650, 1750) simEvent(0).asInstanceOf[Produced] should equal (Produced(op, 1650, 1750, EventSet(300, 1750.0, 1096.6666, prod1 -> 0.0))) op.outputQueues(f3) should be (300.0 +- 0.0001) op.dequeueFromOutput(f3, 300.0) // in the toBeSentQueue right now is // EventSet(80, 750.0, 0.0, prod1 -> 0.0)) // 60 more events because the (700 -> 1700) window is closing simEvent = op.run(200, 1750, 1850) simEvent should be (List(Produced(op, 1750, 1850, EventSet(80, 1850.0, 1100.0, prod1 -> 0.0)), Produced(op, 1750, 1850, EventSet(60, 1850.0, 1050.0, prod1 -> 0.0)))) op.outputQueues(f3) should be (140.0 +- 0.0001) } }
virsox/cepsim
cepsim-core/src/test/scala/ca/uwo/eng/sel/cepsim/query/WindowedOperatorTest.scala
Scala
mit
18,653
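The accTs helpers in the tests above compute a running weighted average of the accumulation timestamps, as the inline "// i = 1 -> ..." comments hint. A standalone sketch of that arithmetic; the batch size and period mirror the test that uses a 1000 ms advance, and the object name is invented.

object AccTsSketch extends App {
  // each window contributes `batch` events stamped at i * period; the average is
  // re-weighted against everything accumulated so far
  def accTs(i: Int, batch: Int = 20, period: Double = 1000.0): Double =
    if (i == 0) 0.0
    else ((i * period * batch) + (i * batch * accTs(i - 1, batch, period))) / ((i + 1) * batch)

  println(accTs(1)) // ((1000 * 20) + (20 * 0))   / 40 = 500.0
  println(accTs(2)) // ((2000 * 20) + (40 * 500)) / 60 = 1000.0
}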
package ore.db.access import scala.language.{higherKinds, implicitConversions} import slick.lifted.{Query, Rep} trait QueryView[F[_, _]] { def modifyingView[T, M](fa: F[T, M])(f: Query[T, M, Seq] => Query[T, M, Seq]): F[T, M] def filterView[T, M](fa: F[T, M])(f: T => Rep[Boolean]): F[T, M] = modifyingView(fa)(_.filter(f)) def sortView[T, M, OrdT](fa: F[T, M])(f: T => OrdT)(implicit ev: OrdT => slick.lifted.Ordered): F[T, M] = modifyingView(fa)(_.sortBy(f)) } object QueryView { implicit val queryIsQueryView: QueryView[Query[*, *, Seq]] = new QueryView[Query[*, *, Seq]] { override def modifyingView[T, M](fa: Query[T, M, Seq])( f: Query[T, M, Seq] => Query[T, M, Seq] ): Query[T, M, Seq] = f(fa) } class QueryViewOps[F[_, _], T, M](private val fa: F[T, M]) extends AnyVal { def filterView(f: T => Rep[Boolean])(implicit tc: QueryView[F]): F[T, M] = tc.filterView(fa)(f) def sortView[TOrd](f: T => TOrd)(implicit tc: QueryView[F], ev: TOrd => slick.lifted.Ordered): F[T, M] = tc.sortView(fa)(f) def modifyView(f: Query[T, M, Seq] => Query[T, M, Seq])(implicit tc: QueryView[F]): F[T, M] = tc.modifyingView(fa)(f) } trait ToQueryFilterableOps { implicit def toOps[F[_, _], T, M](fa: F[T, M]): QueryViewOps[F, T, M] = new QueryViewOps[F, T, M](fa) } }
SpongePowered/Ore
db/src/main/scala/ore/db/access/QueryView.scala
Scala
mit
1,331
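A hedged usage sketch of the view ops above against a hypothetical Slick table; the Users table, the H2 profile import and the filter value are assumptions, not Ore code.

import slick.jdbc.H2Profile.api._
import ore.db.access.QueryView.ToQueryFilterableOps

object QueryViewSketch extends ToQueryFilterableOps {
  // invented table, only here so the example compiles on its own
  class Users(tag: Tag) extends Table[(Long, String)](tag, "users") {
    def id   = column[Long]("id", O.PrimaryKey)
    def name = column[String]("name")
    def *    = (id, name)
  }
  val users = TableQuery[Users]

  // plain Query has a QueryView instance, so the view ops compose directly
  val alices: Query[Users, (Long, String), Seq] =
    users.filterView(_.name === "alice").sortView(_.id)
}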
package com.eigengo.lift.exercise.classifiers.model import akka.actor.{ActorLogging, Actor} import akka.stream.scaladsl._ import com.eigengo.lift.Exercise.Exercise import com.eigengo.lift.exercise.UserExercises.ModelMetadata import com.eigengo.lift.exercise.UserExercisesClassifier.{UnclassifiedExercise, FullyClassifiedExercise} import com.eigengo.lift.exercise.classifiers.ExerciseModel import com.eigengo.lift.exercise._ import com.eigengo.lift.exercise.classifiers.ExerciseModel._ import com.eigengo.lift.exercise.classifiers.workflows.ClassificationAssertions._ import scala.concurrent.{ExecutionContext, Future} import scala.util.Random object RandomExerciseModel { val exercises = Map( "arms" → List("Biceps curl", "Triceps press"), "chest" → List("Chest press", "Butterfly", "Cable cross-over") ) implicit val prover = new SMTInterface { // Random model performs no query simplification def simplify(query: Query)(implicit ec: ExecutionContext) = Future(query) // Random model always claims that query is satisfiable def satisfiable(query: Query)(implicit ec: ExecutionContext) = Future(true) // Random model always claims that query is valid def valid(query: Query)(implicit ec: ExecutionContext) = Future(true) } } /** * Random exercising model. Updates are simply printed out and queries always succeed (by sending a random message to * the listening actor). */ class RandomExerciseModel(sessionProps: SessionProperties) extends ExerciseModel("random", sessionProps, for (sensor <- Sensor.sourceLocations; exercise <- RandomExerciseModel.exercises.values.flatten) yield Formula(Assert(Gesture(exercise, 0.80), sensor)))(RandomExerciseModel.prover) with Actor with ActorLogging { import RandomExerciseModel._ private val metadata = ModelMetadata(2) private def randomExercise(): Set[Fact] = { val mgk = Random.shuffle(sessionProps.muscleGroupKeys).head if (exercises.get(mgk).isEmpty) { Set.empty } else { val exerciseType = Random.shuffle(exercises.get(mgk).get).head Set(Gesture(exerciseType, 0.80)) } } // Workflow simply adds random facts to random sensors val workflow = Flow[SensorNetValue] .map { sn => val classification = randomExercise() val sensor = Random.shuffle(sn.toMap.keys).head BindToSensors(sn.toMap.map { case (location, _) => if (location == sensor) (location, classification) else (location, Set.empty[Fact]) }.toMap, sn) } // Random model evaluator always returns true! def evaluateQuery(query: Query)(current: BindToSensors, lastState: Boolean) = StableValue(result = true) // Random exercises are returned for 2% of received sensor values def makeDecision(query: Query) = Flow[QueryValue] .map { case StableValue(true) => val exercise = (query: @unchecked) match { case Formula(Assert(Gesture(nm, _), _)) => Exercise(nm, None, None) } FullyClassifiedExercise(metadata, 1.0, exercise) case _ => UnclassifiedExercise(metadata) } .map { exercise => if (Random.nextInt(50000) == 1) { Some(exercise) } else { None } } /** * We use `aroundReceive` here to print out a summary `SensorNet` message. 
*/ override def aroundReceive(receive: Receive, msg: Any) = msg match { case event: SensorNet => event.toMap.foreach { x => (x: @unchecked) match { case (location, data: Vector[_]) => for ((AccelerometerData(_, values), point) <- data.zipWithIndex) { val xs = values.map(_.x) val ys = values.map(_.y) val zs = values.map(_.z) println(s"****** Acceleration $location@$point | X: (${xs.min}, ${xs.max}), Y: (${ys.min}, ${ys.max}), Z: (${zs.min}, ${zs.max})") } for ((RotationData(_, values), point) <- data.zipWithIndex) { val xs = values.map(_.x) val ys = values.map(_.y) val zs = values.map(_.z) println(s"****** Rotation $location@$point | X: (${xs.min}, ${xs.max}), Y: (${ys.min}, ${ys.max}), Z: (${zs.min}, ${zs.max})") } }} super.aroundReceive(receive, msg) case _ => super.aroundReceive(receive, msg) } }
teroxik/open-muvr
server/exercise/src/main/scala/com/eigengo/lift/exercise/classifiers/model/RandomExerciseModel.scala
Scala
apache-2.0
4,358
object Test { def main(args:Array[String]): Unit = { val ns = Array(3L, 3L, 3L) val a1: A = new A(ns(0)) val a2: A = new A(ns(0)) println(a1 + a2) } } class A(val u: Long) extends AnyVal { def +(other: A) = new A(other.u + u) }
yusuke2255/dotty
tests/run/t5608.scala
Scala
bsd-3-clause
251
package com.jackbeasley.wordFind.test import com.jackbeasley.wordFind.Word class WordSpec extends UnitSpec { "A Word" should "print a breakdown of the coordinates of letters" in { val wrd = new Word("hello", Array((1,1), (2,2), (3,3), (4,4), (5,5))) wrd.toString should be ("h (1,1)\\ne (2,2)\\nl (3,3)\\nl (4,4)\\no (5,5)") } it should "correctly evaluate equality" in { val wrd1 = new Word("hello", Array((1,1), (2,2), (3,3), (4,4), (5,5))) val wrd2 = new Word("hello", Array((1,1), (2,2), (3,3), (4,4), (5,5))) val wrd3 = new Word("hello", Array((1,1), (2,1), (5,3), (4,2), (5,5))) val wrd4 = new Word("heeeo", Array((1,1), (2,2), (3,3), (4,4), (5,5))) wrd1.equals(wrd2) should be (true) wrd2.equals(wrd1) should be (true) wrd2.equals(wrd3) should be (false) wrd3.equals(wrd2) should be (false) wrd2.equals(wrd4) should be (false) wrd4.equals(wrd2) should be (false) wrd3.equals(wrd4) should be (false) wrd4.equals(wrd3) should be (false) } it should "concatenate with ++" in { val wrd1 = new Word("he", Array((1,1), (2,2))) val wrd2 = new Word("llo", Array((3,3), (4,4), (5,5))) val wrd3 = new Word("hello", Array((1,1), (2,2), (3,3), (4,4), (5,5))) wrd3.equals(wrd1 ++ wrd2) should be (true) } it should "create subwords as a string makes substrings" in { val wrd1 = new Word("he", Array((1,1), (2,2))) val wrd2 = new Word("llo", Array((3,3), (4,4), (5,5))) val wrd3 = new Word("hello", Array((1,1), (2,2), (3,3), (4,4), (5,5))) wrd3.subword(0,2).equals(wrd1) should be (true) wrd3.subword(2,5).equals(wrd2) should be (true) } it should "iterate through the possible words in decreasing order" in { val wrd = new Word("hello", Array((1,1), (2,2), (3,3), (4,4), (5,5))) val testWords = Array("hello", "hell", "hel", "he", "h") var index = 0 for(word <- wrd.iterator){ word.getWord should be (testWords(index)) index += 1 } } }
jackbeasley/wordFind
src/test/scala/WordSpec.scala
Scala
mit
1,982
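The spec above pins down Word's observable behaviour. Here is a sketch of one implementation that would satisfy it, purely for illustration; the real com.jackbeasley.wordFind.Word may be structured differently.

class Word(val word: String, val coords: Array[(Int, Int)]) {
  // "h (1,1)\ne (2,2)\n..." as the spec's toString test expects
  override def toString: String =
    word.zip(coords).map { case (c, (x, y)) => s"$c ($x,$y)" }.mkString("\n")

  // equal iff both the letters and their coordinates match
  override def equals(other: Any): Boolean = other match {
    case w: Word => w.word == word && w.coords.sameElements(coords)
    case _       => false
  }
  override def hashCode: Int = word.hashCode

  // concatenation of letters and coordinates
  def ++(that: Word): Word = new Word(word + that.word, coords ++ that.coords)

  // subword(from, until), mirroring String.substring
  def subword(from: Int, until: Int): Word =
    new Word(word.substring(from, until), coords.slice(from, until))

  // prefixes from longest to shortest: "hello", "hell", "hel", "he", "h"
  def iterator: Iterator[Word] =
    (word.length to 1 by -1).iterator.map(subword(0, _))

  def getWord: String = word
}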
package shade import scala.collection.mutable.ArrayBuffer package object testModels { val bigInstance = Impression( "96298b14-1e13-a162-662b-969bd3b41ca4", Session( "c5c94985-1d91-3a8b-b36b-6791efefc38c", "dummy-user-sa9d08ahusid", "android.web", UserInfo( "71.89.145.102", "71.89.145.102", "71.89.145.102", "Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522 (KHTML, like Gecko) Safari/419.3", Some( GeoIPLocation( "us", Some("Ashburn"), Some("United States"), Some(39.0437.toFloat), Some(-77.4875.toFloat), Some(703), None, Some("VA"), Some(511)))), Some("aac636be-e42b-01d6-449b-6a0c2e5e7b09"), Some("something-65"), Some("71.89.145.102"), None, None, Some("us")), List( Offer( Some(3352251), "Some Dummy Offer Title", Advertiser( Some(137), Some("something"), "something"), "cpa", LiveDealInfo( Some(""), None, None, None), OfferCreative( "So Many Dresses!", "Daily Deals For Moms, Babies and Kids. Up to 90% OFF! Shop Now!", Some("Something.com"), Some(""), None), ArrayBuffer("viewnow"), "http://something.com/track?clickID=242323&pubID=982345&something=219&subID=something", None, true, false, false, List("us"))), 112, true, Some("light-fullscreen")) val bigInstance2 = Impression( "96298b14-1e13-a162-662b-969bd3b41ca4", Session( "c5c94985-1d91-3a8b-b36b-6791efefc38c", "dummy-user-sa9d08ahusid", "android.web", UserInfo( "71.89.145.102", "71.89.145.102", "71.89.145.102", "Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522 (KHTML, like Gecko) Safari/419.3", Some( GeoIPLocation( "us", Some("Ashburn"), Some("United States"), Some(39.0437.toFloat), Some(-77.4875.toFloat), Some(703), None, Some("VA"), Some(511)))), Some("aac636be-e42b-01d6-449b-6a0c2e5e7b09"), Some("something-65"), Some("71.89.145.102"), None, None, Some("us")), List.empty, 112, true, Some("light-fullscreen")) val contentSeq = Vector( ContentPiece.Article( id = Some(1), url = "http://google.com/", creator = "alex", title = "Hello world!", shortExcerpt = "Hello world", excerptHtml = "<b>Hello world</b>", contentHtml = Some("<h1>Sample</h1><b>Hello world</b>"), source = ContentSource.WordPress, tags = Vector("auto", "hello") ), ContentPiece.Image( id = Some(2), url = "http://google.com/", creator = "alex", photo = "http://google.com/image.png", title = Some("Image"), source = ContentSource.Tumblr, tags = Vector("google", "image") ), ContentPiece.Title( id = Some(3), url = "http://google.com/3", title = "Hello Title", creator = "alex", source = ContentSource.Tumblr, tags = Vector("title", "hello") ) ) }
kazzna/shade
src/test/scala/shade/testModels/package.scala
Scala
mit
3,400
package freecli package command package parser import api.Action import dsl.CommandDsl import freecli.parser.CliParser object ops extends ParserOps trait ParserOps { private[freecli] def parseCommandNonStrict[T]( dsl: CommandDsl[T]): CliParser[Action, CommandParsingError, T] = { dsl.foldMap(CommandParserInterpreter) } def parseCommand[T]( dsl: CommandDsl[T]): CliParser[Action, CommandParsingError, T] = { parseCommandNonStrict(dsl).failIfNotAllArgumentsUsed(args => OtherCommandErrors( additionalArgumentsFound = Some(AdditionalArgumentsFound(args.map(_.name))))) } }
pavlosgi/freecli
core/src/main/scala/freecli/command/parser/ParserOps.scala
Scala
apache-2.0
614
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.scheduler import java.io.NotSerializableException import java.util.Properties import java.util.concurrent.atomic.AtomicInteger import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map} import scala.concurrent.duration._ import scala.reflect.ClassTag import akka.actor._ import org.apache.spark._ import org.apache.spark.rdd.RDD import org.apache.spark.executor.TaskMetrics import org.apache.spark.partial.{ApproximateActionListener, ApproximateEvaluator, PartialResult} import org.apache.spark.storage.{BlockId, BlockManager, BlockManagerMaster, RDDBlockId} import org.apache.spark.util.{MetadataCleaner, MetadataCleanerType, TimeStampedHashMap} /** * The high-level scheduling layer that implements stage-oriented scheduling. It computes a DAG of * stages for each job, keeps track of which RDDs and stage outputs are materialized, and finds a * minimal schedule to run the job. It then submits stages as TaskSets to an underlying * TaskScheduler implementation that runs them on the cluster. * * In addition to coming up with a DAG of stages, this class also determines the preferred * locations to run each task on, based on the current cache status, and passes these to the * low-level TaskScheduler. Furthermore, it handles failures due to shuffle output files being * lost, in which case old stages may need to be resubmitted. Failures *within* a stage that are * not caused by shuffle file loss are handled by the TaskScheduler, which will retry each task * a small number of times before cancelling the whole stage. * * THREADING: This class runs all its logic in a single thread executing the run() method, to which * events are submitted using a synchronized queue (eventQueue). The public API methods, such as * runJob, taskEnded and executorLost, post events asynchronously to this queue. All other methods * should be private. */ private[spark] class DAGScheduler( taskSched: TaskScheduler, mapOutputTracker: MapOutputTrackerMaster, blockManagerMaster: BlockManagerMaster, env: SparkEnv) extends Logging { def this(taskSched: TaskScheduler) { this(taskSched, SparkEnv.get.mapOutputTracker.asInstanceOf[MapOutputTrackerMaster], SparkEnv.get.blockManager.master, SparkEnv.get) } taskSched.setDAGScheduler(this) // Called by TaskScheduler to report task's starting. def taskStarted(task: Task[_], taskInfo: TaskInfo) { eventProcessActor ! BeginEvent(task, taskInfo) } // Called to report that a task has completed and results are being fetched remotely. def taskGettingResult(task: Task[_], taskInfo: TaskInfo) { eventProcessActor ! GettingResultEvent(task, taskInfo) } // Called by TaskScheduler to report task completions or failures. 
def taskEnded( task: Task[_], reason: TaskEndReason, result: Any, accumUpdates: Map[Long, Any], taskInfo: TaskInfo, taskMetrics: TaskMetrics) { eventProcessActor ! CompletionEvent(task, reason, result, accumUpdates, taskInfo, taskMetrics) } // Called by TaskScheduler when an executor fails. def executorLost(execId: String) { eventProcessActor ! ExecutorLost(execId) } // Called by TaskScheduler when a host is added def executorGained(execId: String, host: String) { eventProcessActor ! ExecutorGained(execId, host) } // Called by TaskScheduler to cancel an entire TaskSet due to either repeated failures or // cancellation of the job itself. def taskSetFailed(taskSet: TaskSet, reason: String) { eventProcessActor ! TaskSetFailed(taskSet, reason) } // The time, in millis, to wait for fetch failure events to stop coming in after one is detected; // this is a simplistic way to avoid resubmitting tasks in the non-fetchable map stage one by one // as more failure events come in val RESUBMIT_TIMEOUT = 200.milliseconds // The time, in millis, to wake up between polls of the completion queue in order to potentially // resubmit failed stages val POLL_TIMEOUT = 10L // Warns the user if a stage contains a task with size greater than this value (in KB) val TASK_SIZE_TO_WARN = 100 private var eventProcessActor: ActorRef = _ private[scheduler] val nextJobId = new AtomicInteger(0) def numTotalJobs: Int = nextJobId.get() private val nextStageId = new AtomicInteger(0) private[scheduler] val jobIdToStageIds = new TimeStampedHashMap[Int, HashSet[Int]] private[scheduler] val stageIdToJobIds = new TimeStampedHashMap[Int, HashSet[Int]] private[scheduler] val stageIdToStage = new TimeStampedHashMap[Int, Stage] private[scheduler] val shuffleToMapStage = new TimeStampedHashMap[Int, Stage] private[spark] val stageToInfos = new TimeStampedHashMap[Stage, StageInfo] // An async scheduler event bus. The bus should be stopped when DAGSCheduler is stopped. private[spark] val listenerBus = new SparkListenerBus // Contains the locations that each RDD's partitions are cached on private val cacheLocs = new HashMap[Int, Array[Seq[TaskLocation]]] // For tracking failed nodes, we use the MapOutputTracker's epoch number, which is sent with // every task. When we detect a node failing, we note the current epoch number and failed // executor, increment it for new tasks, and use this to ignore stray ShuffleMapTask results. // // TODO: Garbage collect information about failure epochs when we know there are no more // stray messages to detect. val failedEpoch = new HashMap[String, Long] // stage id to the active job val idToActiveJob = new HashMap[Int, ActiveJob] val waiting = new HashSet[Stage] // Stages we need to run whose parents aren't done val running = new HashSet[Stage] // Stages we are running right now val failed = new HashSet[Stage] // Stages that must be resubmitted due to fetch failures // Missing tasks from each stage val pendingTasks = new TimeStampedHashMap[Stage, HashSet[Task[_]]] val activeJobs = new HashSet[ActiveJob] val resultStageToJob = new HashMap[Stage, ActiveJob] val metadataCleaner = new MetadataCleaner( MetadataCleanerType.DAG_SCHEDULER, this.cleanup, env.conf) /** * Starts the event processing actor. The actor has two responsibilities: * * 1. Waits for events like job submission, task finished, task failure etc., and calls * [[org.apache.spark.scheduler.DAGScheduler.processEvent()]] to process them. * 2. Schedules a periodical task to resubmit failed stages. 
* * NOTE: the actor cannot be started in the constructor, because the periodical task references * some internal states of the enclosing [[org.apache.spark.scheduler.DAGScheduler]] object, thus * cannot be scheduled until the [[org.apache.spark.scheduler.DAGScheduler]] is fully constructed. */ def start() { eventProcessActor = env.actorSystem.actorOf(Props(new Actor { /** * The main event loop of the DAG scheduler. */ def receive = { case event: DAGSchedulerEvent => logTrace("Got event of type " + event.getClass.getName) /** * All events are forwarded to `processEvent()`, so that the event processing logic can * easily tested without starting a dedicated actor. Please refer to `DAGSchedulerSuite` * for details. */ if (!processEvent(event)) { submitWaitingStages() } else { context.stop(self) } } })) } def addSparkListener(listener: SparkListener) { listenerBus.addListener(listener) } private def getCacheLocs(rdd: RDD[_]): Array[Seq[TaskLocation]] = { if (!cacheLocs.contains(rdd.id)) { val blockIds = rdd.partitions.indices.map(index=> RDDBlockId(rdd.id, index)).toArray[BlockId] val locs = BlockManager.blockIdsToBlockManagers(blockIds, env, blockManagerMaster) cacheLocs(rdd.id) = blockIds.map { id => locs.getOrElse(id, Nil).map(bm => TaskLocation(bm.host, bm.executorId)) } } cacheLocs(rdd.id) } private def clearCacheLocs() { cacheLocs.clear() } /** * Get or create a shuffle map stage for the given shuffle dependency's map side. * The jobId value passed in will be used if the stage doesn't already exist with * a lower jobId (jobId always increases across jobs.) */ private def getShuffleMapStage(shuffleDep: ShuffleDependency[_,_], jobId: Int): Stage = { shuffleToMapStage.get(shuffleDep.shuffleId) match { case Some(stage) => stage case None => val stage = newOrUsedStage(shuffleDep.rdd, shuffleDep.rdd.partitions.size, shuffleDep, jobId) shuffleToMapStage(shuffleDep.shuffleId) = stage stage } } /** * Create a Stage -- either directly for use as a result stage, or as part of the (re)-creation * of a shuffle map stage in newOrUsedStage. The stage will be associated with the provided * jobId. Production of shuffle map stages should always use newOrUsedStage, not newStage * directly. */ private def newStage( rdd: RDD[_], numTasks: Int, shuffleDep: Option[ShuffleDependency[_,_]], jobId: Int, callSite: Option[String] = None) : Stage = { val id = nextStageId.getAndIncrement() val stage = new Stage(id, rdd, numTasks, shuffleDep, getParentStages(rdd, jobId), jobId, callSite) stageIdToStage(id) = stage updateJobIdStageIdMaps(jobId, stage) stageToInfos(stage) = new StageInfo(stage) stage } /** * Create a shuffle map Stage for the given RDD. The stage will also be associated with the * provided jobId. 
If a stage for the shuffleId existed previously so that the shuffleId is * present in the MapOutputTracker, then the number and location of available outputs are * recovered from the MapOutputTracker */ private def newOrUsedStage( rdd: RDD[_], numTasks: Int, shuffleDep: ShuffleDependency[_,_], jobId: Int, callSite: Option[String] = None) : Stage = { val stage = newStage(rdd, numTasks, Some(shuffleDep), jobId, callSite) if (mapOutputTracker.has(shuffleDep.shuffleId)) { val serLocs = mapOutputTracker.getSerializedMapOutputStatuses(shuffleDep.shuffleId) val locs = MapOutputTracker.deserializeMapStatuses(serLocs) for (i <- 0 until locs.size) stage.outputLocs(i) = List(locs(i)) stage.numAvailableOutputs = locs.size } else { // Kind of ugly: need to register RDDs with the cache and map output tracker here // since we can't do it in the RDD constructor because # of partitions is unknown logInfo("Registering RDD " + rdd.id + " (" + rdd.origin + ")") mapOutputTracker.registerShuffle(shuffleDep.shuffleId, rdd.partitions.size) } stage } /** * Get or create the list of parent stages for a given RDD. The stages will be assigned the * provided jobId if they haven't already been created with a lower jobId. */ private def getParentStages(rdd: RDD[_], jobId: Int): List[Stage] = { val parents = new HashSet[Stage] val visited = new HashSet[RDD[_]] def visit(r: RDD[_]) { if (!visited(r)) { visited += r // Kind of ugly: need to register RDDs with the cache here since // we can't do it in its constructor because # of partitions is unknown for (dep <- r.dependencies) { dep match { case shufDep: ShuffleDependency[_,_] => parents += getShuffleMapStage(shufDep, jobId) case _ => visit(dep.rdd) } } } } visit(rdd) parents.toList } private def getMissingParentStages(stage: Stage): List[Stage] = { val missing = new HashSet[Stage] val visited = new HashSet[RDD[_]] def visit(rdd: RDD[_]) { if (!visited(rdd)) { visited += rdd if (getCacheLocs(rdd).contains(Nil)) { for (dep <- rdd.dependencies) { dep match { case shufDep: ShuffleDependency[_,_] => val mapStage = getShuffleMapStage(shufDep, stage.jobId) if (!mapStage.isAvailable) { missing += mapStage } case narrowDep: NarrowDependency[_] => visit(narrowDep.rdd) } } } } } visit(stage.rdd) missing.toList } /** * Registers the given jobId among the jobs that need the given stage and * all of that stage's ancestors. */ private def updateJobIdStageIdMaps(jobId: Int, stage: Stage) { def updateJobIdStageIdMapsList(stages: List[Stage]) { if (!stages.isEmpty) { val s = stages.head stageIdToJobIds.getOrElseUpdate(s.id, new HashSet[Int]()) += jobId jobIdToStageIds.getOrElseUpdate(jobId, new HashSet[Int]()) += s.id val parents = getParentStages(s.rdd, jobId) val parentsWithoutThisJobId = parents.filter(p => !stageIdToJobIds.get(p.id).exists(_.contains(jobId))) updateJobIdStageIdMapsList(parentsWithoutThisJobId ++ stages.tail) } } updateJobIdStageIdMapsList(List(stage)) } /** * Removes job and any stages that are not needed by any other job. Returns the set of ids for * stages that were removed. The associated tasks for those stages need to be cancelled if we * got here via job cancellation. 
*/ private def removeJobAndIndependentStages(jobId: Int): Set[Int] = { val registeredStages = jobIdToStageIds(jobId) val independentStages = new HashSet[Int]() if (registeredStages.isEmpty) { logError("No stages registered for job " + jobId) } else { stageIdToJobIds.filterKeys(stageId => registeredStages.contains(stageId)).foreach { case (stageId, jobSet) => if (!jobSet.contains(jobId)) { logError( "Job %d not registered for stage %d even though that stage was registered for the job" .format(jobId, stageId)) } else { def removeStage(stageId: Int) { // data structures based on Stage stageIdToStage.get(stageId).foreach { s => if (running.contains(s)) { logDebug("Removing running stage %d".format(stageId)) running -= s } stageToInfos -= s shuffleToMapStage.keys.filter(shuffleToMapStage(_) == s).foreach(shuffleId => shuffleToMapStage.remove(shuffleId)) if (pendingTasks.contains(s) && !pendingTasks(s).isEmpty) { logDebug("Removing pending status for stage %d".format(stageId)) } pendingTasks -= s if (waiting.contains(s)) { logDebug("Removing stage %d from waiting set.".format(stageId)) waiting -= s } if (failed.contains(s)) { logDebug("Removing stage %d from failed set.".format(stageId)) failed -= s } } // data structures based on StageId stageIdToStage -= stageId stageIdToJobIds -= stageId logDebug("After removal of stage %d, remaining stages = %d" .format(stageId, stageIdToStage.size)) } jobSet -= jobId if (jobSet.isEmpty) { // no other job needs this stage independentStages += stageId removeStage(stageId) } } } } independentStages.toSet } private def jobIdToStageIdsRemove(jobId: Int) { if (!jobIdToStageIds.contains(jobId)) { logDebug("Trying to remove unregistered job " + jobId) } else { removeJobAndIndependentStages(jobId) jobIdToStageIds -= jobId } } /** * Submit a job to the job scheduler and get a JobWaiter object back. The JobWaiter object * can be used to block until the the job finishes executing or can be used to cancel the job. */ def submitJob[T, U]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], callSite: String, allowLocal: Boolean, resultHandler: (Int, U) => Unit, properties: Properties = null): JobWaiter[U] = { // Check to make sure we are not launching a task on a partition that does not exist. val maxPartitions = rdd.partitions.length partitions.find(p => p >= maxPartitions).foreach { p => throw new IllegalArgumentException( "Attempting to access a non-existent partition: " + p + ". " + "Total number of partitions: " + maxPartitions) } val jobId = nextJobId.getAndIncrement() if (partitions.size == 0) { return new JobWaiter[U](this, jobId, 0, resultHandler) } assert(partitions.size > 0) val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _] val waiter = new JobWaiter(this, jobId, partitions.size, resultHandler) eventProcessActor ! 
JobSubmitted( jobId, rdd, func2, partitions.toArray, allowLocal, callSite, waiter, properties) waiter } def runJob[T, U: ClassTag]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, partitions: Seq[Int], callSite: String, allowLocal: Boolean, resultHandler: (Int, U) => Unit, properties: Properties = null) { val waiter = submitJob(rdd, func, partitions, callSite, allowLocal, resultHandler, properties) waiter.awaitResult() match { case JobSucceeded => {} case JobFailed(exception: Exception, _) => logInfo("Failed to run " + callSite) throw exception } } def runApproximateJob[T, U, R]( rdd: RDD[T], func: (TaskContext, Iterator[T]) => U, evaluator: ApproximateEvaluator[U, R], callSite: String, timeout: Long, properties: Properties = null) : PartialResult[R] = { val listener = new ApproximateActionListener(rdd, func, evaluator, timeout) val func2 = func.asInstanceOf[(TaskContext, Iterator[_]) => _] val partitions = (0 until rdd.partitions.size).toArray val jobId = nextJobId.getAndIncrement() eventProcessActor ! JobSubmitted( jobId, rdd, func2, partitions, allowLocal = false, callSite, listener, properties) listener.awaitResult() // Will throw an exception if the job fails } /** * Cancel a job that is running or waiting in the queue. */ def cancelJob(jobId: Int) { logInfo("Asked to cancel job " + jobId) eventProcessActor ! JobCancelled(jobId) } def cancelJobGroup(groupId: String) { logInfo("Asked to cancel job group " + groupId) eventProcessActor ! JobGroupCancelled(groupId) } /** * Cancel all jobs that are running or waiting in the queue. */ def cancelAllJobs() { eventProcessActor ! AllJobsCancelled } /** * Process one event retrieved from the event processing actor. * * @param event The event to be processed. * @return `true` if we should stop the event loop. */ private[scheduler] def processEvent(event: DAGSchedulerEvent): Boolean = { event match { case JobSubmitted(jobId, rdd, func, partitions, allowLocal, callSite, listener, properties) => var finalStage: Stage = null try { // New stage creation may throw an exception if, for example, jobs are run on a HadoopRDD // whose underlying HDFS files have been deleted. finalStage = newStage(rdd, partitions.size, None, jobId, Some(callSite)) } catch { case e: Exception => logWarning("Creating new stage failed due to exception - job: " + jobId, e) listener.jobFailed(e) return false } val job = new ActiveJob(jobId, finalStage, func, partitions, callSite, listener, properties) clearCacheLocs() logInfo("Got job " + job.jobId + " (" + callSite + ") with " + partitions.length + " output partitions (allowLocal=" + allowLocal + ")") logInfo("Final stage: " + finalStage + " (" + finalStage.name + ")") logInfo("Parents of final stage: " + finalStage.parents) logInfo("Missing parents: " + getMissingParentStages(finalStage)) if (allowLocal && finalStage.parents.size == 0 && partitions.length == 1) { // Compute very short actions like first() or take() with no parent stages locally. listenerBus.post(SparkListenerJobStart(job, Array(), properties)) runLocally(job) } else { idToActiveJob(jobId) = job activeJobs += job resultStageToJob(finalStage) = job listenerBus.post(SparkListenerJobStart(job, jobIdToStageIds(jobId).toArray, properties)) submitStage(finalStage) } case JobCancelled(jobId) => handleJobCancellation(jobId) case JobGroupCancelled(groupId) => // Cancel all jobs belonging to this job group. // First finds all active jobs with this group id, and then kill stages for them. 
val activeInGroup = activeJobs.filter(activeJob => groupId == activeJob.properties.get(SparkContext.SPARK_JOB_GROUP_ID)) val jobIds = activeInGroup.map(_.jobId) jobIds.foreach { handleJobCancellation } case AllJobsCancelled => // Cancel all running jobs. running.map(_.jobId).foreach { handleJobCancellation } activeJobs.clear() // These should already be empty by this point, idToActiveJob.clear() // but just in case we lost track of some jobs... case ExecutorGained(execId, host) => handleExecutorGained(execId, host) case ExecutorLost(execId) => handleExecutorLost(execId) case BeginEvent(task, taskInfo) => for ( job <- idToActiveJob.get(task.stageId); stage <- stageIdToStage.get(task.stageId); stageInfo <- stageToInfos.get(stage) ) { if (taskInfo.serializedSize > TASK_SIZE_TO_WARN * 1024 && !stageInfo.emittedTaskSizeWarning) { stageInfo.emittedTaskSizeWarning = true logWarning(("Stage %d (%s) contains a task of very large " + "size (%d KB). The maximum recommended task size is %d KB.").format( task.stageId, stageInfo.name, taskInfo.serializedSize / 1024, TASK_SIZE_TO_WARN)) } } listenerBus.post(SparkListenerTaskStart(task, taskInfo)) case GettingResultEvent(task, taskInfo) => listenerBus.post(SparkListenerTaskGettingResult(task, taskInfo)) case completion @ CompletionEvent(task, reason, _, _, taskInfo, taskMetrics) => listenerBus.post(SparkListenerTaskEnd(task, reason, taskInfo, taskMetrics)) handleTaskCompletion(completion) case TaskSetFailed(taskSet, reason) => stageIdToStage.get(taskSet.stageId).foreach { abortStage(_, reason) } case ResubmitFailedStages => if (failed.size > 0) { // Failed stages may be removed by job cancellation, so failed might be empty even if // the ResubmitFailedStages event has been scheduled. resubmitFailedStages() } case StopDAGScheduler => // Cancel any active jobs for (job <- activeJobs) { val error = new SparkException("Job cancelled because SparkContext was shut down") job.listener.jobFailed(error) listenerBus.post(SparkListenerJobEnd(job, JobFailed(error, None))) } return true } false } /** * Resubmit any failed stages. Ordinarily called after a small amount of time has passed since * the last fetch failure. */ private[scheduler] def resubmitFailedStages() { logInfo("Resubmitting failed stages") clearCacheLocs() val failed2 = failed.toArray failed.clear() for (stage <- failed2.sortBy(_.jobId)) { submitStage(stage) } } /** * Check for waiting or failed stages which are now eligible for resubmission. * Ordinarily run on every iteration of the event loop. */ private[scheduler] def submitWaitingStages() { // TODO: We might want to run this less often, when we are sure that something has become // runnable that wasn't before. logTrace("Checking for newly runnable parent stages") logTrace("running: " + running) logTrace("waiting: " + waiting) logTrace("failed: " + failed) val waiting2 = waiting.toArray waiting.clear() for (stage <- waiting2.sortBy(_.jobId)) { submitStage(stage) } } /** * Run a job on an RDD locally, assuming it has only a single partition and no dependencies. * We run the operation in a separate thread just in case it takes a bunch of time, so that we * don't block the DAGScheduler event loop or other concurrent jobs. */ protected def runLocally(job: ActiveJob) { logInfo("Computing the requested partition locally") new Thread("Local computation of job " + job.jobId) { override def run() { runLocallyWithinThread(job) } }.start() } // Broken out for easier testing in DAGSchedulerSuite. 
protected def runLocallyWithinThread(job: ActiveJob) { var jobResult: JobResult = JobSucceeded try { SparkEnv.set(env) val rdd = job.finalStage.rdd val split = rdd.partitions(job.partitions(0)) val taskContext = new TaskContext(job.finalStage.id, job.partitions(0), 0, runningLocally = true) try { val result = job.func(taskContext, rdd.iterator(split, taskContext)) job.listener.taskSucceeded(0, result) } finally { taskContext.executeOnCompleteCallbacks() } } catch { case e: Exception => jobResult = JobFailed(e, Some(job.finalStage)) job.listener.jobFailed(e) } finally { val s = job.finalStage stageIdToJobIds -= s.id // clean up data structures that were populated for a local job, stageIdToStage -= s.id // but that won't get cleaned up via the normal paths through stageToInfos -= s // completion events or stage abort jobIdToStageIds -= job.jobId listenerBus.post(SparkListenerJobEnd(job, jobResult)) } } /** Finds the earliest-created active job that needs the stage */ // TODO: Probably should actually find among the active jobs that need this // stage the one with the highest priority (highest-priority pool, earliest created). // That should take care of at least part of the priority inversion problem with // cross-job dependencies. private def activeJobForStage(stage: Stage): Option[Int] = { if (stageIdToJobIds.contains(stage.id)) { val jobsThatUseStage: Array[Int] = stageIdToJobIds(stage.id).toArray.sorted jobsThatUseStage.find(idToActiveJob.contains(_)) } else { None } } /** Submits stage, but first recursively submits any missing parents. */ private def submitStage(stage: Stage) { val jobId = activeJobForStage(stage) if (jobId.isDefined) { logDebug("submitStage(" + stage + ")") if (!waiting(stage) && !running(stage) && !failed(stage)) { val missing = getMissingParentStages(stage).sortBy(_.id) logDebug("missing: " + missing) if (missing == Nil) { logInfo("Submitting " + stage + " (" + stage.rdd + "), which has no missing parents") submitMissingTasks(stage, jobId.get) running += stage } else { for (parent <- missing) { submitStage(parent) } waiting += stage } } } else { abortStage(stage, "No active job for stage " + stage.id) } } /** Called when stage's parents are available and we can now do its task. */ private def submitMissingTasks(stage: Stage, jobId: Int) { logDebug("submitMissingTasks(" + stage + ")") // Get our pending tasks and remember them in our pendingTasks entry val myPending = pendingTasks.getOrElseUpdate(stage, new HashSet) myPending.clear() var tasks = ArrayBuffer[Task[_]]() if (stage.isShuffleMap) { for (p <- 0 until stage.numPartitions if stage.outputLocs(p) == Nil) { val locs = getPreferredLocs(stage.rdd, p) tasks += new ShuffleMapTask(stage.id, stage.rdd, stage.shuffleDep.get, p, locs) } } else { // This is a final stage; figure out its job's missing partitions val job = resultStageToJob(stage) for (id <- 0 until job.numPartitions if !job.finished(id)) { val partition = job.partitions(id) val locs = getPreferredLocs(stage.rdd, partition) tasks += new ResultTask(stage.id, stage.rdd, job.func, partition, locs, id) } } val properties = if (idToActiveJob.contains(jobId)) { idToActiveJob(stage.jobId).properties } else { //this stage will be assigned to "default" pool null } // must be run listener before possible NotSerializableException // should be "StageSubmitted" first and then "JobEnded" listenerBus.post(SparkListenerStageSubmitted(stageToInfos(stage), properties)) if (tasks.size > 0) { // Preemptively serialize a task to make sure it can be serialized. 
We are catching this // exception here because it would be fairly hard to catch the non-serializable exception // down the road, where we have several different implementations for local scheduler and // cluster schedulers. try { SparkEnv.get.closureSerializer.newInstance().serialize(tasks.head) } catch { case e: NotSerializableException => abortStage(stage, "Task not serializable: " + e.toString) running -= stage return } logInfo("Submitting " + tasks.size + " missing tasks from " + stage + " (" + stage.rdd + ")") myPending ++= tasks logDebug("New pending tasks: " + myPending) taskSched.submitTasks( new TaskSet(tasks.toArray, stage.id, stage.newAttemptId(), stage.jobId, properties)) stageToInfos(stage).submissionTime = Some(System.currentTimeMillis()) } else { logDebug("Stage " + stage + " is actually done; %b %d %d".format( stage.isAvailable, stage.numAvailableOutputs, stage.numPartitions)) running -= stage } } /** * Responds to a task finishing. This is called inside the event loop so it assumes that it can * modify the scheduler's internal state. Use taskEnded() to post a task end event from outside. */ private def handleTaskCompletion(event: CompletionEvent) { val task = event.task if (!stageIdToStage.contains(task.stageId)) { // Skip all the actions if the stage has been cancelled. return } val stage = stageIdToStage(task.stageId) def markStageAsFinished(stage: Stage) = { val serviceTime = stageToInfos(stage).submissionTime match { case Some(t) => "%.03f".format((System.currentTimeMillis() - t) / 1000.0) case _ => "Unknown" } logInfo("%s (%s) finished in %s s".format(stage, stage.name, serviceTime)) stageToInfos(stage).completionTime = Some(System.currentTimeMillis()) listenerBus.post(SparkListenerStageCompleted(stageToInfos(stage))) running -= stage } event.reason match { case Success => logInfo("Completed " + task) if (event.accumUpdates != null) { Accumulators.add(event.accumUpdates) // TODO: do this only if task wasn't resubmitted } pendingTasks(stage) -= task stageToInfos(stage).taskInfos += event.taskInfo -> event.taskMetrics task match { case rt: ResultTask[_, _] => resultStageToJob.get(stage) match { case Some(job) => if (!job.finished(rt.outputId)) { job.finished(rt.outputId) = true job.numFinished += 1 // If the whole job has finished, remove it if (job.numFinished == job.numPartitions) { idToActiveJob -= stage.jobId activeJobs -= job resultStageToJob -= stage markStageAsFinished(stage) jobIdToStageIdsRemove(job.jobId) listenerBus.post(SparkListenerJobEnd(job, JobSucceeded)) } job.listener.taskSucceeded(rt.outputId, event.result) } case None => logInfo("Ignoring result from " + rt + " because its job has finished") } case smt: ShuffleMapTask => val status = event.result.asInstanceOf[MapStatus] val execId = status.location.executorId logDebug("ShuffleMapTask finished on " + execId) if (failedEpoch.contains(execId) && smt.epoch <= failedEpoch(execId)) { logInfo("Ignoring possibly bogus ShuffleMapTask completion from " + execId) } else { stage.addOutputLoc(smt.partitionId, status) } if (running.contains(stage) && pendingTasks(stage).isEmpty) { markStageAsFinished(stage) logInfo("looking for newly runnable stages") logInfo("running: " + running) logInfo("waiting: " + waiting) logInfo("failed: " + failed) if (stage.shuffleDep.isDefined) { // We supply true to increment the epoch number here in case this is a // recomputation of the map outputs. 
In that case, some nodes may have cached // locations with holes (from when we detected the error) and will need the // epoch incremented to refetch them. // TODO: Only increment the epoch number if this is not the first time // we registered these map outputs. mapOutputTracker.registerMapOutputs( stage.shuffleDep.get.shuffleId, stage.outputLocs.map(list => if (list.isEmpty) null else list.head).toArray, changeEpoch = true) } clearCacheLocs() if (stage.outputLocs.exists(_ == Nil)) { // Some tasks had failed; let's resubmit this stage // TODO: Lower-level scheduler should also deal with this logInfo("Resubmitting " + stage + " (" + stage.name + ") because some of its tasks had failed: " + stage.outputLocs.zipWithIndex.filter(_._1 == Nil).map(_._2).mkString(", ")) submitStage(stage) } else { val newlyRunnable = new ArrayBuffer[Stage] for (stage <- waiting) { logInfo("Missing parents for " + stage + ": " + getMissingParentStages(stage)) } for (stage <- waiting if getMissingParentStages(stage) == Nil) { newlyRunnable += stage } waiting --= newlyRunnable running ++= newlyRunnable for { stage <- newlyRunnable.sortBy(_.id) jobId <- activeJobForStage(stage) } { logInfo("Submitting " + stage + " (" + stage.rdd + "), which is now runnable") submitMissingTasks(stage, jobId) } } } } case Resubmitted => logInfo("Resubmitted " + task + ", so marking it as still running") pendingTasks(stage) += task case FetchFailed(bmAddress, shuffleId, mapId, reduceId) => // Mark the stage that the reducer was in as unrunnable val failedStage = stageIdToStage(task.stageId) running -= failedStage // TODO: Cancel running tasks in the stage logInfo("Marking " + failedStage + " (" + failedStage.name + ") for resubmision due to a fetch failure") // Mark the map whose fetch failed as broken in the map stage val mapStage = shuffleToMapStage(shuffleId) if (mapId != -1) { mapStage.removeOutputLoc(mapId, bmAddress) mapOutputTracker.unregisterMapOutput(shuffleId, mapId, bmAddress) } logInfo("The failed fetch was from " + mapStage + " (" + mapStage.name + "); marking it for resubmission") if (failed.isEmpty && eventProcessActor != null) { // Don't schedule an event to resubmit failed stages if failed isn't empty, because // in that case the event will already have been scheduled. eventProcessActor may be // null during unit tests. import env.actorSystem.dispatcher env.actorSystem.scheduler.scheduleOnce( RESUBMIT_TIMEOUT, eventProcessActor, ResubmitFailedStages) } failed += failedStage failed += mapStage // TODO: mark the executor as failed only if there were lots of fetch failures on it if (bmAddress != null) { handleExecutorLost(bmAddress.executorId, Some(task.epoch)) } case ExceptionFailure(className, description, stackTrace, metrics) => // Do nothing here, left up to the TaskScheduler to decide how to handle user failures case TaskResultLost => // Do nothing here; the TaskScheduler handles these failures and resubmits the task. case other => // Unrecognized failure - also do nothing. If the task fails repeatedly, the TaskScheduler // will abort the job. } } /** * Responds to an executor being lost. This is called inside the event loop, so it assumes it can * modify the scheduler's internal state. Use executorLost() to post a loss event from outside. * * Optionally the epoch during which the failure was caught can be passed to avoid allowing * stray fetch failures from possibly retriggering the detection of a node as lost. 
*/ private def handleExecutorLost(execId: String, maybeEpoch: Option[Long] = None) { val currentEpoch = maybeEpoch.getOrElse(mapOutputTracker.getEpoch) if (!failedEpoch.contains(execId) || failedEpoch(execId) < currentEpoch) { failedEpoch(execId) = currentEpoch logInfo("Executor lost: %s (epoch %d)".format(execId, currentEpoch)) blockManagerMaster.removeExecutor(execId) // TODO: This will be really slow if we keep accumulating shuffle map stages for ((shuffleId, stage) <- shuffleToMapStage) { stage.removeOutputsOnExecutor(execId) val locs = stage.outputLocs.map(list => if (list.isEmpty) null else list.head).toArray mapOutputTracker.registerMapOutputs(shuffleId, locs, changeEpoch = true) } if (shuffleToMapStage.isEmpty) { mapOutputTracker.incrementEpoch() } clearCacheLocs() } else { logDebug("Additional executor lost message for " + execId + "(epoch " + currentEpoch + ")") } } private def handleExecutorGained(execId: String, host: String) { // remove from failedEpoch(execId) ? if (failedEpoch.contains(execId)) { logInfo("Host gained which was in lost list earlier: " + host) failedEpoch -= execId } } private def handleJobCancellation(jobId: Int) { if (!jobIdToStageIds.contains(jobId)) { logDebug("Trying to cancel unregistered job " + jobId) } else { val independentStages = removeJobAndIndependentStages(jobId) independentStages.foreach { taskSched.cancelTasks } val error = new SparkException("Job %d cancelled".format(jobId)) val job = idToActiveJob(jobId) job.listener.jobFailed(error) jobIdToStageIds -= jobId activeJobs -= job idToActiveJob -= jobId listenerBus.post(SparkListenerJobEnd(job, JobFailed(error, Some(job.finalStage)))) } } /** * Aborts all jobs depending on a particular Stage. This is called in response to a task set * being canceled by the TaskScheduler. Use taskSetFailed() to inject this event from outside. */ private def abortStage(failedStage: Stage, reason: String) { if (!stageIdToStage.contains(failedStage.id)) { // Skip all the actions if the stage has been removed. return } val dependentStages = resultStageToJob.keys.filter(x => stageDependsOn(x, failedStage)).toSeq stageToInfos(failedStage).completionTime = Some(System.currentTimeMillis()) for (resultStage <- dependentStages) { val job = resultStageToJob(resultStage) val error = new SparkException("Job aborted: " + reason) job.listener.jobFailed(error) jobIdToStageIdsRemove(job.jobId) idToActiveJob -= resultStage.jobId activeJobs -= job resultStageToJob -= resultStage listenerBus.post(SparkListenerJobEnd(job, JobFailed(error, Some(failedStage)))) } if (dependentStages.isEmpty) { logInfo("Ignoring failure of " + failedStage + " because all jobs depending on it are done") } } /** * Return true if one of stage's ancestors is target. */ private def stageDependsOn(stage: Stage, target: Stage): Boolean = { if (stage == target) { return true } val visitedRdds = new HashSet[RDD[_]] val visitedStages = new HashSet[Stage] def visit(rdd: RDD[_]) { if (!visitedRdds(rdd)) { visitedRdds += rdd for (dep <- rdd.dependencies) { dep match { case shufDep: ShuffleDependency[_,_] => val mapStage = getShuffleMapStage(shufDep, stage.jobId) if (!mapStage.isAvailable) { visitedStages += mapStage visit(mapStage.rdd) } // Otherwise there's no need to follow the dependency back case narrowDep: NarrowDependency[_] => visit(narrowDep.rdd) } } } } visit(stage.rdd) visitedRdds.contains(target.rdd) } /** * Synchronized method that might be called from other threads. 
* @param rdd whose partitions are to be looked at * @param partition to lookup locality information for * @return list of machines that are preferred by the partition */ private[spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = synchronized { // If the partition is cached, return the cache locations val cached = getCacheLocs(rdd)(partition) if (!cached.isEmpty) { return cached } // If the RDD has some placement preferences (as is the case for input RDDs), get those val rddPrefs = rdd.preferredLocations(rdd.partitions(partition)).toList if (!rddPrefs.isEmpty) { return rddPrefs.map(host => TaskLocation(host)) } // If the RDD has narrow dependencies, pick the first partition of the first narrow dep // that has any placement preferences. Ideally we would choose based on transfer sizes, // but this will do for now. rdd.dependencies.foreach { case n: NarrowDependency[_] => for (inPart <- n.getParents(partition)) { val locs = getPreferredLocs(n.rdd, inPart) if (locs != Nil) return locs } case _ => } Nil } private def cleanup(cleanupTime: Long) { Map( "stageIdToStage" -> stageIdToStage, "shuffleToMapStage" -> shuffleToMapStage, "pendingTasks" -> pendingTasks, "stageToInfos" -> stageToInfos, "jobIdToStageIds" -> jobIdToStageIds, "stageIdToJobIds" -> stageIdToJobIds). foreach { case(s, t) => { val sizeBefore = t.size t.clearOldValues(cleanupTime) logInfo("%s %d --> %d".format(s, sizeBefore, t.size)) }} } def stop() { if (eventProcessActor != null) { eventProcessActor ! StopDAGScheduler } metadataCleaner.cancel() taskSched.stop() listenerBus.stop() } }
dotunolafunmiloye/spark
core/src/main/scala/org/apache/spark/scheduler/DAGScheduler.scala
Scala
apache-2.0
44,688
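The stage bookkeeping in the DAGScheduler code above (getParentStages, getMissingParentStages, stageDependsOn) repeatedly applies one idea: a recursive visit over RDD dependencies with a visited set, cutting the walk wherever a shuffle dependency marks a stage boundary. The following self-contained sketch illustrates that traversal pattern on a toy graph; Node is a hypothetical stand-in for an RDD and is not a Spark type.

import scala.collection.mutable.HashSet

object StageWalkSketch {

  // Hypothetical stand-in for an RDD: each node lists (parent, crossesShuffleBoundary).
  final case class Node(id: Int, deps: List[(Node, Boolean)] = Nil)

  // Mirrors the visit pattern used by getParentStages: follow narrow dependencies,
  // and stop at shuffle dependencies, each of which would become a separate map stage.
  def shuffleParents(root: Node): Set[Int] = {
    val parents = new HashSet[Int]
    val visited = new HashSet[Int]
    def visit(n: Node): Unit = {
      if (!visited(n.id)) {
        visited += n.id
        n.deps.foreach {
          case (p, true)  => parents += p.id   // shuffle edge: stage boundary
          case (p, false) => visit(p)          // narrow edge: same stage, keep walking
        }
      }
    }
    visit(root)
    parents.toSet
  }

  def main(args: Array[String]): Unit = {
    val a = Node(1)
    val b = Node(2, List(a -> true))   // b depends on a across a shuffle
    val c = Node(3, List(b -> false))  // c depends narrowly on b
    println(shuffleParents(c))         // Set(1): only a sits behind a shuffle boundary
  }
}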
package aws.daleks.ai

import rx.lang.scala._
import scala.collection.JavaConverters._
import com.amazonaws.regions.Region
import java.util.List
import java.util.Collections
import aws.daleks.RxDalek
import com.amazonaws.services.machinelearning.model._
import com.amazonaws.services.machinelearning.AmazonMachineLearningClient
import com.amazonaws.services.machinelearning.AmazonMachineLearning

case class MLDatasourcesDalek() extends RxDalek[DataSource] {

  val aml = AmazonMachineLearningClient.builder().withRegion(regions).build()

  override def list() = aml.describeDataSources().getResults

  override def exterminate(ar: DataSource) =
    aml.deleteDataSource(new DeleteDataSourceRequest().withDataSourceId(ar.getDataSourceId))

  override def describe(ar: DataSource) = Map(
    ("dataSourceId" -> ar.getDataSourceId)
  )

  override def isSupported() = region.isServiceSupported(AmazonMachineLearning.ENDPOINT_PREFIX)
}
jfaerman/aws-daleks
archive/src/main/scala/aws/daleks/ai/MLDatasourcesDalek.scala
Scala
apache-2.0
949
package com.orendainx.trucking.simulator.simulators

import akka.actor.{ActorSystem, Inbox}
import better.files.File
import com.orendainx.trucking.commons.models.TruckingData
import com.orendainx.trucking.simulator.coordinators.ManualCoordinator
import com.orendainx.trucking.simulator.depots.NoSharingDepot
import com.orendainx.trucking.simulator.flows.SharedFlowManager
import com.orendainx.trucking.simulator.generators.TruckAndTrafficGenerator
import com.orendainx.trucking.simulator.services.DriverFactory
import com.orendainx.trucking.simulator.transmitters.BufferTransmitter
import com.typesafe.config.{Config, ConfigFactory}

import scala.concurrent.Await
import scala.concurrent.duration._

/**
 * This simulator requires the tick() method to be called to tick the simulator forward. Generated data can then be
 * retrieved using the fetch() method.
 *
 * This simulator is ideal for use within a custom NiFi processor.
 *
 * @author Edgar Orendain <[email protected]>
 * @see https://github.com/orendain/trucking-nifi-bundle for an example of how a custom NiFi processor uses this simulator.
 */
object ManualTickAndFetchSimulator {
  def main(args: Array[String]): Unit = {
    if (args.length > 0)
      new ManualTickAndFetchSimulator(ConfigFactory.parseFile(File(args(1)).toJava))
    else
      new ManualTickAndFetchSimulator()
  }
}

class ManualTickAndFetchSimulator(val config: Config) extends Simulator {

  def this() = this(ConfigFactory.load())

  // Wrap 'config' with the default config and system defaults
  private implicit val combinedConfig: Config = ConfigFactory.load(config).getConfig("trucking-simulator")

  private val system = ActorSystem("ManualTickAndFetchSimulator")

  // Generate the drivers to be used in the simulation and create an Inbox for accepting messages
  private val drivers = DriverFactory.drivers
  private val inbox = Inbox.create(system)

  // Generate the different actors in the simulation
  private val depot = system.actorOf(NoSharingDepot.props())
  private val transmitter = system.actorOf(BufferTransmitter.props())
  private val flowManager = system.actorOf(SharedFlowManager.props(transmitter))
  private val dataGenerators = drivers.map { driver =>
    system.actorOf(TruckAndTrafficGenerator.props(driver, depot, flowManager))
  }
  private val coordinator = system.actorOf(ManualCoordinator.props(dataGenerators))

  // Ensure that the system is properly terminated when the simulator is shut down.
  scala.sys.addShutdownHook {
    stop()
  }

  /**
   * Trigger the simulator to tick once.
   */
  def tick(): Unit = {
    coordinator ! ManualCoordinator.Tick
  }

  /**
   * Fetch the data generated since the last call to [[tick]].
   *
   * As long as this method is not called immediately after a [[tick]], results should return immediately and
   * the specified timeout is ignored.
   *
   * @param timeout Max amount of time to wait, in milliseconds, for the simulator to respond (default: 1000 milliseconds).
   * @return a list of [[TruckingData]] objects generated by the simulator.
   */
  def fetch(timeout: Int = 1000): List[TruckingData] = {
    inbox.send(transmitter, BufferTransmitter.Fetch)
    inbox.receive(timeout.milliseconds).asInstanceOf[List[TruckingData]]
  }

  /**
   * Manually stop the simulation, terminating the underlying system.
   *
   * @param timeout Time to wait for the system to terminate, in milliseconds (default: 5000 milliseconds).
   */
  def stop(timeout: Int = 5000): Unit = {
    system.terminate()
    Await.result(system.whenTerminated, timeout.milliseconds)
  }
}
orendain/trucking-iot
simulator/src/main/scala/com/orendainx/trucking/simulator/simulators/ManualTickAndFetchSimulator.scala
Scala
apache-2.0
3,585
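Going by the Scaladoc of ManualTickAndFetchSimulator above, a host such as a custom NiFi processor drives it through tick, fetch and stop. The sketch below is a plausible call sequence assembled from those three methods only, not code taken from the repository.

import com.orendainx.trucking.commons.models.TruckingData
import com.orendainx.trucking.simulator.simulators.ManualTickAndFetchSimulator

object SimulatorUsageSketch extends App {
  // The no-arg constructor falls back to ConfigFactory.load().
  val simulator = new ManualTickAndFetchSimulator()

  // Tick the simulation forward a few times, then collect whatever was generated.
  (1 to 3).foreach(_ => simulator.tick())
  val data: List[TruckingData] = simulator.fetch(timeout = 2000)
  println(s"Generated ${data.size} events")

  // Terminate the underlying actor system.
  simulator.stop()
}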
class C {
  val x: ((Int, Int) => Int) = (((a, b)) => a)
  val y: ((Int, Int, Int) => Int) = (((a, !!)) => a)
  val z: ((Int, Int, Int) => Int) = (((a, NotAPatternVariableName, c)) => a)
}
folone/dotty
tests/untried/neg/not-a-legal-formal-parameter-tuple.scala
Scala
bsd-3-clause
189
/*
 * Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package kumoi.impl.vm.net

import kumoi.shell.event._
import kumoi.shell.vm.net._
import kumoi.shell.aaa._
import kumoi.core.log._
//import java.rmi.server._
import kumoi.core._
import kumoi.core.rmi._
import java.net._
import java.util.UUID
import scala.xml._
import scala.xml.transform._
import org.libvirt.{Connect, Network}
import kumoi.impl.vm._
import kumoi.core.or._

/**
 * A common HotNetwork implementation.
 *
 * @author Akiyoshi SUGIKI
 */
class LibvirtHotNetwork(defaultAuth: AAA) extends ORObject[HotVMNetwork] with HotVMNetwork {
  private val logging = Logging("Libvirt")

  private var vmm: LibvirtVMM = null
  private var conn: Connect = null
  private var net: Network = null

  def loadNetwork(v: LibvirtVMM, n: Network)(implicit auth: AAA) {
    vmm = v
    conn = v.conn
    net = n
  }

  def loadColdNetwork(v: LibvirtVMM, c: ColdVMNetwork)(implicit auth: AAA) {
    vmm = v
    conn = v.conn
    net = v.conn.networkDefineXML(c.xml.toString)
  }

  def start(implicit auth: AAA) { net.create }

  def shutdown(implicit auth: AAA) {
    net.destroy
    net.undefine
  }

  override def name(implicit auth: AAA) = { net.getName }

  override def uuid() = UUID.fromString(net.getUUIDString) // readop(this, auth) { UUID.fromString(net.getUUIDString) }

  def autoStart(implicit auth: AAA) = { net.getAutostart }

  //def autoStart_=(b: Boolean) { net.setAutostart(b) }
  def autoStart_=(ba: Tuple2[Boolean, AAA]) { net.setAutostart(ba._1) }

  def bridge(implicit auth: AAA) = { net.getBridgeName }

  //def conn = null

  def xml = { XML.loadString(net.getXMLDesc(0)) }

  def remove()(implicit auth: AAA) {}

  override def genEvent(e: Exception) = VirtualMachineNetworkError(null, null, e)
}
axi-sugiki/kumoi
src/kumoi/impl/vm/net/LibvirtHotNetwork.scala
Scala
apache-2.0
2,312
/*
 * Copyright 2015 Roberto Tyley
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.madgag.scalagithub.commands

import play.api.libs.json.Json

/*
  {
    "name": "bug",
    "color": "f29513"
  }
 */
case class CreateLabel(
  name: String,
  color: String
)

object CreateLabel {
  implicit val writesLabel = Json.writes[CreateLabel]
}
rtyley/play-git-hub
src/main/scala/com/madgag/scalagithub/commands/CreateLabel.scala
Scala
gpl-3.0
854
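CreateLabel above only defines a Writes instance, so serialising a value reproduces the JSON shape quoted in its comment. A minimal sketch, assuming play-json on the classpath:

import play.api.libs.json.Json
import com.madgag.scalagithub.commands.CreateLabel

object CreateLabelSketch extends App {
  val label = CreateLabel(name = "bug", color = "f29513")
  // Relies on the implicit writesLabel defined in the companion object.
  println(Json.toJson(label)) // {"name":"bug","color":"f29513"}
}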
package formacion.example

import formacion.example.Cocodrilo._

class Cocodrilo(name: String, numLegs: Int) extends Animal(name) with AnimalAcuatico with AnimalTerrestre with Asesino {

  def this() {
    this(Cocodrilo.defaultName, Cocodrilo.defaultNumLegs)
  }

  def this(name: String) = this(name, defaultNumLegs)

  override def asesina(): String = {
    _asesina
  }

  // Return type inferred by the compiler
  private def _asesina = "Ahoga a su presa"
}

/**
 * The companion object acts as a singleton holding shared defaults.
 */
object Cocodrilo {
  val color: String = "Green"
  val defaultName = "Cocodrilo"
  val defaultNumLegs = 4

  // Factory pattern in Scala: build instances without `new`
  def apply(name: String, numLegs: Int): Cocodrilo = new Cocodrilo(name, numLegs)
}
anavidad3/PoC-spark-scala-maven
src/test/scala/formacion/example/Cocodrilo.scala
Scala
apache-2.0
707
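The Cocodrilo companion object above provides both an apply factory and shared defaults, so instances can be built with or without new. A small sketch exercising both routes (it assumes the Animal hierarchy from the same package is on the classpath):

import formacion.example.Cocodrilo

object CocodriloSketch extends App {
  // Factory-style construction through the companion object's apply.
  val croc1 = Cocodrilo("Nile", 4)

  // Auxiliary constructor falling back to the companion's defaults.
  val croc2 = new Cocodrilo()

  println(croc1.asesina()) // "Ahoga a su presa"
  println(Cocodrilo.color) // "Green"
}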
package dbtarzan.gui.config.connections

import dbtarzan.gui.util.Validation
import dbtarzan.config.connections.ConnectionData

/* verify if a connection data can be saved */
object ConnectionDataValidation {
  private val MAXFIELDSIZE_MIN = 200

  def validate(data: ConnectionData): List[String] = List(
    errorIf("Empty name", _ => data.name.isEmpty),
    errorIf("Name must be an identifier", _ => !Validation.isIdentifier(data.name)),
    errorIf("Empty url", _ => data.url.isEmpty),
    errorIf("Url cannot contain spaces", _ => Validation.containsWhitespace(data.url)),
    errorIf("Url must be in URL form", _ => !Validation.isValidJdbcURL(data.url)),
    errorIf("Empty driver", _ => data.driver.isEmpty),
    // errorIf("Empty user", _ => data.user.isEmpty),
    // errorIf("User cannot contain spaces", _ => Validation.containsWhtitespace(data.user)),
    // errorIf("Empty password", _ => data.password.isEmpty),
    // errorIf("Password cannot contain spaces", _ => Validation.containsWhtitespace(data.password)),
    errorIf("Empty jar", _ => data.jar.isEmpty),
    errorIf("Jar cannot contain spaces", _ => Validation.containsWhitespace(data.jar)),
    errorIf("Max field size should be over " + MAXFIELDSIZE_MIN, _ => !Validation.isMoreThanOrNone(data.maxFieldSize, MAXFIELDSIZE_MIN))
  ).flatten

  private def errorIf(errorText: String, conditionForError: String => Boolean): Option[String] = {
    Some(errorText).filter(conditionForError)
  }
}
aferrandi/dbtarzan
src/main/scala/dbtarzan/gui/config/connections/ConnectionDataValidation.scala
Scala
apache-2.0
1,498
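The errorIf helper above is an Option-based validation idiom: each rule yields Some(message) only when its predicate fires, and flatten drops the rules that passed. Since the full ConnectionData constructor is not shown here, the sketch below reuses the same idiom standalone rather than calling validate directly:

object OptionValidationSketch extends App {
  // Same shape as ConnectionDataValidation.errorIf: keep the message only when the check fails.
  def errorIf(message: String, failed: => Boolean): Option[String] =
    Some(message).filter(_ => failed)

  def validateName(name: String): List[String] = List(
    errorIf("Empty name", name.isEmpty),
    errorIf("Name cannot contain spaces", name.exists(_.isWhitespace))
  ).flatten

  println(validateName(""))      // List(Empty name)
  println(validateName("my db")) // List(Name cannot contain spaces)
  println(validateName("mydb"))  // List()
}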
/*
 * Forward.scala
 * Forward sampling.
 *
 * Created By: Avi Pfeffer ([email protected])
 * Creation Date: Jan 1, 2009
 *
 * Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
 * See http://www.cra.com or email [email protected] for information.
 *
 * See http://www.github.com/p2t2/figaro for a copy of the software license.
 */

package com.cra.figaro.algorithm.sampling

import com.cra.figaro.language._
import com.cra.figaro.library.cache.Cache
import com.cra.figaro.library.cache.NoCache
import com.cra.figaro.algorithm.sampling.LikelihoodWeighter

class ForwardWeighter(universe: Universe, cache: Cache) extends LikelihoodWeighter(universe, cache) {
  override def rejectionAction() = ()
  override def setObservation(element: Element[_], obs: Option[_]) = {}
}

/**
 * A forward sampler that generates a state by generating values for elements, making sure to generate all the
 * arguments of an element before the element.
 */
object Forward {
  /**
   * Sample the universe by generating a value for each element of the universe, using a fresh no-op cache.
   * Returns the computed weight (Double.NegativeInfinity if the sample was rejected).
   */
  def apply(universe: Universe): Double = {
    apply(universe, new NoCache(universe))
  }

  /**
   * Sample the universe by generating a value for each element of the universe, using the provided cache object.
   * Returns the computed weight (Double.NegativeInfinity if the sample was rejected).
   */
  def apply(universe: Universe, cache: Cache): Double = {
    val lw = new ForwardWeighter(universe, cache)
    try {
      lw.computeWeight(universe.activeElements)
    } catch {
      case Importance.Reject => Double.NegativeInfinity
    }
  }

  /**
   * Sample only the part of the model originating from a single element.
   * Returns the computed weight (Double.NegativeInfinity if the sample was rejected).
   */
  def apply[T](element: Element[T]): Double = {
    val noCache = new NoCache(element.universe)
    val lw = new ForwardWeighter(element.universe, noCache)
    try {
      lw.computeWeight(List(element))
    } catch {
      case Importance.Reject => Double.NegativeInfinity
    }
  }
}
agarbuno/figaro
Figaro/src/main/scala/com/cra/figaro/algorithm/sampling/Forward.scala
Scala
bsd-3-clause
1,952
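The Forward object above exposes three apply overloads that all return the weight computed by the likelihood weighter. A hedged usage sketch, assuming a populated Figaro Universe is already in scope (constructing one is outside this file):

import com.cra.figaro.language.Universe
import com.cra.figaro.algorithm.sampling.Forward

object ForwardSketch {
  // Sketch only: how the universe's elements were built is not shown here.
  def sampleOnce(universe: Universe): Unit = {
    // Returns the weight of the generated state, or Double.NegativeInfinity
    // when the sample was rejected.
    val weight = Forward(universe)
    println(s"weight of sampled state: $weight")
  }
}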
import play.api.mvc.Results import play.api.test.Helpers.stubControllerComponents object ActionMocks { def getWeather() = stubControllerComponents().actionBuilder { Results.Ok(Fixtures.weatherResponse) } def getWeatherList() = stubControllerComponents().actionBuilder { Results.Ok(Fixtures.findWeatherResponse) } def getWeatherForecast() = stubControllerComponents().actionBuilder { Results.Ok(Fixtures.forecastResponse) } def getGithubTokenResponse() = stubControllerComponents().actionBuilder { Results.Ok(Fixtures.githubTokenResponse) } def getGithubUserResponse() = stubControllerComponents().actionBuilder { Results.Ok(Fixtures.githubUserResponse) } } object Fixtures { val Eps = 1e-3 val findWeatherResponse: String = """ |{ | "message": "like", | "cod": "200", | "count": 5, | "list": [ | { | "id": 2950159, | "name": "Berlin", | "coord": { | "lat": 52.5244, | "lon": 13.4105 | }, | "main": { | "temp": 14, | "pressure": 1018, | "humidity": 71, | "temp_min": 14, | "temp_max": 14 | }, | "dt": 1499928600, | "wind": { | "speed": 6.2, | "deg": 280 | }, | "sys": { | "country": "DE" | }, | "rain": null, | "snow": null, | "clouds": { | "all": 75 | }, | "weather": [ | { | "id": 803, | "main": "Clouds", | "description": "broken clouds", | "icon": "04d" | } | ] | }, | { | "id": 2950158, | "name": "Berlin", | "coord": { | "lat": 54.0333, | "lon": 10.45 | }, | "main": { | "temp": 14.51, | "pressure": 1019, | "humidity": 71, | "temp_min": 14, | "temp_max": 15 | }, | "dt": 1499928600, | "wind": { | "speed": 3.1, | "deg": 270 | }, | "sys": { | "country": "DE" | }, | "rain": null, | "snow": null, | "clouds": { | "all": 75 | }, | "weather": [ | { | "id": 803, | "main": "Clouds", | "description": "broken clouds", | "icon": "04d" | } | ] | }, | { | "id": 3587266, | "name": "Berlin", | "coord": { | "lat": 13.5, | "lon": -88.5334 | }, | "main": { | "temp": 24, | "pressure": 1012, | "humidity": 94, | "temp_min": 24, | "temp_max": 24 | }, | "dt": 1499928600, | "wind": { | "speed": 1 | }, | "sys": { | "country": "SV" | }, | "rain": null, | "snow": null, | "clouds": { | "all": 20 | }, | "weather": [ | { | "id": 801, | "main": "Clouds", | "description": "few clouds", | "icon": "02n" | } | ] | }, | { | "id": 3614789, | "name": "Berlin", | "coord": { | "lat": 14.8333, | "lon": -88.5 | }, | "main": { | "temp": 18.67, | "pressure": 949.55, | "humidity": 96, | "temp_min": 18.67, | "temp_max": 18.67, | "sea_level": 1027.37, | "grnd_level": 949.55 | }, | "dt": 1499931001, | "wind": { | "speed": 0.88, | "deg": 229.502 | }, | "sys": { | "country": "HN" | }, | "rain": null, | "snow": null, | "clouds": { | "all": 32 | }, | "weather": [ | { | "id": 802, | "main": "Clouds", | "description": "scattered clouds", | "icon": "03n" | } | ] | }, | { | "id": 4348460, | "name": "Berlin", | "coord": { | "lat": 38.3226, | "lon": -75.2177 | }, | "main": { | "temp": 26.75, | "pressure": 1018, | "humidity": 88, | "temp_min": 26, | "temp_max": 28 | }, | "dt": 1499928840, | "wind": { | "speed": 2.6, | "deg": 230 | }, | "sys": { | "country": "US" | }, | "rain": null, | "snow": null, | "clouds": { | "all": 1 | }, | "weather": [ | { | "id": 800, | "main": "Clear", | "description": "sky is clear", | "icon": "01n" | } | ] | } | ] |} """.stripMargin val weatherResponse: String = """ | { | "coord": { | "lon": 13.41, | "lat": 52.52 | }, | "weather": [ | { | "id": 803, | "main": "Clouds", | "description": "broken clouds", | "icon": "04d" | } | ], | "base": "stations", | "main": { | "temp": 17.49, | "pressure": 1020, | "humidity": 68, | "temp_min": 17, | 
"temp_max": 18 | }, | "visibility": 10000, | "wind": { | "speed": 0.5 | }, | "clouds": { | "all": 75 | }, | "dt": 1499322000, | "sys": { | "type": 1, | "id": 4892, | "message": 0.0053, | "country": "DE", | "sunrise": 1499309530, | "sunset": 1499369382 | }, | "id": 2950159, | "name": "Berlin", | "cod": 200 | } | """.stripMargin val forecastResponse: String = """ |{ | "cod": "200", | "message": 0.0047, | "cnt": 37, | "list": [ | { | "dt": 1499763600, | "main": { | "temp": 19.08, | "temp_min": 16.5, | "temp_max": 19.08, | "pressure": 1017.9, | "sea_level": 1023.59, | "grnd_level": 1017.9, | "humidity": 100, | "temp_kf": 2.58 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 88 | }, | "wind": { | "speed": 2.61, | "deg": 199.001 | }, | "rain": { | "3h": 1.59 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-11 09:00:00" | }, | { | "dt": 1499774400, | "main": { | "temp": 20.46, | "temp_min": 18.53, | "temp_max": 20.46, | "pressure": 1017.65, | "sea_level": 1023.35, | "grnd_level": 1017.65, | "humidity": 99, | "temp_kf": 1.94 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 92 | }, | "wind": { | "speed": 4.56, | "deg": 226.51 | }, | "rain": { | "3h": 0.515 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-11 12:00:00" | }, | { | "dt": 1499785200, | "main": { | "temp": 21.51, | "temp_min": 20.21, | "temp_max": 21.51, | "pressure": 1017.6, | "sea_level": 1023.35, | "grnd_level": 1017.6, | "humidity": 95, | "temp_kf": 1.29 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 24 | }, | "wind": { | "speed": 4.82, | "deg": 244.001 | }, | "rain": { | "3h": 0.135 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-11 15:00:00" | }, | { | "dt": 1499796000, | "main": { | "temp": 20.8, | "temp_min": 20.15, | "temp_max": 20.8, | "pressure": 1017.7, | "sea_level": 1023.37, | "grnd_level": 1017.7, | "humidity": 86, | "temp_kf": 0.65 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 48 | }, | "wind": { | "speed": 4.67, | "deg": 256.503 | }, | "rain": { | "3h": 0.0050000000000026 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-11 18:00:00" | }, | { | "dt": 1499806800, | "main": { | "temp": 18.16, | "temp_min": 18.16, | "temp_max": 18.16, | "pressure": 1018.44, | "sea_level": 1024.1, | "grnd_level": 1018.44, | "humidity": 86, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10n" | } | ], | "clouds": { | "all": 36 | }, | "wind": { | "speed": 4.33, | "deg": 258.005 | }, | "rain": { | "3h": 0.009999999999998 | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-11 21:00:00" | }, | { | "dt": 1499817600, | "main": { | "temp": 15.3, | "temp_min": 15.3, | "temp_max": 15.3, | "pressure": 1018.18, | "sea_level": 1023.87, | "grnd_level": 1018.18, | "humidity": 93, | "temp_kf": 0 | }, | "weather": [ | { | "id": 801, | "main": "Clouds", | "description": "few clouds", | "icon": "02n" | } | ], | "clouds": { | "all": 24 | }, | "wind": { | "speed": 2.66, | "deg": 246.501 | }, | "rain": { | | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-12 00:00:00" | }, | { | "dt": 1499828400, | "main": { | "temp": 14.14, | "temp_min": 14.14, | "temp_max": 14.14, | "pressure": 1017.22, | "sea_level": 1022.91, | "grnd_level": 
1017.22, | "humidity": 95, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 64 | }, | "wind": { | "speed": 1.81, | "deg": 192.002 | }, | "rain": { | "3h": 0.060000000000002 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-12 03:00:00" | }, | { | "dt": 1499839200, | "main": { | "temp": 15.99, | "temp_min": 15.99, | "temp_max": 15.99, | "pressure": 1016.7, | "sea_level": 1022.33, | "grnd_level": 1016.7, | "humidity": 100, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 92 | }, | "wind": { | "speed": 2.21, | "deg": 228.502 | }, | "rain": { | "3h": 1.405 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-12 06:00:00" | }, | { | "dt": 1499850000, | "main": { | "temp": 16.55, | "temp_min": 16.55, | "temp_max": 16.55, | "pressure": 1015.81, | "sea_level": 1021.46, | "grnd_level": 1015.81, | "humidity": 100, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 100 | }, | "wind": { | "speed": 2.26, | "deg": 221.5 | }, | "rain": { | "3h": 0.88 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-12 09:00:00" | }, | { | "dt": 1499860800, | "main": { | "temp": 17.36, | "temp_min": 17.36, | "temp_max": 17.36, | "pressure": 1014.97, | "sea_level": 1020.66, | "grnd_level": 1014.97, | "humidity": 99, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 92 | }, | "wind": { | "speed": 5.26, | "deg": 213.003 | }, | "rain": { | "3h": 0.35 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-12 12:00:00" | }, | { | "dt": 1499871600, | "main": { | "temp": 16.56, | "temp_min": 16.56, | "temp_max": 16.56, | "pressure": 1013.83, | "sea_level": 1019.54, | "grnd_level": 1013.83, | "humidity": 99, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 100 | }, | "wind": { | "speed": 4.91, | "deg": 218.501 | }, | "rain": { | "3h": 1.62 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-12 15:00:00" | }, | { | "dt": 1499882400, | "main": { | "temp": 16.66, | "temp_min": 16.66, | "temp_max": 16.66, | "pressure": 1012.87, | "sea_level": 1018.63, | "grnd_level": 1012.87, | "humidity": 99, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 100 | }, | "wind": { | "speed": 3.45, | "deg": 233.006 | }, | "rain": { | "3h": 1.58 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-12 18:00:00" | }, | { | "dt": 1499893200, | "main": { | "temp": 15.83, | "temp_min": 15.83, | "temp_max": 15.83, | "pressure": 1013.57, | "sea_level": 1019.23, | "grnd_level": 1013.57, | "humidity": 99, | "temp_kf": 0 | }, | "weather": [ | { | "id": 501, | "main": "Rain", | "description": "moderate rain", | "icon": "10n" | } | ], | "clouds": { | "all": 92 | }, | "wind": { | "speed": 4.47, | "deg": 303.501 | }, | "rain": { | "3h": 3.25 | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-12 21:00:00" | }, | { | "dt": 1499904000, | "main": { | "temp": 13.85, | "temp_min": 13.85, | "temp_max": 13.85, | "pressure": 1016.28, | "sea_level": 1022.11, | "grnd_level": 1016.28, | "humidity": 97, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | 
"description": "light rain", | "icon": "10n" | } | ], | "clouds": { | "all": 92 | }, | "wind": { | "speed": 10.06, | "deg": 309.007 | }, | "rain": { | "3h": 1.83 | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-13 00:00:00" | }, | { | "dt": 1499914800, | "main": { | "temp": 11.97, | "temp_min": 11.97, | "temp_max": 11.97, | "pressure": 1019.88, | "sea_level": 1025.68, | "grnd_level": 1019.88, | "humidity": 91, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10n" | } | ], | "clouds": { | "all": 76 | }, | "wind": { | "speed": 8.52, | "deg": 298.001 | }, | "rain": { | "3h": 0.29 | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-13 03:00:00" | }, | { | "dt": 1499925600, | "main": { | "temp": 13.11, | "temp_min": 13.11, | "temp_max": 13.11, | "pressure": 1022.08, | "sea_level": 1027.91, | "grnd_level": 1022.08, | "humidity": 93, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 12 | }, | "wind": { | "speed": 7.67, | "deg": 298 | }, | "rain": { | "3h": 0.3 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-13 06:00:00" | }, | { | "dt": 1499936400, | "main": { | "temp": 16.57, | "temp_min": 16.57, | "temp_max": 16.57, | "pressure": 1023.66, | "sea_level": 1029.38, | "grnd_level": 1023.66, | "humidity": 91, | "temp_kf": 0 | }, | "weather": [ | { | "id": 801, | "main": "Clouds", | "description": "few clouds", | "icon": "02d" | } | ], | "clouds": { | "all": 20 | }, | "wind": { | "speed": 7.52, | "deg": 300 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-13 09:00:00" | }, | { | "dt": 1499947200, | "main": { | "temp": 19.3, | "temp_min": 19.3, | "temp_max": 19.3, | "pressure": 1024.57, | "sea_level": 1030.29, | "grnd_level": 1024.57, | "humidity": 87, | "temp_kf": 0 | }, | "weather": [ | { | "id": 801, | "main": "Clouds", | "description": "few clouds", | "icon": "02d" | } | ], | "clouds": { | "all": 12 | }, | "wind": { | "speed": 7.01, | "deg": 309.003 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-13 12:00:00" | }, | { | "dt": 1499958000, | "main": { | "temp": 20.01, | "temp_min": 20.01, | "temp_max": 20.01, | "pressure": 1024.92, | "sea_level": 1030.61, | "grnd_level": 1024.92, | "humidity": 81, | "temp_kf": 0 | }, | "weather": [ | { | "id": 801, | "main": "Clouds", | "description": "few clouds", | "icon": "02d" | } | ], | "clouds": { | "all": 12 | }, | "wind": { | "speed": 5.82, | "deg": 319.001 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-13 15:00:00" | }, | { | "dt": 1499968800, | "main": { | "temp": 18.47, | "temp_min": 18.47, | "temp_max": 18.47, | "pressure": 1025.2, | "sea_level": 1030.94, | "grnd_level": 1025.2, | "humidity": 80, | "temp_kf": 0 | }, | "weather": [ | { | "id": 801, | "main": "Clouds", | "description": "few clouds", | "icon": "02d" | } | ], | "clouds": { | "all": 20 | }, | "wind": { | "speed": 3.87, | "deg": 326.001 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-13 18:00:00" | }, | { | "dt": 1499979600, | "main": { | "temp": 14.27, | "temp_min": 14.27, | "temp_max": 14.27, | "pressure": 1025.49, | "sea_level": 1031.29, | "grnd_level": 1025.49, | "humidity": 88, | "temp_kf": 0 | }, | "weather": [ | { | "id": 801, | "main": "Clouds", | "description": "few clouds", | "icon": "02n" | } | ], | "clouds": { | "all": 20 | }, | "wind": { | "speed": 2.57, | "deg": 317.502 | }, | "rain": { | | }, | "sys": 
{ | "pod": "n" | }, | "dt_txt": "2017-07-13 21:00:00" | }, | { | "dt": 1499990400, | "main": { | "temp": 12.73, | "temp_min": 12.73, | "temp_max": 12.73, | "pressure": 1025.26, | "sea_level": 1031.06, | "grnd_level": 1025.26, | "humidity": 85, | "temp_kf": 0 | }, | "weather": [ | { | "id": 803, | "main": "Clouds", | "description": "broken clouds", | "icon": "04n" | } | ], | "clouds": { | "all": 76 | }, | "wind": { | "speed": 1.42, | "deg": 295.501 | }, | "rain": { | | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-14 00:00:00" | }, | { | "dt": 1500001200, | "main": { | "temp": 13.32, | "temp_min": 13.32, | "temp_max": 13.32, | "pressure": 1024.65, | "sea_level": 1030.49, | "grnd_level": 1024.65, | "humidity": 83, | "temp_kf": 0 | }, | "weather": [ | { | "id": 804, | "main": "Clouds", | "description": "overcast clouds", | "icon": "04n" | } | ], | "clouds": { | "all": 88 | }, | "wind": { | "speed": 1.56, | "deg": 276.003 | }, | "rain": { | | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-14 03:00:00" | }, | { | "dt": 1500012000, | "main": { | "temp": 15.4, | "temp_min": 15.4, | "temp_max": 15.4, | "pressure": 1024.36, | "sea_level": 1030.1, | "grnd_level": 1024.36, | "humidity": 93, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 64 | }, | "wind": { | "speed": 2.01, | "deg": 317.003 | }, | "rain": { | "3h": 0.010000000000002 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-14 06:00:00" | }, | { | "dt": 1500022800, | "main": { | "temp": 17.29, | "temp_min": 17.29, | "temp_max": 17.29, | "pressure": 1024.05, | "sea_level": 1029.68, | "grnd_level": 1024.05, | "humidity": 93, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10d" | } | ], | "clouds": { | "all": 56 | }, | "wind": { | "speed": 2.07, | "deg": 304.001 | }, | "rain": { | "3h": 0.009999999999998 | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-14 09:00:00" | }, | { | "dt": 1500033600, | "main": { | "temp": 18.93, | "temp_min": 18.93, | "temp_max": 18.93, | "pressure": 1023.06, | "sea_level": 1028.69, | "grnd_level": 1023.06, | "humidity": 92, | "temp_kf": 0 | }, | "weather": [ | { | "id": 802, | "main": "Clouds", | "description": "scattered clouds", | "icon": "03d" | } | ], | "clouds": { | "all": 36 | }, | "wind": { | "speed": 2.11, | "deg": 303 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-14 12:00:00" | }, | { | "dt": 1500044400, | "main": { | "temp": 19.77, | "temp_min": 19.77, | "temp_max": 19.77, | "pressure": 1022.34, | "sea_level": 1027.98, | "grnd_level": 1022.34, | "humidity": 77, | "temp_kf": 0 | }, | "weather": [ | { | "id": 803, | "main": "Clouds", | "description": "broken clouds", | "icon": "04d" | } | ], | "clouds": { | "all": 64 | }, | "wind": { | "speed": 2.32, | "deg": 329.001 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-14 15:00:00" | }, | { | "dt": 1500055200, | "main": { | "temp": 18.59, | "temp_min": 18.59, | "temp_max": 18.59, | "pressure": 1022.73, | "sea_level": 1028.47, | "grnd_level": 1022.73, | "humidity": 71, | "temp_kf": 0 | }, | "weather": [ | { | "id": 804, | "main": "Clouds", | "description": "overcast clouds", | "icon": "04d" | } | ], | "clouds": { | "all": 88 | }, | "wind": { | "speed": 2.42, | "deg": 346.504 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-14 18:00:00" | }, | { | "dt": 1500066000, | "main": { | "temp": 16.11, | 
"temp_min": 16.11, | "temp_max": 16.11, | "pressure": 1024, | "sea_level": 1029.73, | "grnd_level": 1024, | "humidity": 72, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10n" | } | ], | "clouds": { | "all": 64 | }, | "wind": { | "speed": 3.43, | "deg": 11.0028 | }, | "rain": { | "3h": 0.09 | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-14 21:00:00" | }, | { | "dt": 1500076800, | "main": { | "temp": 12.7, | "temp_min": 12.7, | "temp_max": 12.7, | "pressure": 1024.62, | "sea_level": 1030.39, | "grnd_level": 1024.62, | "humidity": 91, | "temp_kf": 0 | }, | "weather": [ | { | "id": 500, | "main": "Rain", | "description": "light rain", | "icon": "10n" | } | ], | "clouds": { | "all": 20 | }, | "wind": { | "speed": 2.51, | "deg": 334.502 | }, | "rain": { | "3h": 0.07 | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-15 00:00:00" | }, | { | "dt": 1500087600, | "main": { | "temp": 11.4, | "temp_min": 11.4, | "temp_max": 11.4, | "pressure": 1025.13, | "sea_level": 1030.97, | "grnd_level": 1025.13, | "humidity": 88, | "temp_kf": 0 | }, | "weather": [ | { | "id": 803, | "main": "Clouds", | "description": "broken clouds", | "icon": "04n" | } | ], | "clouds": { | "all": 56 | }, | "wind": { | "speed": 1.75, | "deg": 303.506 | }, | "rain": { | | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-15 03:00:00" | }, | { | "dt": 1500098400, | "main": { | "temp": 14.55, | "temp_min": 14.55, | "temp_max": 14.55, | "pressure": 1025.81, | "sea_level": 1031.63, | "grnd_level": 1025.81, | "humidity": 85, | "temp_kf": 0 | }, | "weather": [ | { | "id": 801, | "main": "Clouds", | "description": "few clouds", | "icon": "02d" | } | ], | "clouds": { | "all": 12 | }, | "wind": { | "speed": 3.32, | "deg": 299.503 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-15 06:00:00" | }, | { | "dt": 1500109200, | "main": { | "temp": 18.45, | "temp_min": 18.45, | "temp_max": 18.45, | "pressure": 1026.3, | "sea_level": 1031.98, | "grnd_level": 1026.3, | "humidity": 81, | "temp_kf": 0 | }, | "weather": [ | { | "id": 800, | "main": "Clear", | "description": "clear sky", | "icon": "01d" | } | ], | "clouds": { | "all": 0 | }, | "wind": { | "speed": 4.16, | "deg": 297.502 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-15 09:00:00" | }, | { | "dt": 1500120000, | "main": { | "temp": 20.25, | "temp_min": 20.25, | "temp_max": 20.25, | "pressure": 1026.49, | "sea_level": 1032.22, | "grnd_level": 1026.49, | "humidity": 76, | "temp_kf": 0 | }, | "weather": [ | { | "id": 800, | "main": "Clear", | "description": "clear sky", | "icon": "02d" | } | ], | "clouds": { | "all": 8 | }, | "wind": { | "speed": 4.16, | "deg": 301.003 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-15 12:00:00" | }, | { | "dt": 1500130800, | "main": { | "temp": 20.61, | "temp_min": 20.61, | "temp_max": 20.61, | "pressure": 1026.18, | "sea_level": 1031.92, | "grnd_level": 1026.18, | "humidity": 69, | "temp_kf": 0 | }, | "weather": [ | { | "id": 800, | "main": "Clear", | "description": "clear sky", | "icon": "02d" | } | ], | "clouds": { | "all": 8 | }, | "wind": { | "speed": 4.21, | "deg": 293.002 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-15 15:00:00" | }, | { | "dt": 1500141600, | "main": { | "temp": 19.07, | "temp_min": 19.07, | "temp_max": 19.07, | "pressure": 1026.17, | "sea_level": 1031.88, | "grnd_level": 1026.17, | "humidity": 68, | "temp_kf": 0 | }, | "weather": [ | { | "id": 800, 
| "main": "Clear", | "description": "clear sky", | "icon": "01d" | } | ], | "clouds": { | "all": 0 | }, | "wind": { | "speed": 3.62, | "deg": 273.506 | }, | "rain": { | | }, | "sys": { | "pod": "d" | }, | "dt_txt": "2017-07-15 18:00:00" | }, | { | "dt": 1500152400, | "main": { | "temp": 15.7, | "temp_min": 15.7, | "temp_max": 15.7, | "pressure": 1026.64, | "sea_level": 1032.3, | "grnd_level": 1026.64, | "humidity": 70, | "temp_kf": 0 | }, | "weather": [ | { | "id": 804, | "main": "Clouds", | "description": "overcast clouds", | "icon": "04n" | } | ], | "clouds": { | "all": 88 | }, | "wind": { | "speed": 3.56, | "deg": 260.501 | }, | "rain": { | | }, | "sys": { | "pod": "n" | }, | "dt_txt": "2017-07-15 21:00:00" | } | ], | "city": { | "id": 2950159, | "name": "Berlin", | "coord": { | "lat": 52.5244, | "lon": 13.4105 | }, | "country": "DE" | } |} """.stripMargin val githubTokenResponse: String = """ |{ | "access_token": "12345551233123a", | "scope": "user%3Aemail", | "token_type": "bearer" |} """.stripMargin val githubUserResponse: String = """ |{ | "login": "poweruser", | "id": 12313, | "avatar_url": "https://avatars0.githubusercontent.com/u/116554653", | "gravatar_id": "", | "url": "https://api.github.com/users/poweruser", | "html_url": "https://github.com/poweruser", | "followers_url": "https://api.github.com/users/poseruser/followers", | "following_url": "https://api.github.com/users/poseruser/following{/other_user}", | "gists_url": "https://api.github.com/users/poseruser/gists{/gist_id}", | "starred_url": "https://api.github.com/users/poseruser/starred{/owner}{/repo}", | "subscriptions_url": "https://api.github.com/users/poseruser/subscriptions", | "organizations_url": "https://api.github.com/users/poseruser/orgs", | "repos_url": "https://api.github.com/users/poweruser/repos", | "events_url": "https://api.github.com/users/poweruser/events{/privacy}", | "received_events_url": "https://api.github.com/users/poseruser/received_events", | "type": "User", | "site_admin": false, | "name": "Power User", | "company": null, | "blog": "", | "location": "Berlin, Germany", | "email": null, | "hireable": true, | "bio": null, | "public_repos": 36, | "public_gists": 0, | "followers": 11, | "following": 29, | "created_at": "2014-01-31T15:05:01Z", | "updated_at": "2017-07-21T19:17:41Z" |} """.stripMargin }
malaman/scala-weather-app
api/test/Fixtures.scala
Scala
mit
42,362
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.streaming import java.util.Locale import scala.collection.JavaConverters._ import org.apache.spark.annotation.InterfaceStability import org.apache.spark.internal.Logging import org.apache.spark.sql.{AnalysisException, DataFrame, Dataset, SparkSession} import org.apache.spark.sql.execution.command.DDLUtils import org.apache.spark.sql.execution.datasources.DataSource import org.apache.spark.sql.execution.streaming.StreamingRelation import org.apache.spark.sql.types.StructType /** * Interface used to load a streaming `Dataset` from external storage systems (e.g. file systems, * key-value stores, etc). Use `SparkSession.readStream` to access this. * * @since 2.0.0 */ @InterfaceStability.Evolving final class DataStreamReader private[sql](sparkSession: SparkSession) extends Logging { /** * Specifies the input data source format. * * @since 2.0.0 */ def format(source: String): DataStreamReader = { this.source = source this } /** * Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema * automatically from data. By specifying the schema here, the underlying data source can * skip the schema inference step, and thus speed up data loading. * * @since 2.0.0 */ def schema(schema: StructType): DataStreamReader = { this.userSpecifiedSchema = Option(schema) this } /** * Adds an input option for the underlying data source. * * You can set the following option(s): * <ul> * <li>`timeZone` (default session local timezone): sets the string that indicates a timezone * to be used to parse timestamps in the JSON/CSV datasources or partition values.</li> * </ul> * * @since 2.0.0 */ def option(key: String, value: String): DataStreamReader = { this.extraOptions += (key -> value) this } /** * Adds an input option for the underlying data source. * * @since 2.0.0 */ def option(key: String, value: Boolean): DataStreamReader = option(key, value.toString) /** * Adds an input option for the underlying data source. * * @since 2.0.0 */ def option(key: String, value: Long): DataStreamReader = option(key, value.toString) /** * Adds an input option for the underlying data source. * * @since 2.0.0 */ def option(key: String, value: Double): DataStreamReader = option(key, value.toString) /** * (Scala-specific) Adds input options for the underlying data source. * * You can set the following option(s): * <ul> * <li>`timeZone` (default session local timezone): sets the string that indicates a timezone * to be used to parse timestamps in the JSON/CSV datasources or partition values.</li> * </ul> * * @since 2.0.0 */ def options(options: scala.collection.Map[String, String]): DataStreamReader = { this.extraOptions ++= options this } /** * Adds input options for the underlying data source. 
* * You can set the following option(s): * <ul> * <li>`timeZone` (default session local timezone): sets the string that indicates a timezone * to be used to parse timestamps in the JSON/CSV datasources or partition values.</li> * </ul> * * @since 2.0.0 */ def options(options: java.util.Map[String, String]): DataStreamReader = { this.options(options.asScala) this } /** * Loads input data stream in as a `DataFrame`, for data streams that don't require a path * (e.g. external key-value stores). * * @since 2.0.0 */ def load(): DataFrame = { if (source.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) { throw new AnalysisException("Hive data source can only be used with tables, you can not " + "read files of Hive data source directly.") } val dataSource = DataSource( sparkSession, userSpecifiedSchema = userSpecifiedSchema, className = source, options = extraOptions.toMap) Dataset.ofRows(sparkSession, StreamingRelation(dataSource)) } /** * Loads input in as a `DataFrame`, for data streams that read from some path. * * @since 2.0.0 */ def load(path: String): DataFrame = { option("path", path).load() } /** * Loads a JSON file stream and returns the results as a `DataFrame`. * * <a href="http://jsonlines.org/">JSON Lines</a> (newline-delimited JSON) is supported by * default. For JSON (one record per file), set the `wholeFile` option to true. * * This function goes through the input once to determine the input schema. If you know the * schema in advance, use the version that specifies the schema to avoid the extra scan. * * You can set the following JSON-specific options to deal with non-standard JSON files: * <ul> * <li>`maxFilesPerTrigger` (default: no max limit): sets the maximum number of new files to be * considered in every trigger.</li> * <li>`primitivesAsString` (default `false`): infers all primitive values as a string type</li> * <li>`prefersDecimal` (default `false`): infers all floating-point values as a decimal * type. If the values do not fit in decimal, then it infers them as doubles.</li> * <li>`allowComments` (default `false`): ignores Java/C++ style comment in JSON records</li> * <li>`allowUnquotedFieldNames` (default `false`): allows unquoted JSON field names</li> * <li>`allowSingleQuotes` (default `true`): allows single quotes in addition to double quotes * </li> * <li>`allowNumericLeadingZeros` (default `false`): allows leading zeros in numbers * (e.g. 00012)</li> * <li>`allowBackslashEscapingAnyCharacter` (default `false`): allows accepting quoting of all * character using backslash quoting mechanism</li> * <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records * during parsing. * <ul> * <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts * the malformed string into a field configured by `columnNameOfCorruptRecord`. To keep * corrupt records, an user can set a string type field named `columnNameOfCorruptRecord` * in an user-defined schema. If a schema does not have the field, it drops corrupt records * during parsing. When inferring a schema, it implicitly adds a `columnNameOfCorruptRecord` * field in an output schema.</li> * <li>`DROPMALFORMED` : ignores the whole corrupted records.</li> * <li>`FAILFAST` : throws an exception when it meets corrupted records.</li> * </ul> * </li> * <li>`columnNameOfCorruptRecord` (default is the value specified in * `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string * created by `PERMISSIVE` mode. 
This overrides `spark.sql.columnNameOfCorruptRecord`.</li> * <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format. * Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to * date type.</li> * <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that * indicates a timestamp format. Custom date formats follow the formats at * `java.text.SimpleDateFormat`. This applies to timestamp type.</li> * <li>`wholeFile` (default `false`): parse one record, which may span multiple lines, * per file</li> * </ul> * * @since 2.0.0 */ def json(path: String): DataFrame = format("json").load(path) /** * Loads a CSV file stream and returns the result as a `DataFrame`. * * This function will go through the input once to determine the input schema if `inferSchema` * is enabled. To avoid going through the entire data once, disable `inferSchema` option or * specify the schema explicitly using `schema`. * * You can set the following CSV-specific options to deal with CSV files: * <ul> * <li>`maxFilesPerTrigger` (default: no max limit): sets the maximum number of new files to be * considered in every trigger.</li> * <li>`sep` (default `,`): sets the single character as a separator for each * field and value.</li> * <li>`encoding` (default `UTF-8`): decodes the CSV files by the given encoding * type.</li> * <li>`quote` (default `"`): sets the single character used for escaping quoted values where * the separator can be part of the value. If you would like to turn off quotations, you need to * set not `null` but an empty string. This behaviour is different form * `com.databricks.spark.csv`.</li> * <li>`escape` (default `\`): sets the single character used for escaping quotes inside * an already quoted value.</li> * <li>`comment` (default empty string): sets the single character used for skipping lines * beginning with this character. By default, it is disabled.</li> * <li>`header` (default `false`): uses the first line as names of columns.</li> * <li>`inferSchema` (default `false`): infers the input schema automatically from data. It * requires one extra pass over the data.</li> * <li>`ignoreLeadingWhiteSpace` (default `false`): a flag indicating whether or not leading * whitespaces from values being read should be skipped.</li> * <li>`ignoreTrailingWhiteSpace` (default `false`): a flag indicating whether or not trailing * whitespaces from values being read should be skipped.</li> * <li>`nullValue` (default empty string): sets the string representation of a null value. Since * 2.0.1, this applies to all supported types including the string type.</li> * <li>`nanValue` (default `NaN`): sets the string representation of a non-number" value.</li> * <li>`positiveInf` (default `Inf`): sets the string representation of a positive infinity * value.</li> * <li>`negativeInf` (default `-Inf`): sets the string representation of a negative infinity * value.</li> * <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format. * Custom date formats follow the formats at `java.text.SimpleDateFormat`. This applies to * date type.</li> * <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that * indicates a timestamp format. Custom date formats follow the formats at * `java.text.SimpleDateFormat`. 
This applies to timestamp type.</li> * <li>`maxColumns` (default `20480`): defines a hard limit of how many columns * a record can have.</li> * <li>`maxCharsPerColumn` (default `-1`): defines the maximum number of characters allowed * for any given value being read. By default, it is -1 meaning unlimited length</li> * <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records * during parsing. It supports the following case-insensitive modes. * <ul> * <li>`PERMISSIVE` : sets other fields to `null` when it meets a corrupted record, and puts * the malformed string into a field configured by `columnNameOfCorruptRecord`. To keep * corrupt records, an user can set a string type field named `columnNameOfCorruptRecord` * in an user-defined schema. If a schema does not have the field, it drops corrupt records * during parsing. When a length of parsed CSV tokens is shorter than an expected length * of a schema, it sets `null` for extra fields.</li> * <li>`DROPMALFORMED` : ignores the whole corrupted records.</li> * <li>`FAILFAST` : throws an exception when it meets corrupted records.</li> * </ul> * </li> * <li>`columnNameOfCorruptRecord` (default is the value specified in * `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string * created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li> * <li>`wholeFile` (default `false`): parse one record, which may span multiple lines.</li> * </ul> * * @since 2.0.0 */ def csv(path: String): DataFrame = format("csv").load(path) /** * Loads a Parquet file stream, returning the result as a `DataFrame`. * * You can set the following Parquet-specific option(s) for reading Parquet files: * <ul> * <li>`maxFilesPerTrigger` (default: no max limit): sets the maximum number of new files to be * considered in every trigger.</li> * <li>`mergeSchema` (default is the value specified in `spark.sql.parquet.mergeSchema`): sets * whether we should merge schemas collected from all * Parquet part-files. This will override * `spark.sql.parquet.mergeSchema`.</li> * </ul> * * @since 2.0.0 */ def parquet(path: String): DataFrame = { format("parquet").load(path) } /** * Loads text files and returns a `DataFrame` whose schema starts with a string column named * "value", and followed by partitioned columns if there are any. * * Each line in the text files is a new row in the resulting DataFrame. For example: * {{{ * // Scala: * spark.readStream.text("/path/to/directory/") * * // Java: * spark.readStream().text("/path/to/directory/") * }}} * * You can set the following text-specific options to deal with text files: * <ul> * <li>`maxFilesPerTrigger` (default: no max limit): sets the maximum number of new files to be * considered in every trigger.</li> * </ul> * * @since 2.0.0 */ def text(path: String): DataFrame = format("text").load(path) /** * Loads text file(s) and returns a `Dataset` of String. The underlying schema of the Dataset * contains a single string column named "value". * * If the directory structure of the text files contains partitioning information, those are * ignored in the resulting Dataset. To include partitioning information as columns, use `text`. * * Each line in the text file is a new element in the resulting Dataset. 
For example: * {{{ * // Scala: * spark.readStream.textFile("/path/to/spark/README.md") * * // Java: * spark.readStream().textFile("/path/to/spark/README.md") * }}} * * You can set the following text-specific options to deal with text files: * <ul> * <li>`maxFilesPerTrigger` (default: no max limit): sets the maximum number of new files to be * considered in every trigger.</li> * </ul> * * @param path input path * @since 2.1.0 */ def textFile(path: String): Dataset[String] = { if (userSpecifiedSchema.nonEmpty) { throw new AnalysisException("User specified schema not supported with `textFile`") } text(path).select("value").as[String](sparkSession.implicits.newStringEncoder) } /////////////////////////////////////////////////////////////////////////////////////// // Builder pattern config options /////////////////////////////////////////////////////////////////////////////////////// private var source: String = sparkSession.sessionState.conf.defaultDataSourceName private var userSpecifiedSchema: Option[StructType] = None private var extraOptions = new scala.collection.mutable.HashMap[String, String] }
mzl9039/spark
sql/core/src/main/scala/org/apache/spark/sql/streaming/DataStreamReader.scala
Scala
apache-2.0
15,883
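The DataStreamReader scaladoc above lists the source options but gives no end-to-end example. A minimal usage sketch follows; the session name, schema fields, trigger setting and input directory are illustrative assumptions, not part of the Spark sources above.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.types.{StringType, StructType, TimestampType}

object ReadStreamSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("read-stream-sketch").getOrCreate()

    // Supplying the schema up front skips inference, as the scaladoc recommends.
    val schema = new StructType()
      .add("id", StringType)
      .add("ts", TimestampType)

    val events = spark.readStream
      .format("json")
      .schema(schema)
      .option("maxFilesPerTrigger", "10")
      .load("/tmp/input-json") // hypothetical input directory

    events.printSchema()
  }
}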
import scala.quoted.*

object Macro {

  inline def mac(): String = ${ macImpl() }

  def macImpl()(using Quotes): Expr[String] =
    '{(x: String) => "anything"} match
      case '{ (in: String) => ($out: tpe2) } => Expr(out.toString)
      case _ => ???

}
dotty-staging/dotty
tests/run-macros/i8746/Macro_1.scala
Scala
apache-2.0
257
/* * Copyright (c) 2013 Habla Computing * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.hablapps.bigbrothapp.test import language.reflectiveCalls import org.scalatest.FunSpec import org.scalatest.BeforeAndAfter import org.scalatest.matchers.ShouldMatchers import org.hablapps.updatable._ import org.hablapps.react import org.hablapps.speech import speech._ import org.hablapps.bigbrothapp._ class Nominating(sys: speech.System with BigBrothappProgram with react.Debug) extends FunSpec with ShouldMatchers with BeforeAndAfter { describe("BigBrothapp") { import sys._ val Output( leoAtHouse: $[Housemate], rapAtHouse: $[Housemate], redAtHouse: $[Housemate], shrAtHouse: $[Housemate], leoNomination: $[Nomination], rapNomination: $[Nomination], redNomination: $[Nomination], shrNomination: $[Nomination]) = reset(for { sea <- Initiate(BigBrothapp()) hou <- Initiate(House(), sea); aud <- Initiate(Audience(), sea); leonardo <- Play(Contestant().name += "leonardo", sea) raphael <- Play(Contestant().name += "raphael", sea) redRanger <- Play(Contestant().name += "redRanger", sea) shredder <- Play(Contestant().name += "shredder", sea) brotha <- Play(BigBrotha(), hou) leoAtHouse <- Play(Housemate().name += "leonardo", leonardo, hou) rapAtHouse <- Play(Housemate().name += "raphael", raphael, hou) redAtHouse <- Play(Housemate().name += "redRanger", redRanger, hou) shrAtHouse <- Play(Housemate().name += "shredder", shredder, hou) _ <- Play(Viewer(), aud) _ <- Play(Viewer(), aud) _ <- Play(Viewer(), aud) su <- Say( EvictionSetUp(__new = Option(Eviction().substatus += Nominating)), brotha, hou) _ <- Done(su, PERFORMED) evict <- Initiate(Eviction().substatus += Nominating, hou) leoNomination <- Initiate(Nomination().name += "leonardo", evict) _ <- Play3(Nominee().name += "leonardo", leoAtHouse, leoNomination) rapNomination <- Initiate(Nomination().name += "raphael", evict) _ <- Play3(Nominee().name += "raphael", rapAtHouse, rapNomination) redNomination <- Initiate(Nomination().name += "redRanger", evict) _ <- Play3(Nominee().name += "redRanger", redAtHouse, redNomination) shrNomination <- Initiate(Nomination().name += "shredder", evict) _ <- Play3(Nominee().name += "shredder", shrAtHouse, shrNomination) } yield ( leoAtHouse, rapAtHouse, redAtHouse, shrAtHouse, leoNomination, rapNomination, redNomination, shrNomination)) it("should allow the housemates to nominate each other") { attempt(Say( Nominate( __new = Option(Nominator().name += "leonardo"), _reason = "He is smelly"), leoAtHouse, shrNomination)) attempt(Say( Nominate( __new = Option(Nominator().name += "raphael"), _reason = "Because yes"), rapAtHouse, shrNomination)) attempt(Say( Nominate( __new = Option(Nominator().name += "redRanger"), _reason = "Because of an agreement"), redAtHouse, shrNomination)) val NextState(obtained) = attempt(Say( Nominate( __new = Option(Nominator().name += "shredder"), _reason = "I hate red"), shrAtHouse, redNomination)) reset(for { sea <- Initiate(BigBrothapp()) hou <- Initiate(House(), sea) aud <- Initiate(Audience(), sea) leonardo 
<- Play(Contestant().name += "leonardo", sea) raphael <- Play(Contestant().name += "raphael", sea) redRanger <- Play(Contestant().name += "redRanger", sea) shredder <- Play(Contestant().name += "shredder", sea) brotha <- Play(BigBrotha(), hou) leoAtHouse <- Play(Housemate().name += "leonardo", leonardo, hou) rapAtHouse <- Play(Housemate().name += "raphael", raphael, hou) redAtHouse <- Play(Housemate().name += "redRanger", redRanger, hou) shrAtHouse <- Play(Housemate().name += "shredder", shredder, hou) _ <- Play(Viewer(), aud) _ <- Play(Viewer(), aud) _ <- Play(Viewer(), aud) su <- Say( EvictionSetUp(__new = Option(Eviction().substatus += Nominating)), brotha, hou) _ <- Done(su, PERFORMED) evict <- Initiate(Eviction().substatus += Nominating, hou) leoNomination <- Initiate(Nomination().name += "leonardo", evict) leoAsNominee <- Play3(Nominee().name += "leonardo", leoAtHouse, leoNomination) rapNomination <- Initiate(Nomination().name += "raphael", evict) rapAsNominee <- Play3(Nominee().name += "raphael", rapAtHouse, rapNomination) redNomination <- Initiate(Nomination().name += "redRanger", evict) redAsNominee <- Play3(Nominee().name += "redRanger", redAtHouse, redNomination) shrNomination <- Initiate(Nomination().name += "shredder", evict) shrAsNominee <- Play3(Nominee().name += "shredder", shrAtHouse, shrNomination) _ <- Abandon(leoAsNominee) _ <- Abandon(rapAsNominee) _ <- Finish(leoNomination) _ <- Finish(rapNomination) _ <- Let(evict.substatus += Polling) } yield ( leoAtHouse, rapAtHouse, redAtHouse, shrAtHouse, leoNomination, rapNomination, redNomination, shrNomination)) obtained should be(getState) } } }
hablapps/app-bigbrothapp
src/test/scala/org/hablapps/bigbrothapp/test/Nominating.scala
Scala
apache-2.0
6,164
package recfun

import common._

object Main {
  def main(args: Array[String]) {
    println("Pascal's Triangle")
    for (row <- 0 to 10) {
      for (col <- 0 to row) print(pascal(col, row) + " ")
      println()
    }
  }

  def min(xs: List[Int]): Int =
    try {
      if (xs.head < min(xs.tail)) xs.head else min(xs.tail)
    } catch {
      case e: java.util.NoSuchElementException => xs.head
    }

  /**
   * Exercise 1
   */
  def pascal(c: Int, r: Int): Int =
    if (r < 0 || c < 0 || c > r) throw new java.util.NoSuchElementException
    else if (r == 0 || c == 0 || c == r) 1
    else pascal(c - 1, r - 1) + pascal(c, r - 1)

  /**
   * Exercise 2
   */
  def balance(chars: List[Char]): Boolean = innerBalance(chars, 0)

  def innerBalance(chars: List[Char], opens: Int): Boolean =
    if (chars.isEmpty && opens == 0) true
    else if ((chars.isEmpty && opens > 0) || opens < 0) false
    else if (chars.head == ')') innerBalance(chars.tail, opens-1)
    else if (chars.head == '(') innerBalance(chars.tail, opens+1)
    else innerBalance(chars.tail, opens)

  /**
   * Exercise 3
   */
  def countChange(money: Int, coins: List[Int]): Int =
    if (money <= 0 || coins.isEmpty) 0
    else innerCountChange(money, coins, 0)

  def innerCountChange(money: Int, coins: List[Int], value: Int): Int =
    if (coins.isEmpty) 0
    else if (money == 0) 1
    else if (money < coins.head) innerCountChange(money, coins.tail, value)
    else innerCountChange(money - coins.head, coins, value) + innerCountChange(money, coins.tail, value+1)
}
tuohis/functional_programming
recfun/src/main/scala/recfun/Main.scala
Scala
mit
1,544
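A few illustrative calls against the recfun.Main functions above; the expected results in the comments follow directly from the definitions shown, and the wrapper object is hypothetical.

object RecfunUsageSketch {
  import recfun.Main._

  def demo(): Unit = {
    println(pascal(1, 2))                                   // 2 (middle of row 2: 1 2 1)
    println(balance("(if (zero? x) max (/ 1 x))".toList))   // true
    println(balance(":-)".toList))                          // false, ')' with no matching '('
    println(countChange(4, List(1, 2)))                     // 3 (1+1+1+1, 1+1+2, 2+2)
  }
}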
package com.acme.sausage

import java.time.Instant

case class S3Location(bucket: String, name: String)

sealed trait Event {
  val when: Instant = Instant.now()
}

case class Requested() extends Event
case class Fetched(source: S3Location, directory: String) extends Event
case class Ran(command: Seq[String], cwd: String, output: String) extends Event
case class Published(destination: S3Location) extends Event
case class Cleaned() extends Event
case class Failed(message: String, reason: String, cleanup: Boolean) extends Event

object Failed {
  def apply(message: String, exception: Exception): Failed =
    Failed(message, exception.getMessage, true)

  def apply(message: String, exception: Exception, cleanup: Boolean): Failed =
    Failed(message, exception.getMessage, cleanup)
}
rizsotto/Sausage
src/main/scala/com/acme/sausage/Event.scala
Scala
bsd-3-clause
782
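A small, hypothetical usage sketch for the event model above, showing how the Failed companion's apply overloads wrap an exception message; the location, directory and message values are made up for illustration.

import com.acme.sausage._

object EventSketch {
  def main(args: Array[String]): Unit = {
    val fetched = Fetched(S3Location("builds", "input.tar.gz"), "/tmp/work")
    val failed  = Failed("fetch failed", new RuntimeException("timeout")) // cleanup defaults to true

    println(fetched.when)     // every Event captures its creation Instant
    println(failed.cleanup)   // true
  }
}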
package tomby.scala.bricks

case class Position(x: Int, y: Int) {

  def neighbors: Set[Position] = Set(up, down, right, left)

  def up: Position = {
    Position(x, y + 1)
  }

  def down: Position = {
    Position(x, y - 1)
  }

  def right: Position = {
    Position(x + 1, y)
  }

  def left: Position = {
    Position(x - 1, y)
  }

  def distance(other: Position): Double = {
    val diffx = x - other.x
    val diffy = y - other.y
    math.sqrt(math.pow(diffx, 2) + math.pow(diffy, 2))
  }

  def adjacent(other: Position): Boolean = {
    distance(other) == 1.0
  }
}
tonivade/scala-bricks
src/main/scala/tomby/scala/bricks/Position.scala
Scala
mit
586
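An illustrative use of the Position case class above; the printed values follow from the definitions shown, and the wrapper object is hypothetical.

import tomby.scala.bricks.Position

object PositionSketch {
  def main(args: Array[String]): Unit = {
    val origin = Position(0, 0)

    println(origin.neighbors)                 // the four orthogonal neighbours of (0, 0)
    println(origin.distance(Position(3, 4)))  // 5.0, the Euclidean distance
    println(origin.adjacent(Position(1, 0)))  // true, distance is exactly 1.0
    println(origin.adjacent(Position(1, 1)))  // false, diagonal distance is sqrt(2)
  }
}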
/*
 * Copyright 2011-2014 Chris de Vreeze
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package nl.ebpi.yaidom.convert

/**
 * Conversions between yaidom nodes and Scala XML nodes.
 *
 * These conversions are handy when one wants to use XML literals (as offered by standard Scala XML) in combination with yaidom.
 *
 * Example usage:
 * {{{
 * val scalaXmlElem = <a xmlns="http://a"><b><c>test</c></b></a>
 *
 * val elem = ScalaXmlConversions.convertToElem(scalaXmlElem)
 *
 * useImmutableElem(elem)
 * }}}
 *
 * See [[nl.ebpi.yaidom.convert.YaidomToScalaXmlConversions]] and in particular [[nl.ebpi.yaidom.convert.ScalaXmlToYaidomConversions]]
 * for some pitfalls and peculiarities when using these conversions.
 *
 * @author Chris de Vreeze
 */
object ScalaXmlConversions extends YaidomToScalaXmlConversions with ScalaXmlToYaidomConversions
EBPI/yaidom
src/main/scala/nl/ebpi/yaidom/convert/ScalaXmlConversions.scala
Scala
apache-2.0
1,357
import sbt._, Keys._
import sbt.internal.SessionSettings

object Common {
  lazy val k1 = taskKey[Unit]("")
  lazy val k2 = taskKey[Unit]("")
  lazy val k3 = taskKey[Unit]("")
  lazy val k4 = taskKey[Unit]("")

  val UpdateK1 = Command.command("UpdateK1") { st: State =>
    val ex = Project extract st
    import ex._
    val session2 = BuiltinCommands.setThis(st, ex, Seq(k1 := {}),
      """k1 := {
        |//
        |//
        |}""".stripMargin).session
    val st1 = BuiltinCommands.reapply(session2, structure, st)
    // SessionSettings.writeSettings(ex.currentRef, session2, ex.session.original, ex.structure)
    SessionSettings.saveAllSettings(st1)
  }

  val UpdateK3 = Command.command("UpdateK3") { st: State =>
    val ex = Project extract st
    import ex._
    val session2 = BuiltinCommands.setThis(st, ex, Seq(k3 := {}),
      """k3 := {
        |//
        |//
        |}""".stripMargin).session
    val st1 = BuiltinCommands.reapply(session2, structure, st)
    // SessionSettings.writeSettings(ex.currentRef, session2, ex.session.original, ex.structure)
    SessionSettings.saveAllSettings(st1)
  }
}

// vim: set ts=4 sw=4 et:
Duhemm/sbt
sbt/src/sbt-test/project/session-update-from-cmd/project/Common.scala
Scala
bsd-3-clause
1,309
/*
 * Longest Collatz Sequence
 * ========================
 * The following iterative sequence is defined for the set of positive
 * integers:
 *
 *   n → n/2 (n is even)
 *   n → 3n + 1 (n is odd)
 *
 * Using the rule above and starting with 13, we generate the following
 * sequence:
 *
 *   13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1
 *
 * It can be seen that this sequence (starting at 13 and finishing at 1)
 * contains 10 terms. Although it has not been proved yet (Collatz Problem),
 * it is thought that all starting numbers finish at 1.
 *
 * Which starting number, under one million, produces the longest chain?
 *
 * NOTE: Once the chain starts the terms are allowed to go above one million.
 */

import scala.collection.mutable.ArrayDeque

implicit class Int2Divides(d: Int) {
  def divides(n: Long): Boolean = n % d == 0
}

implicit class Long2IsEven(n: Long) {
  lazy val isEven: Boolean = 2 divides n
}

def nextCollatz(n: Long): Long = {
  if (n.isEven) {
    n/2
  } else {
    3*n + 1
  }
}

def maxChain(length: Int): Int = {
  //
  // Reuse previous computations for values less than current n.
  //
  val chains: Array[Int] = Array.fill(length) { 0 }
  chains(1) = 1

  (2 to chains.length) map { n =>
    // Calculate the chain, until we reach a value previously calculated.
    val chain = ArrayDeque[Long]()
    var current: Long = n

    // Note that chains can become larger than `length`.
    while (current >= chains.length || chains(current.toInt) == 0) {
      // Push current onto chain and update for next in Collatz chain.
      chain += current
      current = nextCollatz(current)
    }

    // Find the chain lengths for each new number in the chain. i.e. want to
    // add all newly learned partial chain lengths from this sequence instead
    // of just a single length for n.
    var length = chains(current.toInt)

    chain.reverse foreach { n =>
      length += 1
      if (n < chains.length) {
        chains(n.toInt) = length
      }
    }
  }

  val (_, maxIndex) = chains.zipWithIndex.maxBy(_._1)
  maxIndex
}

val answer = maxChain(1_000_000) // = 837,799
println(answer)
daithiocrualaoich/euler
scala/014.scala
Scala
apache-2.0
2,147
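A quick check of the script above against the example chain in its header comment, assuming the nextCollatz definition from that script is in scope; it reproduces the 10-term chain starting at 13.

val chainFrom13 = Iterator.iterate(13L)(nextCollatz).takeWhile(_ != 1).toList :+ 1L
println(chainFrom13)         // List(13, 40, 20, 10, 5, 16, 8, 4, 2, 1)
println(chainFrom13.length)  // 10 terms, as stated in the comment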
package play.boilerplate.generators.security import io.swagger.models.{Operation => SwaggerOperation} import play.boilerplate.generators.injection.InjectionProvider.Dependency import play.boilerplate.parser.model.SecurityRequirement import treehugger.forest._ import treehuggerDSL._ import scala.collection.JavaConverters._ trait SecurityProvider { import SecurityProvider._ def securitySchema: String def controllerImports: Seq[Import] def controllerParents: Seq[Type] def controllerSelfTypes: Seq[Type] def controllerDependencies: Seq[Dependency] def serviceImports: Seq[Import] def getActionSecurity(security: Seq[SecurityRequirement]): ActionSecurity } object SecurityProvider { trait ActionSecurity { def actionMethod(parser: Tree): Tree def securityValues: Seq[(String, ValDef)] def securityParams: Seq[(String, Type)] def securityDocs : Seq[(String, String)] def securityParamsDef: Iterable[ValDef] = securityParams.map { case (name, tpe) => PARAM(name, tpe).empty } } object WithoutSecurity extends ActionSecurity { override def actionMethod(parser: Tree): Tree = REF("Action.async") APPLY parser override val securityValues: Seq[(String, ValDef)] = Nil override val securityParams: Seq[(String, Type)] = Nil override val securityDocs : Seq[(String, String)] = Nil } def default: SecurityProvider = new SecurityProvider { override val securitySchema: String = "none" override val controllerSelfTypes: Seq[Type] = Nil override val controllerImports: Seq[Import] = Nil override val controllerParents: Seq[Type] = Nil override val controllerDependencies: Seq[Dependency] = Nil override val serviceImports: Seq[Import] = Nil override def getActionSecurity(security: Seq[SecurityRequirement]): ActionSecurity = WithoutSecurity } def getOperationSecurity(operation: SwaggerOperation): Option[Seq[SecurityRequirement]] = { Option(operation.getSecurity).map { security => for { auth <- security.asScala (name, scopes) <- auth.asScala } yield SecurityRequirement(name, scopes.asScala.toIndexedSeq) } } def parseAction(operation: SwaggerOperation, provider: SecurityProvider): ActionSecurity = { getOperationSecurity(operation).map(provider.getActionSecurity).getOrElse(WithoutSecurity) } case class SecurityScope(s: String) { val scope: String = s.split(':').head val values: Seq[String] = for (i <- s.split(':').tail; v <- i.split(',')) yield v } abstract class DefaultSecurity(override val securitySchema: String) extends SecurityProvider { def composeActionSecurity(scopes: Seq[SecurityScope]): ActionSecurity override def getActionSecurity(security: Seq[SecurityRequirement]): ActionSecurity = { security.find(_.schemaName == securitySchema) match { case Some(SecurityRequirement(_, scopes)) => composeActionSecurity(scopes.toIndexedSeq.map(SecurityScope.apply)) case None => WithoutSecurity } } } }
Romastyi/sbt-play-boilerplate
sbt-plugin/lib/src/main/scala/play/boilerplate/generators/security/SecurityProvider.scala
Scala
apache-2.0
3,056
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_13.scalatest3_0_8

import org.jetbrains.plugins.scala.testingSupport.scalatest.ScalaTestSelectedTests

class Scalatest2_13_3_0_8_SelectedTestsTest extends Scalatest2_13_3_0_8_Base with ScalaTestSelectedTests
JetBrains/intellij-scala
scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_13/scalatest3_0_8/Scalatest2_13_3_0_8_SelectedTestsTest.scala
Scala
apache-2.0
276
package ru.maizy.ambient7.core.data

/**
 * Copyright (c) Nikita Kovaliov, maizy.ru, 2017
 * See LICENSE.txt for details.
 */

trait Device {
  def deviceType: DeviceType.Type
  def id: String
}
maizy/ambient7
core/src/main/scala/ru/maizy/ambient7/core/data/Device.scala
Scala
apache-2.0
194
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.storage import java.io.{BufferedOutputStream, FileOutputStream, File, OutputStream} import java.nio.channels.FileChannel import org.apache.spark.Logging import org.apache.spark.serializer.{SerializerInstance, SerializationStream} import org.apache.spark.executor.ShuffleWriteMetrics import org.apache.spark.util.Utils /** * An interface for writing JVM objects to some underlying storage. This interface allows * appending data to an existing block, and can guarantee atomicity in the case of faults * as it allows the caller to revert partial writes. * * This interface does not support concurrent writes. Also, once the writer has * been opened, it cannot be reopened again. */ private[spark] abstract class BlockObjectWriter(val blockId: BlockId) extends OutputStream { def open(): BlockObjectWriter def close() def isOpen: Boolean /** * Flush the partial writes and commit them as a single atomic block. */ def commitAndClose(): Unit /** * Reverts writes that haven't been flushed yet. Callers should invoke this function * when there are runtime exceptions. This method will not throw, though it may be * unsuccessful in truncating written data. */ def revertPartialWritesAndClose() /** * Writes a key-value pair. */ def write(key: Any, value: Any) /** * Notify the writer that a record worth of bytes has been written with OutputStream#write. */ def recordWritten() /** * Returns the file segment of committed data that this Writer has written. * This is only valid after commitAndClose() has been called. */ def fileSegment(): FileSegment } /** * BlockObjectWriter which writes directly to a file on disk. Appends to the given file. */ private[spark] class DiskBlockObjectWriter( blockId: BlockId, file: File, serializerInstance: SerializerInstance, bufferSize: Int, compressStream: OutputStream => OutputStream, syncWrites: Boolean, // These write metrics concurrently shared with other active BlockObjectWriter's who // are themselves performing writes. All updates must be relative. writeMetrics: ShuffleWriteMetrics) extends BlockObjectWriter(blockId) with Logging { /** The file channel, used for repositioning / truncating the file. */ private var channel: FileChannel = null private var bs: OutputStream = null private var fos: FileOutputStream = null private var ts: TimeTrackingOutputStream = null private var objOut: SerializationStream = null private var initialized = false private var hasBeenClosed = false /** * Cursors used to represent positions in the file. * * xxxxxxxx|--------|--- | * ^ ^ ^ * | | finalPosition * | reportedPosition * initialPosition * * initialPosition: Offset in the file where we start writing. Immutable. * reportedPosition: Position at the time of the last update to the write metrics. 
* finalPosition: Offset where we stopped writing. Set on closeAndCommit() then never changed. * -----: Current writes to the underlying file. * xxxxx: Existing contents of the file. */ private val initialPosition = file.length() private var finalPosition: Long = -1 private var reportedPosition = initialPosition /** * Keep track of number of records written and also use this to periodically * output bytes written since the latter is expensive to do for each record. */ private var numRecordsWritten = 0 override def open(): BlockObjectWriter = { if (hasBeenClosed) { throw new IllegalStateException("Writer already closed. Cannot be reopened.") } fos = new FileOutputStream(file, true) ts = new TimeTrackingOutputStream(writeMetrics, fos) channel = fos.getChannel() bs = compressStream(new BufferedOutputStream(ts, bufferSize)) objOut = serializerInstance.serializeStream(bs) initialized = true this } override def close() { if (initialized) { Utils.tryWithSafeFinally { if (syncWrites) { // Force outstanding writes to disk and track how long it takes objOut.flush() val start = System.nanoTime() fos.getFD.sync() writeMetrics.incShuffleWriteTime(System.nanoTime() - start) } } { objOut.close() } channel = null bs = null fos = null ts = null objOut = null initialized = false hasBeenClosed = true } } override def isOpen: Boolean = objOut != null override def commitAndClose(): Unit = { if (initialized) { // NOTE: Because Kryo doesn't flush the underlying stream we explicitly flush both the // serializer stream and the lower level stream. objOut.flush() bs.flush() close() } finalPosition = file.length() // In certain compression codecs, more bytes are written after close() is called writeMetrics.incShuffleBytesWritten(finalPosition - reportedPosition) } // Discard current writes. We do this by flushing the outstanding writes and then // truncating the file to its initial position. override def revertPartialWritesAndClose() { try { writeMetrics.decShuffleBytesWritten(reportedPosition - initialPosition) writeMetrics.decShuffleRecordsWritten(numRecordsWritten) if (initialized) { objOut.flush() bs.flush() close() } val truncateStream = new FileOutputStream(file, true) try { truncateStream.getChannel.truncate(initialPosition) } finally { truncateStream.close() } } catch { case e: Exception => logError("Uncaught exception while reverting partial writes to file " + file, e) } } override def write(key: Any, value: Any) { if (!initialized) { open() } objOut.writeKey(key) objOut.writeValue(value) recordWritten() } override def write(b: Int): Unit = throw new UnsupportedOperationException() override def write(kvBytes: Array[Byte], offs: Int, len: Int): Unit = { if (!initialized) { open() } bs.write(kvBytes, offs, len) } override def recordWritten(): Unit = { numRecordsWritten += 1 writeMetrics.incShuffleRecordsWritten(1) if (numRecordsWritten % 32 == 0) { updateBytesWritten() } } override def fileSegment(): FileSegment = { new FileSegment(file, initialPosition, finalPosition - initialPosition) } /** * Report the number of bytes written in this writer's shuffle write metrics. * Note that this is only valid before the underlying streams are closed. */ private def updateBytesWritten() { val pos = channel.position() writeMetrics.incShuffleBytesWritten(pos - reportedPosition) reportedPosition = pos } // For testing private[spark] override def flush() { objOut.flush() bs.flush() } }
andrewor14/iolap
core/src/main/scala/org/apache/spark/storage/BlockObjectWriter.scala
Scala
apache-2.0
7,774
package uk.co.morleydev.zander.client.validator.exception

class NoLocalArtefactsExistException extends RuntimeException
MorleyDev/zander.client
src/main/scala/uk/co/morleydev/zander/client/validator/exception/NoLocalArtefactsExistException.scala
Scala
mit
121
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs // Licence: http://www.gnu.org/licenses/gpl-3.0.en.html package org.ensime.indexer import org.objectweb.asm.Opcodes._ import scala.collection.immutable.Queue sealed trait Access case object Public extends Access case object Default extends Access case object Protected extends Access case object Private extends Access object Access { def apply(code: Int): Access = if ((ACC_PUBLIC & code) > 0) Public else if ((ACC_PROTECTED & code) > 0) Protected else if ((ACC_PRIVATE & code) > 0) Private else Default } sealed trait FullyQualifiedName { def contains(o: FullyQualifiedName): Boolean def fqnString: String } case class PackageName(path: List[String]) extends FullyQualifiedName { def contains(o: FullyQualifiedName) = o match { case PackageName(pn) => pn.startsWith(path) case ClassName(p, _) => contains(p) case MemberName(c, _) => contains(c) } def fqnString = path.mkString(".") def parent = PackageName(path.init) } case class ClassName(pack: PackageName, name: String) extends FullyQualifiedName with DescriptorType { def contains(o: FullyQualifiedName) = o match { case ClassName(op, on) if pack == op & on.startsWith(name) => (on == name) || on.startsWith(name + "$") case MemberName(cn, _) => contains(cn) case _ => false } def fqnString = if (pack.path.isEmpty) name else ClassName.cleanupPackage(pack.fqnString + "." + name) def internalString = "L" + (if (pack.path.isEmpty) name else pack.path.mkString("/") + "/" + name) + ";" } object ClassName { private val Root = PackageName(Nil) // we consider Primitives to be ClassNames private def Primitive(name: String, desc: String): ClassName = new ClassName(Root, name) { override def fqnString = name override def internalString = desc } val PrimitiveBoolean = Primitive("boolean", "Z") val PrimitiveByte = Primitive("byte", "B") val PrimitiveChar = Primitive("char", "C") val PrimitiveShort = Primitive("short", "S") val PrimitiveInt = Primitive("int", "I") val PrimitiveLong = Primitive("long", "J") val PrimitiveFloat = Primitive("float", "F") val PrimitiveDouble = Primitive("double", "D") val PrimitiveVoid = Primitive("void", "V") // must be a single type descriptor // strips array reification def fromDescriptor(desc: String): ClassName = DescriptorParser.parseType(desc) match { case c: ClassName => c case a: ArrayDescriptor => a.reifier } // internal name is effectively the FQN with / instead of dots def fromInternal(internal: String): ClassName = { val parts = internal.split("/") val (before, after) = parts.splitAt(parts.length - 1) ClassName(PackageName(before.toList), after(0)) } def cleanupPackage(name: String): String = { name.replaceAll("\\\\.package\\\\$?\\\\.", ".") .replaceAll("\\\\.package\\\\$(?!$)", ".") .replaceAll("\\\\.package$", ".package\\\\$") } } case class MemberName( owner: ClassName, name: String ) extends FullyQualifiedName { def contains(o: FullyQualifiedName) = this == o def fqnString = ClassName.cleanupPackage(owner.fqnString + "." 
+ name) } sealed trait DescriptorType { def internalString: String } case class ArrayDescriptor(fqn: DescriptorType) extends DescriptorType { def reifier: ClassName = fqn match { case c: ClassName => c case a: ArrayDescriptor => a.reifier } def internalString = "[" + fqn.internalString } case class Descriptor(params: List[DescriptorType], ret: DescriptorType) { def descriptorString = "(" + params.map(_.internalString).mkString("") + ")" + ret.internalString } // TODO: replace generics Strings with domain objects case class RawClassfile( name: ClassName, generics: Option[String], superClass: Option[ClassName], interfaces: List[ClassName], access: Access, deprecated: Boolean, fields: Queue[RawField], methods: Queue[RawMethod], source: RawSource ) case class RawSource( filename: Option[String], line: Option[Int] ) case class RawType( fqn: String, access: Access ) { def fqnString = ClassName.cleanupPackage(fqn) } case class RawField( name: MemberName, clazz: ClassName, generics: Option[String], access: Access ) case class RawMethod( name: MemberName, access: Access, descriptor: Descriptor, generics: Option[String], line: Option[Int] )
j-mckitrick/ensime-sbt
src/sbt-test/ensime-sbt/ensime-server/core/src/main/scala/org/ensime/indexer/domain.scala
Scala
apache-2.0
4,435
package reswing

import scala.swing.{Component, LayoutContainer}
import scala.swing.event.{ComponentAdded, ComponentRemoved}

trait ReLayoutContainer[Constraints] extends ReUIElement {
  protected def peer: LayoutContainer

  private val peerLayout = peer.layout.asInstanceOf[
    scala.collection.mutable.Map[Component, Constraints]
  ]

  private def peerContents = peerLayout.toMap

  private def peerContents_=(components: Map[Component, Constraints]): Unit = {
    peerLayout.clear()
    peerLayout ++= components
    peer.repaint()
    peer.peer.validate()
  }

  def contents: ReSwingValue[Map[Component, Constraints]]

  contents.using({ () => peerContents }, peerContents_= _,
    classOf[ComponentAdded], classOf[ComponentRemoved])

  object layout {
    def update(component: Component, constraints: Constraints) =
      peerContents += (component -> constraints)
  }
}

object ReLayoutContainer {
  implicit def toLayoutContainer(component: ReLayoutContainer[_]): LayoutContainer =
    component.peer
}
guidosalva/REScala
Code/Extensions/RESwing/src/main/scala/reswing/ReLayoutContainer.scala
Scala
apache-2.0
1,011
/* * Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com> */ package play.api.libs.ws.ahc import java.net.URI import akka.stream.scaladsl.Source import akka.util.ByteString import play.api.libs.ws.{ WSBody, _ } import play.api.mvc.MultipartFormData import play.core.formatters.Multipart import scala.concurrent.Future import scala.concurrent.duration.Duration /** * A WS Request backed by AsyncHTTPClient. */ case class AhcWSRequest(underlying: StandaloneAhcWSRequest) extends WSRequest { override type Self = WSRequest override type Response = WSResponse /** * The URI for this request */ override def uri: URI = underlying.uri /** * The base URL for this request */ override def url: String = underlying.url /** * The method for this request */ override def method: String = underlying.method /** * The body of this request */ override def body: WSBody = underlying.body /** * The headers for this request */ override def headers: Map[String, Seq[String]] = underlying.headers /** * The cookies for this request */ override def cookies: Seq[WSCookie] = underlying.cookies /** * The query string for this request */ override def queryString: Map[String, Seq[String]] = underlying.queryString /** * A calculator of the signature for this request */ override def calc: Option[WSSignatureCalculator] = underlying.calc /** * The authentication this request should use */ override def auth: Option[(String, String, WSAuthScheme)] = underlying.auth /** * Whether this request should follow redirects */ override def followRedirects: Option[Boolean] = underlying.followRedirects /** * The timeout for the request */ override def requestTimeout: Option[Int] = underlying.requestTimeout /** * The virtual host this request will use */ override def virtualHost: Option[String] = underlying.virtualHost /** * The proxy server this request will use */ override def proxyServer: Option[WSProxyServer] = underlying.proxyServer override def contentType: Option[String] = underlying.contentType override def sign(calc: WSSignatureCalculator): Self = toWSRequest { underlying.sign(calc) } override def withCookies(cookies: WSCookie*): WSRequest = toWSRequest { underlying.withCookies(cookies: _*) } override def withQueryStringParameters(parameters: (String, String)*): WSRequest = toWSRequest { underlying.withQueryStringParameters(parameters: _*) } override def withAuth(username: String, password: String, scheme: WSAuthScheme): Self = toWSRequest { underlying.withAuth(username, password, scheme) } override def withHeaders(hdrs: (String, String)*): Self = toWSRequest { underlying.withHeaders(hdrs: _*) } override def withHttpHeaders(headers: (String, String)*): WSRequest = toWSRequest { underlying.withHttpHeaders(headers: _*) } override def withQueryString(parameters: (String, String)*): Self = toWSRequest { underlying.withQueryString(parameters: _*) } override def withFollowRedirects(follow: Boolean): Self = toWSRequest { underlying.withFollowRedirects(follow) } override def withRequestTimeout(timeout: Duration): Self = toWSRequest { underlying.withRequestTimeout(timeout) } override def withRequestFilter(filter: WSRequestFilter): WSRequest = toWSRequest { underlying.withRequestFilter(filter.asInstanceOf[WSRequestFilter]) } override def withVirtualHost(vh: String): Self = toWSRequest { underlying.withVirtualHost(vh) } override def withProxyServer(proxyServer: WSProxyServer): Self = toWSRequest { underlying.withProxyServer(proxyServer) } override def withMethod(method: String): Self = toWSRequest { underlying.withMethod(method) } 
override def withBody(body: WSBody): Self = toWSRequest { underlying.withBody(body) } override def withBody(file: java.io.File): Self = toWSRequest(underlying.withBody(file)) override def withBody[T: BodyWritable](body: T): Self = toWSRequest(underlying.withBody(body)) /** * Sets a multipart body for this request */ override def withBody(body: Source[MultipartFormData.Part[Source[ByteString, _]], _]): Self = { val boundary = Multipart.randomBoundary() val contentType = s"multipart/form-data; boundary=$boundary" withBody(StreamedBody(Multipart.transform(body, boundary))).withHeaders("Content-Type" -> contentType) } override def delete(): Future[Response] = execute("DELETE") override def get(): Future[Response] = execute("GET") override def head(): Future[Response] = execute("HEAD") override def options(): Future[Response] = execute("OPTIONS") /** * Perform a PATCH on the request asynchronously. */ override def patch(body: Source[MultipartFormData.Part[Source[ByteString, _]], _]): Future[Response] = { withBody(body).execute("PATCH") } override def patch(file: java.io.File): Future[Response] = withBody(file).execute("PATCH") override def patch[T: BodyWritable](body: T): Future[Response] = withBody(body).execute("PATCH") /** * Perform a POST on the request asynchronously. */ override def post(body: Source[MultipartFormData.Part[Source[ByteString, _]], _]): Future[Response] = withBody(body).execute("POST") override def post(body: java.io.File): Future[Response] = withBody(body).execute("POST") override def post[T: BodyWritable](body: T): Future[Response] = withBody(body).execute("POST") /** * Perform a PUT on the request asynchronously. */ override def put(body: Source[MultipartFormData.Part[Source[ByteString, _]], _]): Future[Response] = withBody(body).execute("PUT") override def put[T: BodyWritable](body: T): Future[Response] = withBody(body).execute("PUT") override def put(body: java.io.File): Future[Response] = withBody(body).execute("PUT") def stream(): Future[StreamedResponse] = underlying.stream() def execute(method: String): Future[Response] = { withMethod(method).execute() } override def execute(): Future[Response] = { val futureResponse: Future[StandaloneWSResponse] = underlying.execute() futureResponse.map { f => AhcWSResponse(f.asInstanceOf[StandaloneAhcWSResponse]) }(play.core.Execution.trampoline) } private def toWSRequest(request: StandaloneWSRequest): Self = { AhcWSRequest(request.asInstanceOf[StandaloneAhcWSRequest]) } }
ktoso/playframework
framework/src/play-ahc-ws/src/main/scala/play/api/libs/ws/ahc/AhcWSRequest.scala
Scala
apache-2.0
6,454
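A hedged sketch of how a Play controller or service might drive the WSRequest API wrapped by AhcWSRequest above; the injected WSClient, endpoint URL, header and timeout values are assumptions for illustration only.

import javax.inject.Inject
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import play.api.libs.ws.WSClient

class StatusFetcher @Inject()(ws: WSClient)(implicit ec: ExecutionContext) {

  def fetch(): Future[Int] =
    ws.url("https://example.org/status")              // hypothetical endpoint
      .withHttpHeaders("Accept" -> "application/json")
      .withRequestTimeout(5.seconds)
      .get()
      .map(_.status)                                  // HTTP status code of the response
}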
package com.qingstor.sdk.util

import java.time._
import org.scalatest.FunSuite

class TimeUtilTest extends FunSuite{
  test("ZonedDateTime to String test") {
    val time = ZonedDateTime.of(2017, 2, 17, 10, 0, 0, 0, ZoneId.of("Asia/Shanghai"))
    val timeString = "Fri, 17 Feb 2017 02:00:00 GMT"
    assert(timeString == TimeUtil.zonedDateTimeToString(time))
  }

  test("String to ZonedDateTime test") {
    val timeString = "Fri, 17 Feb 2017 10:00:00 GMT"
    val time = ZonedDateTime.of(2017, 2, 17, 10, 0, 0, 0, ZoneOffset.UTC)
    assert(time == TimeUtil.stringToZonedDateTime(timeString))
  }

  test("String to unix int test") {
    val timeString = "Fri, 17 Feb 2017 10:00:00 GMT"
    assert(1487325600000L == TimeUtil.stringToUnixInt(timeString))
  }
}
cheerx/qingstor-sdk-scala
src/test/scala/com/qingstor/sdk/util/TimeUtilTest.scala
Scala
apache-2.0
765
package com.github.lavrov.xml.reader import cats.{Applicative, Show} import cats.instances.list._ case class XmlPath(path: List[String]) { def \\ (child: String) = copy(path :+ child) def read[A](implicit reader: Reader[A]) = Reader( nodeSeq => reader.run(path.foldLeft(nodeSeq)(_ \\ _)) .leftMap( _.map( readerError => readerError.copy( path = XmlPath(path ++ readerError.path.path)))) ) def list[A](implicit reader: Reader[A]) = read( Reader( nodeSeq => Applicative[Result].sequence(nodeSeq.toList map reader.run) ) ) def optional[A](implicit reader: Reader[A]): Reader[Option[A]] = list[A].andThen(r => valid(r.headOption)) def first[A](implicit reader: Reader[A]): Reader[A] = optional[A].andThen(_ map valid getOrElse invalid(s"node not found", this)) @inline def apply[A](implicit reader: Reader[A]) = read[A] } object XmlPath { val __ = XmlPath(Nil) implicit val xmlPathShow: Show[XmlPath] = Show.show( path => path.path.mkString("__\\\\", "\\\\", "") ) }
lavrov/xml-reader
src/main/scala/com/github/lavrov/xml/reader/XmlPath.scala
Scala
mit
1,064
package slash_actions

import models._

object Start extends SlashAction {
  def execute(
      votingSession:Option[VotingSession],
      username:String,
      data:String):Option[String] = {
    val ticketDescription = data
    VotingSession.destroyCurrent
    startNewSession(ticketDescription)
    sendVotingStartedMessage(ticketDescription)
    None
  }

  def startNewSession(ticketDescription:String) = {
    VotingSession.create(ticketDescription, Map[String, Int]())
  }

  def sendVotingStartedMessage(ticketDescription:String) = {
    val message = s"<!here|here> Ticket $ticketDescription is ready for voting. " +
      "Please respond using `/chasm vote` to place your vote."
    slack.IncomingWebhookClient.postInChannel(message)
  }
}
conor-pappas/chasm_bot
app/value_objects/slash_actions/Start.scala
Scala
mit
748
/** * Copyright (C) 2014 Kaj Magnus Lindberg (born 1979) * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License as * published by the Free Software Foundation, either version 3 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package debiki.dao import com.debiki.core._ import com.debiki.core.Prelude._ import debiki._ import java.{util => ju} import debiki.dao.CachingDao.CacheKey import SpecialContentPages._ /** Loads special content pages, e.g. a page with a user-content-license text * that can be included as a section on the terms-of-use page. */ trait SpecialContentDao { self: SiteDao => object specialContentPages { def termsOfUseContentLicense: String = { val content = loadSpecialContentPage(TermsOfUseContentLicenseId) getOrElse TermsOfUseContentLicense replaceNamesApplyMarkup(content) } def termsOfUseJurisdiction: String = { val content = loadSpecialContentPage(TermsOfUseJurisdictionId) getOrElse TermsOfUseJurisdiction replaceNamesApplyMarkup(content) } } protected def loadSpecialContentPage(pageId: PageId): Option[Content] = { loadPageBodiesTitles(pageId::Nil).headOption flatMap { case (pageId, pageParts) => pageParts.body map { body => // Return None so the caller fallbacks to the default content, if we are // to use the default content. if (body.currentText == SpecialContentPages.UseDefaultContentMark) return None // Special content pages are always auto approved, it's ok to use `currentText`. Content(text = body.currentText) } } } private def replaceNamesApplyMarkup(content: Content): String = { var text = content.text.replaceAllLiterally( "%{company_short_name}", self.loadWholeSiteSettings().companyShortName.value.toString) val nodeSeq = ReactRenderer.renderAndSanitizeCommonMark( text, allowClassIdDataAttrs = false, followLinks = false) nodeSeq.toString } } trait CachingSpecialContentDao extends SpecialContentDao { self: CachingSiteDao => onPageSaved { sitePageId => // if page id == some special content page id, uncache it. } // override def loadSpecialContentPage(...) ... }
debiki/debiki-server-old
app/debiki/dao/SpecialContentDao.scala
Scala
agpl-3.0
2,726
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License */ package org.apache.toree.kernel.api import java.io.{InputStream, PrintStream} import java.net.URI import java.util.concurrent.{ConcurrentHashMap, TimeUnit, TimeoutException} import scala.collection.mutable import com.typesafe.config.Config import org.apache.spark.api.java.JavaSparkContext import org.apache.spark.sql.SQLContext import org.apache.spark.{SparkConf, SparkContext} import org.apache.toree.annotations.Experimental import org.apache.toree.boot.layer.InterpreterManager import org.apache.toree.comm.CommManager import org.apache.toree.{ReflectionAccessor, global} import org.apache.toree.global.ExecuteRequestState import org.apache.toree.interpreter.Results.Result import org.apache.toree.interpreter._ import org.apache.toree.kernel.protocol.v5 import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader import org.apache.toree.kernel.protocol.v5.magic.MagicParser import org.apache.toree.kernel.protocol.v5.stream.KernelOutputStream import org.apache.toree.kernel.protocol.v5.{KMBuilder, KernelMessage, MIMEType} import org.apache.toree.magic.MagicManager import org.apache.toree.plugins.PluginManager import org.apache.toree.utils.LogLike import scala.language.dynamics import scala.reflect.runtime.universe._ import scala.util.DynamicVariable import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} /** * Represents the main kernel API to be used for interaction. * * @param _config The configuration used when starting the kernel * @param interpreterManager The interpreter manager to expose in this instance * @param comm The Comm manager to expose in this instance * @param actorLoader The actor loader to use for message relaying */ @Experimental class Kernel ( private val _config: Config, private val actorLoader: ActorLoader, val interpreterManager: InterpreterManager, val comm: CommManager, val pluginManager: PluginManager ) extends KernelLike with LogLike { /** * Jars that have been added to the kernel */ private val jars = new mutable.ArrayBuffer[URI]() override def addJars(uris: URI*): Unit = { uris.foreach { uri => if (uri.getScheme != "file") { throw new RuntimeException("Cannot add non-local jar: " + uri) } } jars ++= uris interpreter.addJars(uris.map(_.toURL):_*) uris.foreach(uri => sparkContext.addJar(uri.getPath)) } /** * Represents the current input stream used by the kernel for the specific * thread. */ private val currentInputStream = new DynamicVariable[InputStream](null) private val currentInputKernelMessage = new DynamicVariable[KernelMessage](null) /** * Represents the current output stream used by the kernel for the specific * thread. 
*/ private val currentOutputStream = new DynamicVariable[PrintStream](null) private val currentOutputKernelMessage = new DynamicVariable[KernelMessage](null) /** * Represents the current error stream used by the kernel for the specific * thread. */ private val currentErrorStream = new DynamicVariable[PrintStream](null) private val currentErrorKernelMessage = new DynamicVariable[KernelMessage](null) /** * Represents magics available through the kernel. */ val magics = new MagicManager(pluginManager) /** * Represents magic parsing functionality. */ val magicParser = new MagicParser(magics) /** * Represents the data that can be shared using the kernel as the middleman. * * @note Using Java structure to enable other languages to have easy access! */ val data: java.util.Map[String, Any] = new ConcurrentHashMap[String, Any]() val interpreter = interpreterManager.defaultInterpreter.get /** * Handles the output of interpreting code. * * @param output the output of the interpreter * @return (success, message) or (failure, message) */ private def handleInterpreterOutput( output: (Result, Either[ExecuteOutput, ExecuteFailure]) ): (Boolean, ExecuteOutput) = { val (success, result) = output success match { case Results.Success => (true, result.left.get) case Results.Error => (false, Map("text/plain" -> result.right.getOrElse("").toString)) case Results.Aborted => (false, Map("text/plain" -> "Aborted!")) case Results.Incomplete => // If we get an incomplete it's most likely a syntax error, so // let the user know. (false, Map("text/plain" -> "Syntax Error!")) } } override def config:Config = { _config } /** * Executes a block of code represented as a string and returns the result. * * @param code The code as an option to execute * @return A tuple containing the result (true/false) and the output as a * string */ def eval(code: Option[String]): (Boolean, ExecuteOutput) = { code.map(c => { magicParser.parse(c) match { case Left(parsedCode) => val output = interpreter.interpret(parsedCode) handleInterpreterOutput(output) case Right(errMsg) => (false, Map("text/plain" -> errMsg)) } }).getOrElse((false, Map("text/plain" -> "Error!"))) } /** * Constructs a new instance of the stream methods using the latest * kernel message instance. * * @return The collection of stream methods */ override def stream: StreamMethods = stream() /** * Constructs a new instance of the stream methods using the specified * kernel message instance. * * @param parentMessage The message to serve as the parent of outgoing * messages sent as a result of using streaming methods * @return The collection of streaming methods */ private[toree] def stream( parentMessage: v5.KernelMessage = lastKernelMessage() ): StreamMethods = { new StreamMethods(actorLoader, parentMessage) } /** * Returns a collection of methods that can be used to display data from the * kernel to the client. * * @return The collection of display methods */ override def display: DisplayMethodsLike = display() /** * Constructs a new instance of the stream methods using the specified * kernel message instance. 
* * @param parentMessage The message to serve as the parent of outgoing * messages sent as a result of using streaming methods * @return The collection of streaming methods */ private[toree] def display( parentMessage: v5.KernelMessage = lastKernelMessage(), kmBuilder: v5.KMBuilder = v5.KMBuilder() ): DisplayMethods = { new DisplayMethods(actorLoader, parentMessage, kmBuilder) } /** * Constructs a new instance of the factory methods using the latest * kernel message instance. * * @return The collection of factory methods */ override def factory: FactoryMethods = factory() /** * Constructs a new instance of the factory methods using the specified * kernel message and kernel message builder. * * @param parentMessage The message to serve as the parent of outgoing * messages sent as a result of using an object created * by the factory methods * @param kmBuilder The builder to be used by objects created by factory * methods * @return The collection of factory methods */ private[toree] def factory( parentMessage: v5.KernelMessage = lastKernelMessage(), kmBuilder: v5.KMBuilder = v5.KMBuilder() ): FactoryMethods = { new FactoryMethods(_config, actorLoader, parentMessage, kmBuilder) } /** * Returns a print stream to be used for communication back to clients * via standard out. * * @return The print stream instance or an error if the stream info is * not found */ override def out: PrintStream = { val kernelMessage = lastKernelMessage() ReflectionAccessor.useReflection { constructStream(currentOutputStream, currentOutputKernelMessage, kernelMessage, { kernelMessage => val outputStream = this.factory(parentMessage = kernelMessage) .newKernelOutputStream("stdout") new PrintStream(outputStream) }) } } /** * Returns a print stream to be used for communication back to clients * via standard error. * * @return The print stream instance or an error if the stream info is * not found */ override def err: PrintStream = { val kernelMessage = lastKernelMessage() ReflectionAccessor.useReflection { constructStream(currentErrorStream, currentErrorKernelMessage, kernelMessage, { kernelMessage => val outputStream = this.factory(parentMessage = kernelMessage) .newKernelOutputStream("stderr") new PrintStream(outputStream) }) } } /** * Returns an input stream to be used to receive information from the client. * * @return The input stream instance or an error if the stream info is * not found */ override def in: InputStream = { val kernelMessage = lastKernelMessage() ReflectionAccessor.useReflection { constructStream(currentInputStream, currentInputKernelMessage, kernelMessage, { kernelMessage => this.factory(parentMessage = kernelMessage).newKernelInputStream() }) } } /** * Constructs or uses an existing stream. 
* * @param dynamicStream The DynamicVariable containing the stream to modify * or use * @param dynamicKernelMessage The DynamicVariable containing the KernelMessage to * check against the new KernelMessage * @param newKernelMessage The potentially-new KernelMessage * @param streamConstructionFunc The function used to create a new stream * @param typeTag The type information associated with the stream * @tparam T The stream type * @return The new stream or existing stream */ private def constructStream[T]( dynamicStream: DynamicVariable[T], dynamicKernelMessage: DynamicVariable[KernelMessage], newKernelMessage: KernelMessage, streamConstructionFunc: (KernelMessage) => T )(implicit typeTag: TypeTag[T]) = { // Update the stream being used only if the information has changed // or if the stream has not been initialized if (updateKernelMessage(dynamicKernelMessage, newKernelMessage) || dynamicStream.value == null) { logger.trace("Creating new kernel " + typeTag.tpe.toString + "!") dynamicStream.value = streamConstructionFunc(newKernelMessage) } dynamicStream.value } /** * Updates the last stream info returning the status of whether or not the * new stream info was different than the last stream info. * * @param dynamicKernelMessage The dynamic variable containing the current * stream info * @param kernelMessage The new stream info * @return True if the new stream info is different from the last (therefore * replaced), otherwise false */ private def updateKernelMessage( dynamicKernelMessage: DynamicVariable[KernelMessage], kernelMessage: KernelMessage ): Boolean = if (kernelMessage != null && !kernelMessage.equals(dynamicKernelMessage.value)) { dynamicKernelMessage.value = kernelMessage true } else { false } /** * Retrieves the last kernel message received by the kernel. 
   *
   * @throws IllegalArgumentException If no kernel message has been received
   * @return The kernel message instance
   */
  private def lastKernelMessage() = {
    val someKernelMessage = ExecuteRequestState.lastKernelMessage
    require(someKernelMessage.nonEmpty, "No kernel message received!")
    someKernelMessage.get
  }

  // TODO: Think of a better way to test without exposing this
  protected[toree] def createSparkConf(conf: SparkConf) = {

    if(conf.contains("spark.submit.deployMode")) {
      logger.info("Utilizing deploy mode: " + conf.get("spark.submit.deployMode"))
    } else {
      logger.info("Setting deployMode to client")
      conf.set("spark.submit.deployMode", "client")
    }

    conf
  }

  // TODO: Think of a better way to test without exposing this
  protected[toree] def initializeSparkContext(sparkConf: SparkConf): SparkContext = {

    logger.debug("Constructing new Spark Context")

    // TODO: Inject stream redirect headers in Spark dynamically
    var sparkContext: SparkContext = null
    val outStream = new KernelOutputStream(
      actorLoader, KMBuilder(), global.ScheduledTaskManager.instance,
      sendEmptyOutput = _config.getBoolean("send_empty_output")
    )

    // Update global stream state and use it to set the Console local variables
    // for threads in the Spark threadpool
    global.StreamState.setStreams(System.in, outStream, outStream)
    global.StreamState.withStreams {
      sparkContext = new SparkContext(sparkConf)
    }

    sparkContext
  }

  protected[toree] def initializeSqlContext(
    sparkContext: SparkContext
  ): SQLContext = {
    val sqlContext: SQLContext = try {
      logger.info("Attempting to create Hive Context")
      val hiveContextClassString =
        "org.apache.spark.sql.hive.HiveContext"

      logger.debug(s"Looking up $hiveContextClassString")
      val hiveContextClass = Class.forName(hiveContextClassString)

      val sparkContextClass = classOf[SparkContext]
      val sparkContextClassName = sparkContextClass.getName

      logger.debug(s"Searching for constructor taking $sparkContextClassName")
      val hiveContextConstructor =
        hiveContextClass.getConstructor(sparkContextClass)

      logger.debug("Invoking Hive Context constructor")
      hiveContextConstructor.newInstance(sparkContext).asInstanceOf[SQLContext]
    } catch {
      case _: Throwable =>
        logger.warn("Unable to create Hive Context! Defaulting to SQL Context!")
        new SQLContext(sparkContext)
    }

    sqlContext
  }

  override def interpreter(name: String): Option[Interpreter] = {
    interpreterManager.interpreters.get(name)
  }

  private lazy val defaultSparkConf: SparkConf = createSparkConf(new SparkConf())

  override def sparkContext: SparkContext = {
    defaultSparkConf.getOption("spark.master") match {
      case Some(master) if !master.contains("local") =>
        // when connecting to a remote cluster, the first call to getOrCreate
        // may create a session and take a long time, so this starts a future
        // to get the session. if it takes longer than 100 ms, then print a
        // message to the user that Spark is starting.
import scala.concurrent.ExecutionContext.Implicits.global val sessionFuture = Future { SparkContext.getOrCreate(defaultSparkConf) } try { Await.result(sessionFuture, Duration(100, TimeUnit.MILLISECONDS)) } catch { case timeout: TimeoutException => // getting the session is taking a long time, so assume that Spark // is starting and print a message display.content( MIMEType.PlainText, "Waiting for a Spark session to start...") Await.result(sessionFuture, Duration.Inf) } case _ => SparkContext.getOrCreate(defaultSparkConf) } } override def sparkConf: SparkConf = sparkContext.getConf override def javaSparkContext: JavaSparkContext = javaSparkContext(sparkContext) override def sqlContext: SQLContext = sqlContext(sparkContext) private val javaContexts = new mutable.WeakHashMap[SparkContext, JavaSparkContext] private def javaSparkContext(sparkContext: SparkContext): JavaSparkContext = { javaContexts.synchronized { javaContexts.getOrElseUpdate( sparkContext, new JavaSparkContext(sparkContext)) } } private val sqlContexts = new mutable.WeakHashMap[SparkContext, SQLContext] private def sqlContext(sparkContext: SparkContext): SQLContext = { sqlContexts.synchronized { sqlContexts.getOrElseUpdate( sparkContext, initializeSqlContext(sparkContext)) } } }
Myllyenko/incubator-toree
kernel/src/main/scala/org/apache/toree/kernel/api/Kernel.scala
Scala
apache-2.0
16,924
package com.databricks.spark.sql.perf.mllib.feature import org.apache.spark.ml import org.apache.spark.ml.PipelineStage import org.apache.spark.sql._ import com.databricks.spark.sql.perf.mllib.OptionImplicits._ import com.databricks.spark.sql.perf.mllib.data.DataGenerator import com.databricks.spark.sql.perf.mllib.{BenchmarkAlgorithm, MLBenchContext, TestFromTraining} /** Object for testing VectorSlicer performance */ object VectorSlicer extends BenchmarkAlgorithm with TestFromTraining { override def trainingDataSet(ctx: MLBenchContext): DataFrame = { import ctx.params._ DataGenerator.generateContinuousFeatures( ctx.sqlContext, numExamples, ctx.seed(), numPartitions, numFeatures ) } override def getPipelineStage(ctx: MLBenchContext): PipelineStage = { import ctx.params._ val indices = (0 until numFeatures by 2).toArray new ml.feature.VectorSlicer() .setInputCol("features") .setIndices(indices) } }
databricks/spark-sql-perf
src/main/scala/com/databricks/spark/sql/perf/mllib/feature/VectorSlicer.scala
Scala
apache-2.0
993
package akkaviz.frontend.components import rx.Var import scalatags.JsDom.all._ class SettingsTab( monitoringStatus: Var[MonitoringStatus], showUnconnected: Var[Boolean] ) extends Tab { val monitoringOnOff = new MonitoringOnOff(monitoringStatus) val unconnectedOnOff = new UnconnectedOnOff(showUnconnected) override def name: String = "Settings" override def tabId: String = "globalsettings" override def onCreate(): Unit = { monitoringOnOff.attach(tabBody) unconnectedOnOff.attach(tabBody) val graphSettings = div(id := "graphsettings").render tabBody.appendChild(graphSettings) } }
blstream/akka-viz
frontend/src/main/scala/akkaviz/frontend/components/SettingsTab.scala
Scala
mit
627
package utils import frequencycount.Item import scala.collection.immutable.IndexedSeq object Utils { def create(elements: Int, item: Item.Item): List[String] = { val seq: IndexedSeq[String] = for (i <- 1 to elements) yield { item.toString } seq.toList } }
mvogiatzis/freq-count
src/main/scala/Utils/Utils.scala
Scala
mit
283
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.json import com.fasterxml.jackson.core.{JsonParser, JsonToken} import org.apache.spark.sql.types._ object JacksonUtils { /** * Advance the parser until a null or a specific token is found */ def nextUntil(parser: JsonParser, stopOn: JsonToken): Boolean = { parser.nextToken() match { case null => false case x => x != stopOn } } /** * Verify if the schema is supported in JSON parsing. */ def verifySchema(schema: StructType): Unit = { def verifyType(name: String, dataType: DataType): Unit = dataType match { case NullType | BooleanType | ByteType | ShortType | IntegerType | LongType | FloatType | DoubleType | StringType | TimestampType | DateType | BinaryType | _: DecimalType => case st: StructType => st.foreach(field => verifyType(field.name, field.dataType)) case at: ArrayType => verifyType(name, at.elementType) // For MapType, its keys are treated as a string (i.e. calling `toString`) basically when // generating JSON, so we only care if the values are valid for JSON. case mt: MapType => verifyType(name, mt.valueType) case udt: UserDefinedType[_] => verifyType(name, udt.sqlType) case _ => throw new UnsupportedOperationException( s"Unable to convert column $name of type ${dataType.simpleString} to JSON.") } schema.foreach(field => verifyType(field.name, field.dataType)) } }
bravo-zhang/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/json/JacksonUtils.scala
Scala
apache-2.0
2,276
package com.twitter.finagle.mysql import java.nio.charset.Charset import java.nio.charset.StandardCharsets.{UTF_8, ISO_8859_1, US_ASCII} object MysqlCharset { /** * Default Java Charset used by this client, UTF-8. */ val defaultCharset: Charset = UTF_8 /** * Converts from mysql charset to java charset. */ def apply(charset: Short): Charset = if (isUtf8(charset)) UTF_8 else if (isLatin1(charset)) ISO_8859_1 else if (isBinary(charset)) US_ASCII else throw new IllegalArgumentException("Charset %d is not supported.".format(charset)) /** * SELECT id,collation_name FROM information_schema.collations * WHERE `collation_name` LIKE 'latin1%' ORDER BY id; */ private[this] val Latin1Set = Set(5, 8, 15, 31, 47, 48, 49, 94) /** * "SELECT id,collation_name FROM information_schema.collations * WHERE collation_name LIKE '%utf8' ORDER BY id" */ private[this] val Utf8Set = Set(192 to 254: _*) + 33 + 45 + 46 + 83 /** * @see https://dev.mysql.com/doc/refman/5.7/en/charset-unicode-sets.html */ val Utf8_bin: Short = 83.toShort /** * @see https://dev.mysql.com/doc/refman/5.7/en/charset-unicode-sets.html */ val Utf8_general_ci: Short = 33.toShort /** * @see https://dev.mysql.com/doc/refman/5.7/en/charset-binary-set.html */ val Binary: Short = 63.toShort private[this] val CompatibleSet = Latin1Set ++ Utf8Set + Binary def isCompatible(code: Short): Boolean = CompatibleSet(code) def isUtf8(code: Short): Boolean = Utf8Set(code) def isLatin1(code: Short): Boolean = Latin1Set(code) def isBinary(code: Short): Boolean = code == Binary }
luciferous/finagle
finagle-mysql/src/main/scala/com/twitter/finagle/mysql/MysqlCharset.scala
Scala
apache-2.0
1,667
package euler object euler11 { var s = """08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48""" val array = s.split("\n").map(_.trim).map(_.split(" ").map(_.toInt)) def numXY(x: Int, y: Int): Int = if (array.isDefinedAt(x) && array(x).isDefinedAt(y)) array(x)(y) else 0 def productXY(x: Int, y: Int): Int = { val horizontal = (0 to 3).map(l => numXY(x+l, y)).product val vertical = (0 to 3).map(l => numXY(x, y+l)).product val up = (0 to 3).map(l => numXY(x+l, y+l)).product val down = (0 to 3).map(l => numXY(x+l, y-l)).product List(horizontal, vertical, up, down).max } val sol = (0 to 19).map(x => (0 to 19).map((x, _)).map(y => productXY(y._1, y._2))).flatten.max }
vramana/euler
src/main/scala/problem011.scala
Scala
mit
2,003
/** * Copyright 2015 ICT. * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cn.ac.ict.acs.netflow.query.master import akka.serialization.Serialization import cn.ac.ict.acs.netflow._ import cn.ac.ict.acs.netflow.ha._ class ZKRecoveryModeFactory(conf: NetFlowConf, serializer: Serialization) extends RecoveryModeFactory(conf, serializer) { def createPersistenceEngine() = new QueryMasterZKPersistenceEngine(conf, serializer) def createLeaderElectionAgent(master: LeaderElectable) = new ZooKeeperLeaderElectionAgent(master, conf) } class QueryMasterZKPersistenceEngine(conf: NetFlowConf, serialization: Serialization) extends ZooKeeperPersistenceEngine(conf, serialization) with MasterPersistenceEngine class QueryMasterBHPersistenceEngine extends BlackHolePersistenceEngine with MasterPersistenceEngine
ayscb/netflow
query/src/main/scala/cn/ac/ict/acs/netflow/query/master/ZKRecoveryModeFactory.scala
Scala
apache-2.0
1,579
package org.twitterReplica.scala

import org.apache.hadoop.hbase.client.Result
import org.apache.spark.rdd.RDD

class ScalaHelperHBase {
}

object ScalaHelperHBase {

  // Fetch HBase scan results as an RDD of Result rows (body not implemented here).
  def getResults(): RDD[Result] = {
    ???
  }
}
DaniUPC/near-image-replica-detection
src/main/scala/HBaseScalaHelper.scala
Scala
gpl-2.0
169
package tests import cdgp._ import fuel.util.{CollectorStdout, Options, Rng} import org.junit.Test import org.junit.Assert._ import swim.tree.Op object TestCDGPState { val scriptMax = """(set-logic LIA) (synth-fun max2 ((x Int) (y Int)) Int ((Start Int (x y 0 1 (+ Start Start) (- Start Start) (ite StartBool Start Start))) (StartBool Bool ((and StartBool StartBool) (or StartBool StartBool) (not StartBool) (<= Start Start) (= Start Start) (>= Start Start))))) (declare-var x Int) (declare-var y Int) (constraint (>= (max2 x y) x)) (constraint (>= (max2 x y) y)) (constraint (or (= x (max2 x y)) (= y (max2 x y)))) (check-synth)""" val scriptMaxRenamedVars = """(set-logic LIA) (synth-fun max2 ((a Int) (b Int)) Int ((Start Int (a b 0 1 (+ Start Start) (- Start Start) (ite StartBool Start Start))) (StartBool Bool ((and StartBool StartBool) (or StartBool StartBool) (not StartBool) (<= Start Start) (= Start Start) (>= Start Start))))) (declare-var x Int) (declare-var y Int) (constraint (>= (max2 x y) x)) (constraint (>= (max2 x y) y)) (constraint (or (= x (max2 x y)) (= y (max2 x y)))) (check-synth)""" val scriptPsuedoMaxRenamedVars = """(set-logic LIA) (synth-fun max2 ((a Int) (b Int)) Int ((Start Int (a b 0 1 (+ Start Start) (- Start Start) (ite StartBool Start Start))) (StartBool Bool ((and StartBool StartBool) (or StartBool StartBool) (not StartBool) (<= Start Start) (= Start Start) (>= Start Start))))) (declare-var x Int) (declare-var y Int) (constraint (>= (max2 x y) x)) (constraint (>= (max2 x y) y)) (check-synth)""" val scriptMaxFixedX = """(set-logic LIA) (synth-fun max2 ((argA Int) (argB Int)) Int ((Start Int (argA argB 0 1 (+ Start Start) (- Start Start) (ite StartBool Start Start))) (StartBool Bool ((and StartBool StartBool) (or StartBool StartBool) (not StartBool) (<= Start Start) (= Start Start) (>= Start Start))))) (declare-var y Int) (constraint (>= (max2 1 y) 1)) (constraint (>= (max2 1 y) y)) (constraint (or (= 1 (max2 1 y)) (= y (max2 1 y)))) (check-synth)""" val scriptNotSingleInvocation = """; three.sl ; Synthesize x * 3 mod 10 (set-logic LIA) (synth-fun f ((x Int)) Int ((Start Int (x 3 7 10 (* Start Start) (mod Start Start))))) (declare-var x Int) (constraint (= (f x) (+ 10 (f x)))) (constraint (= (f 1) 3)) (constraint (= (f 2) 6)) (constraint (= (f 3) 9)) (constraint (= (f 4) 2)) (constraint (= (f 5) 5)) (constraint (= (f 6) 8)) (constraint (= (f 7) 1)) (constraint (= (f 8) 4)) (constraint (= (f 9) 7)) (constraint (= (f 0) 0)) (check-synth) """ val scriptIdentity = """ |(set-logic NRA) |(synth-fun f ((x Real)) Real) |(declare-var x Real) |(constraint (= (f 1.0) 1.0)) |(constraint (forall ((x Real)(cdgp.P1.x Real)) | (=> (> cdgp.P1.x x) (>= (f cdgp.P1.x) (f x))))) |(check-synth) """.stripMargin } final class TestCDGPState { implicit val emptyOpt = Options(s"--selection lexicase --evolutionMode generational ${Global.solverConfig}") implicit val coll = CollectorStdout(emptyOpt) implicit val rng = Rng(emptyOpt) @Test def testEvalTestsExtraction(): Unit = { implicit val opt = Options(s"--regression true --testsTypesForRatio c,s,i --partialConstraintsInFitness true ${Global.solverConfig}") implicit val optEvalValue = Options(s"--testErrorVerValue 1.0 --regression true --testsTypesForRatio c,s,i --partialConstraintsInFitness true ${Global.solverConfig}") implicit val optEvalValueDiff5 = Options(s"--testErrorVerValue 1.0 --testsMaxDiff 5 --regression true --testsTypesForRatio c,s,i --partialConstraintsInFitness true ${Global.solverConfig}") implicit val optEvalPercent100 = 
Options(s"--testErrorVerPercent 1.0 --regression true --testsTypesForRatio c,s,i --partialConstraintsInFitness true ${Global.solverConfig}") implicit val optEvalPercent300 = Options(s"--testErrorVerPercent 3.0 --regression true --testsTypesForRatio c,s,i --partialConstraintsInFitness true ${Global.solverConfig}") val problem = LoadSygusBenchmark.parseText(TestCDGPState.scriptIdentity) val state = StateCDGP(problem)(opt, coll, rng) val eval = EvalContinuous.EvalCDGPSeqDouble(state, Set("c", "i", "s"))(opt, coll) val evalValueCIS = EvalContinuous.EvalCDGPSeqDouble(state, Set("c", "i", "s"))(optEvalValue, coll) val evalValueDiff5CIS = EvalContinuous.EvalCDGPSeqDouble(state, Set("c", "i", "s"))(optEvalValueDiff5, coll) val evalRatio100CIS = EvalContinuous.EvalCDGPSeqDouble(state, Set("c", "i", "s"))(optEvalPercent100, coll) val evalRatio300C = EvalContinuous.EvalCDGPSeqDouble(state, Set("c"))(optEvalPercent300, coll) // (constraint (= (f 1.0) 1.0)) // the first normal test state.testsManager.addNewTest((Map("x"->10.0), None)) state.testsManager.addNewTest((Map("x"->11.0), None)) state.testsManager.addNewTest((Map("x"->2.0), Some(2.0))) state.testsManager.addNewTest((Map("x"->3.0), Some(3.0))) state.testsManager.flushHelpers() val v1 = Seq(0.0, 2.0, 1.0, 1.0, 2.0, 5.0) // ERRORS // VALUES: (SSS, 1.0, ---, ---, 2.0, 3.0) val testsNormal = eval.extractEvalNormal(v1) val testsSpecial = eval.extractEvalSpecial(v1) assertEquals(5, testsNormal.size) assertEquals(2.0, testsNormal(0), 0.0) assertEquals(1.0, testsNormal(1), 0.0) assertEquals(1.0, testsNormal(2), 0.0) assertEquals(2.0, testsNormal(3), 0.0) assertEquals(5.0, testsNormal(4), 0.0) assertEquals(1, testsSpecial.size) assertEquals(0.0, testsSpecial(0), 0.0) val testsComplete = eval.extractEvalComplete(v1, state.testsManager.tests) val testsIncomplete = eval.extractEvalIncomplete(v1, state.testsManager.tests) assertEquals(3, testsComplete.size) assertEquals(2.0, testsComplete(0), 0.0) assertEquals(2.0, testsComplete(1), 0.0) assertEquals(5.0, testsComplete(2), 0.0) assertEquals(2, testsIncomplete.size) assertEquals(1.0, testsIncomplete(0), 0.0) assertEquals(1.0, testsIncomplete(1), 0.0) assertEquals((1, 6), evalValueCIS.getNumPassedAndTotal(v1, state.testsManager.tests)) assertEquals(false, evalValueCIS.doVerify(v1, state.testsManager.tests)) assertEquals((1, 6), evalValueDiff5CIS.getNumPassedAndTotal(v1, state.testsManager.tests)) assertEquals(true, evalValueDiff5CIS.doVerify(v1, state.testsManager.tests)) assertEquals((2, 6), evalRatio100CIS.getNumPassedAndTotal(v1, state.testsManager.tests)) assertEquals(false, evalRatio100CIS.doVerify(v1, state.testsManager.tests)) assertEquals((3, 3), evalRatio300C.getNumPassedAndTotal(v1, state.testsManager.tests)) assertEquals(true, evalRatio300C.doVerify(v1, state.testsManager.tests)) } @Test def test_max2_t(): Unit = { // Testing CDGP for pure test-based specification val state = StateCDGP("resources/LIA/tests/max2_t.sl") val eval = EvalDiscrete.EvalCDGPSeqInt(state, Set("c", "i")) state.testsManager.flushHelpers() // propagate tests assertEquals(5, state.testsManager.getNumberOfTests) assertEquals(5, state.testsManager.getNumberOfKnownOutputs) assertEquals(false, state.sygusData.singleInvocAll) assertEquals(false, state.sygusData.singleInvocFormal) val op = SMTLIBFormatter.smtlibToOp("""(ite (>= a b) a b)""") assertEquals(Set("a", "b"), state.testsManager.getTests().head._1.keys.toSet) assertEquals(0, eval.evalOnTests(op, state.testsManager.getTests()).sum) } @Test def test_evalOnTestsMax(): Unit = { 
val op = Op.fromStr("ite(>=(x y) x 0)", useSymbols=true) val t1 = (GetValueParser("((x 4)(y 3))").toMap, Some(4)) val t2 = (GetValueParser("((x 5)(y 1))").toMap, Some(5)) val t3 = (GetValueParser("((x 1)(y 3))").toMap, Some(3)) val tests = Seq(t1, t2, t3) val code = TestCDGPState.scriptMax val problem = LoadSygusBenchmark.parseText(code) val eval = EvalDiscrete.EvalCDGPSeqInt(StateCDGP(problem), Set("c", "i")) assertEquals(Seq(0, 0, 1), eval.evalOnTests(op, tests)) } @Test def test_evalOnTestsMaxUsingSolver(): Unit = { val problem = LoadSygusBenchmark.parseText(TestCDGPState.scriptMax) val state = StateCDGP(problem) val eval = EvalDiscrete.EvalCDGPSeqInt(state, Set("c", "i")) val op = Op.fromStr("ite(>=(x y) x 0)", useSymbols=true) val t1 = (GetValueParser("((x 4)(y 3))").toMap, Some(4)) val t2 = (GetValueParser("((x 5)(y 1))").toMap, Some(5)) val t3 = (GetValueParser("((x 1)(y 3))").toMap, Some(3)) state.testsManager.addNewTest(t1) state.testsManager.addNewTest(t2) state.testsManager.addNewTest(t3) state.testsManager.flushHelpers() assertEquals(3, state.testsManager.getNumberOfTests) assertEquals(3, state.testsManager.getNumberOfKnownOutputs) assertEquals(1, eval.evalOnTests(op, state.testsManager.getTests()).sum) assertEquals(3, state.testsManager.getNumberOfKnownOutputs) } @Test def test_evalOnTestsString(): Unit = { val problem = LoadSygusBenchmark.parseText(Global.specFirstname) val state = StateCDGP(problem) val eval = EvalDiscrete.EvalCDGPSeqInt(state, Set("c", "i")) val tests = Seq( (Map("name" -> "\\\\x00 \\\\x00"), Some("\\\\x00")), (Map("name" -> " "),Some("")), (Map("name" -> "\\\\x00 "),Some("\\\\x00")), (Map("name" -> " \\\\x00"),Some("")), (Map("name" -> " \\\\x00\\\\x00\\\\x00"),Some("")), (Map("name" -> "\\\\x00\\\\x00 \\\\x00"),Some("\\\\x00\\\\x00")), (Map("name" -> " \\\\x00\\\\x00"),Some("")), (Map("name" -> "\\\\x00 \\\\x00\\\\x00"),Some("\\\\x00")), (Map("name" -> " \\\\x00"),Some(""))) tests.foreach{ t => state.testsManager.tests.append(t) } val op = SMTLIBFormatter.smtlibToOp("""(str.substr name 0 (str.indexof name " " 0))""") assertEquals(0, eval.evalOnTests(op, state.testsManager.getTests()).sum) } @Test def test_evalOnTestsMaxVerify(): Unit = { val problem = LoadSygusBenchmark.parseText(TestCDGPState.scriptMax) val state = StateCDGP(problem) val op = Op.fromStr("ite(>=(x y) x 0)", useSymbols=true) val (dec, output) = state.verify(op) assertEquals("sat", dec) assertEquals(true, output.isDefined) } @Test def test_evalOnTestsMaxDifferentVarOrderInModel(): Unit = { val problem = LoadSygusBenchmark.parseText(TestCDGPState.scriptMax) val eval = EvalDiscrete.EvalCDGPSeqInt(StateCDGP(problem), Set("c", "i")) val op = Op.fromStr("ite(>=(x y) x 0)", useSymbols=true) val t1 = (GetValueParser("((y 3)(x 4))").toMap, Some(4)) val t2 = (GetValueParser("((y 1)(x 5))").toMap, Some(5)) val t3 = (GetValueParser("((y 3)(x 1))").toMap, Some(3)) val tests = Seq(t1, t2, t3) assertEquals(Seq(0, 0, 1), eval.evalOnTests(op, tests)) } @Test def test_evalOnTestsMaxRenamedVars(): Unit = { val problem = LoadSygusBenchmark.parseText(TestCDGPState.scriptMaxRenamedVars) val state = StateCDGP(problem) val eval = EvalDiscrete.EvalCDGPSeqInt(state, Set("c", "i")) val op = Op.fromStr("ite(>=(a b) a 0)", useSymbols=true) val t1 = state.createCompleteTest(GetValueParser("((x 4)(y 3))").toMap, Some(4)) val t2 = state.createCompleteTest(GetValueParser("((x 5)(y 1))").toMap, Some(5)) val t3 = state.createCompleteTest(GetValueParser("((x 1)(y 3))").toMap, Some(3)) val tests = Seq(t1, t2, t3) 
assertEquals(Seq(0, 0, 1), eval.evalOnTests(op, tests)) val t2_2 = (GetValueParser("((y 1)(x 5))").toMap, Some(5)) val tests_2 = Seq(t1, t2_2, t3) assertEquals(Seq(0, 0, 1), eval.evalOnTests(op, tests)) } @Test def test_evalOnTestsMaxFixedX(): Unit = { val problem = LoadSygusBenchmark.parseText(TestCDGPState.scriptMaxFixedX) val state = StateCDGP(problem) val eval = EvalDiscrete.EvalCDGPSeqInt(state, Set("c", "i")) val op = Op.fromStr("ite(>=(argA argB) argA 0)", useSymbols=true) val t1 = state.createCompleteTest(GetValueParser("((asd 4)(y -3))").toMap, Some(1)) val t2 = state.createCompleteTest(GetValueParser("((asd 5)(y 0))").toMap, Some(1)) val t3 = state.createCompleteTest(GetValueParser("((asd 1)(y 3))").toMap, Some(3)) val tests = Seq(t1, t2, t3) assertEquals(Seq(0, 0, 1), eval.evalOnTests(op, tests)) } @Test def test_evalOnTestsMaxFixedX2(): Unit = { val problem = LoadSygusBenchmark.parseText(TestCDGPState.scriptMaxFixedX) val state = StateCDGP(problem) val eval = EvalDiscrete.EvalCDGPSeqInt(state, Set("c", "i")) val op = Op.fromStr("ite(>=(argA argB) argA 0)", useSymbols=true) val t1 = state.createCompleteTest(GetValueParser("((y -3))").toMap, Some(1)) val t2 = state.createCompleteTest(GetValueParser("((y 0))").toMap, Some(1)) val t3 = state.createCompleteTest(GetValueParser("((y 3))").toMap, Some(3)) val tests = Seq(t1, t2, t3) assertEquals(Seq(0, 0, 1), eval.evalOnTests(op, tests)) } @Test def test_checkIfSingleCorrectAnswer_unsat(): Unit = { val problem = LoadSygusBenchmark.parseText(TestCDGPState.scriptMaxRenamedVars) val sygusData = SygusProblemData(problem) val query = SMTLIBFormatter.checkIfSingleAnswerForEveryInput(problem, sygusData) val state = StateCDGP(problem) val (decision, output) = state.solver.executeQuery(query) assertEquals("unsat", decision) // unsat, so there is only a single answer } @Test def test_checkIfSingleInvocation(): Unit = { val problem = LoadSygusBenchmark.parseText(TestCDGPState.scriptNotSingleInvocation) val data = SygusProblemData(problem, mixedSpecAllowed=true) assertEquals(true, data.singleInvocFormal) } @Test def test_checkIfSingleCorrectAnswer_sat(): Unit = { val problem = LoadSygusBenchmark.parseText(TestCDGPState.scriptPsuedoMaxRenamedVars) val sygusData = SygusProblemData(problem) val query = SMTLIBFormatter.checkIfSingleAnswerForEveryInput(problem, sygusData) val state = StateCDGP(problem) val (decision, output) = state.solver.executeQuery(query) assertEquals("sat", decision) } @Test def test_createRandomTest(): Unit = { val code = """(set-logic LIA) |(synth-fun f ( (w Int)(x Int)(y Int)(z Int)) Int ) |(declare-var a Int) |(constraint (= (f a a 4 4) (+ (* 2 a) 8))) |(check-synth) """.stripMargin val problem = LoadSygusBenchmark.parseText(code) val state = StateGPR(problem) val test = state.createRandomTest().get assertEquals(true, test.isCompleteTest) val test2 = (test._1.map{ case (k, v) => (k, if (k == "y" || k == "z") 4 else 1)}, test._2) println(s"Test: $test") assertEquals(Seq("a", "a", "4", "4"), state.invocations.head) assertEquals(Map("w"->1, "x"->1, "y"->4, "z"->4), test2._1) } @Test def test_createTestFromFailedVerification(): Unit = { val code = """(set-logic LIA) |(synth-fun f ( (w Int)(x Int)(y Int)(z Int)) Int ) |(declare-var a Int) |(constraint (= (f a a 4 4) (+ (* 2 a) 8))) |(check-synth) """.stripMargin val problem = LoadSygusBenchmark.parseText(code) val state = StateCDGP(problem) val solverOut = "((a 1))" val test = state.createTestFromFailedVerification(solverOut).get println(s"Test: $test") val testModel = 
GetValueParser(solverOut).toMap val testInputs = state.modelToSynthFunInputs(testModel) assertEquals(Seq("a", "a", "4", "4"), state.invocations.head) assertEquals(Map("w"->1, "x"->1, "y"->4, "z"->4), testInputs) } @Test def test_createTestFromFailedVerification_tooBig(): Unit = { val code = """(set-logic LIA) |(synth-fun f ( (w Int)(x Int)(y Int)(z Int)) Int ) |(declare-var a Int) |(constraint (= (f a a 4 4) (+ (* 2 a) 8))) |(check-synth) """.stripMargin val problem = LoadSygusBenchmark.parseText(code) val state = StateCDGP(problem) val solverOut = "((a 12345678901234))" val test = state.createTestFromFailedVerification(solverOut) assertEquals(None, test) } @Test def test_createTestsFromConstraints(): Unit = { val code = """(set-logic SLIA) |(synth-fun f ((s String)(a Int)(b Int)) String ((Start String (s)))) |(declare-var s String) |(declare-var a Int) |(declare-var b Int) |(constraint (= (f "asd" 0 1) "sad")) |(constraint (= (str.len (f s a b)) (str.len s))) |(constraint (= (f "asd" 0 2) "das")) """.stripMargin val problem = LoadSygusBenchmark.parseText(code) val data = SygusProblemData(problem, mixedSpecAllowed = true) val tests = data.testCasesConstrToTests() assertEquals(2, tests.size) assertEquals(Map("s"->"asd", "a"->0, "b"->1), tests(0)._1) assertEquals(Some("sad"), tests(0)._2) assertEquals(Map("s"->"asd", "a"->0, "b"->2), tests(1)._1) assertEquals(Some("das"), tests(1)._2) } @Test def test_createTestsFromConstraints2(): Unit = { val code = """(set-logic SLIA) |(synth-fun f ((s String)(a Int)(b Int)) String ((Start String (s)))) |(declare-var s String) |(declare-var a Int) |(declare-var b Int) |(constraint (= (f s 0 0) s)) |(constraint (= (str.len (f s a b)) (str.len s))) |(constraint (= "das" (f "asd" 0 2))) """.stripMargin val problem = LoadSygusBenchmark.parseText(code) val data = SygusProblemData(problem, mixedSpecAllowed = true) val tests = data.testCasesConstrToTests() assertEquals(1, tests.size) assertEquals(Map("s"->"asd", "a"->0, "b"->2), tests(0)._1) assertEquals(Some("das"), tests(0)._2) } @Test def test_createTestFromCounterex(): Unit = { val state = StateCDGP("resources/LIA/max2_t.sl") val model = Map("x"->5, "y"->9) val test = state.createTestFromCounterex(model) assertEquals(false, test.isCompleteTest) assertEquals(Map("x"->5, "y"->9), test.input) } @Test def test_comprehensive(): Unit = { def testBenchmark(path: String, singleAnswerF: Boolean, numTests: Int, numFormConstr: Int, tcInput: Map[String, Any], tcInitialInput: Option[Set[String]]): Unit = { println("-" * 50) println("FILE: " + path) println("-" * 50) val state = StateCDGP(path) // A newly created test, which happens after the verification. 
val test = state.createTestFromCounterex(Map("x"->5, "y"->9)) assertEquals(singleAnswerF, state.singleAnswerFormal) assertEquals(numTests, state.testsManager.newTests.size) if (state.testsManager.newTests.nonEmpty) { state.testsManager.newTests.foreach { tc => assertEquals(tcInitialInput.getOrElse(Set()), tc._1.keys.toSet) } } assertEquals(numFormConstr, state.sygusData.formalConstr.size) assertEquals(tcInput, test.input) } val testVars = Some(Set("a", "b")) val benchs = List( ("resources/LIA/tests/max2_f_diffNames.sl", true, 0, 3, Map("a"->5, "b"->9), None), ("resources/LIA/tests/max2_f_reversedNames.sl", true, 0, 3, Map("y"->5, "x"->9), None), ("resources/LIA/tests/max2_f_sameNames.sl", true, 0, 3, Map("x"->5, "y"->9), None), ("resources/LIA/tests/max2_m.sl", false, 5, 2, Map("x"->5, "y"->9), testVars), // x,y because the spec is incomplete ("resources/LIA/tests/max2_t.sl", false, 5, 0, Map("x"->5, "y"->9), testVars), ("resources/LIA/tests/max2_t_spuriousVars.sl", false, 5, 0, Map("x"->5, "y"->9), testVars) ) benchs.foreach{ b => testBenchmark(b._1, b._2, b._3, b._4, b._5, b._6) } } @Test def test_modelToSynthFunInputs(): Unit = { assertEquals(Map("a"->0, "b"->1), StateCDGP.modelToSynthFunInputs(Map("x"->0, "y"->1), Seq("x", "y"), Seq("a", "b"))) assertEquals(Map(), StateCDGP.modelToSynthFunInputs(Map("x"->0, "y"->1), Seq(), Seq())) assertEquals(Map("a"->0, "b"->1), StateCDGP.modelToSynthFunInputs(Map("x"->0, "y"->1, "z"->9), Seq("x", "y"), Seq("a", "b"))) assertEquals(Map("a"->0, "b"->2, "c"->1), StateCDGP.modelToSynthFunInputs(Map("x"->0, "y"->1), Seq("x", "2", "y"), Seq("a", "b", "c"))) assertEquals(Map("a"->1, "b"->2, "c"->3), StateCDGP.modelToSynthFunInputs(Map("x"->0, "y"->1), Seq("1", "2", "3"), Seq("a", "b", "c"))) } }
kkrawiec/CDGP
src/test/scala/TestCDGPState.scala
Scala
mit
20,137
/* * Copyright (C) 2018 Lightbend Inc. <https://www.lightbend.com> * Copyright (C) 2017-2018 Alexis Seigneurin. * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.kafka.streams.scala import org.apache.kafka.streams.kstream.{ KGroupedStream => KGroupedStreamJ, KGroupedTable => KGroupedTableJ, KStream => KStreamJ, KTable => KTableJ, SessionWindowedKStream => SessionWindowedKStreamJ, TimeWindowedKStream => TimeWindowedKStreamJ } import org.apache.kafka.streams.scala.kstream._ import org.apache.kafka.streams.KeyValue import org.apache.kafka.common.serialization.Serde import scala.language.implicitConversions import org.apache.kafka.streams.processor.StateStore /** * Implicit conversions between the Scala wrapper objects and the underlying Java * objects. */ object ImplicitConversions { implicit def wrapKStream[K, V](inner: KStreamJ[K, V]): KStream[K, V] = new KStream[K, V](inner) implicit def wrapKGroupedStream[K, V](inner: KGroupedStreamJ[K, V]): KGroupedStream[K, V] = new KGroupedStream[K, V](inner) implicit def wrapSessionWindowedKStream[K, V](inner: SessionWindowedKStreamJ[K, V]): SessionWindowedKStream[K, V] = new SessionWindowedKStream[K, V](inner) implicit def wrapTimeWindowedKStream[K, V](inner: TimeWindowedKStreamJ[K, V]): TimeWindowedKStream[K, V] = new TimeWindowedKStream[K, V](inner) implicit def wrapKTable[K, V](inner: KTableJ[K, V]): KTable[K, V] = new KTable[K, V](inner) implicit def wrapKGroupedTable[K, V](inner: KGroupedTableJ[K, V]): KGroupedTable[K, V] = new KGroupedTable[K, V](inner) implicit def tuple2ToKeyValue[K, V](tuple: (K, V)): KeyValue[K, V] = new KeyValue(tuple._1, tuple._2) // we would also like to allow users implicit serdes // and these implicits will convert them to `Serialized`, `Produced` or `Consumed` implicit def serializedFromSerde[K, V](implicit keySerde: Serde[K], valueSerde: Serde[V]): Serialized[K, V] = Serialized.`with`[K, V] implicit def consumedFromSerde[K, V](implicit keySerde: Serde[K], valueSerde: Serde[V]): Consumed[K, V] = Consumed.`with`[K, V] implicit def producedFromSerde[K, V](implicit keySerde: Serde[K], valueSerde: Serde[V]): Produced[K, V] = Produced.`with`[K, V] implicit def materializedFromSerde[K, V, S <: StateStore](implicit keySerde: Serde[K], valueSerde: Serde[V]): Materialized[K, V, S] = Materialized.`with`[K, V, S] implicit def joinedFromKeyValueOtherSerde[K, V, VO](implicit keySerde: Serde[K], valueSerde: Serde[V], otherValueSerde: Serde[VO]): Joined[K, V, VO] = Joined.`with`[K, V, VO] }
mihbor/kafka
streams/streams-scala/src/main/scala/org/apache/kafka/streams/scala/ImplicitConversions.scala
Scala
apache-2.0
3,533
/* * Copyright 2020 Precog Data * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package quasar import slamdata.Predef._ import scala.collection.mutable.ArrayBuffer import scala.concurrent.duration.FiniteDuration import cats.effect.IO class TestRateLimitUpdater[A] extends RateLimitUpdater[IO, A] { val plusOnes: ArrayBuffer[A] = ArrayBuffer() val waits: ArrayBuffer[A] = ArrayBuffer() val configs: ArrayBuffer[A] = ArrayBuffer() def plusOne(key: A): IO[Unit] = IO.delay(plusOnes += key) def wait(key: A, duration: FiniteDuration): IO[Unit] = IO.delay(waits += key) def config(key: A, config: RateLimiterConfig): IO[Unit] = IO.delay(configs += key) }
djspiewak/quasar
foundation/src/test/scala/quasar/TestRateLimitUpdater.scala
Scala
apache-2.0
1,183
package com.negrisoli.algorithms.implementation object BeautifulTriplets { def main(args: Array[String]): Unit = { val sc = new java.util.Scanner(System.in) val List(n, d) = ((0 to 1) map (i => sc.nextInt)).toList val a = ((0 to n - 1) map (i => sc.nextInt)).toList println(resolve(a, d)) } def resolve(a: List[Int], d: Int) = { def getFirst(idx: Int) = if (idx >= a.size) -1 else a indexWhere ( _ >= a(idx) + d, idx + 1) (for { i <- (0 until a.size - 2) j = getFirst(i) if j >= 0 && a(j) == a(i) + d k = getFirst(j) if k >= 0 && a(k) == a(j) + d } yield 1).size } }
rbatista/algorithms
challenges/hacker-rank/scala/src/main/scala/com/negrisoli/algorithms/implementation/BeautifulTriplets.scala
Scala
mit
651
package scoder object DecodeOk { def apply[T](t: T): DecodeResult[Nothing, T] = DecodeResult(Right(t)) def unapply[E, T](result: DecodeResult[E, T]): Option[T] = result match { case DecodeResult(Right(t)) => Some(t) case _ => None } }
mrvisser/scoder
core/src/main/scala/scoder/DecodeOk.scala
Scala
mit
256