code (string, lengths 5-1M) | repo_name (string, lengths 5-109) | path (string, lengths 6-208) | language (1 class) | license (15 classes) | size (int64, 5-1M)
---|---|---|---|---|---
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.job.yarn
import org.apache.samza.clustermanager.SamzaApplicationState
import org.apache.samza.config.{Config, MetricsConfig}
import org.apache.samza.util.Logging
import org.apache.samza.util.MetricsReporterLoader
import org.apache.samza.metrics.ReadableMetricsRegistry
import org.apache.samza.metrics.MetricsHelper
import scala.collection.JavaConverters._
object SamzaAppMasterMetrics {
val sourceName = "ApplicationMaster"
}
/**
* Responsible for wiring up Samza's metrics. Given that Samza has a metric
* registry, we might as well use it. This class takes Samza's application
* master state, and converts it to metrics.
*/
class SamzaAppMasterMetrics(val config: Config,
val state: SamzaApplicationState,
val registry: ReadableMetricsRegistry) extends MetricsHelper with Logging {
private val metricsConfig = new MetricsConfig(config)
val reporters = MetricsReporterLoader.getMetricsReporters(metricsConfig, SamzaAppMasterMetrics.sourceName).asScala
reporters.values.foreach(_.register(SamzaAppMasterMetrics.sourceName, registry))
def start() {
val mRunningContainers = newGauge("running-containers", () => state.runningProcessors.size)
val mNeededContainers = newGauge("needed-containers", () => state.neededProcessors.get())
val mCompletedContainers = newGauge("completed-containers", () => state.completedProcessors.get())
val mFailedContainers = newGauge("failed-containers", () => state.failedContainers.get())
val mReleasedContainers = newGauge("released-containers", () => state.releasedContainers.get())
val mContainers = newGauge("container-count", () => state.processorCount.get())
val mJobHealthy = newGauge("job-healthy", () => if (state.jobHealthy.get()) 1 else 0)
reporters.values.foreach(_.start)
}
def stop() {
reporters.values.foreach(_.stop)
}
}
| lhaiesp/samza | samza-yarn/src/main/scala/org/apache/samza/job/yarn/SamzaAppMasterMetrics.scala | Scala | apache-2.0 | 2,669 |
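// Editor's note: a hedged usage sketch for SamzaAppMasterMetrics above (not part of
// the original file). It assumes the caller already holds a Config and a
// SamzaApplicationState, and that MetricsRegistryMap is the usual
// ReadableMetricsRegistry implementation; none of that is shown in this file.
//
//   val registry = new MetricsRegistryMap()
//   val metrics = new SamzaAppMasterMetrics(config, state, registry)
//   metrics.start()   // registers the gauges and starts the configured reporters
//   // ... application master runs ...
//   metrics.stop()    // stops the reporters on shutdown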
/*
* Copyright (c) 2014, Brook 'redattack34' Heisler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the ModularRayguns team nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.castlebravostudios.rayguns.items.misc
import com.castlebravostudios.rayguns.api.LensGrinderRecipeRegistry
import com.castlebravostudios.rayguns.mod.Config
import com.castlebravostudios.rayguns.mod.ModularRayguns
import net.minecraft.block.Block
import net.minecraft.item.Item
import net.minecraft.item.ItemStack
object Tier3Diode extends Item {
setCreativeTab(ModularRayguns.raygunsTab)
setUnlocalizedName("rayguns.Tier3Diode")
setTextureName("rayguns:diode_t3")
}
| Redattack34/ModularRayguns | src/main/scala/com/castlebravostudios/rayguns/items/misc/Tier3Diode.scala | Scala | bsd-3-clause | 2,083 |
package com.yuzhouwan.hacker.singleton
/**
* Copyright @ 2019 yuzhouwan.com
* All right reserved.
* Function: Singleton Object
*
* @author Benedict Jin
* @since 2018/6/22
*/
class SingletonObj private(db: String) {
override def toString: String = db
}
object SingletonObj {
private val dbs: Map[String, SingletonObj] = Map(
"redis" -> new SingletonObj("redis"),
"mysql" -> new SingletonObj("mysql"),
"hbase" -> new SingletonObj("hbase")
)
def getDb(db: String): SingletonObj = {
if (dbs.contains(db)) dbs(db) else null
}
}
| asdf2014/yuzhouwan | yuzhouwan-hacker/src/main/scala/com/yuzhouwan/hacker/singleton/SingletonObj.scala | Scala | apache-2.0 | 568 |
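/*
 * Editor's usage sketch (hypothetical, not part of the repository above): the
 * private constructor means callers can only obtain the instances pre-built in
 * the companion's dbs map.
 */
package com.yuzhouwan.hacker.singleton

object SingletonObjExample extends App {
  println(SingletonObj.getDb("redis"))     // prints "redis" via the overridden toString
  println(SingletonObj.getDb("cassandra")) // unknown keys return null
}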
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.kafka08
import java.util.UUID
import com.typesafe.scalalogging.LazyLogging
import kafka.utils.ZkUtils
import org.I0Itec.zkclient.ZkClient
import org.geotools.data.store.{ContentDataStore, ContentEntry}
import org.geotools.feature.NameImpl
import org.joda.time.{Duration, Instant}
import org.junit.runner.RunWith
import org.locationtech.geomesa.kafka.{KafkaDataStoreHelper, ReplayConfig}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.`type`.Name
import org.opengis.feature.simple.SimpleFeatureType
import org.specs2.mutable.{After, Specification}
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConverters._
@RunWith(classOf[JUnitRunner])
class KafkaDataStoreSchemaManagerTest
extends Specification
with HasEmbeddedKafka {
// skip embedded kafka tests unless explicitly enabled, they often fail randomly
skipAllUnless(sys.props.get(SYS_PROP_RUN_TESTS).exists(_.toBoolean))
// todo: missing tests -
// todo test general exception handling (use zk mock for this?)
"createSchema" should {
"fail if SFT is not a Streaming or Replay type" in new ZkContext(zkConnect) {
val typename = "test-create-no-topic"
val sft = createSFT(typename)
val datastore = new TestDataStore(zkConnect, zkPath)
datastore.createSchema(sft) must throwA[IllegalArgumentException]
}
"with a Streaming SFT" should {
"result in exactly one schema node and exactly one topic node" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-create-live"
val sft = createStreamingSFT(typename)
datastore.createSchema(sft)
zkClient.countChildren(zkPath) mustEqual 1
val zChildren = zkClient.getChildren(zkPath)
zChildren.size() mustEqual 1
zChildren.get(0) mustEqual typename
val sPath = datastore.getSchemaPath(typename)
val encoded = zkClient.readData[String](sPath)
encoded mustEqual SimpleFeatureTypes.encodeType(sft, includeUserData = true)
val sChildren = zkClient.getChildren(sPath)
sChildren.size() mustEqual 1
sChildren.get(0) mustEqual "Topic"
val topic = zkClient.readData[String](datastore.getTopicPath(typename))
zkUtils.topicExists(topic) must beTrue
}
}
"with a Replay SFT" should {
"result in exactly one schema node and exactly one topic node and one replay node" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-create-replay"
val sft = createReplaySFT(typename)
datastore.createSchema(sft)
zkClient.countChildren(zkPath) mustEqual 1
val zChildren = zkClient.getChildren(zkPath)
zChildren.size() mustEqual 1
val replayTypename = zChildren.get(0)
replayTypename mustEqual sft.getTypeName
val sPath = datastore.getSchemaPath(replayTypename)
val encoded = zkClient.readData[String](sPath)
encoded mustEqual SimpleFeatureTypes.encodeType(sft, includeUserData = true)
val sChildren = zkClient.getChildren(sPath).asScala
sChildren.size mustEqual 2
sChildren must contain("Topic", "ReplayConfig")
val topic = zkClient.readData[String](datastore.getTopicPath(replayTypename))
zkUtils.topicExists(topic) must beTrue
val encodeReplayConfig = zkClient.readData[String](datastore.getReplayConfigPath(replayTypename))
ReplayConfig.decode(encodeReplayConfig) must beSome(replayConfig)
}
}
"fail if type already created" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-live-already-exists"
val sft = createStreamingSFT(typename)
datastore.createSchema(sft)
val sft2 = createStreamingSFT(typename)
datastore.createSchema(sft2) must throwA[IllegalArgumentException]
}
}
"getFeatureConfig" should {
"throws exception when type doesn't exists" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val typeName = "test-get-doesNotExist"
datastore.getFeatureConfig(typeName) must throwA[RuntimeException]
}
"retrieve an existing schema" >> {
"for a Streaming SFT" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-get-streaming"
val sft = createStreamingSFT(typename)
val topic = KafkaDataStoreHelper.extractTopic(sft).get
datastore.createSchema(sft)
val fc = datastore.getFeatureConfig(sft.getTypeName)
fc must not(beNull)
fc.topic mustEqual topic
fc.replayConfig must beNone
fc.sft mustEqual sft
fc.sft.getUserData mustEqual sft.getUserData
}
"for a Replay SFT" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-get-replay"
val sft = createReplaySFT(typename)
val topic = KafkaDataStoreHelper.extractTopic(sft).get
datastore.createSchema(sft)
val fc = datastore.getFeatureConfig(sft.getTypeName)
fc must not(beNull)
fc.topic mustEqual topic
fc.replayConfig must beSome(replayConfig)
fc.sft mustEqual sft
fc.sft.getUserData mustEqual sft.getUserData
}
}
}
"getNames" should {
"return all created type names" >> {
"when there are none" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val names = datastore.getNames()
names must haveSize(0)
}
"when there is one" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-getnames-single"
val liveSft = createStreamingSFT(typename)
datastore.createSchema(liveSft)
val expected = List(liveSft.getTypeName).map(name => new NameImpl(name): Name)
val names = datastore.getNames().asScala
names must containTheSameElementsAs(expected)
}
"when there are multiple" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-getnames-multiple"
val liveSft = createStreamingSFT(typename)
datastore.createSchema(liveSft)
val replaySft = KafkaDataStoreHelper.createReplaySFT(liveSft, replayConfig)
datastore.createSchema(replaySft)
val expected = List(liveSft.getTypeName, replaySft.getTypeName).map(name => new NameImpl(name): Name)
val names = datastore.getNames().asScala
names must containTheSameElementsAs(expected)
}
}
}
"removeSchema" should {
"remove the specified schema" >> {
"when given a String" in new ZkContext(zkConnect) {
//create two schemas (one replay, one live)
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-remove-string"
val liveSFT = createStreamingSFT(typename)
datastore.createSchema(liveSFT)
val replaySFT = KafkaDataStoreHelper.createReplaySFT(liveSFT, replayConfig)
datastore.createSchema(replaySFT)
// verify there are two
datastore.getNames must haveSize(2)
// now remove the replay
datastore.removeSchema(replaySFT.getTypeName)
datastore.getNames must haveSize(1)
datastore.getFeatureConfig(replaySFT.getTypeName) must throwA[RuntimeException]
datastore.getFeatureConfig(liveSFT.getTypeName) must not(beNull)
zkClient.exists(datastore.getSchemaPath(replaySFT.getTypeName)) must beFalse
zkClient.exists(datastore.getSchemaPath(liveSFT.getTypeName)) must beTrue
val topic = KafkaDataStoreHelper.extractTopic(liveSFT).get
zkUtils.topicExists(topic) must beTrue and (
zkClient.exists(ZkUtils.getDeleteTopicPath(topic)) must beFalse)
}
"when given a Name" in new ZkContext(zkConnect) {
//create two schemas (one replay, one live)
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-remove-string"
val liveSFT = createStreamingSFT(typename)
datastore.createSchema(liveSFT)
val replaySFT = KafkaDataStoreHelper.createReplaySFT(liveSFT, replayConfig)
datastore.createSchema(replaySFT)
// verify there are two
datastore.getNames must haveSize(2)
// now remove the replay
datastore.removeSchema(replaySFT.getName)
datastore.getNames must haveSize(1)
datastore.getFeatureConfig(replaySFT.getTypeName) must throwA[RuntimeException]
datastore.getFeatureConfig(liveSFT.getTypeName) must not(beNull)
zkClient.exists(datastore.getSchemaPath(replaySFT.getTypeName)) must beFalse
zkClient.exists(datastore.getSchemaPath(liveSFT.getTypeName)) must beTrue
val topic = KafkaDataStoreHelper.extractTopic(liveSFT).get
zkUtils.topicExists(topic) must beTrue and (
zkClient.exists(ZkUtils.getDeleteTopicPath(topic)) must beFalse)
}
"delete the topic when removing a Streaming SFT" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-remove-string"
val liveSFT = createStreamingSFT(typename)
datastore.createSchema(liveSFT)
// now remove the replay
datastore.removeSchema(liveSFT.getTypeName)
datastore.getNames must haveSize(0)
datastore.getFeatureConfig(liveSFT.getTypeName) must throwA[RuntimeException]
zkClient.exists(datastore.getSchemaPath(liveSFT.getTypeName)) must beFalse
// the topic should no longer exist or at a minimum be marked for deletion
val topic = KafkaDataStoreHelper.extractTopic(liveSFT).get
zkUtils.topicExists(topic) must beFalse or (
zkClient.exists(ZkUtils.getDeleteTopicPath(topic)) must beTrue)
}
}
}
"getLiveFeatureType" should {
"retrieve the appropriate original schema" in new ZkContext(zkConnect) {
//create two schemas (one replay, one live)
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-getlive"
val liveSFT = createStreamingSFT(typename)
datastore.createSchema(liveSFT)
val replaySFT = KafkaDataStoreHelper.createReplaySFT(liveSFT, replayConfig)
datastore.createSchema(replaySFT)
val result = datastore.getLiveFeatureType(replaySFT)
result must beSome(liveSFT)
val origSft = result.get
origSft.getUserData mustEqual liveSFT.getUserData
}
"return None if the original schema doesn't exist" in new ZkContext(zkConnect) {
//create two schemas (one replay, one live)
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-getlive-noexists"
val liveSFT = createStreamingSFT(typename)
datastore.createSchema(liveSFT)
val replaySFT = KafkaDataStoreHelper.createReplaySFT(liveSFT, replayConfig)
datastore.createSchema(replaySFT)
datastore.removeSchema(liveSFT.getTypeName)
datastore.getLiveFeatureType(replaySFT) must beNone
}
"return None if not a replay schema" in new ZkContext(zkConnect) {
val datastore = new TestDataStore(zkConnect, zkPath)
val typename = "test-getlive-notreplay"
val liveSFT = createStreamingSFT(typename)
datastore.createSchema(liveSFT)
datastore.getLiveFeatureType(liveSFT) must beNone
}
}
step {
shutdown()
}
}
class TestDataStore(override val zookeepers: String,
override val zkPath: String)
extends ContentDataStore with KafkaDataStoreSchemaManager with LazyLogging {
override val partitions: Int = 1
override val replication: Int = 1
override def createFeatureSource(entry: ContentEntry) = {
throw new UnsupportedOperationException(
"called TestDataStore.createFeatureSource() - this should not have happened!!!")
}
override def createTypeNames() = getNames()
}
class ZkContext(val zkConnect: String) extends After with LazyLogging {
val schema = "name:String,age:Int,dtg:Date,*geom:Point:srid=4326"
lazy val replayConfig = new ReplayConfig(new Instant(123L), new Instant(223L), new Duration(5L))
val zkUtils = KafkaUtils08.createZkUtils(zkConnect, Int.MaxValue, Int.MaxValue)
val zkClient = zkUtils.zkClient
val zkPath = createRandomZkNode(zkClient)
logger.trace(s"created $zkPath")
def createSFT(typeName: String): SimpleFeatureType = {
SimpleFeatureTypes.createType(typeName, schema)
}
def createStreamingSFT(typeName: String): SimpleFeatureType = {
KafkaDataStoreHelper.createStreamingSFT(createSFT(typeName), zkPath)
}
def createReplaySFT(typeName: String, rc: ReplayConfig = replayConfig): SimpleFeatureType = {
KafkaDataStoreHelper.createReplaySFT(createStreamingSFT(typeName), rc)
}
override def after = {
logger.trace(s"cleaning up $zkPath")
zkClient.deleteRecursive(zkPath)
// zkClient.close()
}
private def createRandomZkNode(zkClient: ZkClient): String = {
val randomPath = s"/kdssmTest-${UUID.randomUUID}"
logger.trace(s"creating zkPath: $randomPath")
zkClient.createPersistent(randomPath)
randomPath
}
}
| nagavallia/geomesa | geomesa-kafka/geomesa-kafka-datastore/geomesa-kafka-08-datastore/src/test/scala/org/locationtech/geomesa/kafka08/KafkaDataStoreSchemaManagerTest.scala | Scala | apache-2.0 | 13,940 |
package com.github.mdr.mash.functions
import com.github.mdr.mash.completions.CompletionSpec
import com.github.mdr.mash.evaluator.{ Arguments, EvaluationContext }
import com.github.mdr.mash.inference._
import com.github.mdr.mash.runtime.MashValue
abstract class MashMethod(val name: String) {
def call(target: MashValue, boundParams: BoundParams): MashValue
def paramContext(target: MashValue): EvaluationContext = EvaluationContext.NotUsed
def callNullary(target: MashValue): MashValue = call(target, params.bindTo(Arguments.EmptyArguments, paramContext(target)))
def params: ParameterModel
def allowsNullary: Boolean = params.allowsNullary
def typeInferenceStrategy: MethodTypeInferenceStrategy = NoMethodTypeInferenceStrategy
override def toString = s"<.$name>"
def flags: Seq[Flag] = params.flags
def getCompletionSpecs(argPos: Int, targetTypeOpt: Option[Type], arguments: TypedArguments): Seq[CompletionSpec] = Seq()
def summaryOpt: Option[String]
def descriptionOpt: Option[String] = None
def isPrivate: Boolean = false
def isPublic: Boolean = !isPrivate
def aliases: Seq[String] = Seq()
def names: Seq[String] = name +: aliases
/**
* If the method is shy, then it is not made available in subclasses without qualification through "this"
*/
val isShy: Boolean = false
def sourceOpt: Option[String] = None
}
| mdr/mash | src/main/scala/com/github/mdr/mash/functions/MashMethod.scala | Scala | mit | 1,375 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.redis.sink.writer
import com.datamountaineer.kcql.Kcql
import com.datamountaineer.streamreactor.connect.redis.sink.config.{RedisKCQLSetting, RedisSinkSettings}
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.kafka.connect.errors.ConnectException
import org.apache.kafka.connect.sink.SinkRecord
import scala.collection.JavaConverters._
import scala.util.{Failure, Success, Try}
/**
* A generic Redis `writer` that can store data into Redis Streams, configured via KCQL
*
* Requires KCQL syntax: INSERT .. SELECT .. STOREAS stream
*
* Examples:
*
* INSERT INTO stream1 SELECT * from cpuTopic STOREAS stream
*/
class RedisStreams(sinkSettings: RedisSinkSettings) extends RedisWriter with PubSubSupport {
val configs: Set[Kcql] = sinkSettings.kcqlSettings.map(_.kcqlConfig)
configs.foreach { c =>
// assert(c.getTarget.length > 0, "Add to your KCQL syntax : INSERT INTO REDIS_KEY_NAME ")
assert(c.getSource.trim.length > 0, "You need to define one (1) topic to source data. Add to your KCQL syntax: SELECT * FROM topicName")
val allFields = if (c.getIgnoredFields.isEmpty) false else true
assert(c.getStoredAs.equalsIgnoreCase("Stream"), "This mode requires the KCQL syntax: STOREAS Stream")
}
// Write a sequence of SinkRecords to Redis
override def write(records: Seq[SinkRecord]): Unit = {
if (records.isEmpty)
logger.debug("No records received on 'STREAM' Redis writer")
else {
logger.debug(s"'STREAM' Redis writer received ${records.size} records")
insert(records.groupBy(_.topic))
}
}
// Insert a batch of sink records
def insert(records: Map[String, Seq[SinkRecord]]): Unit = {
records.foreach({
case (topic, sinkRecords: Seq[SinkRecord]) => {
val topicSettings: Set[RedisKCQLSetting] = sinkSettings.kcqlSettings.filter(_.kcqlConfig.getSource == topic)
if (topicSettings.isEmpty)
logger.warn(s"Received a batch for topic $topic - but no KCQL supports it")
val t = Try {
sinkRecords.foreach { record =>
topicSettings.map { KCQL =>
// Get a SinkRecord
val recordToSink = convert(record, fields = KCQL.fieldsAndAliases, ignoreFields = KCQL.ignoredFields)
val jsonPayload = convertValueToJson(recordToSink)
val payload = Try(new ObjectMapper().convertValue(jsonPayload, classOf[java.util.HashMap[String, Any]])) match {
case Success(value) =>
value.asScala.toMap.map{
case(k, v) =>
(k, v.toString)
}
case Failure(exception) =>
throw new ConnectException(s"Failed to convert payload to key value pairs", exception)
}
jedis.xadd(KCQL.kcqlConfig.getTarget, null, payload.asJava)
}
}
}
handleTry(t)
}
logger.debug(s"Published ${sinkRecords.size} messages for topic $topic")
})
}
}
| datamountaineer/stream-reactor | kafka-connect-redis/src/main/scala/com/datamountaineer/streamreactor/connect/redis/sink/writer/RedisStreams.scala | Scala | apache-2.0 | 3,649 |
package dit4c.scheduler.service
import akka.actor._
import akka.http.scaladsl.Http
import akka.stream.scaladsl._
import akka.stream.OverflowStrategy
import akka.http.scaladsl.model.ws.Message
import akka.http.scaladsl.model.ws.WebSocketRequest
import akka.NotUsed
import akka.stream.ActorMaterializer
import akka.http.scaladsl.model.ws.BinaryMessage
import akka.util.ByteString
import scala.concurrent.Future
import akka.http.scaladsl.model.ws.TextMessage
import akka.event.LoggingReceive
import akka.http.scaladsl.model.Uri
import dit4c.scheduler.domain.RktClusterManager
import dit4c.scheduler.domain.Instance
import java.time.Instant
import scala.util.Random
import dit4c.common.KryoSerializable
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.HttpMethods
import akka.http.scaladsl.model.Multipart
import akka.http.scaladsl.model.ContentTypes
import akka.http.scaladsl.model.ContentType
import akka.http.scaladsl.model.MediaType
import akka.http.scaladsl.model.HttpCharsets
import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.model.RequestEntity
import akka.http.scaladsl.marshalling.Marshal
import akka.util.Timeout
import scala.concurrent.duration._
import akka.http.scaladsl.model.HttpResponse
import pdi.jwt.JwtClaim
import akka.http.scaladsl.model.headers.Authorization
import akka.http.scaladsl.model.headers.Cookie
import akka.http.scaladsl.model.headers.`Set-Cookie`
import akka.http.scaladsl.model.headers.OAuth2BearerToken
import akka.http.scaladsl.model.HttpHeader
import java.util.Base64
import dit4c.scheduler.domain.Cluster
import java.net.URLEncoder
object PortalMessageBridge {
case object BridgeClosed extends KryoSerializable
class UnmarshallingActor extends Actor with ActorLogging {
implicit val materializer = ActorMaterializer()
val receive: Receive = LoggingReceive {
case msg: BinaryMessage =>
implicit val ec = context.dispatcher
toByteString(msg).foreach { bs =>
import dit4c.protobuf.scheduler.inbound.InboundMessage
import InboundMessage.Payload
val parsedMsg = InboundMessage.parseFrom(bs.toArray).payload match {
case Payload.Empty => None // Do nothing
case Payload.RequestInstanceStateUpdate(value) => Some(value)
case Payload.StartInstance(value) => Some(value)
case Payload.DiscardInstance(value) => Some(value)
case Payload.SaveInstance(value) => Some(value)
case Payload.ConfirmInstanceUpload(value) => Some(value)
case Payload.SignedMessageForScheduler(value) => Some(value)
}
parsedMsg match {
case None =>
log.debug(s"portal sent empty keep-alive message")
case Some(msg) =>
log.debug(s"portal sent message: $msg")
sendToParent(msg)
}
}
case msg: TextMessage =>
// Ignore text messages, but drain to avoid problems
val addPrefix = "Text from portal: ".concat _
msg.textStream.runForeach(addPrefix.andThen(log.info))
case BridgeClosed =>
log.debug("portal message bridge closed - sending to parent")
sendToParent(BridgeClosed)
case akka.actor.Status.Failure(e) =>
log.error(s"portal message bridge connection failed: $e")
sendToParent(BridgeClosed)
case msg =>
throw new Exception(s"Unknown message: $msg")
}
def sendToParent[M](msg: M) { context.parent ! msg }
def toByteString(binaryMessage: BinaryMessage): Future[ByteString] = binaryMessage match {
case BinaryMessage.Strict(bs) => Future.successful(bs)
case BinaryMessage.Streamed(dataStream) =>
dataStream.runReduce((m: ByteString, n: ByteString) => m ++ n)
}
}
}
class PortalMessageBridge(keyManager: ActorRef, registrationUrl: String)
extends Actor with ActorLogging with Stash {
import dit4c.scheduler.service
implicit val materializer = ActorMaterializer()
var outboundSource: Source[Message, ActorRef] = null
var inboundSink: Sink[Message, NotUsed] = null
var inbound: ActorRef = null
// Everything is so closely linked that a child failure means we should shut everything down
override val supervisorStrategy = AllForOneStrategy() {
case _ => SupervisorStrategy.Escalate
}
override def preStart {
import context.dispatcher
import akka.pattern.ask
implicit val timeout = Timeout(5.seconds)
inbound = context.watch(context.actorOf(Props[PortalMessageBridge.UnmarshallingActor], "unmarshaller"))
inboundSink = Sink.actorRef(inbound, PortalMessageBridge.BridgeClosed)
outboundSource = Source.actorRef(128, OverflowStrategy.dropHead)
def outboundActorRefExtractor(nu: NotUsed, ref: ActorRef) = ref
val setup = for {
armoredPgpPublicKeyRing <- (keyManager ? KeyManager.GetPublicKeyInfo).collect {
case KeyManager.PublicKeyInfo(fingerprint, keyBlock) =>
keyBlock
}
payload <- {
val formData = Multipart.FormData(Multipart.FormData.BodyPart.Strict(
"keys",
HttpEntity(
MediaType.applicationWithFixedCharset("pgp-keys", HttpCharsets.`UTF-8`).toContentType,
armoredPgpPublicKeyRing),
Map("filename" -> "keys.asc")))
Marshal(formData).to[RequestEntity]
}
(redirectUri, cookies) <- Http()(context.system)
.singleRequest(
HttpRequest(
method=HttpMethods.POST,
registrationUrl,
entity=payload)
)
.collect {
case r: HttpResponse if r.status.isRedirection => r
}
.map { r =>
(
Uri(r.getHeader("Location").get.value)
.resolvedAgainst(registrationUrl),
r.headers.toList.collect {
case `Set-Cookie`(cookie) => cookie
}
)
}
authClaim = JwtClaim(expiration=Some(Instant.now.getEpochSecond+120))
authToken <- (keyManager ? KeyManager.SignJwtClaim(authClaim)).collect {
case KeyManager.SignedJwtTokens(token :: others) => token
}
websocketUri = redirectUri.copy(scheme = redirectUri.scheme match {
case "http" => "ws"
case "https" => "wss"
})
} yield {
val outbound = Http()(context.system).singleWebSocketRequest(
WebSocketRequest.fromTargetUri(websocketUri)
.copy(extraHeaders=
Authorization(OAuth2BearerToken(authToken)) ::
cookies.map(c => Cookie(c.pair))
),
Flow.fromSinkAndSourceMat(
inboundSink, outboundSource)(outboundActorRefExtractor))._2
self ! outbound
context.parent ! ClusterManager.GetClusters
}
setup.recover({
case e =>
log.error(e, "Setup failed")
context.stop(self)
})
}
val receive: Receive = {
case outbound: ActorRef if sender == self =>
unstashAll()
context.become(running(outbound))
context.watch(outbound)
case msg => stash()
}
def running(outbound: ActorRef): Receive = LoggingReceive {
// Inbound
case dit4c.protobuf.scheduler.inbound.RequestInstanceStateUpdate(instanceId, clusterId) =>
import dit4c.scheduler.domain._
context.parent ! service.ClusterManager.ClusterCommand(clusterId,
RktClusterManager.GetInstanceStatus(instanceId))
case dit4c.protobuf.scheduler.inbound.StartInstance(instanceId, clusterId, imageUrl, clusterAccessPasses) =>
import dit4c.scheduler.domain._
log.info(s"Instance $instanceId requested on $clusterId using $imageUrl, with access passes:\\n"+
clusterAccessPasses.map(_.toByteArray).map(Base64.getEncoder.encodeToString).mkString("\\n"))
context.parent ! service.ClusterManager.ClusterCommand(clusterId,
RktClusterManager.StartInstance(instanceId, imageUrl, portalUri))
case dit4c.protobuf.scheduler.inbound.SaveInstance(instanceId, clusterId, imageServer) =>
import dit4c.scheduler.domain._
import dit4c.scheduler.domain.{instance => i}
context.parent ! service.ClusterManager.ClusterCommand(clusterId,
RktClusterManager.InstanceEnvelope(instanceId,
Instance.Save(imageServer)))
case dit4c.protobuf.scheduler.inbound.DiscardInstance(instanceId, clusterId) =>
import dit4c.scheduler.domain._
import dit4c.scheduler.domain.{instance => i}
context.parent ! service.ClusterManager.ClusterCommand(clusterId,
RktClusterManager.InstanceEnvelope(instanceId, Instance.Discard))
case dit4c.protobuf.scheduler.inbound.ConfirmInstanceUpload(instanceId, clusterId) =>
import dit4c.scheduler.domain._
import dit4c.scheduler.domain.{instance => i}
context.parent ! service.ClusterManager.ClusterCommand(clusterId,
RktClusterManager.InstanceEnvelope(instanceId, Instance.ConfirmUpload))
case dit4c.protobuf.scheduler.inbound.SignedMessageForScheduler(msg) =>
log.info(s"Signed message received:\\n$msg")
context.actorOf(
Props(classOf[SignedMessageProcessor], keyManager, msg))
// Outbound
case Instance.StatusReport(Instance.Errored, data: Instance.ErrorData) =>
import dit4c.protobuf.scheduler.{outbound => pb}
val msg = pb.OutboundMessage(newMsgId, pb.OutboundMessage.Payload.InstanceStateUpdate(
pb.InstanceStateUpdate(data.instanceId, pb.InstanceStateUpdate.InstanceState.ERRORED,
data.errors.mkString("\n\n"), Some(pbTimestamp(Instant.now)))
))
outbound ! toBinaryMessage(msg.toByteArray)
case Instance.StatusReport(state, data: Instance.SomeData) =>
import dit4c.protobuf.scheduler.{outbound => pb}
val pbState = state match {
case Instance.JustCreated => pb.InstanceStateUpdate.InstanceState.CREATED
case Instance.WaitingForImage => pb.InstanceStateUpdate.InstanceState.CREATED
case Instance.Starting => pb.InstanceStateUpdate.InstanceState.STARTING
case Instance.Running => pb.InstanceStateUpdate.InstanceState.STARTED
case Instance.Stopping => pb.InstanceStateUpdate.InstanceState.STOPPING
case Instance.Exited => pb.InstanceStateUpdate.InstanceState.EXITED
case Instance.Saved => pb.InstanceStateUpdate.InstanceState.SAVED
case Instance.Saving => pb.InstanceStateUpdate.InstanceState.SAVING
case Instance.Uploading => pb.InstanceStateUpdate.InstanceState.UPLOADING
case Instance.Uploaded => pb.InstanceStateUpdate.InstanceState.UPLOADED
case Instance.Discarding => pb.InstanceStateUpdate.InstanceState.DISCARDING
case Instance.Discarded => pb.InstanceStateUpdate.InstanceState.DISCARDED
case Instance.Errored => pb.InstanceStateUpdate.InstanceState.ERRORED
}
val msg = pb.OutboundMessage(newMsgId, pb.OutboundMessage.Payload.InstanceStateUpdate(
pb.InstanceStateUpdate(data.instanceId, pbState, "", Some(pbTimestamp(Instant.now)))
))
outbound ! toBinaryMessage(msg.toByteArray)
data match {
case data: Instance.StartData =>
import dit4c.common.KeyHelpers._
data.keys.foreach { keys =>
val msg = pb.OutboundMessage(newMsgId, pb.OutboundMessage.Payload.AllocatedInstanceKey(
pb.AllocatedInstanceKey(data.instanceId, keys.armoredPgpPublicKeyBlock)))
outbound ! toBinaryMessage(msg.toByteArray)
}
case _ => // No need to do anything
}
case msg: Cluster.GetStateResponse =>
import dit4c.protobuf.scheduler.{outbound => pb}
Some(msg)
.collect {
case Cluster.Active(clusterId, displayName, supportsSave) =>
pb.ClusterStateUpdate(
clusterId,
pb.ClusterStateUpdate.ClusterState.ACTIVE,
displayName,
supportsSave,
Some(pbTimestamp(Instant.now)))
case Cluster.Inactive(clusterId, displayName) =>
pb.ClusterStateUpdate(
clusterId,
pb.ClusterStateUpdate.ClusterState.INACTIVE,
displayName,
false,
Some(pbTimestamp(Instant.now)))
}
.map { msg =>
pb.OutboundMessage(newMsgId,
pb.OutboundMessage.Payload.ClusterStateUpdate(msg))
}
.foreach { msg =>
outbound ! toBinaryMessage(msg.toByteArray)
}
case RktClusterManager.UnableToStartInstance(instanceId, reason) =>
import dit4c.protobuf.scheduler.{outbound => pb}
val msg = pb.OutboundMessage(newMsgId, pb.OutboundMessage.Payload.InstanceStateUpdate(
pb.InstanceStateUpdate(instanceId, pb.InstanceStateUpdate.InstanceState.ERRORED,
reason, Some(pbTimestamp(Instant.now)))
))
outbound ! toBinaryMessage(msg.toByteArray)
case RktClusterManager.UnknownInstance(instanceId) =>
import dit4c.protobuf.scheduler.{outbound => pb}
val msg = pb.OutboundMessage(newMsgId, pb.OutboundMessage.Payload.InstanceStateUpdate(
pb.InstanceStateUpdate(instanceId, pb.InstanceStateUpdate.InstanceState.UNKNOWN,
"", Some(pbTimestamp(Instant.now)))
))
outbound ! toBinaryMessage(msg.toByteArray)
case msg: dit4c.scheduler.api.AddNode if msg.sshHostKeyFingerprints.isEmpty =>
log.error(s"Received add node request with no fingerprints: $msg")
case dit4c.scheduler.api.AddNode(clusterId, host, port, username, sshHostKeyFingerprints) =>
val bestFingerprint =
(sshHostKeyFingerprints.filter(_.startsWith("SHA256:")) ++ sshHostKeyFingerprints).head
val id =
Seq(username, host, port.toString, bestFingerprint)
.map(URLEncoder.encode(_, "UTF-8"))
.mkString("_")
context.parent ! ClusterManager.ClusterCommand(
clusterId,
RktClusterManager.AddRktNode(
id,
host,
port,
username,
sshHostKeyFingerprints,
"/var/lib/dit4c-rkt"))
case dit4c.scheduler.api.CoolDownNodes(clusterId, sshHostKeyFingerprints) =>
context.parent ! ClusterManager.ClusterCommand(
clusterId,
RktClusterManager.CoolDownRktNodes(sshHostKeyFingerprints))
case dit4c.scheduler.api.DecommissionNodes(clusterId, sshHostKeyFingerprints) =>
context.parent ! ClusterManager.ClusterCommand(
clusterId,
RktClusterManager.DecommissionRktNodes(sshHostKeyFingerprints))
case PortalMessageBridge.BridgeClosed =>
log.info(s"bridge closed → terminating outbound actor")
outbound ! akka.actor.Status.Success(NotUsed)
case Terminated(ref) if ref == outbound =>
log.info(s"shutting down after outbound actor terminated")
context.stop(self)
}
/**
* 128-bit identifier as hexadecimal
*
* Intended to be long enough that it's globally unlikely to have a collision,
* but based on time so it can also be sorted.
*/
protected def newMsgId = {
val now = Instant.now
f"${now.getEpochSecond}%016x".takeRight(10) + // 40-bit epoch seconds
f"${now.getNano / 100}%06x" + // 24-bit 100 nanosecond slices
f"${Random.nextLong}%016x" // 64-bits of random
}
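  // Editor's note: the concatenation above always yields 32 hex characters
  // (10 + 6 + 16), so sorting IDs lexicographically orders them by creation
  // time down to 100 ns, while the trailing 64 random bits keep IDs generated
  // within the same time slice distinct.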
protected def toBinaryMessage(bs: Array[Byte]): BinaryMessage = BinaryMessage(ByteString(bs))
protected def pbTimestamp(t: Instant): com.google.protobuf.timestamp.Timestamp =
com.google.protobuf.timestamp.Timestamp(t.getEpochSecond, t.getNano)
protected lazy val portalUri: String = {
val scheme = Uri(registrationUrl).scheme match {
case "ws" => "http"
case "wss" => "https"
case other => other
}
Uri(registrationUrl).copy(scheme = scheme, path = Uri.Path.Empty, rawQueryString = None, fragment = None).toString
}
}
| dit4c/dit4c | dit4c-scheduler/src/main/scala/dit4c/scheduler/service/PortalMessageBridge.scala | Scala | mit | 15,842 |
/**
* Created by Irina on 8/20/14.
*/
package org.scalatrain
case class JourneyPlanner(trains: Set[Train]) {
require(trains != null, "trains must not be null")
val stations: Set[Station] = trains flatMap { _.stations }
def trains(station: Station): Set[Train] = {
require(station != null, "station must not be null")
trains filter { _.stations contains station }
}
}
| onoprodum/myScalatrain | src/main/scala/JourneyPlanner.scala | Scala | gpl-2.0 | 388 |
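// Editor's note: a hedged usage sketch for JourneyPlanner (not part of the file
// above). It assumes Station and Train are simple case classes defined elsewhere
// in org.scalatrain, with Train exposing the stations it calls at.
//
//   val planner = JourneyPlanner(Set(ice724, ice726))
//   planner.stations                  // every station served by either train
//   planner.trains(Station("Munich")) // the subset of trains calling at Munich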
/*
* Copyright 2010-2011 Vilius Normantas <[email protected]>
*
* This file is part of Crossbow library.
*
* Crossbow is free software: you can redistribute it and/or modify it under the terms of the GNU
* General Public License as published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* Crossbow is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
* even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with Crossbow. If not,
* see <http://www.gnu.org/licenses/>.
*/
package lt.norma.crossbow.indicators
import lt.norma.crossbow.core._
import lt.norma.crossbow.messages.BarClose
import lt.norma.crossbow.core.testutils.approx
import org.joda.time.DateTime
import org.scalatest.FunSuite
class EmaContinuousTest extends FunSuite {
class I(n: String) extends MutableIndicator[Double] {
def name = n
def dependencies = Empty
}
test("EmaContinuous indicator") {
val i1 = new I("A")
val i = new EmaContinuous(5, i1)
val l = new IndicatorList(i)
val e = 0.000005
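    // Editor's note: the expected values below follow the standard exponential
    // moving average recurrence EMA_t = EMA_{t-1} + a * (x_t - EMA_{t-1}) with
    // a = 2 / (period + 1) = 1/3 for period 5 (inferred from the values, not
    // stated in this file). E.g. after inputs 0.5 and 1:
    // 0.5 + (1 - 0.5) / 3 = 0.66667, then 0.66667 + (2 - 0.66667) / 3 = 1.11111.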
expect("EMA_C(5; A)") {
i.name
}
expect(Set(i1)) {
i.dependencies
}
expect(None) {
i()
}
i1.set(0.5)
l.send(new BarClose(new DateTime))
expect(0.5) {
i.value
}
i1.set(1)
l.send(new BarClose(new DateTime))
approx(0.66667, e) {
i.value
}
i1.set(2)
l.send(new BarClose(new DateTime))
approx(1.11111, e) {
i.value
}
i1.set(1.5)
l.send(new BarClose(new DateTime))
approx(1.24074, e) {
i.value
}
i1.set(0.5)
l.send(new BarClose(new DateTime))
approx(0.99383, e) {
i.value
}
i1.set(6)
l.send(new BarClose(new DateTime))
approx(2.66255, e) {
i.value
}
i1.set(3)
l.send(new BarClose(new DateTime))
approx(2.77503, e) {
i.value
}
i1.set(2)
l.send(new BarClose(new DateTime))
approx(2.51669, e) {
i.value
}
i1.set(1)
l.send(new BarClose(new DateTime))
approx(2.01113, e) {
i.value
}
i1.set(-25)
l.send(new BarClose(new DateTime))
approx(-6.99258, e) {
i.value
}
i1.unset()
l.send(new BarClose(new DateTime))
approx(-6.99258, e) {
i.value
}
l.send(new BarClose(new DateTime))
approx(-6.99258, e) {
i.value
}
l.send(new BarClose(new DateTime))
approx(-6.99258, e) {
i.value
}
l.send(new BarClose(new DateTime))
approx(-6.99258, e) {
i.value
}
i1.set(10)
l.send(new BarClose(new DateTime))
approx(-1.32839, e) {
i.value
}
i1.set(100)
l.send(new BarClose(new DateTime))
approx(32.44774, e) {
i.value
}
i1.set(75)
l.send(new BarClose(new DateTime))
approx(46.63183, e) {
i.value
}
i1.set(-1)
l.send(new BarClose(new DateTime))
approx(30.75455, e) {
i.value
}
i1.set(200)
l.send(new BarClose(new DateTime))
approx(87.16970, e) {
i.value
}
i1.set(199)
l.send(new BarClose(new DateTime))
approx(124.44647, e) {
i.value
}
}
test("EmaContinuous indicator - invalid period") {
intercept[IllegalArgumentException] {
new EmaContinuous(0, new I("A"))
}
intercept[IllegalArgumentException] {
new EmaContinuous(-5, new I("A"))
}
}
}
| ViliusN/Crossbow | crossbow-core/test/lt/norma/crossbow/indicators/EmaContinuousTest.scala | Scala | gpl-3.0 | 3,592 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author Matthew Saltz, John Miller
* @version 1.3
* @date Thu Jul 25 11:28:31 EDT 2013
* @see LICENSE (MIT style license file).
*
* Graph Dual Simulation Using Immutable Sets
*/
package scalation.graphalytics
import scala.collection.immutable.{Set => SET}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualSim2` class provides a second implementation for Dual Graph Simulation.
* It differs from `DualSim` by not using inverse adjacency sets ('pa') in
* order to save space.
* @param g the data graph G(V, E, l)
* @param q the query graph Q(U, D, k)
*/
class DualSim2 (g: Graph, q: Graph)
extends GraphMatcher (g, q)
{
private val DEBUG = true // debug flag
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Apply the Dual Graph Simulation pattern matching algorithm to find the mappings
* from the query graph 'q' to the data graph 'g'. These are represented by a
* multi-valued function 'phi' that maps each query graph vertex 'u' to a
* set of data graph vertices '{v}'.
*/
def mappings (): Array [SET [Int]] = saltzDualSim (feasibleMates ())
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given the mappings 'phi' produced by the 'feasibleMates' method,
* eliminate mappings 'u -> v' when (1) v's children fail to match u's
* or (2) v's parents fail to match u's.
* @param phi array of mappings from a query vertex u to { graph vertices v }
*/
def saltzDualSim (phi: Array [SET [Int]]): Array [SET [Int]] =
{
var alter = true
while (alter) { // check for matching children/parents
alter = false
for (u <- qRange; u_c <- q.ch(u)) { // for each u in q and its children u_c
if (DEBUG) { println (s"for u = $u, u_c = $u_c"); showMappings (phi) }
var newPhi = SET [Int] () // subset of phi(u_c) having a parent in phi(u)
for (v <- phi(u)) { // data vertex v matching u's label
val phiInt = g.ch(v) & phi(u_c) // children of v contained in phi(u_c)
if (phiInt.isEmpty) {
phi(u) -= v // remove vertex v from phi(u)
if (phi(u).isEmpty) return phi // no match for vertex u => no overall match
alter = true
} // if
// build newPhi to contain only those vertices in phi(u_c) which also have a parent in phi(u)
newPhi ++= phiInt
} // for
if (newPhi.isEmpty) return phi // empty newPhi => no match
if (newPhi.size < phi(u_c).size) alter = true // since newPhi is smaller than phi(u_c)
if (SELF_LOOPS && u_c == u) phi(u_c) &= newPhi else phi(u_c) = newPhi
} // for
} // while
phi
} // saltzDualSim
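    // Editor's note: a small worked example of the pruning step above. Suppose the
    // query has edge u0 -> u1 with phi(u0) = {v0, v1} and phi(u1) = {v2}. If
    // g.ch(v0) contains v2 but g.ch(v1) does not, then v1 is dropped from phi(u0)
    // (no matching child) and newPhi = {v2}, so phi(u1) is unchanged. The while
    // loop repeats until no candidate set shrinks.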
} // DualSim2 class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualSim2Test` object is used to test the `DualSim2` class.
* > run-main scalation.graphalytics.DualSim2Test
*/
object DualSim2Test extends App
{
val g = Graph.g1
val q = Graph.q1
println (s"g.checkEdges = ${g.checkEdges}")
g.printG ()
println (s"q.checkEdges = ${q.checkEdges}")
q.printG ()
(new DualSim2 (g, q)).test ("DualSim2") // Dual Graph Simulation Pattern Matcher
} // DualSim2Test object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualSim2Test2` object is used to test the `DualSim2` class.
* > run-main scalation.graphalytics.DualSim2Test2
*/
object DualSim2Test2 extends App
{
val g = Graph.g2
val q = Graph.q2
println (s"g.checkEdges = ${g.checkEdges}")
g.printG ()
println (s"q.checkEdges = ${q.checkEdges}")
q.printG ()
(new DualSim2 (g, q)).test ("DualSim2") // Dual Graph Simulation Pattern Matcher
} // DualSim2Test2 object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DualSim2Test3` object is used to test the 'DualSim2' class.
* > run-main scalation.graphalytics.DualSim2Test3
*/
object DualSim2Test3 extends App
{
val gSize = 1000 // size of the data graph
val qSize = 10 // size of the query graph
val nLabels = 100 // number of distinct labels
val gAvDegree = 5 // average vertex out degree for data graph
val qAvDegree = 2 // average vertex out degree for query graph
val g = GraphGen.genRandomGraph (gSize, nLabels, gAvDegree, false, "g")
val q = GraphGen.genBFSQuery (qSize, qAvDegree, g, false, "q")
println (s"q.checkEdges = ${q.checkEdges}")
q.printG ()
(new DualSim2 (g, q)).test ("DualSim2") // Dual Graph Simulation Pattern Matcher
} // DualSim2Test3 object
| NBKlepp/fda | scalation_1.3/scalation_modeling/src/main/scala/scalation/graphalytics/DualSim2.scala | Scala | mit | 5,264 |
class X { def a = 5 }
trait Y extends X { override def a = 7 }
class Test extends X with Y {
override def a = { 7 + super.a }
def b = super[X].a
def c = super[Y].a
}
| VladimirNik/tasty | exttests/tests/super1/Test.scala | Scala | bsd-3-clause | 171 |
/*
* Copyright (c) 2014. Webtrends (http://www.webtrends.com)
* @author cuthbertm on 11/20/14 12:16 PM
*/
package com.webtrends.harness.component.spotifyapi
import com.webtrends.harness.component.Component
class SpotifyAPIManager(name:String) extends Component(name) with SpotifyAPI {
/**
* We chain super.receive because if a component overrides receive without
* including super.receive, messages from the ComponentManager are not handled
* correctly and the component will not start up properly.
*
* @return
*/
override def receive = super.receive orElse {
case _ => "DO SOMETHING HERE"
}
/**
* Start function will start any child actors that will be managed by the ComponentManager
* @return
*/
override def start = {
startSpotifyAPI
super.start
}
}
| Crashfreak/SpotifyAPI | src/main/scala/com/webtrends/harness/component/spotifyapi/SpotifyAPIManager.scala | Scala | apache-2.0 | 829 |
package org.tinydvr.service
import net.liftweb.json.Extraction.decompose
import net.liftweb.json._
import org.scalatra._
import org.slf4j.LoggerFactory
/**
* Adds some useful functionality on top of the ScalatraServlet get and post functions
*/
trait JsonAPIServlet extends ScalatraServlet {
val CONTENT_TYPE_JSON = "application/json"
private val logger = LoggerFactory.getLogger(getClass)
private implicit val formats = net.liftweb.json.DefaultFormats
override def get(transformers: RouteTransformer*)(body: => Any): Route = {
super.get(transformers: _*)(withJson { body })
}
override def post(transformers: RouteTransformer*)(body: => Any): Route = {
super.post(transformers: _*)(withJson { body })
}
def okResponse(result: AnyRef, msg: Option[String] = Some("OK")): Any = {
jsonResponse(200, msg, Some(result))
}
def halt400(msg: Option[String] = Some("Bad Request")): Nothing = {
halt(400, body = jsonResponse(400, msg, None))
}
def halt401(msg: Option[String] = Some("Unauthorized")): Nothing = {
halt(401, body = jsonResponse(401, msg, None))
}
def halt500(msg: Option[String] = Some("Internal Server Error")): Nothing = {
halt(500, body = jsonResponse(500, msg, None))
}
protected def jsonResponse(status: Int, msg: Option[String], result: Option[AnyRef] = None): Any = {
contentType = CONTENT_TYPE_JSON
compact(render(decompose(wrapResponse(status, msg, result))))
}
protected def wrapResponse(status: Int, msg: Option[String], result: Option[AnyRef] = None): JsonAPIResponse = {
JsonAPIResponse(
status,
msg,
result
)
}
protected def withJson[T <: Any](body: => T): Any = {
try {
val res = body
okResponse(res.asInstanceOf[AnyRef])
} catch {
case e: Exception => {
logger.error("Uncaught Exception in JsonServlet", e)
halt500()
}
}
}
}
//
// Types
//
case class JsonAPIResponse(status: Int, message: Option[String], data: Option[Any])
| lou-k/tinydvr | src/main/scala/org/tinydvr/service/JsonAPIServlet.scala | Scala | gpl-3.0 | 2,012 |
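/*
 * Editor's usage sketch (hypothetical servlet, not part of the repository above):
 * whatever a route body returns is wrapped by withJson/okResponse into the
 * JsonAPIResponse envelope defined above.
 */
package org.tinydvr.service

class StatusServlet extends JsonAPIServlet {
  get("/status") {
    // Rendered roughly as {"status":200,"message":"OK","data":{"uptimeSeconds":42}}
    Map("uptimeSeconds" -> 42)
  }
}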
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
import java.net.{InetSocketAddress, URL}
import javax.servlet.DispatcherType
import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse}
import scala.language.implicitConversions
import scala.xml.Node
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.server.handler._
import org.eclipse.jetty.servlet._
import org.eclipse.jetty.util.thread.QueuedThreadPool
import org.json4s.JValue
import org.json4s.jackson.JsonMethods.{pretty, render}
import org.apache.spark.{Logging, SecurityManager, SparkConf}
import org.apache.spark.util.Utils
/**
* Utilities for launching a web server using Jetty's HTTP Server class
*/
private[spark] object JettyUtils extends Logging {
// Base type for a function that returns something based on an HTTP request. Allows for
// implicit conversion from many types of functions to jetty Handlers.
type Responder[T] = HttpServletRequest => T
class ServletParams[T <% AnyRef](val responder: Responder[T],
val contentType: String,
val extractFn: T => String = (in: Any) => in.toString) {}
// Conversions from various types of Responder's to appropriate servlet parameters
implicit def jsonResponderToServlet(responder: Responder[JValue]): ServletParams[JValue] =
new ServletParams(responder, "text/json", (in: JValue) => pretty(render(in)))
implicit def htmlResponderToServlet(responder: Responder[Seq[Node]]): ServletParams[Seq[Node]] =
new ServletParams(responder, "text/html", (in: Seq[Node]) => "<!DOCTYPE html>" + in.toString)
implicit def textResponderToServlet(responder: Responder[String]): ServletParams[String] =
new ServletParams(responder, "text/plain")
def createServlet[T <% AnyRef](
servletParams: ServletParams[T],
securityMgr: SecurityManager): HttpServlet = {
new HttpServlet {
override def doGet(request: HttpServletRequest, response: HttpServletResponse) {
try {
if (securityMgr.checkUIViewPermissions(request.getRemoteUser)) {
response.setContentType("%s;charset=utf-8".format(servletParams.contentType))
response.setStatus(HttpServletResponse.SC_OK)
val result = servletParams.responder(request)
response.setHeader("Cache-Control", "no-cache, no-store, must-revalidate")
response.getWriter.println(servletParams.extractFn(result))
} else {
response.setStatus(HttpServletResponse.SC_UNAUTHORIZED)
response.setHeader("Cache-Control", "no-cache, no-store, must-revalidate")
response.sendError(HttpServletResponse.SC_UNAUTHORIZED,
"User is not authorized to access this page.")
}
} catch {
case e: IllegalArgumentException =>
response.sendError(HttpServletResponse.SC_BAD_REQUEST, e.getMessage)
case e: Exception =>
logWarning(s"GET ${request.getRequestURI} failed: $e", e)
throw e
}
}
// SPARK-5983 ensure TRACE is not supported
protected override def doTrace(req: HttpServletRequest, res: HttpServletResponse): Unit = {
res.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED)
}
}
}
/** Create a context handler that responds to a request with the given path prefix */
def createServletHandler[T <% AnyRef](
path: String,
servletParams: ServletParams[T],
securityMgr: SecurityManager,
basePath: String = ""): ServletContextHandler = {
createServletHandler(path, createServlet(servletParams, securityMgr), basePath)
}
/** Create a context handler that responds to a request with the given path prefix */
def createServletHandler(
path: String,
servlet: HttpServlet,
basePath: String): ServletContextHandler = {
val prefixedPath = attachPrefix(basePath, path)
val contextHandler = new ServletContextHandler
val holder = new ServletHolder(servlet)
contextHandler.setContextPath(prefixedPath)
contextHandler.addServlet(holder, "/")
contextHandler
}
/** Create a handler that always redirects the user to the given path */
def createRedirectHandler(
srcPath: String,
destPath: String,
beforeRedirect: HttpServletRequest => Unit = x => (),
basePath: String = "",
httpMethods: Set[String] = Set("GET")): ServletContextHandler = {
val prefixedDestPath = attachPrefix(basePath, destPath)
val servlet = new HttpServlet {
override def doGet(request: HttpServletRequest, response: HttpServletResponse): Unit = {
if (httpMethods.contains("GET")) {
doRequest(request, response)
} else {
response.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED)
}
}
override def doPost(request: HttpServletRequest, response: HttpServletResponse): Unit = {
if (httpMethods.contains("POST")) {
doRequest(request, response)
} else {
response.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED)
}
}
private def doRequest(request: HttpServletRequest, response: HttpServletResponse): Unit = {
beforeRedirect(request)
// Make sure we don't end up with "//" in the middle
val newUrl = new URL(new URL(request.getRequestURL.toString), prefixedDestPath).toString
response.sendRedirect(newUrl)
}
// SPARK-5983 ensure TRACE is not supported
protected override def doTrace(req: HttpServletRequest, res: HttpServletResponse): Unit = {
res.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED)
}
}
createServletHandler(srcPath, servlet, basePath)
}
/** Create a handler for serving files from a static directory */
def createStaticHandler(resourceBase: String, path: String): ServletContextHandler = {
val contextHandler = new ServletContextHandler
contextHandler.setInitParameter("org.eclipse.jetty.servlet.Default.gzip", "false")
val staticHandler = new DefaultServlet
val holder = new ServletHolder(staticHandler)
Option(Utils.getSparkClassLoader.getResource(resourceBase)) match {
case Some(res) =>
holder.setInitParameter("resourceBase", res.toString)
case None =>
throw new Exception("Could not find resource path for Web UI: " + resourceBase)
}
contextHandler.setContextPath(path)
contextHandler.addServlet(holder, "/")
contextHandler
}
/** Add filters, if any, to the given list of ServletContextHandlers */
def addFilters(handlers: Seq[ServletContextHandler], conf: SparkConf) {
val filters: Array[String] = conf.get("spark.ui.filters", "").split(',').map(_.trim())
filters.foreach {
case filter : String =>
if (!filter.isEmpty) {
logInfo("Adding filter: " + filter)
val holder : FilterHolder = new FilterHolder()
holder.setClassName(filter)
// Get any parameters for each filter
conf.get("spark." + filter + ".params", "").split(',').map(_.trim()).toSet.foreach {
param: String =>
if (!param.isEmpty) {
val parts = param.split("=")
if (parts.length == 2) holder.setInitParameter(parts(0), parts(1))
}
}
val prefix = s"spark.$filter.param."
conf.getAll
.filter { case (k, v) => k.length() > prefix.length() && k.startsWith(prefix) }
.foreach { case (k, v) => holder.setInitParameter(k.substring(prefix.length()), v) }
val enumDispatcher = java.util.EnumSet.of(DispatcherType.ASYNC, DispatcherType.ERROR,
DispatcherType.FORWARD, DispatcherType.INCLUDE, DispatcherType.REQUEST)
handlers.foreach { case(handler) => handler.addFilter(holder, "/*", enumDispatcher) }
}
}
}
/**
* Attempt to start a Jetty server bound to the supplied hostName:port using the given
* context handlers.
*
* If the desired port number is contended, continues incrementing ports until a free port is
* found. Return the jetty Server object, the chosen port, and a mutable collection of handlers.
*/
def startJettyServer(
hostName: String,
port: Int,
handlers: Seq[ServletContextHandler],
conf: SparkConf,
serverName: String = ""): ServerInfo = {
addFilters(handlers, conf)
val collection = new ContextHandlerCollection
val gzipHandlers = handlers.map { h =>
val gzipHandler = new GzipHandler
gzipHandler.setHandler(h)
gzipHandler
}
collection.setHandlers(gzipHandlers.toArray)
// Bind to the given port, or throw a java.net.BindException if the port is occupied
def connect(currentPort: Int): (Server, Int) = {
val server = new Server(new InetSocketAddress(hostName, currentPort))
val pool = new QueuedThreadPool
pool.setDaemon(true)
server.setThreadPool(pool)
val errorHandler = new ErrorHandler()
errorHandler.setShowStacks(true)
server.addBean(errorHandler)
server.setHandler(collection)
try {
server.start()
(server, server.getConnectors.head.getLocalPort)
} catch {
case e: Exception =>
server.stop()
pool.stop()
throw e
}
}
val (server, boundPort) = Utils.startServiceOnPort[Server](port, connect, conf, serverName)
ServerInfo(server, boundPort, collection)
}
/** Attach a prefix to the given path, but avoid returning an empty path */
private def attachPrefix(basePath: String, relativePath: String): String = {
if (basePath == "") relativePath else (basePath + relativePath).stripSuffix("/")
}
}
private[spark] case class ServerInfo(
server: Server,
boundPort: Int,
rootHandler: ContextHandlerCollection)
| andrewor14/iolap | core/src/main/scala/org/apache/spark/ui/JettyUtils.scala | Scala | apache-2.0 | 10,592 |
package com.github.gtache.lsp.contributors
import com.github.gtache.lsp.contributors.psi.LSPPsiElement
import com.github.gtache.lsp.editor.EditorEventManager
import com.github.gtache.lsp.utils.FileUtils
import com.intellij.lang.documentation.DocumentationProvider
import com.intellij.openapi.diagnostic.Logger
import com.intellij.psi.{PsiElement, PsiFile, PsiManager}
/**
  * A documentation provider for LSP (called when CTRL is pressed while hovering over a token)
*/
class LSPDocumentationProvider extends DocumentationProvider {
private val LOG: Logger = Logger.getInstance(classOf[LSPDocumentationProvider])
override def getUrlFor(element: PsiElement, originalElement: PsiElement): java.util.List[String] = {
null
}
override def getDocumentationElementForLookupItem(psiManager: PsiManager, obj: scala.Any, element: PsiElement): PsiElement = {
null
}
override def getDocumentationElementForLink(psiManager: PsiManager, link: String, context: PsiElement): PsiElement = {
null
}
override def generateDoc(element: PsiElement, originalElement: PsiElement): String = {
getQuickNavigateInfo(element, originalElement)
}
override def getQuickNavigateInfo(element: PsiElement, originalElement: PsiElement): String = {
element match {
case l: LSPPsiElement =>
EditorEventManager.forUri(FileUtils.VFSToURI(l.getContainingFile.getVirtualFile)).fold("")(m => m.requestDoc(m.editor, l.getTextOffset))
case p: PsiFile =>
val editor = FileUtils.editorFromPsiFile(p)
EditorEventManager.forEditor(editor).fold("")(m => m.requestDoc(editor, editor.getCaretModel.getCurrentCaret.getOffset))
case _ => ""
}
}
} | gtache/intellij-lsp | intellij-lsp/src/com/github/gtache/lsp/contributors/LSPDocumentationProvider.scala | Scala | apache-2.0 | 1,690 |
import scala.quoted.*
object Macro {
inline def ff: Unit = ${impl(Type.of[Int])}
def impl(t: Type[Int])(using Quotes): Expr[Unit] = '{}
}
| dotty-staging/dotty | tests/pos-macros/macro-with-type/Macro_1.scala | Scala | apache-2.0 | 142 |
object Test {
def f[X] = ()
f[List]
// Checking edge cases that should not compile with kind-polymorphism
trait X[A <: AnyKind, F[_ <: AnyKind]] { type B = F[A] }
val i0: X[Option, Double]#B = Some(5) // error
val i1: X[Option, List]#B = Some(5) // error
val i2: X[Option[Double], List]#B = Some(5) // error
val i3: X[Option[Double], ({ type l[X[_]] = X[Int] })#l]#B = Some(5) // error
val i4: X[Double, ({ type l[X[_]] = X[Int] })#l]#B = 5.0 // error
val i6: X[Either, ({ type l[X[_]] = X[Int] })#l]#B = Some(5) // error
val i7: X[Either, List]#B = Some(5) // error
trait Foo[A[_[_]]]
val i8: X[Foo, ({ type l[X[_]] = X[Int] })#l]#B = Some(5) // error
trait X2[A <: AnyKind, B <: AnyKind] { def run[F[_ <: AnyKind]]: F[A] => F[B] }
val x21 = {
new X2[Int, Int] { def run[F[_]]: F[Int] => F[Int] = identity[F[Int]] }
.asInstanceOf[X2[List, Option[Double]]].run[({ type l[X[_]] = X[Int] })#l]
}
val x22 = {
new X2[Int, Int] { def run[F[_]]: F[Int] => F[Int] = identity[F[Int]] }
.asInstanceOf[X2[List, Option[Double]]].run[List]
}
trait X3[A <: AnyKind, B <: AnyKind, C <: AnyKind] { def run[F[_ <: AnyKind, _ <: AnyKind]]: F[A, C] => F[B, C] }
val x31 = {
new X3[Int, Int, String] { def run[F[_, _]]: F[Int, String] => F[Int, String] = identity[F[Int, String]] }
.asInstanceOf[X3[Option[Double], List, String]].run[Map]
}
val x32 = {
new X3[Int, Int, String] { def run[F[_, _]]: F[Int, String] => F[Int, String] = identity[F[Int, String]] }
.asInstanceOf[X3[List, Option[Double], String]].run[Map]
}
trait X4[A <: AnyKind, B <: AnyKind, C] { def run[F[_ <: AnyKind, _]]: F[A, C] => F[B, C] }
trait Foo2[A]
trait Bar[A]
trait Bar2[A, B]
trait Toto[F[_], A]
val x41 = {
new X3[Foo2, Foo2, Int] { def run[F[_[_], A]]: F[Foo2, Int] => F[Foo2, Int] = identity[F[Foo2, Int]] }
.asInstanceOf[X3[Bar, Bar, Int]].run[Bar2]
}
}
| som-snytt/dotty | tests/neg/anykind3.scala | Scala | apache-2.0 | 1,949 |
/*
* Copyright 2014 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.xml
import com.ctc.wstx.stax.WstxInputFactory
import org.codehaus.stax2.XMLStreamReader2
import org.scalatest.{FunSuite, Matchers}
import java.io.StringReader
import javax.xml.stream.{XMLInputFactory, XMLStreamException}
import javax.xml.stream.XMLStreamConstants.START_ELEMENT
import RichXMLStreamReader2.toRichXMLStreamReader2
final class TestRichXMLStreamReader2 extends FunSuite with Matchers {
test("seekToRootElement()") {
val sr: XMLStreamReader2 = createSR()
sr.seekToRootElement("root")
sr.getDepth should equal (1)
sr.getEventType should equal (START_ELEMENT)
sr.getLocalName() should equal ("root")
}
test("seekToSiblingElement - Exception") {
val sr: XMLStreamReader2 = createSR()
sr.seekToRootElement()
intercept[XMLStreamException] { sr.seekToSiblingElement("header") }
}
test("seekToChildElement()") {
val sr: XMLStreamReader2 = createSR()
sr.seekToRootElement()
sr.seekToChildElement("items")
}
test("seekToChildElement() - Exception") {
val sr: XMLStreamReader2 = createSR()
sr.seekToRootElement()
intercept[XMLStreamException] { sr.seekToChildElement("items_foo") }
}
test("seekToNextSiblingElement()") {
val sr: XMLStreamReader2 = createSR()
sr.seekToRootElement()
sr.seekToChildElement()
sr.getLocalName() should equal ("header")
sr.seekToSiblingElement()
sr.getLocalName() should equal ("items")
sr.seekToSiblingElement()
sr.getLocalName() should equal ("trailer")
intercept[XMLStreamException] { sr.seekToSiblingElement() }
}
test("Simple Document Traversing") {
val sr: XMLStreamReader2 = createSR()
sr.seekToRootElement()
sr.seekToChildElement("items")
sr.seekToChildElement("item")
sr.readChildElementText("name") should equal ("Item 1 Name")
sr.seekToSiblingElement("item")
sr.seekToEndOfParentElement()
sr.seekToSiblingElement("trailer")
sr.seekToChildElement("name")
sr.readElementText() should equal ("Trailer Name")
}
test("foreach - root/items/item") {
val sr: XMLStreamReader2 = createSR()
val builder = Vector.newBuilder[String]
sr.foreach("root/items/item") {
builder += sr.readChildElementText("name")
}
builder.result should equal (Vector("Item 1 Name", "Item 2 Name"))
}
test("foreach - root/items/item/name") {
val sr: XMLStreamReader2 = createSR()
val builder = Vector.newBuilder[String]
sr.foreach("root/items/item/name") {
builder += sr.readElementText()
}
builder.result should equal (Vector("Item 1 Name", "Item 2 Name"))
}
test("readElementAsXMLString - 1") {
val sr: XMLStreamReader2 = createSR()
sr.seekToRootElement()
sr.seekToChildElement("items")
sr.readElementAsXMLString should equal (
"""<items>
| <item idx="1">
| <name>Item 1 Name</name>
| </item>
| <item idx="2">
| <name>Item 2 Name</name>
| </item>
| <items foo="bar">
| <item idx="1">
| <name>Sub Item 1 name</name>
| </item>
| </items>
| </items>""".stripMargin)
sr.seekToSiblingElement()
sr.getLocalName() should equal ("trailer")
}
test("readElementAsXMLString - 2") {
val sr: XMLStreamReader2 = createSR()
sr.seekToRootElement()
sr.seekToChildElement("items") // The outer <items> element
sr.seekToChildElement("items") // The inner nested <items> element
sr.readElementAsXMLString should equal (
"""<items foo="bar">
| <item idx="1">
| <name>Sub Item 1 name</name>
| </item>
| </items>""".stripMargin)
sr.seekToEndOfParentElement() // Gets us back to the outer <items> element
sr.seekToSiblingElement() // Should advance us to the <trailer> element
sr.getLocalName() should equal ("trailer")
}
private def createSR(): XMLStreamReader2 = {
val inputFactory: WstxInputFactory = new WstxInputFactory()
inputFactory.setProperty(XMLInputFactory.SUPPORT_DTD, false)
inputFactory.configureForSpeed()
inputFactory.createXMLStreamReader(new StringReader(xml)).asInstanceOf[XMLStreamReader2]
}
val xml = """
<?xml version='1.0' encoding='UTF-8'?>
<root>
<header>
<name>Header Name</name>
</header>
<items>
<item idx="1">
<name>Item 1 Name</name>
</item>
<item idx="2">
<name>Item 2 Name</name>
</item>
<items foo="bar">
<item idx="1">
<name>Sub Item 1 name</name>
</item>
</items>
</items>
<trailer>
<name>Trailer Name</name>
</trailer>
</root>
""".trim
} | frugalmechanic/fm-xml | src/test/scala/fm/xml/TestRichXMLStreamReader2.scala | Scala | apache-2.0 | 5,337 |
import org.scalameter.{Bench, Gen}
object ComparisonSpec extends Bench.OfflineReport with SmallDocSpec with NestedDocSpec with MultiFieldDocSpec {
override lazy val scale: Gen[Int] = Gen.range("scale")(0, 1000, 20)
}
| evojam/mongo-drivers-benchmarks | test/ComparisonSpec.scala | Scala | apache-2.0 | 222 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.catalogs.doperations.exceptions
import io.deepsense.deeplang.DOperation
case class DOperationNotFoundException(operationId: DOperation.Id)
extends DOperationsCatalogException(s"DOperation not found: $operationId")
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/catalogs/doperations/exceptions/DOperationNotFoundException.scala | Scala | apache-2.0 | 847 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.data
import com.google.common.cache.{CacheBuilder, CacheLoader}
import com.typesafe.scalalogging.slf4j.Logging
import org.geotools.data._
import org.geotools.data.simple.{SimpleFeatureCollection, SimpleFeatureIterator, SimpleFeatureSource}
import org.geotools.feature.visitor.{BoundsVisitor, MaxVisitor, MinVisitor}
import org.locationtech.geomesa.core.process.knn.KNNVisitor
import org.locationtech.geomesa.core.process.proximity.ProximityVisitor
import org.locationtech.geomesa.core.process.query.QueryVisitor
import org.locationtech.geomesa.core.process.tube.TubeVisitor
import org.locationtech.geomesa.core.process.unique.AttributeVisitor
import org.locationtech.geomesa.core.util.{SelfClosingIterator, TryLoggingFailure}
import org.opengis.feature.FeatureVisitor
import org.opengis.feature.`type`.Name
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
import org.opengis.filter.sort.SortBy
import org.opengis.util.ProgressListener
trait AccumuloAbstractFeatureSource extends AbstractFeatureSource with Logging with TryLoggingFailure {
self =>
import org.locationtech.geomesa.utils.geotools.Conversions._
val dataStore: AccumuloDataStore
val featureName: Name
def addFeatureListener(listener: FeatureListener) {}
def removeFeatureListener(listener: FeatureListener) {}
def getSchema: SimpleFeatureType = getDataStore.getSchema(featureName)
def getDataStore: AccumuloDataStore = dataStore
override def getCount(query: Query) = getFeaturesNoCache(query).features().size
override def getQueryCapabilities =
new QueryCapabilities() {
override def isOffsetSupported = false
override def isReliableFIDSupported = true
override def isUseProvidedFIDSupported = true
override def supportsSorting(sortAttributes: Array[SortBy]) = true
}
protected def getFeaturesNoCache(query: Query): SimpleFeatureCollection = {
AccumuloDataStore.setQueryTransforms(query, getSchema)
new AccumuloFeatureCollection(self, query)
}
override def getFeatures(query: Query): SimpleFeatureCollection =
tryLoggingFailures(getFeaturesNoCache(query))
override def getFeatures(filter: Filter): SimpleFeatureCollection =
getFeatures(new Query(getSchema().getTypeName, filter))
}
class AccumuloFeatureSource(val dataStore: AccumuloDataStore, val featureName: Name)
extends AccumuloAbstractFeatureSource
class AccumuloFeatureCollection(source: SimpleFeatureSource, query: Query)
extends DefaultFeatureResults(source, query) {
val ds = source.getDataStore.asInstanceOf[AccumuloDataStore]
override def getSchema: SimpleFeatureType =
if(query.getHints.containsKey(TRANSFORMS)) query.getHints.get(TRANSFORM_SCHEMA).asInstanceOf[SimpleFeatureType]
else super.getSchema
override def accepts(visitor: FeatureVisitor, progress: ProgressListener) =
visitor match {
// TODO GEOMESA-421 implement min/max iterators
case v: MinVisitor => v.setValue(ds.getTimeBounds(query.getTypeName).getStart.toDate)
case v: MaxVisitor => v.setValue(ds.getTimeBounds(query.getTypeName).getEnd.toDate)
case v: BoundsVisitor => v.reset(ds.getBounds(query))
case v: TubeVisitor => v.setValue(v.tubeSelect(source, query))
case v: ProximityVisitor => v.setValue(v.proximitySearch(source, query))
case v: QueryVisitor => v.setValue(v.query(source, query))
case v: KNNVisitor => v.setValue(v.kNNSearch(source,query))
case v: AttributeVisitor => v.setValue(v.unique(source, query))
case _ => super.accepts(visitor, progress)
}
override def reader(): FeatureReader[SimpleFeatureType, SimpleFeature] = super.reader()
}
class CachingAccumuloFeatureCollection(source: SimpleFeatureSource, query: Query)
extends AccumuloFeatureCollection(source, query) {
lazy val featureList = {
// use ListBuffer for constant append time and size
val buf = scala.collection.mutable.ListBuffer.empty[SimpleFeature]
val iter = super.features
while (iter.hasNext) {
buf.append(iter.next())
}
iter.close()
buf
}
override def features = new SimpleFeatureIterator() {
private val iter = featureList.iterator
override def hasNext = iter.hasNext
override def next = iter.next
override def close = {}
}
override def size = featureList.length
}
trait CachingFeatureSource extends AccumuloAbstractFeatureSource {
self: AccumuloAbstractFeatureSource =>
private val featureCache =
CacheBuilder.newBuilder().build(
new CacheLoader[Query, SimpleFeatureCollection] {
override def load(query: Query): SimpleFeatureCollection =
new CachingAccumuloFeatureCollection(self, query)
})
override def getFeatures(query: Query): SimpleFeatureCollection = {
// geotools bug in Query.hashCode
if (query.getStartIndex == null) {
query.setStartIndex(0)
}
featureCache.get(query)
}
override def getCount(query: Query): Int = getFeatures(query).size()
}
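// Hedged illustration (added here, not part of the original source): the caching trait above is
// meant to be mixed into a concrete feature source; the constructor arguments are assumed to be
// supplied by the surrounding data store as usual.
class ExampleCachingFeatureSource(dataStore: AccumuloDataStore, featureName: Name)
  extends AccumuloFeatureSource(dataStore, featureName) with CachingFeatureSource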
| kevinwheeler/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/data/AccumuloFeatureSource.scala | Scala | apache-2.0 | 5,708 |
package notification.controllers
import java.util.UUID
import authentication.AuthAction
import com.amazonaws.services.cloudwatch.model.StandardUnit
import metrics.{CloudWatchMetrics, MetricDataPoint}
import models.{TopicTypes, _}
import notification.models.PushResult
import notification.services
import notification.services.{ArticlePurge, Configuration, NewsstandSender, NotificationSender}
import org.joda.time.{DateTime, DateTimeZone}
import org.slf4j.{Logger, LoggerFactory}
import play.api.libs.json.Json.toJson
import play.api.mvc._
import tracking.Repository.RepositoryResult
import tracking.SentNotificationReportRepository
import scala.concurrent.{ExecutionContext, Future}
import scala.util.control.NonFatal
final class Main(
configuration: Configuration,
notificationSender: NotificationSender,
newsstandSender: NewsstandSender,
notificationReportRepository: SentNotificationReportRepository,
articlePurge: ArticlePurge,
metrics: CloudWatchMetrics,
controllerComponents: ControllerComponents,
authAction: AuthAction
)(implicit executionContext: ExecutionContext)
extends AbstractController(controllerComponents) {
private val logger: Logger = LoggerFactory.getLogger(this.getClass)
val weekendReadingTopic = Topic(TopicTypes.TagSeries, "membership/series/weekend-reading")
val weekendRoundUpTopic = Topic(TopicTypes.TagSeries, "membership/series/weekend-round-up")
def healthCheck = Action {
Ok("Good")
}
def pushNewsstand: Action[AnyContent] = authAction.async { request =>
if(request.isPermittedTopicType(TopicTypes.Newsstand)){
val id = UUID.randomUUID()
newsstandSender.sendNotification(id) map { _ =>
logger.info("Newsstand notification sent")
metrics.send(MetricDataPoint(name = "SuccessfulNewstandSend", value = 1, unit = StandardUnit.Count))
Created(toJson(PushResult(id)))
} recover {
case NonFatal(error) =>
logger.error(s"Newsstand notification failed: $error")
metrics.send(MetricDataPoint(name = "SuccessfulNewstandSend", value = 0, unit = StandardUnit.Count))
InternalServerError(s"Newsstand notification failed: $error")
}
}
else {
Future.successful(Unauthorized(s"This API key is not valid for ${TopicTypes.Newsstand}."))
}
}
def pushTopics: Action[Notification] = authAction.async(parse.json[Notification]) { request =>
val startTime = System.currentTimeMillis()
val notification = request.body
val topics = notification.topic
val MaxTopics = 20
(topics.size match {
case 0 => Future.successful(BadRequest("Empty topic list"))
case a: Int if a > MaxTopics => Future.successful(BadRequest(s"Too many topics, maximum: $MaxTopics"))
case _ if !topics.forall{topic => request.isPermittedTopicType(topic.`type`)} =>
Future.successful(Unauthorized(s"This API key is not valid for ${topics.filterNot(topic => request.isPermittedTopicType(topic.`type`))}."))
case _ =>
val result = pushWithDuplicateProtection(notification)
result.foreach(_ => logger.info(s"Spent ${System.currentTimeMillis() - startTime} milliseconds processing notification ${notification.id}"))
result
}) recoverWith {
case NonFatal(exception) => {
logger.warn(s"Pushing notification failed: $notification", exception)
Future.successful(InternalServerError)
}
}
}
private def pushWithDuplicateProtection(notification: Notification): Future[Result] = {
val isDuplicate = notificationReportRepository.getByUuid(notification.id).map(_.isRight)
isDuplicate.flatMap {
case true => Future.successful(BadRequest(s"${notification.id} has been sent before - refusing to resend"))
case false => pushGeneric(notification)
}
}
private def pushGeneric(notification: Notification) = {
prepareReportAndSendPush(notification) flatMap {
case Right(report) =>
reportPushSent(notification, List(report)) map {
case Right(_) =>
logger.info(s"Notification was sent: $notification")
Created(toJson(PushResult(notification.id)))
case Left(error) =>
logger.error(s"Notification ($notification) sent but report could not be stored ($error)")
Created(toJson(PushResult(notification.id).withReportingError(error)))
}
case Left(error) =>
logger.error(s"Notification ($notification) could not be sent: $error")
Future.successful(InternalServerError)
}
}
private def decacheArticle(notification: Notification): Future[Unit] = {
articlePurge.purgeFromNotification(notification)
.map(_ => ())
.recover {
case NonFatal(e) =>
logger.warn(s"Unable to decache article for notification ${notification.id}", e)
()
}
}
private def prepareReportAndSendPush(notification: Notification): Future[Either[services.SenderError, SenderReport]] = {
val notificationReport = NotificationReport.create(notification.id, notification.`type`, notification, DateTime.now(DateTimeZone.UTC), List(), None)
for {
initialEmptyNotificationReport <- notificationReportRepository.store(notificationReport)
_ <- decacheArticle(notification)
sentPush <- initialEmptyNotificationReport match {
case Left(error) => Future.failed(new Exception(error.message))
case Right(_) => notificationSender.sendNotification(notification)
}
} yield sentPush
}
private def reportPushSent(notification: Notification, reports: List[SenderReport]): Future[RepositoryResult[Unit]] =
notificationReportRepository.update(NotificationReport.create(notification, reports))
}
| guardian/mobile-n10n | notification/app/notification/controllers/Main.scala | Scala | apache-2.0 | 5,714 |
package gitbucket.core.servlet
import java.io.File
import java.sql.{DriverManager, Connection}
import gitbucket.core.plugin.PluginRegistry
import gitbucket.core.service.SystemSettingsService
import gitbucket.core.util._
import org.apache.commons.io.FileUtils
import javax.servlet.{ServletContextListener, ServletContextEvent}
import org.slf4j.LoggerFactory
import Directory._
import ControlUtil._
import JDBCUtil._
import org.eclipse.jgit.api.Git
import gitbucket.core.util.Versions
import gitbucket.core.util.Directory
object AutoUpdate {
/**
   * The history of versions. The head of this sequence is the current GitBucket version.
*/
val versions = Seq(
new Version(3, 7) with SystemSettingsService {
override def update(conn: Connection, cl: ClassLoader): Unit = {
super.update(conn, cl)
val settings = loadSystemSettings()
if(settings.notification){
saveSystemSettings(settings.copy(useSMTP = true))
}
}
},
new Version(3, 6),
new Version(3, 5),
new Version(3, 4),
new Version(3, 3),
new Version(3, 2),
new Version(3, 1),
new Version(3, 0),
new Version(2, 8),
new Version(2, 7) {
override def update(conn: Connection, cl: ClassLoader): Unit = {
super.update(conn, cl)
conn.select("SELECT * FROM REPOSITORY"){ rs =>
// Rename attached files directory from /issues to /comments
val userName = rs.getString("USER_NAME")
val repoName = rs.getString("REPOSITORY_NAME")
defining(Directory.getAttachedDir(userName, repoName)){ newDir =>
val oldDir = new File(newDir.getParentFile, "issues")
if(oldDir.exists && oldDir.isDirectory){
oldDir.renameTo(newDir)
}
}
// Update ORIGIN_USER_NAME and ORIGIN_REPOSITORY_NAME if it does not exist
val originalUserName = rs.getString("ORIGIN_USER_NAME")
val originalRepoName = rs.getString("ORIGIN_REPOSITORY_NAME")
if(originalUserName != null && originalRepoName != null){
if(conn.selectInt("SELECT COUNT(*) FROM REPOSITORY WHERE USER_NAME = ? AND REPOSITORY_NAME = ?",
originalUserName, originalRepoName) == 0){
conn.update("UPDATE REPOSITORY SET ORIGIN_USER_NAME = NULL, ORIGIN_REPOSITORY_NAME = NULL " +
"WHERE USER_NAME = ? AND REPOSITORY_NAME = ?", userName, repoName)
}
}
// Update PARENT_USER_NAME and PARENT_REPOSITORY_NAME if it does not exist
val parentUserName = rs.getString("PARENT_USER_NAME")
val parentRepoName = rs.getString("PARENT_REPOSITORY_NAME")
if(parentUserName != null && parentRepoName != null){
if(conn.selectInt("SELECT COUNT(*) FROM REPOSITORY WHERE USER_NAME = ? AND REPOSITORY_NAME = ?",
parentUserName, parentRepoName) == 0){
conn.update("UPDATE REPOSITORY SET PARENT_USER_NAME = NULL, PARENT_REPOSITORY_NAME = NULL " +
"WHERE USER_NAME = ? AND REPOSITORY_NAME = ?", userName, repoName)
}
}
}
}
},
new Version(2, 6),
new Version(2, 5),
new Version(2, 4),
new Version(2, 3) {
override def update(conn: Connection, cl: ClassLoader): Unit = {
super.update(conn, cl)
conn.select("SELECT ACTIVITY_ID, ADDITIONAL_INFO FROM ACTIVITY WHERE ACTIVITY_TYPE='push'"){ rs =>
val curInfo = rs.getString("ADDITIONAL_INFO")
val newInfo = curInfo.split("\\n").filter(_ matches "^[0-9a-z]{40}:.*").mkString("\\n")
if (curInfo != newInfo) {
conn.update("UPDATE ACTIVITY SET ADDITIONAL_INFO = ? WHERE ACTIVITY_ID = ?", newInfo, rs.getInt("ACTIVITY_ID"))
}
}
ignore {
FileUtils.deleteDirectory(Directory.getPluginCacheDir())
//FileUtils.deleteDirectory(new File(Directory.PluginHome))
}
}
},
new Version(2, 2),
new Version(2, 1),
new Version(2, 0){
override def update(conn: Connection, cl: ClassLoader): Unit = {
import eu.medsea.mimeutil.{MimeUtil2, MimeType}
val mimeUtil = new MimeUtil2()
mimeUtil.registerMimeDetector("eu.medsea.mimeutil.detector.MagicMimeMimeDetector")
super.update(conn, cl)
conn.select("SELECT USER_NAME, REPOSITORY_NAME FROM REPOSITORY"){ rs =>
defining(Directory.getAttachedDir(rs.getString("USER_NAME"), rs.getString("REPOSITORY_NAME"))){ dir =>
if(dir.exists && dir.isDirectory){
dir.listFiles.foreach { file =>
if(file.getName.indexOf('.') < 0){
val mimeType = MimeUtil2.getMostSpecificMimeType(mimeUtil.getMimeTypes(file, new MimeType("application/octet-stream"))).toString
if(mimeType.startsWith("image/")){
file.renameTo(new File(file.getParent, file.getName + "." + mimeType.split("/")(1)))
}
}
}
}
}
}
}
},
Version(1, 13),
Version(1, 12),
Version(1, 11),
Version(1, 10),
Version(1, 9),
Version(1, 8),
Version(1, 7),
Version(1, 6),
Version(1, 5),
Version(1, 4),
new Version(1, 3){
override def update(conn: Connection, cl: ClassLoader): Unit = {
super.update(conn, cl)
// Fix wiki repository configuration
conn.select("SELECT USER_NAME, REPOSITORY_NAME FROM REPOSITORY"){ rs =>
using(Git.open(getWikiRepositoryDir(rs.getString("USER_NAME"), rs.getString("REPOSITORY_NAME")))){ git =>
defining(git.getRepository.getConfig){ config =>
if(!config.getBoolean("http", "receivepack", false)){
config.setBoolean("http", null, "receivepack", true)
config.save
}
}
}
}
}
},
Version(1, 2),
Version(1, 1),
Version(1, 0),
Version(0, 0)
)
/**
   * The head version of GitBucket.
*/
val headVersion = versions.head
/**
* The version file (GITBUCKET_HOME/version).
*/
lazy val versionFile = new File(GitBucketHome, "version")
/**
* Returns the current version from the version file.
*/
def getCurrentVersion(): Version = {
if(versionFile.exists){
FileUtils.readFileToString(versionFile, "UTF-8").trim.split("\\\\.") match {
case Array(majorVersion, minorVersion) => {
versions.find { v =>
v.majorVersion == majorVersion.toInt && v.minorVersion == minorVersion.toInt
}.getOrElse(Version(0, 0))
}
case _ => Version(0, 0)
}
} else Version(0, 0)
}
}
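// Hedged usage sketch (added here, not part of the original source): the kind of startup check
// that decides whether the migrations listed in `versions` still need to run.
object AutoUpdateExample {
  def updateRequired: Boolean = AutoUpdate.getCurrentVersion() != AutoUpdate.headVersion
}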
| lefou/gitbucket | src/main/scala/gitbucket/core/servlet/AutoUpdate.scala | Scala | apache-2.0 | 6,711 |
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.mysql.message.client
case class PreparedStatementMessage ( statement : String, values : Seq[Any])
extends ClientMessage( ClientMessage.PreparedStatement ) | ilangostl/postgresql-async | mysql-async/src/main/scala/com/github/mauricio/async/db/mysql/message/client/PreparedStatementMessage.scala | Scala | apache-2.0 | 832 |
package com.cloudray.scalapress.plugin.compatibility
import org.scalatest.{OneInstancePerTest, FunSuite}
import org.scalatest.mock.MockitoSugar
import com.cloudray.scalapress.item.{Item, ItemDao}
import com.cloudray.scalapress.folder.{Folder, FolderDao}
import org.mockito.Mockito
import com.cloudray.scalapress.framework.ScalapressContext
/** @author Stephen Samuel */
class ECRedirectControllerTest extends FunSuite with MockitoSugar with OneInstancePerTest {
val context = new ScalapressContext
context.itemDao = mock[ItemDao]
context.folderDao = mock[FolderDao]
val controller = new ECRedirectController(context)
val obj = new Item
obj.id = 14
obj.name = "big man tshirts"
Mockito.when(context.itemDao.find(14)).thenReturn(obj)
val f = new Folder
f.id = 28
f.name = "big man tshirts"
Mockito.when(context.folderDao.find(28)).thenReturn(f)
test("category.do redirects to folder page") {
val url = controller.category(28)
assert("redirect:/folder-28-big-man-tshirts" === url)
}
test("item.do redirects to item page") {
val url = controller.item(14)
assert("redirect:/item-14-big-man-tshirts" === url)
}
test("invalid category id redirects to home page") {
val url = controller.category(645)
assert("redirect:/" === url)
}
test("invalid item id redirects to home page") {
val url = controller.item(97)
assert("redirect:/" === url)
}
}
| vidyacraghav/scalapress | src/test/scala/com/cloudray/scalapress/plugin/compatibility/ECRedirectControllerTest.scala | Scala | apache-2.0 | 1,417 |
package mesosphere.marathon.core.appinfo
import mesosphere.marathon.state.AppDefinition
trait AppSelector {
def matches(app: AppDefinition): Boolean
}
object AppSelector {
def apply(matchesFunc: AppDefinition => Boolean): AppSelector = new AppSelector {
override def matches(app: AppDefinition): Boolean = matchesFunc(app)
}
def all: AppSelector = AppSelector(_ => true)
def forall(selectors: Iterable[AppSelector]): AppSelector = new AllAppSelectorsMustMatch(selectors)
private[appinfo] class AllAppSelectorsMustMatch(selectors: Iterable[AppSelector]) extends AppSelector {
override def matches(app: AppDefinition): Boolean = selectors.forall(_.matches(app))
}
}
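// Hedged illustration (added here, not part of the original source): selectors compose with
// forall, so an app must match every predicate. The two selectors passed in are assumed
// placeholders for real ones (e.g. authorization and health filters).
object AppSelectorExample {
  def conjunction(first: AppSelector, second: AppSelector): AppSelector =
    AppSelector.forall(Seq(first, second))
}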
| ss75710541/marathon | src/main/scala/mesosphere/marathon/core/appinfo/AppSelector.scala | Scala | apache-2.0 | 692 |
import java.io._
import java.util.zip._
class C {
def isWrapper(is: FileInputStream): InputStream = {
val pb = new PushbackInputStream(is, 2)
val signature = new Array[Byte](2)
pb.read(signature)
pb.unread(signature)
if (signature.sameElements(Array(0x1F, 0x8B))) {
new GZIPInputStream(new BufferedInputStream(pb))
} else {
pb
}
}
}
| shimib/scala | test/files/neg/t9636.scala | Scala | bsd-3-clause | 379 |
/*
* Copyright (c) 2011, Daniel Spiewak
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
* - Neither the name of "Anti-XML" nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.codecommit.antixml
import java.util.Scanner
import org.specs2.execute.Pending
import org.specs2.mutable._
class XMLSpecs extends Specification {
import XML._
"xml parsing" should {
"parse an empty elem" in {
fromString("<test/>") mustEqual elem("test")
}
"parse an elem with text" in {
fromString("<test>This is a test</test>") mustEqual elem("test", Text("This is a test"))
}
"parse an elem with sub-elements" in {
fromString("<test><sub1/><sub2/></test>") mustEqual elem("test", elem("sub1"), elem("sub2"))
}
"parse a deeply-nested structure" in {
fromString("<test><sub1><subsub1><subsubsub1/><subsubsub2/></subsub1></sub1><sub2/><sub3><subsub1/></sub3></test>") mustEqual elem("test", elem("sub1", elem("subsub1", elem("subsubsub1"), elem("subsubsub2"))), elem("sub2"), elem("sub3", elem("subsub1")))
}
"parse mixed content" in {
fromString("<test>This is a <inner-test/> of great glory!</test>") mustEqual elem("test", Text("This is a "), elem("inner-test"), Text(" of great glory!"))
}
"preserve whitespace" in {
fromString("<test>\\n \\n\\t\\n</test>") mustEqual elem("test", Text("\\n \\n\\t\\n"))
}
"preserve prefixes" in {
val ns = "urn:my-urn:quux";
fromString("<my:test xmlns:my='urn:my-urn:quux'/>") mustEqual Elem(Some("my"), "test", Attributes(), Map("my" -> ns), Group[Node]())
}
"parse prefixes" in {
fromString("<my:test xmlns:my='urn:my-urn:quux'></my:test>").name mustEqual "test"
}
}
"fromSource" should {
import scala.io.Source
"match the semantics of fromString" in {
val str = "<test><sub1><subsub1><subsubsub1/><subsubsub2/></subsub1></sub1><sub2/><sub3><subsub1/></sub3></test>"
fromSource(Source fromString str) mustEqual fromString(str)
}
"load large files without difficulty" in {
val is = getClass.getResourceAsStream("/discogs_20110201_labels.xml")
fromSource(Source fromInputStream is) must not(throwA[StackOverflowError])
}
}
"fromString" should {
"parse a complex document without stack overflow" in {
val stream = getClass.getResourceAsStream("/jira-rss-derby-project.xml")
val string = new Scanner(stream, "UTF-8").useDelimiter("\\\\A").next
XML.fromString(string) must not(throwA[StackOverflowError])
}
}
def elem(name: QName, children: Node*) = Elem(name.prefix, name.name, Attributes(), Map(), Group(children: _*))
}
| djspiewak/anti-xml | src/test/scala/com/codecommit/antixml/XMLSpecs.scala | Scala | bsd-3-clause | 4,084 |
package io.github.dmitrib.elasticsearch.cli
import com.beust.jcommander.{Parameter, Parameters}
import java.util
import org.elasticsearch.index.query.{QueryBuilder, FilterBuilders, QueryBuilders}
import org.elasticsearch.action.search.SearchType
import org.elasticsearch.common.unit.TimeValue
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import io.github.dmitrib.elasticsearch.cli.EsTool._
trait ScanCommandParams extends {
@Parameter(names = Array("--query"), description = "Search query in Lucene syntax")
var query: String = _
def queryBuilder = {
val q: QueryBuilder = Option(query)
.map(QueryBuilders.queryString)
.getOrElse(QueryBuilders.matchAllQuery())
Option(scriptFilter).fold(q) { script =>
QueryBuilders.filteredQuery(q, FilterBuilders.scriptFilter(script))
}
}
@Parameter(
names = Array("--hits-per-shard"),
description = "Number of hits to extract in each iteration from each shard")
var _hitsPerShard: Integer = _
  lazy val hitsPerShard = Option(_hitsPerShard).fold {
    // No explicit value given: derive a batch size from index statistics so that one scan
    // iteration fetches roughly 5 MB of data in total across all primary shards.
    val resp = client.admin().indices().prepareStats(index).get(TimeValue.timeValueMinutes(5))
    val stats = resp.getTotal
    val primaryShardCount = resp.getShards.count(_.getShardRouting.primary)
    val docCount = stats.getDocs.getCount
    val indexSize = stats.getStore.getSizeInBytes
    // indexSize / docCount is the average document size in bytes
    val hps = (5000000D/(indexSize.toDouble/docCount*primaryShardCount)).toInt
System.err.println(s"using $hps hits per shard")
hps
} { hps =>
hps.intValue
}
@Parameter(
names = Array("--retry-max"),
description = "Number of retries for a failed scan request"
)
var retryMax = 3
@Parameter(
names = Array("--routing"),
description = "Param to calculate a shard to execute search"
)
var routing: String = _
@Parameter(
names = Array("--shard"),
description = "Shard number on which to execute search"
)
var shard: String = _
@Parameter(
names = Array("--script-filter"),
description = "Script to filter results"
)
var scriptFilter: String = _
@Parameter(
names = Array("--exclude"),
description = "A wildcard pattern for fields to exclude from source, can be specified multiple times")
val excludeFields: util.List[String] = new util.ArrayList[String]
@Parameter(
names = Array("--include"),
description = "A wildcard pattern for fields to include in source, can be specified multiple times")
val includeFields: util.List[String] = new util.ArrayList[String]
@Parameter(
names = Array("--src-only"),
description = "print only source JSON")
val srcOnly = false
@Parameter(
names = Array("--src-id-tsv"),
description = "print ID and source separated by TAB")
val srcIdTsv = false
}
@Parameters(commandDescription = "Read search results using scroll")
object ScanCommand extends ScanCommandParams with Runnable {
import EsTool._
def run() {
val reqBuilder = client.prepareSearch(index)
.setSearchType(SearchType.SCAN)
.setScroll(new TimeValue(600000))
.setQuery(queryBuilder)
.setSize(hitsPerShard)
.setTimeout(new TimeValue(requestTimeoutMins, TimeUnit.MINUTES))
Option(kind).foreach(reqBuilder.setTypes(_))
Option(routing).foreach(reqBuilder.setRouting)
Option(shard) foreach { s =>
reqBuilder.setPreference(s"_shards:$s")
}
if (!excludeFields.isEmpty || !includeFields.isEmpty) {
reqBuilder.setFetchSource(
includeFields.asScala.toArray,
excludeFields.asScala.toArray
)
}
val (it, total) = EsUtil.scan(client, reqBuilder, retryMax, requestTimeoutMins)
System.err.println(s"total hits: $total")
it.flatMap(_.getHits.getHits).zip(Stream.from(1).iterator).foreach { case (hit, count) =>
if (count % (total/100) == 0) {
System.err.println(s"${count/(total/100)}%")
}
println(hitToString(hit.getId, hit.getSourceAsString, srcOnly, srcIdTsv))
}
}
}
| gmoskovicz/elasticsearch-cli | src/main/scala/io/github/dmitrib/elasticsearch/cli/ScanCommand.scala | Scala | apache-2.0 | 3,984 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import scala.collection._
import java.util.concurrent._
import java.util.concurrent.atomic._
import kafka.network._
import kafka.utils._
import kafka.metrics.KafkaMetricsGroup
import java.util
import com.yammer.metrics.core.Gauge
/**
* A request whose processing needs to be delayed for at most the given delayMs
 * The associated keys are used for bookkeeping, and represent the "trigger" that causes this request to check if it is satisfied,
* for example a key could be a (topic, partition) pair.
*/
class DelayedRequest(val keys: Seq[Any], val request: RequestChannel.Request, delayMs: Long) extends DelayedItem[RequestChannel.Request](request, delayMs) {
val satisfied = new AtomicBoolean(false)
}
/**
* A helper class for dealing with asynchronous requests with a timeout. A DelayedRequest has a request to delay
* and also a list of keys that can trigger the action. Implementations can add customized logic to control what it means for a given
* request to be satisfied. For example it could be that we are waiting for user-specified number of acks on a given (topic, partition)
* to be able to respond to a request or it could be that we are waiting for a given number of bytes to accumulate on a given request
* to be able to respond to that request (in the simple case we might wait for at least one byte to avoid busy waiting).
*
* For us the key is generally a (topic, partition) pair.
* By calling
* watch(delayedRequest)
* we will add triggers for each of the given keys. It is up to the user to then call
* val satisfied = update(key, request)
 * when a request relevant to the given key occurs. This triggers bookkeeping logic and returns any requests satisfied by this
* new request.
*
* An implementation provides extends two helper functions
* def checkSatisfied(request: R, delayed: T): Boolean
* this function returns true if the given request (in combination with whatever previous requests have happened) satisfies the delayed
* request delayed. This method will likely also need to do whatever bookkeeping is necessary.
*
* The second function is
* def expire(delayed: T)
* this function handles delayed requests that have hit their time limit without being satisfied.
*
*/
abstract class RequestPurgatory[T <: DelayedRequest, R](brokerId: Int = 0, purgeInterval: Int = 10000)
extends Logging with KafkaMetricsGroup {
/* a list of requests watching each key */
private val watchersForKey = new Pool[Any, Watchers](Some((key: Any) => new Watchers))
private val requestCounter = new AtomicInteger(0)
newGauge(
"PurgatorySize",
new Gauge[Int] {
def getValue = watchersForKey.values.map(_.numRequests).sum + expiredRequestReaper.numRequests
}
)
newGauge(
"NumDelayedRequests",
new Gauge[Int] {
def getValue = expiredRequestReaper.unsatisfied.get()
}
)
/* background thread expiring requests that have been waiting too long */
private val expiredRequestReaper = new ExpiredRequestReaper
private val expirationThread = Utils.newThread(name="request-expiration-task", runnable=expiredRequestReaper, daemon=false)
expirationThread.start()
/**
* Add a new delayed request watching the contained keys
*/
def watch(delayedRequest: T) {
requestCounter.getAndIncrement()
for(key <- delayedRequest.keys) {
var lst = watchersFor(key)
lst.add(delayedRequest)
}
expiredRequestReaper.enqueue(delayedRequest)
}
/**
* Update any watchers and return a list of newly satisfied requests.
*/
def update(key: Any, request: R): Seq[T] = {
val w = watchersForKey.get(key)
if(w == null)
Seq.empty
else
w.collectSatisfiedRequests(request)
}
private def watchersFor(key: Any) = watchersForKey.getAndMaybePut(key)
/**
* Check if this request satisfied this delayed request
*/
protected def checkSatisfied(request: R, delayed: T): Boolean
/**
* Handle an expired delayed request
*/
protected def expire(delayed: T)
/**
   * Shut down the expiry thread
*/
def shutdown() {
expiredRequestReaper.shutdown()
}
/**
* A linked list of DelayedRequests watching some key with some associated
* bookkeeping logic.
*/
private class Watchers {
private val requests = new util.ArrayList[T]
def numRequests = requests.size
def add(t: T) {
synchronized {
requests.add(t)
}
}
def purgeSatisfied(): Int = {
synchronized {
val iter = requests.iterator()
var purged = 0
while(iter.hasNext) {
val curr = iter.next
if(curr.satisfied.get()) {
iter.remove()
purged += 1
}
}
purged
}
}
def collectSatisfiedRequests(request: R): Seq[T] = {
val response = new mutable.ArrayBuffer[T]
synchronized {
val iter = requests.iterator()
while(iter.hasNext) {
val curr = iter.next
if(curr.satisfied.get) {
// another thread has satisfied this request, remove it
iter.remove()
} else {
// synchronize on curr to avoid any race condition with expire
// on client-side.
val satisfied = curr synchronized checkSatisfied(request, curr)
if(satisfied) {
iter.remove()
val updated = curr.satisfied.compareAndSet(false, true)
if(updated == true) {
response += curr
expiredRequestReaper.satisfyRequest()
}
}
}
}
}
response
}
}
/**
   * Runnable to expire requests that have sat unfulfilled past their deadline
*/
private class ExpiredRequestReaper extends Runnable with Logging {
this.logIdent = "ExpiredRequestReaper-%d ".format(brokerId)
private val delayed = new DelayQueue[T]
private val running = new AtomicBoolean(true)
private val shutdownLatch = new CountDownLatch(1)
/* The count of elements in the delay queue that are unsatisfied */
private [kafka] val unsatisfied = new AtomicInteger(0)
def numRequests = delayed.size()
/** Main loop for the expiry thread */
def run() {
while(running.get) {
try {
val curr = pollExpired()
if (curr != null) {
curr synchronized {
expire(curr)
}
}
if (requestCounter.get >= purgeInterval) { // see if we need to force a full purge
requestCounter.set(0)
val purged = purgeSatisfied()
debug("Purged %d requests from delay queue.".format(purged))
val numPurgedFromWatchers = watchersForKey.values.map(_.purgeSatisfied()).sum
debug("Purged %d (watcher) requests.".format(numPurgedFromWatchers))
}
} catch {
case e: Exception =>
error("Error in long poll expiry thread: ", e)
}
}
shutdownLatch.countDown()
}
/** Add a request to be expired */
def enqueue(t: T) {
delayed.add(t)
unsatisfied.incrementAndGet()
}
    /** Shut down the expiry thread */
def shutdown() {
debug("Shutting down.")
running.set(false)
shutdownLatch.await()
debug("Shut down complete.")
}
/** Record the fact that we satisfied a request in the stats for the expiry queue */
def satisfyRequest(): Unit = unsatisfied.getAndDecrement()
/**
* Get the next expired event
*/
private def pollExpired(): T = {
while(true) {
val curr = delayed.poll(200L, TimeUnit.MILLISECONDS)
if (curr == null)
return null.asInstanceOf[T]
val updated = curr.satisfied.compareAndSet(false, true)
if(updated) {
unsatisfied.getAndDecrement()
return curr
}
}
throw new RuntimeException("This should not happen")
}
/**
* Delete all expired events from the delay queue
*/
private def purgeSatisfied(): Int = {
var purged = 0
val iter = delayed.iterator()
while(iter.hasNext) {
val curr = iter.next()
if(curr.satisfied.get) {
iter.remove()
purged += 1
}
}
purged
}
}
}
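// Hedged illustration (added here, not part of the original source): a minimal concrete
// purgatory sketch. The "satisfied once the update value is positive" rule below is an
// assumption chosen only to show where checkSatisfied/expire plug in, not Kafka's real logic.
private class ExampleDelayedRequest(keys: Seq[Any], request: RequestChannel.Request, delayMs: Long)
  extends DelayedRequest(keys, request, delayMs)
private class ExamplePurgatory extends RequestPurgatory[ExampleDelayedRequest, Int] {
  // a delayed request is satisfied as soon as any positive update arrives for one of its keys
  protected def checkSatisfied(update: Int, delayed: ExampleDelayedRequest): Boolean = update > 0
  // a real implementation would answer the client here (e.g. with an empty or error response)
  protected def expire(delayed: ExampleDelayedRequest): Unit = ()
}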
| akosiaris/kafka | core/src/main/scala/kafka/server/RequestPurgatory.scala | Scala | apache-2.0 | 9,141 |
/*
* Copyright 2016 Lightcopy
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.Try
import org.apache.parquet.schema.MessageType
import org.apache.spark.sql.types._
/**
* Utility functions to provide list of supported fields, validate and prune columns.
*/
object ParquetSchemaUtils {
// Supported top-level Spark SQL data types
val SUPPORTED_TYPES: Set[DataType] =
Set(IntegerType, LongType, StringType, DateType, TimestampType)
/**
* Validate input schema as `StructType` and throw exception if schema does not match expected
* column types or is empty (see list of supported fields above). Note that this is used in
   * statistics conversion, so when adding a new type, one should update statistics as well.
*/
def validateStructType(schema: StructType): Unit = {
if (schema.isEmpty) {
throw new UnsupportedOperationException(s"Empty schema $schema is not supported, please " +
s"provide at least one column of a type ${SUPPORTED_TYPES.mkString("[", ", ", "]")}")
}
schema.fields.foreach { field =>
if (!SUPPORTED_TYPES.contains(field.dataType)) {
throw new UnsupportedOperationException(
"Schema contains unsupported type, " +
s"field=$field, " +
s"schema=${schema.simpleString}, " +
s"supported types=${SUPPORTED_TYPES.mkString("[", ", ", "]")}")
}
}
}
/**
* Prune invalid columns from StructType leaving only supported data types. Only works for
* top-level columns at this point.
*/
def pruneStructType(schema: StructType): StructType = {
val updatedFields = schema.fields.filter { field =>
SUPPORTED_TYPES.contains(field.dataType)
}
StructType(updatedFields)
}
/**
* Extract top level columns from schema and return them as (field name - field index) pairs.
   * The result contains no duplicate pairs, nor two pairs that share a column name but have
   * different indices.
*/
def topLevelUniqueColumns(schema: MessageType): Seq[(String, Int)] = {
// make sure that names are unique for top level columns
val uniqueColumns = mutable.HashSet[String]()
schema.getFields.asScala.map { field =>
if (uniqueColumns.contains(field.getName)) {
throw new IllegalArgumentException(s"""
| Found field [$field] with duplicate column name '${field.getName}'.
| Schema $schema
| This situation is currently not supported, ensure that names of all top level columns
| in schema are unique""".stripMargin)
}
uniqueColumns.add(field.getName)
val index = schema.getFieldIndex(field.getName)
(field.getName, index)
}
}
/** Update field with provided metadata, performs replacement, not merge */
private def withMetadata(field: StructField, metadata: Metadata): StructField = {
StructField(field.name, field.dataType, field.nullable, metadata)
}
/**
* Merge schemas with preserved metadata for top-level fields.
* TODO: implement merge for nested types.
*/
def merge(schema1: StructType, schema2: StructType): StructType = {
// perform field merge, this does not merge metadata
val mergedSchema = schema1.merge(schema2)
// update field with extracted and merged metadata
val updatedFields = mergedSchema.map { field =>
val field1 = Try(schema1(field.name)).toOption
val field2 = Try(schema2(field.name)).toOption
(field1, field2) match {
case (Some(value1), Some(value2)) =>
val metadata = new MetadataBuilder().
withMetadata(value1.metadata).
withMetadata(value2.metadata).build
if (metadata == field.metadata) field else withMetadata(field, metadata)
case (Some(value1), None) =>
if (value1 == field) field else withMetadata(field, value1.metadata)
case (None, Some(value2)) =>
if (value2 == field) field else withMetadata(field, value2.metadata)
case other =>
field
}
}
// return final merged schema
StructType(updatedFields)
}
}
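// Hedged usage sketch (added here, not part of the original source): pruning an index schema
// down to supported top-level columns before validating it. Field names are placeholders.
object ParquetSchemaUtilsExample {
  def pruneAndValidate(): StructType = {
    val schema = StructType(Seq(
      StructField("id", LongType),       // supported
      StructField("name", StringType),   // supported
      StructField("score", DoubleType))) // unsupported, dropped by pruneStructType
    val pruned = ParquetSchemaUtils.pruneStructType(schema)
    ParquetSchemaUtils.validateStructType(pruned)
    pruned
  }
}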
| lightcopy/parquet-index | src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetSchemaUtils.scala | Scala | apache-2.0 | 4,719 |
package mesosphere.marathon
import org.rogach.scallop.ScallopConf
import java.net.InetSocketAddress
import mesosphere.util.BackToTheFuture
import scala.concurrent.duration._
trait ZookeeperConf extends ScallopConf {
private val userAndPass = """[^/@]+"""
private val hostAndPort = """[A-z0-9-.]+(?::\\d+)?"""
private val zkNode = """[^/]+"""
private val zkURLPattern = s"""^zk://(?:$userAndPass@)?($hostAndPort(?:,$hostAndPort)*)(/$zkNode(?:/$zkNode)*)$$""".r
@Deprecated
lazy val zooKeeperHostString = opt[String]("zk_hosts",
descr = "[DEPRECATED use zk] The list of ZooKeeper servers for storing state",
default = Some("localhost:2181"))
@Deprecated
lazy val zooKeeperPath = opt[String]("zk_state",
descr = "[DEPRECATED use zk] Path in ZooKeeper for storing state",
default = Some("/marathon"))
lazy val zooKeeperTimeout = opt[Long]("zk_timeout",
descr = "The timeout for ZooKeeper in milliseconds",
default = Some(10000L))
lazy val zooKeeperUrl = opt[String]("zk",
descr = "ZooKeeper URL for storing state. Format: zk://host1:port1,host2:port2,.../path",
validate = (in) => zkURLPattern.pattern.matcher(in).matches()
)
lazy val zooKeeperMaxVersions = opt[Int]("zk_max_versions",
descr = "Limit the number of versions, stored for one entity."
)
//do not allow mixing of hostState and url
conflicts(zooKeeperHostString, List(zooKeeperUrl))
conflicts(zooKeeperPath, List(zooKeeperUrl))
conflicts(zooKeeperUrl, List(zooKeeperHostString, zooKeeperPath))
def zooKeeperStatePath(): String = "%s/state".format(zkPath)
def zooKeeperLeaderPath(): String = "%s/leader".format(zkPath)
def zooKeeperServerSetPath(): String = "%s/apps".format(zkPath)
def zooKeeperHostAddresses: Seq[InetSocketAddress] =
for (s <- zkHosts.split(",")) yield {
val splits = s.split(":")
require(splits.length == 2, "expected host:port for zk servers")
new InetSocketAddress(splits(0), splits(1).toInt)
}
def zkURL(): String = zooKeeperUrl.get.getOrElse(s"zk://${zooKeeperHostString()}${zooKeeperPath()}")
lazy val zkHosts = zkURL match { case zkURLPattern(server, _) => server }
lazy val zkPath = zkURL match { case zkURLPattern(_, path) => path }
lazy val zkTimeoutDuration = Duration(zooKeeperTimeout(), MILLISECONDS)
lazy val zkFutureTimeout = BackToTheFuture.Timeout(zkTimeoutDuration)
}
| 14Zen/marathon | src/main/scala/mesosphere/marathon/ZookeeperConf.scala | Scala | apache-2.0 | 2,388 |
package pl.touk.nussknacker.engine.requestresponse.deployment
import java.io.{File, PrintWriter}
import java.nio.charset.StandardCharsets
import pl.touk.nussknacker.engine.api.process.ProcessName
import io.circe.syntax._
import pl.touk.nussknacker.engine.api.CirceUtil
import pl.touk.nussknacker.engine.util.Implicits.SourceIsReleasable
import scala.io.Source
import scala.util.Using
trait ProcessRepository {
def add(id: ProcessName, deploymentData: RequestResponseDeploymentData) : Unit
def remove(id: ProcessName) : Unit
def loadAll: Map[ProcessName, RequestResponseDeploymentData]
}
class EmptyProcessRepository extends ProcessRepository {
override def add(id: ProcessName, deploymentData: RequestResponseDeploymentData): Unit = {}
override def remove(id: ProcessName): Unit = {}
override def loadAll: Map[ProcessName, RequestResponseDeploymentData] = Map()
}
object FileProcessRepository {
def apply(path: String) : FileProcessRepository = {
val dir = new File(path)
dir.mkdirs()
if (!dir.isDirectory || !dir.canRead) {
throw new IllegalArgumentException(s"Cannot use $dir for storing scenarios")
}
new FileProcessRepository(dir)
}
}
class FileProcessRepository(path: File) extends ProcessRepository {
override def add(id: ProcessName, deploymentData: RequestResponseDeploymentData): Unit = {
val outFile = new File(path, id.value)
Using.resource(new PrintWriter(outFile, StandardCharsets.UTF_8.name())) { writer =>
writer.write(deploymentData.asJson.spaces2)
}
}
override def remove(id: ProcessName): Unit = {
new File(path, id.value).delete()
}
private def fileToString(file: File) =
Using.resource(Source.fromFile(file, StandardCharsets.UTF_8.name())) { s =>
s.getLines().mkString("\\n")
}
override def loadAll: Map[ProcessName, RequestResponseDeploymentData] = path.listFiles().filter(_.isFile).map { file =>
ProcessName(file.getName) -> CirceUtil.decodeJson[RequestResponseDeploymentData](fileToString(file))
.fold(error => throw new IllegalStateException(s"Could not decode deployment data for file: $file", error), identity)
}.toMap
}
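// Hedged usage sketch (added here, not part of the original source): storing a deployment and
// reading it back through the file-based repository; the directory path is a placeholder.
object FileProcessRepositoryExample {
  def roundTrip(dir: String, name: ProcessName, data: RequestResponseDeploymentData): RequestResponseDeploymentData = {
    val repository = FileProcessRepository(dir)
    repository.add(name, data)
    repository.loadAll(name)
  }
}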
| TouK/nussknacker | engine/lite/request-response/runtime/src/main/scala/pl/touk/nussknacker/engine/requestresponse/deployment/ProcessRepository.scala | Scala | apache-2.0 | 2,170 |
/*
* (C) Copyright 2015 Atomic BITS (http://atomicbits.io).
*
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Affero General Public License
* (AGPL) version 3.0 which accompanies this distribution, and is available in
* the LICENSE file or at http://www.gnu.org/licenses/agpl-3.0.en.html
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Affero General Public License for more details.
*
* Contributors:
* Peter Rigole
*
*/
package io.atomicbits.scraml.parser.model
/**
* Created by peter on 17/05/15, Atomic BITS bvba (http://atomicbits.io).
*/
case class Parameter(parameterType: ParameterType, required: Boolean, repeated: Boolean = false)
object Parameter {
def apply(uriParameter: org.raml.model.parameter.AbstractParam): Parameter = {
uriParameter.getType match {
case org.raml.model.ParamType.STRING => Parameter(StringType, uriParameter.isRequired, uriParameter.isRepeat)
case org.raml.model.ParamType.NUMBER => Parameter(NumberType, uriParameter.isRequired, uriParameter.isRepeat)
case org.raml.model.ParamType.INTEGER => Parameter(IntegerType, uriParameter.isRequired, uriParameter.isRepeat)
case org.raml.model.ParamType.BOOLEAN => Parameter(BooleanType, uriParameter.isRequired, uriParameter.isRepeat)
case org.raml.model.ParamType.DATE => Parameter(DateType, uriParameter.isRequired, uriParameter.isRepeat)
case org.raml.model.ParamType.FILE => Parameter(FileType, uriParameter.isRequired, uriParameter.isRepeat)
}
}
}
| rcavalcanti/scraml | modules/scraml-parser/src/main/scala/io/atomicbits/scraml/parser/model/Parameter.scala | Scala | agpl-3.0 | 1,727 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.typed
import java.io.Serializable
import java.util.Random
import cascading.flow.FlowDef
import cascading.pipe.{ Each, Pipe }
import cascading.tap.Tap
import cascading.tuple.{ Fields, Tuple => CTuple, TupleEntry }
import com.twitter.algebird.{ Aggregator, Monoid, Semigroup }
import com.twitter.scalding.TupleConverter.{ TupleEntryConverter, singleConverter, tuple2Converter }
import com.twitter.scalding.TupleSetter.{ singleSetter, tup2Setter }
import com.twitter.scalding._
import com.twitter.scalding.serialization.macros.impl.BinaryOrdering._
/**
* factory methods for TypedPipe, which is the typed representation of distributed lists in scalding.
* This object is here rather than in the typed package because a lot of code was written using
* the functions in the object, which we do not see how to hide with package object tricks.
*/
object TypedPipe extends Serializable {
import Dsl.flowDefToRichFlowDef
/**
* Create a TypedPipe from a cascading Pipe, some Fields and the type T
* Avoid this if you can. Prefer from(TypedSource).
*/
def from[T](pipe: Pipe, fields: Fields)(implicit flowDef: FlowDef, mode: Mode, conv: TupleConverter[T]): TypedPipe[T] = {
val localFlow = flowDef.onlyUpstreamFrom(pipe)
new TypedPipeInst[T](pipe, fields, localFlow, mode, Converter(conv))
}
/**
* Create a TypedPipe from a TypedSource. This is the preferred way to make a TypedPipe
*/
def from[T](source: TypedSource[T]): TypedPipe[T] =
TypedPipeFactory({ (fd, mode) =>
val pipe = source.read(fd, mode)
from(pipe, source.sourceFields)(fd, mode, source.converter)
})
/**
* Create a TypedPipe from an Iterable in memory.
*/
def from[T](iter: Iterable[T]): TypedPipe[T] =
IterablePipe[T](iter)
/**
* Input must be a Pipe with exactly one Field
* Avoid this method and prefer from(TypedSource) if possible
*/
def fromSingleField[T](pipe: Pipe)(implicit fd: FlowDef, mode: Mode): TypedPipe[T] =
from(pipe, new Fields(0))(fd, mode, singleConverter[T])
/**
* Create an empty TypedPipe. This is sometimes useful when a method must return
* a TypedPipe, but sometimes at runtime we can check a condition and see that
* it should be empty.
* This is the zero of the Monoid[TypedPipe]
*/
def empty: TypedPipe[Nothing] = EmptyTypedPipe
/**
* This enables pipe.hashJoin(that) or pipe.join(that) syntax
* This is a safe enrichment because hashJoinable and CoGroupable are
* only used in the argument position or to give cogroup, join, leftJoin, rightJoin, outerJoin
* methods. Since those methods are unlikely to be used on TypedPipe in the future, this
* enrichment seems safe.
*
* This method is the Vitaly-was-right method.
*/
implicit def toHashJoinable[K, V](pipe: TypedPipe[(K, V)])(implicit ord: Ordering[K]): HashJoinable[K, V] =
new HashJoinable[K, V] {
def mapped = pipe
def keyOrdering = ord
def reducers = None
val descriptions: Seq[String] = List(LineNumber.tryNonScaldingCaller.toString)
def joinFunction = CoGroupable.castingJoinFunction[V]
}
/**
* TypedPipe instances are monoids. They are isomorphic to multisets.
*/
implicit def typedPipeMonoid[T]: Monoid[TypedPipe[T]] = new Monoid[TypedPipe[T]] {
def zero = empty
def plus(left: TypedPipe[T], right: TypedPipe[T]): TypedPipe[T] =
left ++ right
}
}
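// Illustrative construction sketch (not part of the original source). TextLine is
// the usual scalding text source defined elsewhere; the file name is a placeholder.
//
//   val inMemory: TypedPipe[Int] = TypedPipe.from(List(1, 2, 3))
//   val fromSource: TypedPipe[String] = TypedPipe.from(TextLine("input.txt"))
//   val nothing: TypedPipe[Nothing] = TypedPipe.empty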
/**
* Think of a TypedPipe as a distributed unordered list that may or may not yet
* have been materialized in memory or disk.
*
* Represents a phase in a distributed computation on an input data source
* Wraps a cascading Pipe object, and holds the transformation done up until that point
*/
trait TypedPipe[+T] extends Serializable {
/**
* Implements a cross product. The right side should be tiny
* This gives the same results as
* {code for { l <- list1; l2 <- list2 } yield (l, l2) }
*/
def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)]
/**
* This is the fundamental mapper operation.
* It behaves in a way similar to List.flatMap, which means that each
* item is fed to the input function, which can return 0, 1, or many outputs
* (as a TraversableOnce) per input. The returned results will be iterated through once
* and then flattened into a single TypedPipe which is passed to the next step in the
* pipeline.
*
* This behavior makes it a powerful operator -- it can be used to filter records
* (by returning 0 items for a given input), it can be used the way map is used
* (by returning 1 item per input), it can be used to explode 1 input into many outputs,
* or even a combination of all of the above at once.
*/
def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U]
/**
* Export back to a raw cascading Pipe. useful for interop with the scalding
* Fields API or with Cascading code.
* Avoid this if possible. Prefer to write to TypedSink.
*/
final def toPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe = {
import Dsl._
// Ensure we hook into all pipes coming out of the typed API to apply the FlowState's properties on their pipes
val pipe = asPipe[U](fieldNames).applyFlowConfigProperties(flowDef)
RichPipe.setPipeDescriptions(pipe, List(LineNumber.tryNonScaldingCaller.toString))
}
/**
* Provide the internal implementation to get from a typed pipe to a cascading Pipe
*/
protected def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe
/////////////////////////////////////////////
//
// The following have default implementations in terms of the above
//
/////////////////////////////////////////////
import Dsl._
/**
* Merge two TypedPipes (no order is guaranteed)
* This is only realized when a group (or join) is
* performed.
*/
def ++[U >: T](other: TypedPipe[U]): TypedPipe[U] = other match {
case EmptyTypedPipe => this
case IterablePipe(thatIter) if thatIter.isEmpty => this
case _ => MergedTypedPipe(this, other)
}
/**
* Aggregate all items in this pipe into a single ValuePipe
*
* Aggregators are composable reductions that allow you to glue together
* several reductions and process them in one pass.
*
* Same as groupAll.aggregate.values
*/
def aggregate[B, C](agg: Aggregator[T, B, C]): ValuePipe[C] =
ComputedValue(groupAll.aggregate(agg).values)
/**
* Put the items in this into the keys, and unit as the value in a Group
* in some sense, this is the dual of groupAll
*/
@annotation.implicitNotFound(msg = "For asKeys method to work, the type in TypedPipe must have an Ordering.")
def asKeys[U >: T](implicit ord: Ordering[U]): Grouped[U, Unit] =
map((_, ())).group
/**
* If T <:< U, then this is safe to treat as TypedPipe[U] due to covariance
*/
protected def raiseTo[U](implicit ev: T <:< U): TypedPipe[U] =
this.asInstanceOf[TypedPipe[U]]
/**
* Filter and map. See scala.collection.List.collect.
* {@code
* collect { case Some(x) => fn(x) }
* }
*/
def collect[U](fn: PartialFunction[T, U]): TypedPipe[U] =
filter(fn.isDefinedAt(_)).map(fn)
/**
* Attach a ValuePipe to each element this TypedPipe
*/
def cross[V](p: ValuePipe[V]): TypedPipe[(T, V)] =
p match {
case EmptyValue => EmptyTypedPipe
case LiteralValue(v) => map { (_, v) }
case ComputedValue(pipe) => cross(pipe)
}
/** prints the current pipe to stdout */
def debug: TypedPipe[T] = onRawSingle(_.debug)
/** adds a description to the pipe */
def withDescription(description: String): TypedPipe[T] = new WithDescriptionTypedPipe[T](this, description)
/**
* Returns the set of distinct elements in the TypedPipe
* This is the same as: .map((_, ())).group.sum.keys
* If you want a distinct while joining, consider:
* instead of:
* {@code
* a.join(b.distinct.asKeys)
* }
* manually do the distinct:
* {@code
* a.join(b.asKeys.sum)
* }
* The latter creates 1 map/reduce phase rather than 2
*/
@annotation.implicitNotFound(msg = "For distinct method to work, the type in TypedPipe must have an Ordering.")
def distinct(implicit ord: Ordering[_ >: T]): TypedPipe[T] =
asKeys(ord.asInstanceOf[Ordering[T]]).sum.keys
/**
* Returns the set of distinct elements identified by a given lambda extractor in the TypedPipe
*/
@annotation.implicitNotFound(msg = "For distinctBy method to work, the type to distinct on in the TypedPipe must have an Ordering.")
def distinctBy[U](fn: T => U, numReducers: Option[Int] = None)(implicit ord: Ordering[_ >: U]): TypedPipe[T] = {
// cast because Ordering is not contravariant, but should be (and this cast is safe)
implicit val ordT: Ordering[U] = ord.asInstanceOf[Ordering[U]]
// Semigroup to handle duplicates for a given key might have different values.
implicit val sg = new Semigroup[T] {
def plus(a: T, b: T) = b
}
val op = map{ tup => (fn(tup), tup) }.sumByKey
val reduced = numReducers match {
case Some(red) => op.withReducers(red)
case None => op
}
reduced.map(_._2)
}
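  // Illustrative sketch (not part of the original source): keeping one record per
  // key extracted by a function. The User type and the `userList` collection are
  // assumptions.
  //
  //   case class User(id: Long, email: String)
  //   val users: TypedPipe[User] = TypedPipe.from(userList)
  //   val onePerEmail: TypedPipe[User] = users.distinctBy(_.email)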
/** Merge two TypedPipes of different types by using Either */
def either[R](that: TypedPipe[R]): TypedPipe[Either[T, R]] =
map(Left(_)) ++ (that.map(Right(_)))
/**
* Sometimes useful for implementing custom joins with groupBy + mapValueStream when you know
* that the value/key can fit in memory. Beware.
*/
def eitherValues[K, V, R](that: TypedPipe[(K, R)])(implicit ev: T <:< (K, V)): TypedPipe[(K, Either[V, R])] =
mapValues { (v: V) => Left(v) } ++ (that.mapValues { (r: R) => Right(r) })
/**
* If you are going to create two branches or forks,
* it may be more efficient to call this method first
* which will create a node in the cascading graph.
* Without this, both full branches of the fork will be
* put into separate cascading pipes, which can, in some cases,
* be slower.
*
* Ideally the planner would see this
*/
def fork: TypedPipe[T] = onRawSingle(identity)
/**
* WARNING This is dangerous, and may not be what you think.
*
* limit the output to AT MOST count items.
* useful for debugging, but probably that's about it.
* The number may be less than count, and not sampled by any particular method
*
* This may change in the future to be exact, but that will add 1 MR step
*/
def limit(count: Int): TypedPipe[T] = onRawSingle(_.limit(count))
/** Transform each element via the function f */
def map[U](f: T => U): TypedPipe[U] = flatMap { t => Iterator(f(t)) }
/** Transform only the values (sometimes requires giving the types due to scala type inference) */
def mapValues[K, V, U](f: V => U)(implicit ev: T <:< (K, V)): TypedPipe[(K, U)] =
raiseTo[(K, V)].map { case (k, v) => (k, f(v)) }
/** Similar to mapValues, but allows to return a collection of outputs for each input value */
def flatMapValues[K, V, U](f: V => TraversableOnce[U])(implicit ev: T <:< (K, V)): TypedPipe[(K, U)] =
raiseTo[(K, V)].flatMap { case (k, v) => f(v).map { v2 => k -> v2 } }
/**
* Keep only items that satisfy this predicate
*/
def filter(f: T => Boolean): TypedPipe[T] =
flatMap { t => if (f(t)) Iterator(t) else Iterator.empty }
// This is just to appease for comprehension
def withFilter(f: T => Boolean): TypedPipe[T] = filter(f)
/**
* If T is a (K, V) for some V, then we can use this function to filter.
* Prefer to use this if your filter only touches the key.
*
* This is here to match the function in KeyedListLike, where it is optimized
*/
def filterKeys[K](fn: K => Boolean)(implicit ev: T <:< (K, Any)): TypedPipe[T] =
filter { ka => fn(ka.asInstanceOf[(K, Any)]._1) }
/**
* Keep only items that don't satisfy the predicate.
* `filterNot` is the same as `filter` with a negated predicate.
*/
def filterNot(f: T => Boolean): TypedPipe[T] =
filter(!f(_))
/** flatten an Iterable */
def flatten[U](implicit ev: T <:< TraversableOnce[U]): TypedPipe[U] =
flatMap { _.asInstanceOf[TraversableOnce[U]] } // don't use ev which may not be serializable
/**
* flatten just the values
   * This is more useful on KeyedListLike, but added here to reduce asymmetry in the APIs
*/
def flattenValues[K, U](implicit ev: T <:< (K, TraversableOnce[U])): TypedPipe[(K, U)] =
raiseTo[(K, TraversableOnce[U])].flatMap { case (k, us) => us.map((k, _)) }
protected def onRawSingle(onPipe: Pipe => Pipe): TypedPipe[T] = {
val self = this
TypedPipeFactory({ (fd, m) =>
val pipe = self.toPipe[T](new Fields(java.lang.Integer.valueOf(0)))(fd, m, singleSetter)
TypedPipe.fromSingleField[T](onPipe(pipe))(fd, m)
})
}
/**
* Force a materialization of this pipe prior to the next operation.
* This is useful if you filter almost everything before a hashJoin, for instance.
* This is useful for experts who see some heuristic of the planner causing
* slower performance.
*/
def forceToDisk: TypedPipe[T] = onRawSingle(_.forceToDisk)
/**
* This is the default means of grouping all pairs with the same key. Generally this triggers 1 Map/Reduce transition
*/
def group[K, V](implicit ev: <:<[T, (K, V)], ord: Ordering[K]): Grouped[K, V] =
//If the type of T is not (K,V), then at compile time, this will fail. It uses implicits to do
//a compile time check that one type is equivalent to another. If T is not (K,V), we can't
//automatically group. We cast because it is safe to do so, and we need to convert to K,V, but
//the ev is not needed for the cast. In fact, you can do the cast with ev(t) and it will return
//it as (K,V), but the problem is, ev is not serializable. So we do the cast, which due to ev
//being present, will always pass.
Grouped(raiseTo[(K, V)]).withDescription(LineNumber.tryNonScaldingCaller.toString)
/** Send all items to a single reducer */
def groupAll: Grouped[Unit, T] = groupBy(x => ())(ordSer[Unit]).withReducers(1)
/** Given a key function, add the key, then call .group */
def groupBy[K](g: T => K)(implicit ord: Ordering[K]): Grouped[K, T] =
map { t => (g(t), t) }.group
/**
* Forces a shuffle by randomly assigning each item into one
* of the partitions.
*
   * This is for the case where your mappers take a long time, and
* it is faster to shuffle them to more reducers and then operate.
*
* You probably want shard if you are just forcing a shuffle.
*/
def groupRandomly(partitions: Int): Grouped[Int, T] = {
// Make it lazy so all mappers get their own:
lazy val rng = new java.util.Random(123) // seed this so it is repeatable
groupBy { _ => rng.nextInt(partitions) }(ordSer[Int])
.withReducers(partitions)
}
/**
* Partitions this into two pipes according to a predicate.
*
* Sometimes what you really want is a groupBy in these cases.
*/
def partition(p: T => Boolean): (TypedPipe[T], TypedPipe[T]) = {
val forked = fork
(forked.filter(p), forked.filterNot(p))
}
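  // Illustrative sketch (not part of the original source): splitting a pipe into two
  // branches with a single upstream fork.
  //
  //   val numbers: TypedPipe[Int] = TypedPipe.from(1 to 100)
  //   val (evens, odds) = numbers.partition(_ % 2 == 0)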
private[this] def defaultSeed: Long = System.identityHashCode(this) * 2654435761L ^ System.currentTimeMillis
/**
* Sample uniformly independently at random each element of the pipe
* does not require a reduce step.
*/
def sample(percent: Double): TypedPipe[T] = sample(percent, defaultSeed)
/**
* Sample uniformly independently at random each element of the pipe with
* a given seed.
* Does not require a reduce step.
*/
def sample(percent: Double, seed: Long): TypedPipe[T] = {
// Make sure to fix the seed, otherwise restarts cause subtle errors
lazy val rand = new Random(seed)
filter(_ => rand.nextDouble < percent)
}
/**
* This does a sum of values WITHOUT triggering a shuffle.
* the contract is, if followed by a group.sum the result is the same
* with or without this present, and it never increases the number of
* items. BUT due to the cost of caching, it might not be faster if
* there is poor key locality.
*
* It is only useful for expert tuning,
* and best avoided unless you are struggling with performance problems.
* If you are not sure you need this, you probably don't.
*
* The main use case is to reduce the values down before a key expansion
* such as is often done in a data cube.
*/
def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]): TypedPipe[(K, V)] = {
val fields: Fields = ('key, 'value)
val selfKV = raiseTo[(K, V)]
TypedPipeFactory({ (fd, mode) =>
val pipe = selfKV.toPipe(fields)(fd, mode, tup2Setter)
val msr = new MapsideReduce(sg, 'key, 'value, None)(singleConverter[V], singleSetter[V])
TypedPipe.from[(K, V)](pipe.eachTo(fields -> fields) { _ => msr }, fields)(fd, mode, tuple2Converter)
})
}
/**
* Used to force a shuffle into a given size of nodes.
* Only use this if your mappers are taking far longer than
* the time to shuffle.
*/
def shard(partitions: Int): TypedPipe[T] = {
// Make it lazy so all mappers get their own:
lazy val rng = new java.util.Random(123) // seed this so it is repeatable
groupBy { _ => rng.nextInt }(ordSer[Int])
.withReducers(partitions).forceToReducers.values
}
/**
* Reasonably common shortcut for cases of total associative/commutative reduction
* returns a ValuePipe with only one element if there is any input, otherwise EmptyValue.
*/
def sum[U >: T](implicit plus: Semigroup[U]): ValuePipe[U] = ComputedValue(groupAll.sum[U].values)
/**
* Reasonably common shortcut for cases of associative/commutative reduction by Key
*/
def sumByKey[K, V](implicit ev: T <:< (K, V), ord: Ordering[K], plus: Semigroup[V]): UnsortedGrouped[K, V] =
group[K, V].sum[V]
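  // Illustrative sketch (not part of the original source): the classic word count,
  // combining flatMap, map and sumByKey. TextLine is the usual scalding source and
  // the path is a placeholder.
  //
  //   val counts: UnsortedGrouped[String, Long] =
  //     TypedPipe.from(TextLine("docs.txt"))
  //       .flatMap(_.split("\\s+"))
  //       .map(word => (word, 1L))
  //       .sumByKey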
/**
* This is used when you are working with Execution[T] to create loops.
* You might do this to checkpoint and then flatMap Execution to continue
* from there. Probably only useful if you need to flatMap it twice to fan
   * out the data into two child jobs.
*
* This writes the current TypedPipe into a temporary file
* and then opens it after complete so that you can continue from that point
*/
def forceToDiskExecution: Execution[TypedPipe[T]] = Execution
.getConfigMode
.flatMap {
case (conf, mode) =>
mode match {
case _: CascadingLocal => // Local or Test mode
val dest = new MemorySink[T]
writeExecution(dest).map { _ => TypedPipe.from(dest.readResults) }
case _: HadoopMode =>
// come up with unique temporary filename, use the config here
// TODO: refactor into TemporarySequenceFile class
val tmpDir = conf.get("hadoop.tmp.dir")
.orElse(conf.get("cascading.tmp.dir"))
.getOrElse("/tmp")
val tmpSeq = tmpDir + "/scalding/snapshot-" + java.util.UUID.randomUUID + ".seq"
val dest = source.TypedSequenceFile[T](tmpSeq)
writeThrough(dest)
}
}
/**
* This gives an Execution that when run evaluates the TypedPipe,
* writes it to disk, and then gives you an Iterable that reads from
* disk on the submit node each time .iterator is called.
* Because of how scala Iterables work, mapping/flatMapping/filtering
* the Iterable forces a read of the entire thing. If you need it to
* be lazy, call .iterator and use the Iterator inside instead.
*/
def toIterableExecution: Execution[Iterable[T]] =
forceToDiskExecution.flatMap(_.toIterableExecution)
/** use a TupleUnpacker to flatten U out into a cascading Tuple */
def unpackToPipe[U >: T](fieldNames: Fields)(implicit fd: FlowDef, mode: Mode, up: TupleUnpacker[U]): Pipe = {
val setter = up.newSetter(fieldNames)
toPipe[U](fieldNames)(fd, mode, setter)
}
/**
* This attaches a function that is called at the end of the map phase on
* EACH of the tasks that are executing.
* This is for expert use only. You probably won't ever need it. Try hard
* to avoid it. Execution also has onComplete that can run when an Execution
* has completed.
*/
def onComplete(fn: () => Unit): TypedPipe[T] = new WithOnComplete[T](this, fn)
/**
* Safely write to a TypedSink[T]. If you want to write to a Source (not a Sink)
* you need to do something like: toPipe(fieldNames).write(dest)
* @return a pipe equivalent to the current pipe.
*/
def write(dest: TypedSink[T])(implicit flowDef: FlowDef, mode: Mode): TypedPipe[T] = {
// Make sure that we don't render the whole pipeline twice:
val res = fork
dest.writeFrom(res.toPipe[T](dest.sinkFields)(flowDef, mode, dest.setter))
res
}
/**
* This is the functionally pure approach to building jobs. Note,
* that you have to call run on the result or flatMap/zip it
* into an Execution that is run for anything to happen here.
*/
def writeExecution(dest: TypedSink[T]): Execution[Unit] =
Execution.write(this, dest)
/**
* If you want to write to a specific location, and then read from
* that location going forward, use this.
*/
def writeThrough[U >: T](dest: TypedSink[T] with TypedSource[U]): Execution[TypedPipe[U]] =
writeExecution(dest)
.map(_ => TypedPipe.from(dest))
/**
* If you want to writeThrough to a specific file if it doesn't already exist,
* and otherwise just read from it going forward, use this.
*/
def make[U >: T](dest: FileSource with TypedSink[T] with TypedSource[U]): Execution[TypedPipe[U]] =
Execution.getMode.flatMap { mode =>
try {
dest.validateTaps(mode)
Execution.from(TypedPipe.from(dest))
} catch {
case ivs: InvalidSourceException => writeThrough(dest)
}
}
/** Just keep the keys, or ._1 (if this type is a Tuple2) */
def keys[K](implicit ev: <:<[T, (K, Any)]): TypedPipe[K] =
// avoid capturing ev in the closure:
raiseTo[(K, Any)].map(_._1)
/** swap the keys with the values */
def swap[K, V](implicit ev: <:<[T, (K, V)]): TypedPipe[(V, K)] =
raiseTo[(K, V)].map(_.swap)
/** Just keep the values, or ._2 (if this type is a Tuple2) */
def values[V](implicit ev: <:<[T, (Any, V)]): TypedPipe[V] =
raiseTo[(Any, V)].map(_._2)
/**
   * ValuePipe may be empty, so this attaches it as an Option
* cross is the same as leftCross(p).collect { case (t, Some(v)) => (t, v) }
*/
def leftCross[V](p: ValuePipe[V]): TypedPipe[(T, Option[V])] =
p match {
case EmptyValue => map { (_, None) }
case LiteralValue(v) => map { (_, Some(v)) }
case ComputedValue(pipe) => leftCross(pipe)
}
/** uses hashJoin but attaches None if thatPipe is empty */
def leftCross[V](thatPipe: TypedPipe[V]): TypedPipe[(T, Option[V])] =
map(((), _)).hashLeftJoin(thatPipe.groupAll).values
/**
* common pattern of attaching a value and then map
* recommended style:
* {@code
* mapWithValue(vpu) {
* case (t, Some(u)) => op(t, u)
* case (t, None) => // if you never expect this:
* sys.error("unexpected empty value pipe")
* }
* }
*/
def mapWithValue[U, V](value: ValuePipe[U])(f: (T, Option[U]) => V): TypedPipe[V] =
leftCross(value).map(t => f(t._1, t._2))
/**
* common pattern of attaching a value and then flatMap
* recommended style:
* {@code
* flatMapWithValue(vpu) {
* case (t, Some(u)) => op(t, u)
* case (t, None) => // if you never expect this:
* sys.error("unexpected empty value pipe")
* }
* }
*/
def flatMapWithValue[U, V](value: ValuePipe[U])(f: (T, Option[U]) => TraversableOnce[V]): TypedPipe[V] =
leftCross(value).flatMap(t => f(t._1, t._2))
/**
* common pattern of attaching a value and then filter
* recommended style:
* {@code
* filterWithValue(vpu) {
* case (t, Some(u)) => op(t, u)
* case (t, None) => // if you never expect this:
* sys.error("unexpected empty value pipe")
* }
* }
*/
def filterWithValue[U](value: ValuePipe[U])(f: (T, Option[U]) => Boolean): TypedPipe[T] =
leftCross(value).filter(t => f(t._1, t._2)).map(_._1)
/**
* These operations look like joins, but they do not force any communication
* of the current TypedPipe. They are mapping operations where this pipe is streamed
* through one item at a time.
*
* WARNING These behave semantically very differently than cogroup.
* This is because we handle (K,V) tuples on the left as we see them.
* The iterable on the right is over all elements with a matching key K, and it may be empty
* if there are no values for this key K.
*/
def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
smaller.hashCogroupOn(ev(this))(joiner)
  /** Do an inner-join without shuffling this TypedPipe, but replicating the argument to all tasks */
def hashJoin[K, V, W](smaller: HashJoinable[K, W])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, (V, W))] =
hashCogroup[K, V, W, (V, W)](smaller)(Joiner.hashInner2)
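  // Illustrative sketch (not part of the original source): a map-side join of a large
  // pipe of (userId, event) pairs against a small lookup pipe. `eventList` and
  // `userList` are assumed collections; the right-hand side must be small enough to
  // replicate to every task.
  //
  //   val events: TypedPipe[(Long, String)] = TypedPipe.from(eventList)
  //   val userNames: TypedPipe[(Long, String)] = TypedPipe.from(userList)
  //   val joined: TypedPipe[(Long, (String, String))] = events.hashJoin(userNames.group)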
  /** Do a left join without shuffling this TypedPipe, but replicating the argument to all tasks */
def hashLeftJoin[K, V, W](smaller: HashJoinable[K, W])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, (V, Option[W]))] =
hashCogroup[K, V, W, (V, Option[W])](smaller)(Joiner.hashLeft2)
/**
* For each element, do a map-side (hash) left join to look up a value
*/
def hashLookup[K >: T, V](grouped: HashJoinable[K, V]): TypedPipe[(K, Option[V])] =
map((_, ()))
.hashLeftJoin(grouped)
.map { case (t, (_, optV)) => (t, optV) }
/**
   * Enables joining when this TypedPipe has some keys with very many values
   * but many keys with very few values. For instance, a graph where some nodes have
* millions of neighbors, but most have only a few.
*
* We build a (count-min) sketch of each key's frequency, and we use that
* to shard the heavy keys across many reducers.
* This increases communication cost in order to reduce the maximum time needed
* to complete the join.
*
* {@code pipe.sketch(100).join(thatPipe) }
* will add an extra map/reduce job over a standard join to create the count-min-sketch.
* This will generally only be beneficial if you have really heavy skew, where without
* this you have 1 or 2 reducers taking hours longer than the rest.
*/
def sketch[K, V](reducers: Int,
eps: Double = 1.0E-5, //272k width = 1MB per row
delta: Double = 0.01, //5 rows (= 5 hashes)
seed: Int = 12345)(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)],
serialization: K => Array[Byte],
ordering: Ordering[K]): Sketched[K, V] =
Sketched(ev(this), reducers, delta, eps, seed)
/**
* If any errors happen below this line, but before a groupBy, write to a TypedSink
*/
def addTrap[U >: T](trapSink: Source with TypedSink[T])(implicit conv: TupleConverter[U]): TypedPipe[U] =
TypedPipeFactory({ (flowDef, mode) =>
val fields = trapSink.sinkFields
// TODO: with diamonds in the graph, this might not be correct
val pipe = RichPipe.assignName(fork.toPipe[T](fields)(flowDef, mode, trapSink.setter))
flowDef.addTrap(pipe, trapSink.createTap(Write)(mode))
TypedPipe.from[U](pipe, fields)(flowDef, mode, conv)
})
}
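// Illustrative end-to-end sketch (not part of the original source): writing a pipe
// with the Execution API. TypedTsv is the usual scalding sink; the paths are
// placeholders, and nothing runs until the Execution itself is run (e.g. from an
// ExecutionApp).
//
//   val lines: TypedPipe[String] = TypedPipe.from(TextLine("events.txt"))
//   val job: Execution[Unit] = lines.writeExecution(TypedTsv[String]("out"))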
/**
* This object is the EmptyTypedPipe. Prefer to create it with TypedPipe.empty
*/
final case object EmptyTypedPipe extends TypedPipe[Nothing] {
override def aggregate[B, C](agg: Aggregator[Nothing, B, C]): ValuePipe[C] = EmptyValue
// Cross product with empty is always empty.
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(Nothing, U)] = this
override def distinct(implicit ord: Ordering[_ >: Nothing]) = this
override def flatMap[U](f: Nothing => TraversableOnce[U]) = this
override def fork: TypedPipe[Nothing] = this
override def forceToDisk = this
override def leftCross[V](p: ValuePipe[V]) = this
override def limit(count: Int) = this
override def debug: TypedPipe[Nothing] = this
override def ++[U >: Nothing](other: TypedPipe[U]): TypedPipe[U] = other
override def asPipe[U >: Nothing](fieldNames: Fields)(implicit fd: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe =
IterableSource(Iterable.empty, fieldNames)(setter, singleConverter[U]).read(fd, mode)
override def toIterableExecution: Execution[Iterable[Nothing]] = Execution.from(Iterable.empty)
override def forceToDiskExecution: Execution[TypedPipe[Nothing]] = Execution.from(this)
override def sum[U >: Nothing](implicit plus: Semigroup[U]): ValuePipe[U] = EmptyValue
override def sumByLocalKeys[K, V](implicit ev: Nothing <:< (K, V), sg: Semigroup[V]) = this
override def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[Nothing] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
this
}
/**
* Creates a TypedPipe from an Iterable[T]. Prefer TypedPipe.from.
*
* If you avoid toPipe, this class is more efficient than IterableSource.
*/
final case class IterablePipe[T](iterable: Iterable[T]) extends TypedPipe[T] {
override def aggregate[B, C](agg: Aggregator[T, B, C]): ValuePipe[C] =
Some(iterable)
.filterNot(_.isEmpty)
.map(it => LiteralValue(agg(it)))
.getOrElse(EmptyValue)
override def ++[U >: T](other: TypedPipe[U]): TypedPipe[U] = other match {
case IterablePipe(thatIter) => IterablePipe(iterable ++ thatIter)
case EmptyTypedPipe => this
case _ if iterable.isEmpty => other
case _ => MergedTypedPipe(this, other)
}
override def cross[U](tiny: TypedPipe[U]) =
tiny.flatMap { u => iterable.map { (_, u) } }
override def filter(f: T => Boolean): TypedPipe[T] =
iterable.filter(f) match {
case eit if eit.isEmpty => EmptyTypedPipe
case filtered => IterablePipe(filtered)
}
/**
* When flatMap is called on an IterablePipe, we defer to make sure that f is
* applied lazily, which avoids OOM issues when the returned value from the
* map is larger than the input
*/
override def flatMap[U](f: T => TraversableOnce[U]) =
toSourcePipe.flatMap(f)
override def fork: TypedPipe[T] = this
override def forceToDisk = this
override def limit(count: Int): TypedPipe[T] = IterablePipe(iterable.take(count))
/**
* When map is called on an IterablePipe, we defer to make sure that f is
* applied lazily, which avoids OOM issues when the returned value from the
* map is larger than the input
*/
override def map[U](f: T => U): TypedPipe[U] =
toSourcePipe.map(f)
override def forceToDiskExecution: Execution[TypedPipe[T]] = Execution.from(this)
override def sum[U >: T](implicit plus: Semigroup[U]): ValuePipe[U] =
Semigroup.sumOption[U](iterable).map(LiteralValue(_))
.getOrElse(EmptyValue)
override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]) = {
val kvit = raiseTo[(K, V)] match {
case IterablePipe(kviter) => kviter
case p => sys.error("This must be IterablePipe: " + p.toString)
}
IterablePipe(kvit.groupBy(_._1)
// use map to force this so it is not lazy.
.map {
case (k, kvs) =>
// These lists are never empty, get is safe.
(k, Semigroup.sumOption(kvs.iterator.map(_._2)).get)
})
}
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe =
// It is slightly more efficient to use this rather than toSourcePipe.toPipe(fieldNames)
IterableSource[U](iterable, fieldNames)(setter, singleConverter[U]).read(flowDef, mode)
private[this] def toSourcePipe =
TypedPipe.from(
IterableSource[T](iterable, new Fields("0"))(singleSetter, singleConverter))
override def toIterableExecution: Execution[Iterable[T]] = Execution.from(iterable)
}
/**
* This is an implementation detail (and should be marked private)
*/
object TypedPipeFactory {
def apply[T](next: (FlowDef, Mode) => TypedPipe[T]): TypedPipeFactory[T] = {
val memo = new java.util.WeakHashMap[FlowDef, (Mode, TypedPipe[T])]()
val fn = { (fd: FlowDef, m: Mode) =>
memo.synchronized {
memo.get(fd) match {
case null =>
val res = next(fd, m)
memo.put(fd, (m, res))
res
case (memoMode, pipe) if memoMode == m => pipe
case (memoMode, pipe) =>
sys.error("FlowDef reused on different Mode. Original: %s, now: %s".format(memoMode, m))
}
}
}
new TypedPipeFactory(NoStackAndThen(fn.tupled))
}
def unapply[T](tp: TypedPipe[T]): Option[NoStackAndThen[(FlowDef, Mode), TypedPipe[T]]] =
tp match {
case tp: TypedPipeFactory[_] =>
Some(tp.asInstanceOf[TypedPipeFactory[T]].next)
case _ => None
}
}
/**
* This is a TypedPipe that delays having access
* to the FlowDef and Mode until toPipe is called
*/
class TypedPipeFactory[T] private (@transient val next: NoStackAndThen[(FlowDef, Mode), TypedPipe[T]]) extends TypedPipe[T] {
private[this] def andThen[U](fn: TypedPipe[T] => TypedPipe[U]): TypedPipe[U] =
new TypedPipeFactory(next.andThen(fn))
override def cross[U](tiny: TypedPipe[U]) = andThen(_.cross(tiny))
override def filter(f: T => Boolean): TypedPipe[T] = andThen(_.filter(f))
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] = andThen(_.flatMap(f))
override def map[U](f: T => U): TypedPipe[U] = andThen(_.map(f))
override def limit(count: Int) = andThen(_.limit(count))
override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]) =
andThen(_.sumByLocalKeys[K, V])
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]) =
// unwrap in a loop, without recursing
unwrap(this).toPipe[U](fieldNames)(flowDef, mode, setter)
override def toIterableExecution: Execution[Iterable[T]] = Execution.getConfigMode.flatMap {
case (conf, mode) =>
// This can only terminate in TypedPipeInst, which will
// keep the reference to this flowDef
val flowDef = new FlowDef
val nextPipe = unwrap(this)(flowDef, mode)
nextPipe.toIterableExecution
}
@annotation.tailrec
private def unwrap(pipe: TypedPipe[T])(implicit flowDef: FlowDef, mode: Mode): TypedPipe[T] = pipe match {
case TypedPipeFactory(n) => unwrap(n(flowDef, mode))
case tp => tp
}
}
/**
* This is an instance of a TypedPipe that wraps a cascading Pipe
*/
class TypedPipeInst[T] private[scalding] (@transient inpipe: Pipe,
fields: Fields,
@transient localFlowDef: FlowDef,
@transient val mode: Mode,
flatMapFn: FlatMapFn[T]) extends TypedPipe[T] {
/**
* If this TypedPipeInst represents a Source that was opened with no
* filtering or mapping
*/
private[scalding] def openIfHead: Option[(Tap[_, _, _], Fields, FlatMapFn[T])] =
// Keep this local
if (inpipe.getPrevious.isEmpty) {
val srcs = localFlowDef.getSources
if (srcs.containsKey(inpipe.getName)) {
Some((srcs.get(inpipe.getName), fields, flatMapFn))
} else {
sys.error("Invalid head: pipe has no previous, but there is no registered source.")
}
} else None
def checkMode(m: Mode): Unit =
// This check is not likely to fail unless someone does something really strange.
// for historical reasons, it is not checked by the typed system
assert(m == mode,
"Cannot switch Mode between TypedSource.read and toPipe calls. Pipe: %s, call: %s".format(mode, m))
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] = tiny match {
case EmptyTypedPipe => EmptyTypedPipe
case MergedTypedPipe(l, r) => MergedTypedPipe(cross(l), cross(r))
case IterablePipe(iter) => flatMap { t => iter.map { (t, _) } }
    // This should work for any TypedPipe. TODO: should we just call this?
case _ => map(((), _)).hashJoin(tiny.groupAll).values
}
override def filter(f: T => Boolean): TypedPipe[T] =
new TypedPipeInst[T](inpipe, fields, localFlowDef, mode, flatMapFn.filter(f))
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
new TypedPipeInst[U](inpipe, fields, localFlowDef, mode, flatMapFn.flatMap(f))
override def map[U](f: T => U): TypedPipe[U] =
new TypedPipeInst[U](inpipe, fields, localFlowDef, mode, flatMapFn.map(f))
/**
* Avoid this method if possible. Prefer to stay in the TypedAPI until
* you write out.
*
* This actually runs all the pure map functions in one Cascading Each
* This approach is more efficient than untyped scalding because we
* don't use TupleConverters/Setters after each map.
*/
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, m: Mode, setter: TupleSetter[U]): Pipe = {
import Dsl.flowDefToRichFlowDef
checkMode(m)
flowDef.mergeFrom(localFlowDef)
RichPipe(inpipe).flatMapTo[TupleEntry, U](fields -> fieldNames)(flatMapFn)
}
override def toIterableExecution: Execution[Iterable[T]] =
openIfHead match {
// TODO: it might be good to apply flatMaps locally,
// since we obviously need to iterate all,
// but filters we might want the cluster to apply
// for us. So unwind until you hit the first filter, snapshot,
// then apply the unwound functions
case Some((tap, fields, Converter(conv))) =>
// To convert from java iterator to scala below
import scala.collection.JavaConverters._
Execution.getConfigMode.map {
case (conf, m) =>
// Verify the mode has not changed due to invalid TypedPipe DAG construction
checkMode(m)
new Iterable[T] {
def iterator = m.openForRead(conf, tap).asScala.map(tup => conv(tup.selectEntry(fields)))
}
}
case _ => forceToDiskExecution.flatMap(_.toIterableExecution)
}
}
final case class MergedTypedPipe[T](left: TypedPipe[T], right: TypedPipe[T]) extends TypedPipe[T] {
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] = tiny match {
case EmptyTypedPipe => EmptyTypedPipe
case _ => MergedTypedPipe(left.cross(tiny), right.cross(tiny))
}
override def debug: TypedPipe[T] =
MergedTypedPipe(left.debug, right.debug)
override def filter(f: T => Boolean): TypedPipe[T] =
MergedTypedPipe(left.filter(f), right.filter(f))
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
MergedTypedPipe(left.flatMap(f), right.flatMap(f))
override def sample(percent: Double, seed: Long): TypedPipe[T] =
MergedTypedPipe(left.sample(percent, seed), right.sample(percent, seed))
override def sumByLocalKeys[K, V](implicit ev: T <:< (K, V), sg: Semigroup[V]): TypedPipe[(K, V)] =
MergedTypedPipe(left.sumByLocalKeys, right.sumByLocalKeys)
override def map[U](f: T => U): TypedPipe[U] =
MergedTypedPipe(left.map(f), right.map(f))
override def fork: TypedPipe[T] =
MergedTypedPipe(left.fork, right.fork)
@annotation.tailrec
private def flattenMerge(toFlatten: List[TypedPipe[T]], acc: List[TypedPipe[T]])(implicit fd: FlowDef, m: Mode): List[TypedPipe[T]] =
toFlatten match {
case MergedTypedPipe(l, r) :: rest => flattenMerge(l :: r :: rest, acc)
case TypedPipeFactory(next) :: rest => flattenMerge(next(fd, m) :: rest, acc)
case nonmerge :: rest => flattenMerge(rest, nonmerge :: acc)
case Nil => acc
}
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]): Pipe = {
/*
* Cascading can't handle duplicate pipes in merges. What we do here is see if any pipe appears
* multiple times and if it does we can do self merges using flatMap.
* Finally, if there is actually more than one distinct TypedPipe, we use the cascading
* merge primitive. When using the merge primitive we rename all pipes going into it as
* Cascading cannot handle multiple pipes with the same name.
*/
val merged = flattenMerge(List(this), Nil)
// check for repeated pipes
.groupBy(identity)
.mapValues(_.size)
.map {
case (pipe, 1) => pipe
case (pipe, cnt) => pipe.flatMap(List.fill(cnt)(_).iterator)
}
.map(_.toPipe[U](fieldNames)(flowDef, mode, setter))
.toList
if (merged.size == 1) {
// there is no actual merging here, no need to rename:
merged.head
} else {
new cascading.pipe.Merge(merged.map(RichPipe.assignName): _*)
}
}
override def hashCogroup[K, V, W, R](smaller: HashJoinable[K, W])(joiner: (K, V, Iterable[W]) => Iterator[R])(implicit ev: TypedPipe[T] <:< TypedPipe[(K, V)]): TypedPipe[(K, R)] =
MergedTypedPipe(left.hashCogroup(smaller)(joiner), right.hashCogroup(smaller)(joiner))
}
case class WithOnComplete[T](typedPipe: TypedPipe[T], fn: () => Unit) extends TypedPipe[T] {
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]) = {
val pipe = typedPipe.toPipe[U](fieldNames)(flowDef, mode, setter)
new Each(pipe, Fields.ALL, new CleanupIdentityFunction(fn), Fields.REPLACE)
}
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] =
WithOnComplete(typedPipe.cross(tiny), fn)
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
WithOnComplete(typedPipe.flatMap(f), fn)
}
case class WithDescriptionTypedPipe[T](typedPipe: TypedPipe[T], description: String) extends TypedPipe[T] {
override def asPipe[U >: T](fieldNames: Fields)(implicit flowDef: FlowDef, mode: Mode, setter: TupleSetter[U]) = {
val pipe = typedPipe.toPipe[U](fieldNames)(flowDef, mode, setter)
RichPipe.setPipeDescriptions(pipe, List(description))
}
override def cross[U](tiny: TypedPipe[U]): TypedPipe[(T, U)] =
WithDescriptionTypedPipe(typedPipe.cross(tiny), description)
override def flatMap[U](f: T => TraversableOnce[U]): TypedPipe[U] =
WithDescriptionTypedPipe(typedPipe.flatMap(f), description)
}
/**
* This class is for the syntax enrichment enabling
* .joinBy on TypedPipes. To access this, do
* import Syntax.joinOnMappablePipe
*/
class MappablePipeJoinEnrichment[T](pipe: TypedPipe[T]) {
def joinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (T, U)] = pipe.groupBy(g).withReducers(reducers).join(smaller.groupBy(h))
def leftJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (T, Option[U])] = pipe.groupBy(g).withReducers(reducers).leftJoin(smaller.groupBy(h))
def rightJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (Option[T], U)] = pipe.groupBy(g).withReducers(reducers).rightJoin(smaller.groupBy(h))
def outerJoinBy[K, U](smaller: TypedPipe[U])(g: (T => K), h: (U => K), reducers: Int = -1)(implicit ord: Ordering[K]): CoGrouped[K, (Option[T], Option[U])] = pipe.groupBy(g).withReducers(reducers).outerJoin(smaller.groupBy(h))
}
/**
* These are named syntax extensions that users can optionally import.
* Avoid import Syntax._
*/
object Syntax {
implicit def joinOnMappablePipe[T](p: TypedPipe[T]): MappablePipeJoinEnrichment[T] = new MappablePipeJoinEnrichment(p)
}
| avp1984/scalding | scalding-core/src/main/scala/com/twitter/scalding/typed/TypedPipe.scala | Scala | apache-2.0 | 44,218 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009, 2010 Mark Harrah
*/
package sbt
import java.io.{ File, Writer }
import inc.Relations
object DotGraph {
private def fToString(roots: Iterable[File]): (File => String) =
(x: File) => sourceToString(roots, x)
def sources(relations: Relations, outputDirectory: File, sourceRoots: Iterable[File]) {
val toString = fToString(sourceRoots)
apply(relations, outputDirectory, toString, toString)
}
def packages(relations: Relations, outputDirectory: File, sourceRoots: Iterable[File]) {
val packageOnly = (path: String) =>
{
val last = path.lastIndexOf(File.separatorChar)
val packagePath = (if (last > 0) path.substring(0, last) else path).trim
if (packagePath.isEmpty) "" else packagePath.replace(File.separatorChar, '.')
}
val toString = packageOnly compose fToString(sourceRoots)
apply(relations, outputDirectory, toString, toString)
}
def apply(relations: Relations, outputDir: File, sourceToString: File => String, externalToString: File => String) {
def file(name: String) = new File(outputDir, name)
IO.createDirectory(outputDir)
generateGraph(file("int-source-deps"), "dependencies", relations.internalSrcDep, sourceToString, sourceToString)
generateGraph(file("binary-dependencies"), "externalDependencies", relations.binaryDep, externalToString, sourceToString)
}
def generateGraph[Key, Value](file: File, graphName: String, relation: Relation[Key, Value],
keyToString: Key => String, valueToString: Value => String) {
import scala.collection.mutable.{ HashMap, HashSet }
val mappedGraph = new HashMap[String, HashSet[String]]
for ((key, values) <- relation.forwardMap; keyString = keyToString(key); value <- values)
mappedGraph.getOrElseUpdate(keyString, new HashSet[String]) += valueToString(value)
val mappings =
for {
(dependsOn, dependants) <- mappedGraph.toSeq
dependant <- dependants
if dependant != dependsOn && !dependsOn.isEmpty && !dependant.isEmpty
} yield "\"" + dependant + "\" -> \"" + dependsOn + "\""
val lines =
("digraph " + graphName + " {") +:
mappings :+
"}"
IO.writeLines(file, lines)
}
def sourceToString(roots: Iterable[File], source: File) =
relativized(roots, source).trim.stripSuffix(".scala").stripSuffix(".java")
private def relativized(roots: Iterable[File], path: File): String =
{
val relativized = roots.flatMap(root => IO.relativize(root, path))
val shortest = (Int.MaxValue /: relativized)(_ min _.length)
relativized.find(_.length == shortest).getOrElse(path.getName)
}
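  // Illustrative usage sketch (not part of the original source): `relations` is an
  // assumed incremental-compile Relations value; paths are placeholders.
  //
  //   DotGraph.sources(relations, new File("target/dep-graphs"), Seq(new File("src/main/scala")))
  //   // writes target/dep-graphs/int-source-deps and target/dep-graphs/binary-dependencies,
  //   // which can then be rendered with graphviz, e.g. `dot -Tpng int-source-deps -o deps.png`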
} | niktrop/sbt | main/actions/src/main/scala/sbt/DotGraph.scala | Scala | bsd-3-clause | 2,691 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.tailhq.dynaml.models.lm
import breeze.linalg.DenseVector
import io.github.tailhq.dynaml.evaluation.Metrics
import io.github.tailhq.dynaml.models.LinearModel
import io.github.tailhq.dynaml.optimization.GloballyOptimizable
import scala.util.Random
/**
* @author tailhq date: 4/4/16.
*
* A generalised linear model for local data sets.
* This is extended for Regression in [[RegularizedGLM]]
* and for binary classification in [[LogisticGLM]] and [[ProbitGLM]]
*/
abstract class GeneralizedLinearModel[T](
data: Stream[(DenseVector[Double], Double)], numPoints: Int,
map: (DenseVector[Double]) => DenseVector[Double] = identity[DenseVector[Double]])
extends GenericGLM[Stream[(DenseVector[Double], Double)], T](data, numPoints, map)
with GloballyOptimizable {
override protected val g: Stream[(DenseVector[Double], Double)] = data
val task: String
override val h: (Double) => Double = identity
featureMap = map
def dimensions = featureMap(data.head._1).length
/**
* Initialize parameters to a vector of ones.
* */
override def initParams(): DenseVector[Double] =
DenseVector.ones[Double](dimensions + 1)
override protected var params: DenseVector[Double] = initParams()
override protected var hyper_parameters: List[String] =
List("regularization")
/**
   * Calculates the energy of the configuration;
* in most global optimization algorithms
* we aim to find an approximate value of
* the hyper-parameters such that this function
* is minimized.
*
* @param h The value of the hyper-parameters in the configuration space
* @param options Optional parameters about configuration
* @return Configuration Energy E(h)
**/
override def energy(h: Map[String, Double],
options: Map[String, String]): Double = {
setState(h)
val folds: Int = options("folds").toInt
val shuffle = Random.shuffle((1L to numPoints).toList)
val avg_metrics: DenseVector[Double] = (1 to folds).map { a =>
//For the ath fold
//partition the data
//ceil(a-1*npoints/folds) -- ceil(a*npoints/folds)
//as test and the rest as training
val test = shuffle.slice((a - 1) * numPoints / folds, a * numPoints / folds)
val (trainingData, testData) = g.zipWithIndex.partition((c) => !test.contains(c._2))
val tempParams = optimizer.optimize(numPoints,
prepareData(trainingData.map(_._1)),
initParams())
val scoresAndLabels = testData.map(_._1).map(p =>
(this.h(tempParams dot DenseVector(featureMap(p._1).toArray ++ Array(1.0))), p._2))
val metrics = Metrics("classification")(
scoresAndLabels.toList,
testData.length,
logFlag = true)
val res: DenseVector[Double] = metrics.kpi() / folds.toDouble
res
}.reduce(_+_)
//Perform n-fold cross validation
task match {
case "regression" => avg_metrics(1)
case "classification" => 1 - avg_metrics(2)
}
}
override protected var current_state: Map[String, Double] = Map("regularization" -> 0.001)
/**
* Set the model "state" which
* contains values of its hyper-parameters
* with respect to the covariance and noise
* kernels.
* */
def setState(s: Map[String, Double]): this.type ={
this.setRegParam(s("regularization"))
current_state = Map("regularization" -> s("regularization"))
this
}
}
object GeneralizedLinearModel {
/**
* Create a generalized linear model.
*
* @param data The training data as a stream of tuples
* @param task Set to 'regression' or 'classification'
* @param map Feature map or basis functions
* @param modeltype Set to either 'logit' or 'probit'
*
* */
def apply[T](data: Stream[(DenseVector[Double], Double)],
task: String = "regression",
map: (DenseVector[Double]) => DenseVector[Double] =
identity[DenseVector[Double]],
modeltype: String = "") = task match {
case "regression" => new RegularizedGLM(data, data.length, map).asInstanceOf[GeneralizedLinearModel[T]]
case "classification" => modeltype match {
case "probit" => new ProbitGLM(data, data.length, map).asInstanceOf[GeneralizedLinearModel[T]]
case _ => new LogisticGLM(data, data.length, map).asInstanceOf[GeneralizedLinearModel[T]]
}
}
}
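// Illustrative sketch (not part of the original source): fitting a logistic model via
// the factory. `trainingData` is an assumed stream of (features, label) pairs with
// labels in {0, 1}.
//
//   val model = GeneralizedLinearModel(trainingData, task = "classification", modeltype = "logit")
//   model.setState(Map("regularization" -> 0.01))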
| mandar2812/DynaML | dynaml-core/src/main/scala/io/github/tailhq/dynaml/models/lm/GeneralizedLinearModel.scala | Scala | apache-2.0 | 5,178 |
package squants.experimental.unitgroups.si.strict
import org.scalatest.{FlatSpec, Matchers}
import squants.energy.Energy
import squants.mass.Mass
import squants.space.Length
import squants.experimental.unitgroups.ImplicitDimensions.space._
import squants.experimental.unitgroups.ImplicitDimensions.energy._
import squants.experimental.unitgroups.ImplicitDimensions.mass._
import squants.experimental.unitgroups.UnitGroup
import squants.experimental.unitgroups.si.strict.implicits._
class StrictSiSpec extends FlatSpec with Matchers {
behavior of "Strict SI UnitGroups"
"The Strict SI UnitGroups" should "be implicitly derived for arbitrary dimensions" in {
implicitly[UnitGroup[Length]].units should not be empty
implicitly[UnitGroup[Mass]].units should not be empty
implicitly[UnitGroup[Energy]].units should not be empty
}
}
| underscorenico/squants | shared/src/test/scala/squants/experimental/unitgroups/si/strict/StrictSiSpec.scala | Scala | apache-2.0 | 850 |
/*
* Copyright 2011-2014 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.ebpi.yaidom.queryapitests
import java.net.URI
import scala.collection.immutable
import org.junit.Test
import org.scalatest.Suite
import nl.ebpi.yaidom.core.EName
import nl.ebpi.yaidom.queryapi.DocumentApi
import nl.ebpi.yaidom.queryapi.HasENameApi.ToHasElemApi
import nl.ebpi.yaidom.queryapi.ScopedElemApi
import nl.ebpi.yaidom.queryapi.XmlBaseSupport
/**
* XML Base test case. It tests some expectations about XML Base support for different element implementations.
* Hence, for DOM and Scala XML wrapper elements, it tests expectations about XML Base support for the underlying libraries.
*
* @author Chris de Vreeze
*/
abstract class AbstractXmlBaseTest extends Suite {
private val XmlBaseEName = EName("http://www.w3.org/XML/1998/namespace", "base")
private val XLinkNs = "http://www.w3.org/1999/xlink"
type E <: ScopedElemApi[E]
type E2 <: ScopedElemApi[E2]
protected def getDocument(path: String, docUri: URI): DocumentApi[E]
protected def getDocument(path: String): DocumentApi[E]
protected def getBaseUri(elem: E): URI
protected def getParentBaseUri(elem: E): URI
protected def getDocumentUri(elem: E): URI
protected def getReverseAncestryOrSelf(elem: E): immutable.IndexedSeq[E2]
// Naive resolveUri method
protected def resolveUri(uri: URI, baseUriOption: Option[URI]): URI = {
val baseUri = baseUriOption.getOrElse(new URI(""))
if (uri.toString.isEmpty) baseUri else baseUri.resolve(uri)
}
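  // For example (illustrative, not part of the original source):
  // resolveUri(new URI("pick1.xml"), Some(new URI("http://example.org/hotpicks/")))
  // returns http://example.org/hotpicks/pick1.xml, while an empty URI resolves to the
  // base URI itself.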
@Test def testXmlBase(): Unit = {
val doc = getDocument("/nl/ebpi/yaidom/queryapitests/xmlBaseTestFile.xml")
testXmlBase(doc.documentElement)
doc.documentElement.findAllElemsOrSelf.foreach(e => testXmlBaseProperty1(e))
doc.documentElement.findAllElemsOrSelf.foreach(e => testXmlBaseProperty2(e))
}
@Test def testXmlBase2(): Unit = {
val doc = getDocument("/nl/ebpi/yaidom/queryapitests/xmlBaseTestFile.xml")
val elem = doc.documentElement
testXmlBase(elem)
doc.documentElement.findAllElemsOrSelf.foreach(e => testXmlBaseProperty1(e))
doc.documentElement.findAllElemsOrSelf.foreach(e => testXmlBaseProperty2(e))
}
@Test def testXmlBase3(): Unit = {
val doc = getDocument("/nl/ebpi/yaidom/queryapitests/xmlBaseTestFile.xml", new URI("http://bogusBaseUri"))
val elem = doc.documentElement
testXmlBase(elem)
doc.documentElement.findAllElemsOrSelf.foreach(e => testXmlBaseProperty1(e))
doc.documentElement.findAllElemsOrSelf.foreach(e => testXmlBaseProperty2(e))
}
@Test def testXmlBase4(): Unit = {
val doc = getDocument("/nl/ebpi/yaidom/queryapitests/xmlBaseTestFile.xml")
val elem = doc.documentElement.findElem(_.resolvedName == EName("olist")).get
testXmlBaseOfNonRootElem(elem)
doc.documentElement.findAllElemsOrSelf.foreach(e => testXmlBaseProperty1(e))
doc.documentElement.findAllElemsOrSelf.foreach(e => testXmlBaseProperty2(e))
}
@Test def testOtherXmlBase(): Unit = {
val elem = getTestElem
assertResult(new URI("http://example.org/wine/")) {
getBaseUri(elem)
}
assertResult(new URI("http://example.org/wine/rose")) {
val e = elem.getChildElem(_.localName == "e2")
getBaseUri(e)
}
elem.findAllElemsOrSelf.foreach(e => testXmlBaseProperty1(e))
elem.findAllElemsOrSelf.foreach(e => testXmlBaseProperty2(e))
}
private def testXmlBase(elem: E): Unit = {
assertResult(2) {
elem.filterElemsOrSelf(e => e.attributeOption(XmlBaseEName).isDefined).size
}
assertResult(new URI("http://example.org/today/")) {
getBaseUri(elem)
}
assertResult(Set(new URI("http://example.org/hotpicks/"))) {
elem.filterElems(EName("olist")).map(e => getBaseUri(e)).toSet
}
assertResult(Set(
new URI("http://example.org/today/new.xml"),
new URI("http://example.org/hotpicks/pick1.xml"),
new URI("http://example.org/hotpicks/pick2.xml"),
new URI("http://example.org/hotpicks/pick3.xml"))) {
val uris =
elem.filterElems(EName("link")) map { e =>
val href = new URI(e.attribute(EName(XLinkNs, "href")))
resolveUri(href, Some(getBaseUri(e)))
}
uris.toSet
}
}
private def testXmlBaseOfNonRootElem(elem: E): Unit = {
require(elem.resolvedName == EName("olist"))
assertResult(new URI("http://example.org/hotpicks/")) {
getBaseUri(elem)
}
assertResult(Set(
new URI("http://example.org/hotpicks/pick1.xml"),
new URI("http://example.org/hotpicks/pick2.xml"),
new URI("http://example.org/hotpicks/pick3.xml"))) {
val uris =
elem.filterElems(EName("link")) map { e =>
val href = new URI(e.attribute(EName(XLinkNs, "href")))
resolveUri(href, Some(getBaseUri(e)))
}
uris.toSet
}
}
private def getTestElem: E = {
val doc = getDocument("/nl/ebpi/yaidom/queryapitests/miniXmlBaseTestFile.xml", new URI(""))
doc.documentElement
}
/**
* Tests an XML Base property relating it to the document URI and the ancestry-or-self.
*/
private def testXmlBaseProperty1(elem: E): Unit = {
val ancestorsOrSelf = getReverseAncestryOrSelf(elem)
val expectedBaseUri =
ancestorsOrSelf.foldLeft(getDocumentUri(elem)) {
case (currBaseUri, e) =>
e.attributeOption(XmlBaseEName).map(s => resolveUri(new URI(s), Some(currBaseUri))).getOrElse(currBaseUri)
}
assertResult(expectedBaseUri) {
getBaseUri(elem)
}
}
/**
* Tests an XML Base property relating it to the parent base URI and the element itself.
*/
private def testXmlBaseProperty2(elem: E): Unit = {
val parentBaseUri = getParentBaseUri(elem)
val expectedBaseUri =
elem.attributeOption(XmlBaseEName).map(s => resolveUri(new URI(s), Some(parentBaseUri))).getOrElse(parentBaseUri)
assertResult(expectedBaseUri) {
getBaseUri(elem)
}
}
}
| EBPI/yaidom | src/test/scala/nl/ebpi/yaidom/queryapitests/AbstractXmlBaseTest.scala | Scala | apache-2.0 | 6,505 |
package com.twitter.finagle.client
import com.twitter.conversions.time._
import com.twitter.finagle.pool.{WatermarkPool, CachingPool, BufferingPool}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.util.DefaultTimer
import com.twitter.util.{Timer, Duration}
/**
* Create a watermark pool backed by a caching pool. This is the
* default pooling setup of Finagle.
*
* @param low The low watermark used in the Watermark pool. If there
* is sufficient request concurrency, no fewer connections will be
* maintained by the pool.
*
* @param high The high watermark. The pool will not maintain more
* connections than this.
*
* @param bufferSize Specifies the size of the lock-free buffer in front of
 * the pool. Skipped if 0.
*
* @param idleTime The amount of idle time for which a connection is
* cached. This is applied to connections that number greater than
* the low watermark but fewer than the high.
*
* @param maxWaiters The maximum number of connection requests that
* are queued when the connection concurrency exceeds the high
* watermark.
*/
case class DefaultPool[Req, Rep](
low: Int = 0,
high: Int = Int.MaxValue,
bufferSize: Int = 0,
idleTime: Duration = Duration.Top,
maxWaiters: Int = Int.MaxValue,
timer: Timer = DefaultTimer.twitter
) extends (StatsReceiver => Transformer[Req, Rep]) {
def apply(statsReceiver: StatsReceiver) = inputFactory => {
val factory =
if (idleTime <= 0.seconds || high <= low) inputFactory else
new CachingPool(inputFactory, high - low, idleTime, timer, statsReceiver)
val pool = new WatermarkPool(factory, low, high, statsReceiver, maxWaiters)
if (bufferSize <= 0) pool else new BufferingPool(pool, bufferSize)
}
}
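// Illustrative usage sketch (not part of the original file): DefaultPool is a
// function from a StatsReceiver to a ServiceFactory transformer, so it is applied
// in two steps. The names `stats` and `rawFactory` below are assumed placeholders
// for a concrete StatsReceiver and an unpooled ServiceFactory[Req, Rep].
//
//   val pooling = DefaultPool[Request, Response](
//     low = 10,              // keep up to 10 cached connections below the watermark
//     high = 100,            // never maintain more than 100 connections
//     idleTime = 30.seconds, // cache idle connections above `low` for 30 seconds
//     maxWaiters = 1000)     // queue at most 1000 requests once `high` is reached
//   val pooledFactory = pooling(stats)(rawFactory)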
| firebase/finagle | finagle-core/src/main/scala/com/twitter/finagle/client/DefaultPool.scala | Scala | apache-2.0 | 1,771 |
package uk.org.openeyes.jsonschema.core
import java.net.URI
import org.json4s._
import java.io.{File,FileReader,Reader}
class SchemaLoader(parse: (JsonInput, Boolean) => JValue) {
def loadFromDir(path: String): Map[String, JValue] = {
val root = new File(path)
if (!root.isDirectory) throw new IllegalArgumentException("Path '" + path + "' is not a directory")
val rootLen = root.getAbsolutePath.length
findFiles(root).map(f => {
JField(f.getAbsolutePath.substring(rootLen + 1).replaceAll("""\\.json$""", "").replaceAll("/", "."), parse(new java.io.FileReader(f), true))
}).toMap
}
private def findFiles(base: File): Seq[File] = {
Option[Array[File]](base.listFiles).getOrElse(Array()).partition(_.isDirectory) match {
case (dirs, files) =>
files.filter(_.getPath().matches(""".*\\.json""")) ++
dirs.filter(!_.getPath().matches("""\\..*""")).flatMap(findFiles)
}
}
}
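// Illustrative usage sketch (not part of the original file): the constructor accepts
// any (JsonInput, Boolean) => JValue parser; json4s-native's parse method is one
// possible choice, and the directory path below is hypothetical.
//
//   import org.json4s.native.JsonMethods
//   val loader = new SchemaLoader((in, useBigDecimal) => JsonMethods.parse(in, useBigDecimal))
//   val schemas: Map[String, JValue] = loader.loadFromDir("/path/to/schema/dir")
//   // keys are relative file paths with ".json" stripped and "/" replaced by "."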
| openeyes/oe-json-schema | core/src/main/scala/SchemaLoader.scala | Scala | gpl-3.0 | 933 |
package org.repwatch.models
class PhoneNumber()
| csunwold/repwatch | domain/src/main/scala/org/repwatch/models/PhoneNumber.scala | Scala | gpl-3.0 | 49 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Mon Sep 21 15:05:06 EDT 2009
* @see LICENSE (MIT style license file).
*/
package scalation.animation
import scala.collection.mutable.{HashSet, ListBuffer}
import scala.math.abs
import scalation.animation.Counter.{nextE, nextN, nextT}
import scalation.scala2d.{CurvilinearShape, Ellipse, QCurve, Rectangle, R2}
import scalation.scala2d.Colors._
import scalation.scala2d.Shapes.{Dimension, Graphics2D, RectangularShape}
import scalation.util.Error
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Dgraph` class is for defining graph structures suitable for animation.
* Graphs consist of nodes, edges and tokens. Tokens can be positioned within
* nodes or on edges. A graph animation class that uses this class would typically
* move the tokens by changing there location over time. This class supports both
* directed graphs and bipartite graphs. Directed graphs contain only primary
* nodes, while bipartite graphs have both primary and secondary nodes along with
* the rule that edges must go from primaries to secondaries or secondaries to
* primaries. Bipartite graphs can be used to represent Petri Nets by letting
* Transitions be primary nodes and Places be secondary nodes. Everything can be
* labeled (nodes, edges and tokens as well as the graph itself). Nodes and edges
* may be added to/removed from graphs, while tokens may be added to/removed from
* either nodes or edges. Tokens may also be free (not bound to nodes or edges).
*/
class Dgraph (name: String = "Dgraph", bipartite: Boolean = false)
extends Error
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** This class is used to represent nodes in the graph.
* @param shape the shape of the node
* @param label the label for the created node
* @param primary whether it is a primary/transition/true or secondary/place node/false
* @param color the color of the node
* @param x the x-coordinate (top left)
* @param y the y-coordinate (top left)
* @param w the width
* @param h the height
*/
case class Node (shape: RectangularShape, label: String, primary: Boolean, var color: Color,
x: Double, y: Double, w: Double, h: Double)
{
{
shape.setFrame (x, y, w, h)
} // primary constructor
/** Node identifier
*/
val id = nextN ()
/** List of outgoing edges
*/
val outEdges = ListBuffer [Edge] ()
/** List of tokens current in this node
*/
val tokens = ListBuffer [Token] ()
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set (or reset) the color.
* @param color the new color
*/
def setColor (color2: Color) { color = color2 }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add an outgoing edge to this node.
* @param edge the edge to add
*/
def addEdge (edge: Edge): Boolean =
{
if (bipartite && edge.from.primary == edge.to.primary) {
flaw ("addEdge", "node types for edge endpoints may not be the same")
return false
} // if
outEdges += edge
true
} // addEdge
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Remove an outgoing edge from this node.
* @param edge the edge to remove
*/
def removeEdge (edge: Edge) { outEdges -= edge }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
        /** Add a token to this node.
* @param token the token to add
*/
def addToken (token: Token) { tokens += token }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Remove a token from this node.
* @param token the token to remove
*/
def removeToken (token: Token) { tokens -= token }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert this node to a string.
*/
override def toString = "Node " + label + " [ " + id + " ]"
} // Node class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** This class is used to represent edges in the graph. If bend = 0, a
* straight line is created, otherwise a quadratic curve is created.
* @param shape the shape (line/curve) of the edge
* @param label the label for the created edge
* @param primary whether it is a primary/transition/true or secondary/place node/false
* @param color the color of the edge
* @param from the origination node
* @param to the destination node
* @param bend the amount of bend in the curve
*/
case class Edge (shape: CurvilinearShape, label: String, primary: Boolean, var color: Color,
from: Node, to: Node, bend: Double)
{
/** A very small real number
*/
private val EPSILON = 1E-7
{
from.addEdge (this) // add this edge to outgoing edges of from node
var x1 = from.shape.getCenterX ()
val y1 = from.shape.getCenterY ()
var x2 = to.shape.getCenterX ()
val y2 = to.shape.getCenterY ()
if (x1 < x2) {
x1 += from.shape.getWidth () / 2.0
x2 -= to.shape.getWidth () / 2.0
} else {
x1 -= from.shape.getWidth () / 2.0
x2 += to.shape.getWidth () / 2.0
} // if
if (abs (bend) < EPSILON) {
shape.setLine (R2 (x1, y1), R2 (x2, y2))
} else {
shape.setLine (R2 (x1, y1), R2 (x2, y2), bend)
} // if
} // primary constructor
/** Edge identifier
*/
private val id = nextE ()
/** List of tokens current on this edge.
*/
val tokens = ListBuffer [Token] ()
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct an edge with no bend.
* @param shape the shape (line/curve) of the edge
* @param label the label for the created edge
* @param primary whether it is a primary/transition/true or secondary/place node/false
* @param color the color of the edge
* @param from the origination node
* @param to the destination node
*/
def this (shape: CurvilinearShape, label: String, primary: Boolean, color: Color, from: Node, to: Node)
{
this (shape, label, primary, color, from, to, 0.0)
} // Edge constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct an edge as a line with explicit coordinates.
* @param shape the shape (line) of the edge
* @param label the label for the created edge
* @param primary whether it is a primary/transition/true or secondary/place node/false
* @param color the color of the edge
* @param from the origination node
* @param to the destination node
* @param x1 the x-coordinate of the edge's start
* @param y1 the y-coordinate of the edge's start
* @param x2 the x-coordinate of the edge's end
* @param y2 the y-coordinate of the edge's end
*/
def this (shape: CurvilinearShape, label: String, primary: Boolean, color: Color, from: Node, to: Node,
x1: Double, y1: Double, x2: Double, y2: Double)
{
this (shape, label, primary, color, from, to, 0.0)
from.addEdge (this) // add this edge to outgoing edges of from node
shape.setLine (R2 (x1, y1), R2 (x2, y2))
} // Edge constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct an edge as a curve with explicit coordinates.
* @param shape the shape (curve) of the edge
* @param label the label for the created edge
* @param primary whether it is a primary/transition/true or secondary/place node/false
* @param color the color of the edge
* @param from the origination node
* @param to the destination node
* @param x1 the x-coordinate of the edge's start
* @param y1 the y-coordinate of the edge's start
* @param xc the x-coordinate of the edge's control point
* @param yc the y-coordinate of the edge's control point
* @param x2 the x-coordinate of the edge's end
* @param y2 the y-coordinate of the edge's end
*/
def this (shape: CurvilinearShape, label: String, primary: Boolean, color: Color, from: Node, to: Node,
x1: Double, y1: Double, xc: Double, yc: Double, x2: Double, y2: Double)
{
this (shape, label, primary, color, from, to, 0.0)
from.addEdge (this) // add this edge to outgoing edges of from node
shape.setLine (R2 (x1, y1), R2 (xc, yc), R2 (x2, y2))
} // Edge constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set (or reset) the color.
* @param color the new color
*/
def setColor (color2: Color) { color = color2 }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
        /** Add a token to this edge.
* @param token the token to add
*/
def addToken (token: Token) { tokens += token }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Convert this edge to a string.
*/
override def toString = "Edge " + label + " [ " + id + " ]"
} // Edge class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** This class is used to represent tokens in the graph.
* @param shape the shape of the token
* @param label the label for the created token
     *  @param primary  whether the token is primary/free/true or secondary/bound/false
* @param color the color of the token
* @param onNode the node the token is on
* @param w the width of the token
* @param h the height of the token
*/
case class Token (shape: RectangularShape, label: String, primary: Boolean, var color: Color,
var onNode: Node, val w: Double, val h: Double)
{
{
if (onNode != null) {
onNode.addToken (this)
val x = onNode.shape.getCenterX () - w / 2.0
val y = onNode.shape.getCenterY () - h / 2.0
shape.setFrame (x, y, w, h)
} // if
} // primary constructor
/** Token identifier
*/
private val id = nextT ()
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a primary/free token with explicit coordinates.
* Such tokens are free to move anywhere in the drawing panel.
* @param shape the shape of the token
* @param label the label for the created token
* @param color the color of the token
* @param x the x-coordinate of the token's location
* @param y the y-coordinate of the token's location
* @param w the width of the token
* @param h the height of the token
*/
def this (shape: RectangularShape, label: String, primary: Boolean, color: Color,
x: Double, y: Double, w: Double, h: Double)
{
this (shape, label, true, color, null, w, h)
shape.setFrame (x, y, w, h)
} // Token constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set (or reset) the color.
* @param color the new color
*/
def setColor (color2: Color) { color = color2 }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set the node the token is on.
* @param onNode2 the node the token is on
*/
def setOnNode (onNode2: Node) { onNode = onNode2 }
} // Token class
/** List of nodes in the graph
*/
val nodes = ListBuffer [Node] ()
/** List of edges in the graph
*/
val edges = ListBuffer [Edge] ()
    /** List of free tokens in the graph (bound tokens must be in a node's or edge's token list)
*/
val freeTokens = ListBuffer [Token] ()
/** Whether the nodes have been visited (internal use only)
*/
private val visited = new HashSet [Node] ()
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add a node to the graph.
* @param n the node to add
*/
def addNode (n: Node) { nodes += n }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Remove a node from the graph.
* @param n the node to remove
*/
def removeNode (n: Node) { nodes -= n }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add an edge to the graph.
* @param e the edge to add
*/
def addEdge (e: Edge) { edges += e }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Remove an edge from the graph.
* @param e the edge to remove
*/
def removeEdge (e: Edge) { edges -= e }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Add a free token to the graph.
* @param t the free token to add
*/
def addFreeToken (t: Token) { freeTokens += t }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Remove a free token from the graph.
* @param t the free token to remove
*/
def removeFreeToken (t: Token) { freeTokens -= t }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get all the root nodes (those with no incoming edges).
*/
def getRoots =
{
val roots = new ListBuffer [Node] ()
for (n <- nodes) {
var keep = true
for (e <- edges) {
if (n == e.to) keep = false
} // for
if (keep) roots += n
} // for
roots
} // getRoots
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Mark all nodes as unvisited by clearing them from the hash set.
*/
private def clearVisited () { visited.clear () }
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Recursively visit all nodes in the graph.
* @param n the current node
* @param level the recursion level
*/
def traverse (n: Node, level: Int)
{
for (i <- 0 until level) print ("\\t")
println (n) // print visited node
//visited.add (n)
val outgoing = n.outEdges
if (outgoing != null) {
for (oEdge <- outgoing) {
val next = oEdge.to
traverse (next, level + 1)
//if ( ! visited. contains (next)) traverse (next, level + 1)
} // for
} // if
} // traverse
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Traverse the graph printing out its nodes and showing connectivity by indentation.
*/
def traverseNodes ()
{
clearVisited ()
//traverse (nodes.get (0), 0) // only from node 0
for (r <- getRoots) traverse (r, 0) // from all roots
} // traverseNodes
} // Dgraph class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `Counter` object maintains counters.
*/
object Counter
{
private var nCounter = 0
private var eCounter = 0
private var tCounter = 0
def nextN () = { nCounter += 1; nCounter }
def nextE () = { eCounter += 1; eCounter }
def nextT () = { tCounter += 1; tCounter }
} // Counter object
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `DgraphTest` object to test the `Dgraph` class.
*/
object DgraphTest extends App
{
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Build and test a directed graph.
*/
private def testDirectedGraph (g: Dgraph)
{
// Create nodes
val n1 = g.Node (Ellipse (), "node1", true, red, 100, 200, 20, 20)
val n2 = g.Node (Ellipse (), "node2", true, blue, 300, 100, 20, 20)
val n3 = g.Node (Ellipse (), "node3", true, green, 300, 300, 20, 20)
val n4 = g.Node (Ellipse (), "node4", true, purple, 500, 200, 20, 20)
// Create edges
val e1 = new g.Edge (QCurve (), "edge1", true, black, n1, n2) // 120, 210, 300, 110)
n1.addEdge (e1)
val e2 = new g.Edge (QCurve (), "edge1", true, black, n1, n3) // 120, 210, 300, 310)
n1.addEdge (e2)
val e3 = new g.Edge (QCurve (), "edge1", true, black, n2, n4) // 320, 110, 500, 210)
n2.addEdge (e3)
val e4 = new g.Edge (QCurve (), "edge1", true, black, n3, n4) // 320, 310, 500, 210)
n3.addEdge (e4)
// Add the nodes and edges to the directed graph
g.addNode (n1)
g.addNode (n2)
g.addNode (n3)
g.addNode (n4)
g.addEdge (e1)
g.addEdge (e2)
g.addEdge (e3)
g.addEdge (e4)
// Traverse the directed graph printing out its nodes
g.traverseNodes ()
} // testDirectedGraph
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Build and test a bipartite graph.
*/
private def testBipartiteGraph (g: Dgraph)
{
// Create nodes
val n1 = g.Node (Ellipse (), "node1", false, orange, 100, 100, 30, 30)
val n2 = g.Node (Ellipse (), "node2", false, orange, 100, 300, 30, 30)
val n3 = g.Node (Rectangle (), "node2", true, lightgreen, 300, 185, 30, 60)
val n4 = g.Node (Ellipse (), "node4", false, red, 500, 100, 30, 30)
val n5 = g.Node (Ellipse (), "node5", false, red, 500, 300, 30, 30)
// Create edges
val e1 = new g.Edge (QCurve (), "edge1", true, black, n1, n3) // 130, 115, 300, 215)
n1.addEdge (e1)
val e2 = new g.Edge (QCurve (), "edge2", true, black, n2, n3) // 130, 315, 300, 215)
n2.addEdge (e2)
val e3 = new g.Edge (QCurve (), "edge3", true, black, n3, n4) // 330, 215, 500, 115)
n3.addEdge (e3)
val e4 = new g.Edge (QCurve (), "edge4", true, black, n3, n5) // 330, 215, 500, 315)
n3.addEdge (e4)
// Add the nodes and edges to the directed graph
g.addNode (n1)
g.addNode (n2)
g.addNode (n3)
g.addNode (n4)
g.addNode (n5)
g.addEdge (e1)
g.addEdge (e2)
g.addEdge (e3)
g.addEdge (e4)
// Traverse the directed graph printing out its nodes
g.traverseNodes ()
} // testBipartiteGraph
println ("Run DgraphTest - Bipartite Graph Test\\n")
val bg = new Dgraph ("Bipartite_Graph", true)
testBipartiteGraph (bg)
println ("Run DgraphTest - Directed Graph Test\\n")
val dg = new Dgraph ("Directed_Graph", false)
testDirectedGraph (dg)
} // DgraphTest object
| NBKlepp/fda | scalation_1.2/src/main/scala/scalation/animation/Dgraph.scala | Scala | mit | 20,104 |
package test6
trait A {
object Foo extends Module[Foo[_]]
class Foo[TResult]
def b = new B
}
| twitter-forks/sbt | sbt/src/sbt-test/compiler-project/inc-pickled-existential/src/main/scala/A.scala | Scala | bsd-3-clause | 102 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.io.File
import scala.language.postfixOps
import scala.util.Random
import org.apache.spark.sql.catalyst.plans.logical.OneRowRelation
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.test.{SharedSQLContext}
/**
 * A DataFrame is a distributed collection of data organized into named columns, similar to a table in a relational database.
*/
class DataFrameSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("analysis error should be eagerly reported") {//分析错误应急切的报告
// Eager analysis.
withSQLConf(SQLConf.DATAFRAME_EAGER_ANALYSIS.key -> "true") {
intercept[Exception] { testData.select('nonExistentName) }
      intercept[Exception] {// df.agg() applies aggregate functions
testData.groupBy('key).agg(Map("nonExistentName" -> "sum"))
}
intercept[Exception] {
testData.groupBy("nonExistentName").agg(Map("key" -> "sum"))//df.agg() 求聚合用的相关函数
}
intercept[Exception] {
testData.groupBy($"abcd").agg(Map("key" -> "sum"))
}
}
// No more eager analysis once the flag is turned off
withSQLConf(SQLConf.DATAFRAME_EAGER_ANALYSIS.key -> "false") {
testData.select('nonExistentName)
}
}
test("dataframe toString") {//dataframe转换字符串
assert(testData.toString === "[key: int, value: string]")
    // ways of getting a column by name
assert(testData("key").toString === "key")
assert($"test".toString === "test")
}
test("rename nested groupby") {//重命名嵌套查询
val df = Seq((1, (1, 1))).toDF()
/**
*+---+-----+
| _1| _2|
+---+-----+
| 1|[1,1]|
+---+-----+
*/
//df.show()
checkAnswer(
      // group by the first column and sum the first field of the second (struct) column
df.groupBy("_1").agg(sum("_2._1")).toDF("key", "total"),
Row(1, 1) :: Nil)
}
test("invalid plan toString, debug mode") {//无效的计划方法,调试模式
// Turn on debug mode so we can see invalid query plans.
import org.apache.spark.sql.execution.debug._
withSQLConf(SQLConf.DATAFRAME_EAGER_ANALYSIS.key -> "true") {
sqlContext.debug()
val badPlan = testData.select('badColumn)
assert(badPlan.toString contains badPlan.queryExecution.toString,
"toString on bad query plans should include the query execution but was:\\n" +
badPlan.toString)
}
}
test("access complex data") {//访问复杂的数据
/**
*+-----------+-----+---------+-----+
| m| s| a| b|
+-----------+-----+---------+-----+
|Map(1 -> 1)|[1,1]|[1, 1, 1]| true|
|Map(2 -> 2)|[2,2]|[2, 2, 2]|false|
+-----------+-----+---------+-----+
*/
complexData.show()
    // filter on column "a"; getItem(0) is the first element of the array
assert(complexData.filter(complexData("a").getItem(0) === 2).count() == 1)
    // filter on column "m"; getItem("1") looks up the value for key "1"
assert(complexData.filter(complexData("m").getItem("1") === 1).count() == 1)
    // filter on column "s"; getField("key") reads the struct field "key"
assert(complexData.filter(complexData("s").getField("key") === 1).count() == 1)
}
test("table scan") {//表扫描
    // the default testData dataset
checkAnswer(
testData,
testData.collect().toSeq)
}
test("empty data frame") {//空数据集
assert(sqlContext.emptyDataFrame.columns.toSeq === Seq.empty[String])
assert(sqlContext.emptyDataFrame.count() === 0)
}
test("head and take") {//头和take取值相等
assert(testData.take(2) === testData.collect().take(2))
assert(testData.head(2) === testData.collect().take(2))
assert(testData.head(2).head.schema === testData.schema)
}
test("simple explode") {//简单的把字符串分割为数组
    // tuple form
val df = Seq(Tuple1("a b c"), Tuple1("d e")).toDF("words")
/**
*+-----+
|words|
+-----+
|a b c|
| d e|
+-----+
*/
df.show()
checkAnswer(
      // simply split each string into an array of words
df.explode("words", "word") { word: String => word.split(" ").toSeq }.select('word),
Row("a") :: Row("b") :: Row("c") :: Row("d") ::Row("e") :: Nil
)
}
test("explode") {//把字符串分割为数组
val df = Seq((1, "a b c"), (2, "a b"), (3, "a")).toDF("number", "letters")
val df2 =
df.explode('letters) {
        // split using a case Row pattern; note the Tuple1 wrapping
case Row(letters: String) => letters.split(" ").map(Tuple1(_)).toSeq
}
/**
* +------+-------+---+
|number|letters| _1|
+------+-------+---+
| 1| a b c| a|
| 1| a b c| b|
| 1| a b c| c|
| 2| a b| a|
| 2| a b| b|
| 3| a| a|
+------+-------+---+
*/
df2.select('_1 as 'letter, 'number).groupBy('letter).agg(countDistinct('number)).show()
/** +------+----------------------+
|letter|COUNT(DISTINCT number)|
+------+----------------------+
| a| 3|
| b| 2|
| c| 1|
+------+----------------------+**/
checkAnswer(
df2
.select('_1 as 'letter, 'number)
.groupBy('letter)
        // countDistinct counts distinct values
        .agg(countDistinct('number)),// df.agg() applies aggregate functions
Row("a", 3) :: Row("b", 2) :: Row("c", 1) :: Nil
)
}
  // the explode function splits strings into arrays
test("SPARK-8930: explode should fail with a meaningful message if it takes a star") {
val df = Seq(("1", "1,2"), ("2", "4"), ("3", "7,8,9")).toDF("prefix", "csv")
/**
df.show()
+------+-----+
|prefix| csv|
+------+-----+
| 1| 1,2|
| 2| 4|
| 3|7,8,9|
+------+-----+*/
val e = intercept[AnalysisException] {
df.explode($"*") { case Row(prefix: String, csv: String) =>
csv.split(",").map(v => Tuple1(prefix + ":" + v)).toSeq
}.queryExecution.assertAnalyzed()
}
assert(e.getMessage.contains(
"Cannot explode *, explode can only be applied on a specific column."))
df.explode('prefix, 'csv) { case Row(prefix: String, csv: String) =>
csv.split(",").map(v => Tuple1(prefix + ":" + v)).toSeq
}.queryExecution.assertAnalyzed()
}
test("explode alias and star") {//把字符串分割为数组的别名和星号
val df = Seq((Array("a"), 1)).toDF("a", "b")
checkAnswer(
df.select(explode($"a").as("a"), $"*"),
Row("a", Seq("a"), 1) :: Nil)
}
test("selectExpr") {//选择表达式
/**
*testData.show()
+---+-----+
| 1| 1|
| 2| 2|
| 3| 3|
| 4| 4|
| 5| 5|
| 16| 16|
| 17| 17|
| 18| 18|
| 19| 19|
| 20| 20|
+---+-----+ */
//testData.selectExpr("abs(key)", "value").show()
checkAnswer(
testData.selectExpr("abs(key)", "value"),
testData.collect().map(row => Row(math.abs(row.getInt(0)), row.getString(1))).toSeq)
}
test("selectExpr with alias") {//选择表达式的别名
/**
*+---+
| k|
+---+
| 1|
| 2|
| 3|
+---+*/
testData.selectExpr("key as k").select("k").show()
checkAnswer(
      // the expression uses an alias
testData.selectExpr("key as k").select("k"),
testData.select("key").collect().toSeq)
}
test("filterExpr") {//过滤操作
checkAnswer(
testData.filter("key > 90"),
      // filter using the collected rows
testData.collect().filter(_.getInt(0) > 90).toSeq)
}
test("filterExpr using where") {//使用where过滤操作
checkAnswer(
testData.where("key > 50"),
testData.collect().filter(_.getInt(0) > 50).toSeq)
}
test("repartition") {//重新分配分区
checkAnswer(
testData.select('key).repartition(10).select('key),
testData.select('key).collect().toSeq)
}
test("coalesce") {//合并分区,引用字段使用
assert(testData.select('key).coalesce(1).rdd.partitions.size === 1)
checkAnswer(
testData.select('key).coalesce(1).select('key),
testData.select('key).collect().toSeq)
}
test("convert $\\"attribute name\\" into unresolved attribute") {//转换 为解决属性
checkAnswer(
testData.where($"key" === lit(1)).select($"value"),
Row("1"))
}
  // convert a Scala Symbol 'attrname into an unresolved attribute
test("convert Scala Symbol 'attrname into unresolved attribute") {
checkAnswer(
testData.where('key === lit(1)).select('value),
Row("1"))
}
test("select *") {
checkAnswer(
testData.select($"*"),
testData.collect().toSeq)
}
test("simple select") {//简单的选择
checkAnswer(
testData.where('key === lit(1)).select('value),
Row("1"))
}
test("select with functions") {//选择功能
checkAnswer(
testData.select(sum('value), avg('value), count(lit(1))),
Row(5050.0, 50.5, 100))
checkAnswer(
testData2.select('a + 'b, 'a < 'b),
Seq(
Row(2, false),
Row(3, true),
Row(3, false),
Row(4, false),
Row(4, false),
Row(5, false)))
checkAnswer(
testData2.select(sumDistinct('a)),
Row(6))
}
test("global sorting") {//全局排序
checkAnswer(
      // sort using column expressions
testData2.orderBy('a.asc, 'b.asc),
Seq(Row(1, 1), Row(1, 2), Row(2, 1), Row(2, 2), Row(3, 1), Row(3, 2)))
    checkAnswer(// sort using the asc/desc functions
testData2.orderBy(asc("a"), desc("b")),
Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
checkAnswer(
testData2.orderBy('a.asc, 'b.desc),
Seq(Row(1, 2), Row(1, 1), Row(2, 2), Row(2, 1), Row(3, 2), Row(3, 1)))
checkAnswer(
testData2.orderBy('a.desc, 'b.desc),
Seq(Row(3, 2), Row(3, 1), Row(2, 2), Row(2, 1), Row(1, 2), Row(1, 1)))
checkAnswer(
testData2.orderBy('a.desc, 'b.asc),
Seq(Row(3, 1), Row(3, 2), Row(2, 1), Row(2, 2), Row(1, 1), Row(1, 2)))
/**
*+---------+--------------------+
| data| nestedData|
+---------+--------------------+
|[1, 2, 3]|[WrappedArray(1, ...|
|[2, 3, 4]|[WrappedArray(2, ...|
+---------+--------------------+
*/
arrayData.toDF().show()
arrayData.toDF().orderBy('data.getItem(0).desc).show()
checkAnswer(
      // sort by the first element of the "data" array column (i.e. 1)
arrayData.toDF().orderBy('data.getItem(0).asc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).toSeq)
checkAnswer(
      // sort by the first element of the "data" array column (i.e. 1), descending
arrayData.toDF().orderBy('data.getItem(0).desc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(0)).reverse.toSeq)
checkAnswer(
arrayData.toDF().orderBy('data.getItem(1).asc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).toSeq)
checkAnswer(
arrayData.toDF().orderBy('data.getItem(1).desc),
arrayData.toDF().collect().sortBy(_.getAs[Seq[Int]](0)(1)).reverse.toSeq)
}
test("limit") {//限制
    // take 10 rows
testData.limit(10).show()
checkAnswer(
testData.limit(10),
testData.take(10).toSeq)
checkAnswer(
arrayData.toDF().limit(1),
arrayData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq)))
checkAnswer(
mapData.toDF().limit(1),
mapData.take(1).map(r => Row.fromSeq(r.productIterator.toSeq)))
}
test("except") {//返回两个结果集的差(即从左查询中返回右查询没有找到的所有非重复值)
lowerCaseData.except(upperCaseData).show()
checkAnswer(
lowerCaseData.except(upperCaseData),
Row(1, "a") ::
Row(2, "b") ::
Row(3, "c") ::
Row(4, "d") :: Nil)
checkAnswer(lowerCaseData.except(lowerCaseData), Nil)
checkAnswer(upperCaseData.except(upperCaseData), Nil)
}
test("intersect") {//返回 两个结果集的交集(即两个查询都返回的所有非重复值)
checkAnswer(
lowerCaseData.intersect(lowerCaseData),
Row(1, "a") ::
Row(2, "b") ::
Row(3, "c") ::
Row(4, "d") :: Nil)
checkAnswer(lowerCaseData.intersect(upperCaseData), Nil)
}
test("udf") {//自定义方法
val foo = udf((a: Int, b: String) => a.toString + b)
checkAnswer(
//SELECT *, foo(key, value) FROM testData
testData.select($"*", foo('key, 'value)).limit(3),
Row(1, "1", "11") :: Row(2, "2", "22") :: Row(3, "3", "33") :: Nil
)
}
test("deprecated callUdf in SQLContext") {//不赞成使用callUdf
val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
val sqlctx = df.sqlContext
sqlctx.udf.register("simpleUdf", (v: Int) => v * v)
checkAnswer(
df.select($"id", callUdf("simpleUdf", $"value")),
Row("id1", 1) :: Row("id2", 16) :: Row("id3", 25) :: Nil)
}
test("callUDF in SQLContext") {//
val df = Seq(("id1", 1), ("id2", 4), ("id3", 5)).toDF("id", "value")
val sqlctx = df.sqlContext
sqlctx.udf.register("simpleUDF", (v: Int) => v * v)
checkAnswer(
df.select($"id", callUDF("simpleUDF", $"value")),
Row("id1", 1) :: Row("id2", 16) :: Row("id3", 25) :: Nil)
}
test("withColumn") {//使用列
    // add a new column
val df = testData.toDF().withColumn("newCol", col("key") + 1)
//df.show()
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "value", "newCol"))
}
test("replace column using withColumn") {//替换使用的列
val df2 = sqlContext.sparkContext.parallelize(Array(1, 2, 3)).toDF("x")
val df3 = df2.withColumn("x", df2("x") + 1)
checkAnswer(
df3.select("x"),
Row(2) :: Row(3) :: Row(4) :: Nil)
}
test("drop column using drop") {//删除使用列
val df = testData.drop("key")
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
test("drop unknown column (no-op)") {//删除未知的列
/**
*+---+-----+
|key|value|
+---+-----+
| 1| 1|
| 2| 2|
+---+-----+
*/
testData.show()
    // drop an unknown column (no-op)
val df = testData.drop("random")
checkAnswer(
df,
testData.collect().toSeq)
assert(df.schema.map(_.name) === Seq("key", "value"))
}
  // drop a column using a column reference
test("drop column using drop with column reference") {
val col = testData("key")
    // drop the referenced column
val df = testData.drop(col)
df.show()
checkAnswer(
df,
testData.collect().map(x => Row(x.getString(1))).toSeq)
assert(df.schema.map(_.name) === Seq("value"))
}
  // drop an unknown column using a column reference
test("drop unknown column (no-op) with column reference") {
val col = Column("random")
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().toSeq)
assert(df.schema.map(_.name) === Seq("key", "value"))
}
  // drop an unknown column with the same name (no-op) using a column reference
test("drop unknown column with same name (no-op) with column reference") {
val col = Column("key")//删除未知的列,但列没有引用
val df = testData.drop(col)
checkAnswer(
df,
testData.collect().toSeq)
    // how to get the column names
assert(df.schema.map(_.name) === Seq("key", "value"))
}
  // drop a column after a join that produced duplicate columns, using a column reference
test("drop column after join with duplicate columns using column reference") {
/**
*+--------+------+
|personId|salary|
+--------+------+
| 0|2000.0|
| 1|1000.0|
+--------+------+
*/
    salary.show()// rename the "personId" column below
val newSalary = salary.withColumnRenamed("personId", "id")
/**
*+---+------+
| id|salary|
+---+------+
| 0|2000.0|
| 1|1000.0|
+---+------+
*/
newSalary.show()
val col = newSalary("id")
// this join will result in duplicate "id" columns
val joinedDf = person.join(newSalary,
person("id") === newSalary("id"), "inner")
// remove only the "id" column that was associated with newSalary
val df = joinedDf.drop(col)
checkAnswer(
df,
joinedDf.collect().map {
case Row(id: Int, name: String, age: Int, idToDrop: Int, salary: Double) =>
Row(id, name, age, salary)
}.toSeq)
    // how to get the column names
assert(df.schema.map(_.name) === Seq("id", "name", "age", "salary"))
assert(df("id") == person("id"))
}
test("withColumnRenamed") {//列的重命名
val df = testData.toDF().withColumn("newCol", col("key") + 1)
.withColumnRenamed("value", "valueRenamed")
checkAnswer(
df,
testData.collect().map { case Row(key: Int, value: String) =>
Row(key, value, key + 1)
}.toSeq)
assert(df.schema.map(_.name) === Seq("key", "valueRenamed", "newCol"))
}
test("describe") {//描述
val describeTestData = Seq(
("Bob", 16, 176),
("Alice", 32, 164),
("David", 60, 192),
("Amy", 24, 180)).toDF("name", "age", "height")
val describeResult = Seq(
Row("count", "4", "4"),
Row("mean", "33.0", "178.0"),
Row("stddev", "16.583123951777", "10.0"),
Row("min", "16", "164"),
Row("max", "60", "192"))
val emptyDescribeResult = Seq(
Row("count", "0", "0"),
Row("mean", null, null),
Row("stddev", null, null),
Row("min", null, null),
Row("max", null, null))
    // helper that returns the sequence of column names of a DataFrame
def getSchemaAsSeq(df: DataFrame): Seq[String] = df.schema.map(_.name)
    // describe two columns of the DataFrame
val describeTwoCols = describeTestData.describe("age", "height")
//getSchemaAsSeq(describeTwoCols).foreach {println }
assert(getSchemaAsSeq(describeTwoCols) === Seq("summary", "age", "height"))
checkAnswer(describeTwoCols, describeResult)
// All aggregate value should have been cast to string
describeTwoCols.collect().foreach { row =>
assert(row.get(1).isInstanceOf[String], "expected string but found " + row.get(1).getClass)
assert(row.get(2).isInstanceOf[String], "expected string but found " + row.get(2).getClass)
}
val describeAllCols = describeTestData.describe()
    // why is there an extra "summary" column?
assert(getSchemaAsSeq(describeAllCols) === Seq("summary", "age", "height"))
checkAnswer(describeAllCols, describeResult)
val describeOneCol = describeTestData.describe("age")
assert(getSchemaAsSeq(describeOneCol) === Seq("summary", "age"))
checkAnswer(describeOneCol, describeResult.map { case Row(s, d, _) => Row(s, d)} )
val describeNoCol = describeTestData.select("name").describe()
assert(getSchemaAsSeq(describeNoCol) === Seq("summary"))
checkAnswer(describeNoCol, describeResult.map { case Row(s, _, _) => Row(s)} )
val emptyDescription = describeTestData.limit(0).describe()
assert(getSchemaAsSeq(emptyDescription) === Seq("summary", "age", "height"))
checkAnswer(emptyDescription, emptyDescribeResult)
}
test("apply on query results (SPARK-5462)") {//应用查询结果
val df = testData.sqlContext.sql("select key from testData")
checkAnswer(df.select(df("key")), testData.select('key).collect().toSeq)
}
test("inputFiles") {//输入文件
withTempDir { dir =>
val df = Seq((1, 22)).toDF("a", "b")
      // parquet directory
val parquetDir = new File(dir, "parquet").getCanonicalPath
//println(parquetDir)
      // save as a parquet file
//df.write.json(parquetDir)
df.write.parquet(parquetDir)
      // read the data back as a DataFrame
val parquetDF = sqlContext.read.parquet(parquetDir)
      // check that inputFiles is nonEmpty (nonEmpty tests whether the traversable is non-empty),
      // i.e. the data was read successfully
//println(parquetDF.inputFiles.nonEmpty)
assert(parquetDF.inputFiles.nonEmpty)
val jsonDir = new File(dir, "json").getCanonicalPath
println(jsonDir)
      // write a JSON file
df.write.json(jsonDir)
      // read back the saved JSON file
val jsonDF = sqlContext.read.json(jsonDir)
      // check that reading the JSON file succeeded
assert(parquetDF.inputFiles.nonEmpty)
val unioned = jsonDF.unionAll(parquetDF).inputFiles.sorted
val allFiles = (jsonDF.inputFiles ++ parquetDF.inputFiles).toSet.toArray.sorted
assert(unioned === allFiles)
}
}
ignore("show") {
// This test case is intended ignored, but to make sure it compiles correctly
    // show() displays 20 rows by default
testData.select($"*").show()
    // show(n) displays the given number of rows
testData.select($"*").show(1000)
}
test("showString: truncate = [true, false]") {//截断
val longString = Array.fill(21)("1").mkString
val df = sqlContext.sparkContext.parallelize(Seq("1", longString)).toDF()
df.show()
val expectedAnswerForFalse = """+---------------------+
||_1 |
|+---------------------+
||1 |
||111111111111111111111|
|+---------------------+
|""".stripMargin
// assert(df.showString(10, false) === expectedAnswerForFalse)
val expectedAnswerForTrue = """+--------------------+
|| _1|
|+--------------------+
|| 1|
||11111111111111111...|
|+--------------------+
|""".stripMargin
// assert(df.showString(10, true) === expectedAnswerForTrue)
}
test("showString(negative)") {//负的
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|only showing top 0 rows
|""".stripMargin
// assert(testData.select($"*").showString(-1) === expectedAnswer)
}
test("showString(0)") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|only showing top 0 rows
|""".stripMargin
// assert(testData.select($"*").showString(0) === expectedAnswer)
}
test("showString: array") {//数组
val df = Seq(
(Array(1, 2, 3), Array(1, 2, 3)),
(Array(2, 3, 4), Array(2, 3, 4))
).toDF()
val expectedAnswer = """+---------+---------+
|| _1| _2|
|+---------+---------+
||[1, 2, 3]|[1, 2, 3]|
||[2, 3, 4]|[2, 3, 4]|
|+---------+---------+
|""".stripMargin
// assert(df.showString(10) === expectedAnswer)
}
test("showString: minimum column width") {//最小的列宽度
val df = Seq(
(1, 1),
(2, 2)
).toDF()
val expectedAnswer = """+---+---+
|| _1| _2|
|+---+---+
|| 1| 1|
|| 2| 2|
|+---+---+
|""".stripMargin
//assert(df.showString(10) === expectedAnswer)
}
test("SPARK-7319 showString") {
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|| 1| 1|
|+---+-----+
|only showing top 1 row
|""".stripMargin
// assert(testData.select($"*").showString(1) === expectedAnswer)
}
test("SPARK-7327 show with empty dataFrame") {//显示空的dataFrame
val expectedAnswer = """+---+-----+
||key|value|
|+---+-----+
|+---+-----+
|""".stripMargin
// assert(testData.select($"*").filter($"key" < 0).showString(1) === expectedAnswer)
}
  // StructType represents a table schema; StructField represents a single field
/* test("createDataFrame(RDD[Row], StructType) should convert UDTs (SPARK-6672)") {
val rowRDD = sqlContext.sparkContext.parallelize(Seq(Row(new ExamplePoint(1.0, 2.0))))
    // StructType represents a table schema; StructField represents a single field
val schema = StructType(Array(StructField("point", new ExamplePointUDT(), false)))
    // create a DataFrame from the RDD using the schema
val df = sqlContext.createDataFrame(rowRDD, schema)
df.rdd.collect()
}*/
test("SPARK-6899: type should match when using codegen") {//配合使用时代码生成
withSQLConf(SQLConf.CODEGEN_ENABLED.key -> "true") {
checkAnswer(
        decimalData.agg(avg('a)),// df.agg() applies aggregate functions
Row(new java.math.BigDecimal(2.0)))
}
}
test("SPARK-7133: Implement struct, array, and map field accessor") {//实现结构,数组,和Map字段的访问
assert(complexData.filter(complexData("a")(0) === 2).count() == 1)
assert(complexData.filter(complexData("m")("1") === 1).count() == 1)
assert(complexData.filter(complexData("s")("key") === 1).count() == 1)
assert(complexData.filter(complexData("m")(complexData("s")("value")) === 1).count() == 1)
assert(complexData.filter(complexData("a")(complexData("s")("key")) === 1).count() == 1)
}
  // support backtick-quoted attributes
test("SPARK-7551: support backticks for DataFrame attribute resolution") {
val df = sqlContext.read.json(sqlContext.sparkContext.makeRDD(
"""{"a.b": {"c": {"d..e": {"f": 1}}}}""" :: Nil))
checkAnswer(
df.select(df("`a.b`.c.`d..e`.`f`")),
Row(1)
)
val df2 = sqlContext.read.json(sqlContext.sparkContext.makeRDD(
"""{"a b": {"c": {"d e": {"f": 1}}}}""" :: Nil))
checkAnswer(
df2.select(df2("`a b`.c.d e.f")),
Row(1)
)
def checkError(testFun: => Unit): Unit = {
val e = intercept[org.apache.spark.sql.AnalysisException] {
testFun
}
assert(e.getMessage.contains("syntax error in attribute name:"))
}
checkError(df("`abc.`c`"))
checkError(df("`abc`..d"))
checkError(df("`a`.b."))
checkError(df("`a.b`.c.`d"))
}
test("SPARK-7324 dropDuplicates") {//
val testData = sqlContext.sparkContext.parallelize(
(2, 1, 2) :: (1, 1, 1) ::
(1, 2, 1) :: (2, 1, 2) ::
(2, 2, 2) :: (2, 2, 1) ::
(2, 1, 1) :: (1, 1, 2) ::
(1, 2, 2) :: (1, 2, 1) :: Nil).toDF("key", "value1", "value2")
/**
*+---+------+------+
|key|value1|value2|
+---+------+------+
| 2| 1| 2|
| 1| 1| 1|
| 1| 2| 1|
| 2| 1| 2|
| 2| 2| 2|
| 2| 2| 1|
| 2| 1| 1|
| 1| 1| 2|
| 1| 2| 2|
| 1| 2| 1|
+---+------+------+ */
testData.show()
/**
* +---+------+------+
|key|value1|value2|
+---+------+------+
| 1| 2| 1|
| 2| 2| 2|
| 1| 2| 2|
| 2| 1| 1|
| 2| 1| 2|
| 1| 1| 1|
| 2| 2| 1|
| 1| 1| 2|
+---+------+------+
*/
    // drop duplicate rows
testData.dropDuplicates().show()
checkAnswer(
testData.dropDuplicates(),
Seq(Row(2, 1, 2), Row(1, 1, 1), Row(1, 2, 1),
Row(2, 2, 2), Row(2, 1, 1), Row(2, 2, 1),
Row(1, 1, 2), Row(1, 2, 2)))
checkAnswer(
      // drop duplicates considering only the given columns
testData.dropDuplicates(Seq("key", "value1")),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("value1", "value2")),
Seq(Row(2, 1, 2), Row(1, 2, 1), Row(1, 1, 1), Row(2, 2, 2)))
checkAnswer(
testData.dropDuplicates(Seq("key")),
Seq(Row(2, 1, 2), Row(1, 1, 1)))
checkAnswer(
testData.dropDuplicates(Seq("value1")),
Seq(Row(2, 1, 2), Row(1, 2, 1)))
checkAnswer(
testData.dropDuplicates(Seq("value2")),
Seq(Row(2, 1, 2), Row(1, 1, 1)))
}
test("SPARK-7150 range api") {//范围
    // numSlice is greater than length
val res1 = sqlContext.range(0, 10, 1, 15).select("id")
    assert(res1.count == 10)// df.agg() applies aggregate functions
assert(res1.agg(sum("id")).as("sumid").collect() === Seq(Row(45)))
val res2 = sqlContext.range(3, 15, 3, 2).select("id")
assert(res2.count == 4)
assert(res2.agg(sum("id")).as("sumid").collect() === Seq(Row(30)))
val res3 = sqlContext.range(1, -2).select("id")
assert(res3.count == 0)
// start is positive, end is negative, step is negative
val res4 = sqlContext.range(1, -2, -2, 6).select("id")
    assert(res4.count == 2)// df.agg() applies aggregate functions
assert(res4.agg(sum("id")).as("sumid").collect() === Seq(Row(0)))
// start, end, step are negative
val res5 = sqlContext.range(-3, -8, -2, 1).select("id")
assert(res5.count == 3)
assert(res5.agg(sum("id")).as("sumid").collect() === Seq(Row(-15)))
// start, end are negative, step is positive
val res6 = sqlContext.range(-8, -4, 2, 1).select("id")
assert(res6.count == 2)
assert(res6.agg(sum("id")).as("sumid").collect() === Seq(Row(-14)))
val res7 = sqlContext.range(-10, -9, -20, 1).select("id")
assert(res7.count == 0)
val res8 = sqlContext.range(Long.MinValue, Long.MaxValue, Long.MaxValue, 100).select("id")
    assert(res8.count == 3)// df.agg() applies aggregate functions
assert(res8.agg(sum("id")).as("sumid").collect() === Seq(Row(-3)))
val res9 = sqlContext.range(Long.MaxValue, Long.MinValue, Long.MinValue, 100).select("id")
    assert(res9.count == 2)// df.agg() applies aggregate functions
assert(res9.agg(sum("id")).as("sumid").collect() === Seq(Row(Long.MaxValue - 1)))
// only end provided as argument
val res10 = sqlContext.range(10).select("id")
    assert(res10.count == 10)// df.agg() applies aggregate functions
assert(res10.agg(sum("id")).as("sumid").collect() === Seq(Row(45)))
val res11 = sqlContext.range(-1).select("id")
assert(res11.count == 0)
}
test("SPARK-8621: support empty string column name") {//支持空字符串列名称
val df = Seq(Tuple1(1)).toDF("").as("t")
// We should allow empty string as column name
df.col("")
df.col("t.``")
}
  // NaN is short for Not-a-Number; some float or double values do not follow standard floating-point semantics
  // NaN == NaN, i.e. NaN always equals NaN
  // in aggregate functions, all NaN values are grouped together
  // NaN can be used as an ordinary join key in join operations
  // NaN sorts last in ascending order, i.e. larger than any other numeric value
  test("SPARK-8797: sort by float column containing NaN should not crash") {
    // sorting a float column that contains NaN should not crash
val inputData = Seq.fill(10)(Tuple1(Float.NaN)) ++ (1 to 20).map(x => Tuple1(x.toFloat))
inputData.foreach(println)
val df = Random.shuffle(inputData).toDF("a")
/**
+----+
| a|
+----+
| 2.0|
|14.0|
| 7.0|
| 1.0|
| NaN|
| NaN|
|10.0|
|11.0|
| NaN|
|15.0|
+----+*/
df.show(10)
    // sorting with NaN values included; the orderBy/collect call below is left commented out
//df.orderBy("a").collect().foreach { x => println _}
}
  // sorting a double column that contains NaN should not crash
test("SPARK-8797: sort by double column containing NaN should not crash") {
val inputData = Seq.fill(10)(Tuple1(Double.NaN)) ++ (1 to 1000).map(x => Tuple1(x.toDouble))
val df = Random.shuffle(inputData).toDF("a")
df.orderBy("a").collect()
}
  // NaN is short for Not-a-Number; some float or double values do not follow standard floating-point semantics
  // NaN == NaN, i.e. NaN always equals NaN
  // in aggregate functions, all NaN values are grouped together
  // NaN can be used as an ordinary join key in join operations
  // NaN sorts last in ascending order, i.e. larger than any other numeric value
test("NaN is greater than all other non-NaN numeric values") {
val maxDouble = Seq(Double.NaN, Double.PositiveInfinity, Double.MaxValue)
.map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
assert(java.lang.Double.isNaN(maxDouble.getDouble(0)))
val maxFloat = Seq(Float.NaN, Float.PositiveInfinity, Float.MaxValue)
.map(Tuple1.apply).toDF("a").selectExpr("max(a)").first()
assert(java.lang.Float.isNaN(maxFloat.getFloat(0)))
}
  // a better exception for duplicate columns
test("SPARK-8072: Better Exception for Duplicate Columns") {
    // only one duplicate column present
val e = intercept[org.apache.spark.sql.AnalysisException] {
Seq((1, 2, 3), (2, 3, 4), (3, 4, 5)).toDF("column1", "column2", "column1")
.write.format("parquet").save("temp")
}
assert(e.getMessage.contains("Duplicate column(s)"))
assert(e.getMessage.contains("parquet"))
assert(e.getMessage.contains("column1"))
assert(!e.getMessage.contains("column2"))
    // multiple duplicate columns present
val f = intercept[org.apache.spark.sql.AnalysisException] {
Seq((1, 2, 3, 4, 5), (2, 3, 4, 5, 6), (3, 4, 5, 6, 7))
.toDF("column1", "column2", "column3", "column1", "column3")
.write.format("json").save("temp")
}
assert(f.getMessage.contains("Duplicate column(s)"))
assert(f.getMessage.contains("JSON"))
assert(f.getMessage.contains("column1"))
assert(f.getMessage.contains("column3"))
assert(!f.getMessage.contains("column2"))
}
  // a better error message for inserting into an RDD-based table
test("SPARK-6941: Better error message for inserting into RDD-based Table") {
withTempDir { dir =>
val tempParquetFile = new File(dir, "tmp_parquet")
val tempJsonFile = new File(dir, "tmp_json")
val df = Seq(Tuple1(1)).toDF()
val insertion = Seq(Tuple1(2)).toDF("col")
// pass case: parquet table (HadoopFsRelation)
      // overwrite if the output location already exists
df.write.mode(SaveMode.Overwrite).parquet(tempParquetFile.getCanonicalPath)
val pdf = sqlContext.read.parquet(tempParquetFile.getCanonicalPath)
pdf.registerTempTable("parquet_base")
insertion.write.insertInto("parquet_base")
// pass case: json table (InsertableRelation)
      // overwrite if the output location already exists
df.write.mode(SaveMode.Overwrite).json(tempJsonFile.getCanonicalPath)
val jdf = sqlContext.read.json(tempJsonFile.getCanonicalPath)
jdf.registerTempTable("json_base")
      // overwrite if the output location already exists
insertion.write.mode(SaveMode.Overwrite).insertInto("json_base")
// error cases: insert into an RDD
df.registerTempTable("rdd_base")
val e1 = intercept[AnalysisException] {
insertion.write.insertInto("rdd_base")
}
assert(e1.getMessage.contains("Inserting into an RDD-based table is not allowed."))
// error case: insert into a logical plan that is not a LeafNode
val indirectDS = pdf.select("_1").filter($"_1" > 5)
indirectDS.registerTempTable("indirect_ds")
val e2 = intercept[AnalysisException] {
insertion.write.insertInto("indirect_ds")
}
assert(e2.getMessage.contains("Inserting into an RDD-based table is not allowed."))
// error case: insert into an OneRowRelation
new DataFrame(sqlContext, OneRowRelation).registerTempTable("one_row")
val e3 = intercept[AnalysisException] {
insertion.write.insertInto("one_row")
}
assert(e3.getMessage.contains("Inserting into an RDD-based table is not allowed."))
}
}
  // should return the same value
test("SPARK-8608: call `show` on local DataFrame with random columns should return same value") {
// Make sure we can pass this test for both codegen mode and interpreted mode.
withSQLConf(SQLConf.CODEGEN_ENABLED.key -> "true") {
val df = testData.select(rand(33))
assert(df.showString(5) == df.showString(5))
}
withSQLConf(SQLConf.CODEGEN_ENABLED.key -> "false") {
val df = testData.select(rand(33))
assert(df.showString(5) == df.showString(5))
}
// We will reuse the same Expression object for LocalRelation.
val df = (1 to 10).map(Tuple1.apply).toDF().select(rand(33))
assert(df.showString(5) == df.showString(5))
}
  // a local DataFrame with random columns should return the same values after sorting
test("SPARK-8609: local DataFrame with random columns should return same value after sort") {
// Make sure we can pass this test for both codegen mode and interpreted mode.
withSQLConf(SQLConf.CODEGEN_ENABLED.key -> "true") {
checkAnswer(testData.sort(rand(33)), testData.sort(rand(33)))
}
withSQLConf(SQLConf.CODEGEN_ENABLED.key -> "false") {
checkAnswer(testData.sort(rand(33)), testData.sort(rand(33)))
}
// We will reuse the same Expression object for LocalRelation.
val df = (1 to 10).map(Tuple1.apply).toDF()
checkAnswer(df.sort(rand(33)), df.sort(rand(33)))
}
  // sort with non-deterministic expressions
test("SPARK-9083: sort with non-deterministic expressions") {
import org.apache.spark.util.random.XORShiftRandom
val seed = 33
val df = (1 to 100).map(Tuple1.apply).toDF("i")
val random = new XORShiftRandom(seed)
val expected = (1 to 100).map(_ -> random.nextDouble()).sortBy(_._2).map(_._1)
val actual = df.sort(rand(seed)).collect().map(_.getInt(0))
assert(expected === actual)
}
  // DataFrame.orderBy should support nested column names
test("SPARK-9323: DataFrame.orderBy should support nested column name") {
val df = sqlContext.read.json(sqlContext.sparkContext.makeRDD(
"""{"a": {"b": 1}}""" :: Nil))
checkAnswer(df.orderBy("a.b"), Row(Row(1)))
}
  // correctly analyze grouping/aggregating on struct fields
test("SPARK-9950: correctly analyze grouping/aggregating on struct fields") {
val df = Seq(("x", (1, 1)), ("y", (2, 2))).toDF("a", "b")//df.agg() 求聚合用的相关函数
checkAnswer(df.groupBy("b._1").agg(sum("b._2")), Row(1, 1) :: Row(2, 2) :: Nil)
}
  // avoid transformations on executors
test("SPARK-10093: Avoid transformations on executors") {
val df = Seq((1, 1)).toDF("a", "b")
df.where($"a" === 1)
.select($"a", $"b", struct($"b"))
.orderBy("a")
.select(struct($"b"))
.collect()
}
  // Project should not be pushed down through Intersect or Except
test("SPARK-10539: Project should not be pushed down through Intersect or Except") {
val df1 = (1 to 100).map(Tuple1.apply).toDF("i")
val df2 = (1 to 30).map(Tuple1.apply).toDF("i")
val intersect = df1.intersect(df2)
val except = df1.except(df2)
assert(intersect.count() === 30)
assert(except.count() === 70)
}
  // handle nondeterministic expressions correctly for set operations
test("SPARK-10740: handle nondeterministic expressions correctly for set operations") {
val df1 = (1 to 20).map(Tuple1.apply).toDF("i")
val df2 = (1 to 10).map(Tuple1.apply).toDF("i")
    // When generating expected results here, we need to follow the implementation of
    // the Rand expression.
def expected(df: DataFrame): Seq[Row] = {
df.rdd.collectPartitions().zipWithIndex.flatMap {
case (data, index) =>
val rng = new org.apache.spark.util.random.XORShiftRandom(7 + index)
data.filter(_.getInt(0) < rng.nextDouble() * 10)
}
}
val union = df1.unionAll(df2)
checkAnswer(
union.filter('i < rand(7) * 10),
expected(union)
)
checkAnswer(
union.select(rand(7)),
union.rdd.collectPartitions().zipWithIndex.flatMap {
case (data, index) =>
val rng = new org.apache.spark.util.random.XORShiftRandom(7 + index)
data.map(_ => rng.nextDouble()).map(i => Row(i))
}
)
val intersect = df1.intersect(df2)
checkAnswer(
intersect.filter('i < rand(7) * 10),
expected(intersect)
)
val except = df1.except(df2)
checkAnswer(
except.filter('i < rand(7) * 10),
expected(except)
)
}
  // fix case sensitivity for filters on partitioned columns
test("SPARK-11301: fix case sensitivity for filter on partitioned columns") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
withTempPath { path =>
Seq(2012 -> "a").toDF("year", "val").write.partitionBy("year").parquet(path.getAbsolutePath)
val df = sqlContext.read.parquet(path.getAbsolutePath)
checkAnswer(df.filter($"yEAr" > 2000).select($"val"), Row("a"))
}
}
}
}
| tophua/spark1.52 | sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala | Scala | apache-2.0 | 42,555 |
package sigmastate.utxo
import sigmastate.serialization.ValueSerializer.getSerializer
import sigmastate.serialization.OpCodes.OpCode
import scalan.util.Extensions.ByteOps
import sigmastate.SMethod
import sigmastate.serialization.OpCodes
import scala.collection.mutable
object ComplexityTableStat {
// NOTE: this class is mutable so better to keep it private
private class StatItem(
/** How many times the operation has been executed */
var count: Long,
/** Sum of all execution times */
var sum: Long
)
/** Timings of op codes */
private val opStat = mutable.HashMap[OpCode, StatItem]()
/** Timings of method calls */
private val mcStat = mutable.HashMap[(Byte, Byte), StatItem]()
def addOpTime(op: OpCode, time: Long) = {
opStat.get(op) match {
case Some(item) =>
item.count += 1
item.sum += time
case None =>
opStat(op) = new StatItem(1, time)
}
}
def addMcTime(typeId: Byte, methodId: Byte, time: Long) = {
mcStat.get((typeId, methodId)) match {
case Some(item) =>
item.count += 1
item.sum += time
case None =>
mcStat((typeId, methodId)) = new StatItem(1, time)
}
}
/** Prints the complexity table
* */
def complexityTableString: String = {
val opCodeLines = opStat.map { case (opCode, item) =>
val avgTime = item.sum / item.count
val time = avgTime / 1000
val ser = getSerializer(opCode)
val opName = ser.opDesc.typeName
(opName, (opCode.toUByte - OpCodes.LastConstantCode).toString, time, item.count.toString)
}.toList.sortBy(_._3)(Ordering[Long].reverse)
val mcLines = mcStat.map { case (id @ (typeId, methodId), item) =>
val avgTime = item.sum / item.count
val time = avgTime / 1000
val m = SMethod.fromIds(typeId, methodId)
val typeName = m.objType.typeName
(s"$typeName.${m.name}", typeId, methodId, time, item.count.toString)
}.toList.sortBy(r => (r._2,r._3))(Ordering[(Byte,Byte)].reverse)
// val lines = (("Op", "OpCode", "Avg Time,us", "Count") :: opCodeLines ::: mcLines)
// .map { case (opName, opCode, time, count) =>
// s"${opName.padTo(30, ' ')}\\t${opCode.padTo(7, ' ')}\\t${time.padTo(9, ' ')}\\t${count}"
// }
// .mkString("\\n")
val rows = (opCodeLines)
.map { case (opName, opCode, time, count) =>
val key = s"$opName.opCode".padTo(30, ' ')
s"$key -> $time, // count = $count "
}
.mkString("\\n")
val mcRows = (mcLines)
.map { case (opName, typeId, methodId, time, count) =>
val key = s"($typeId.toByte, $methodId.toByte)".padTo(25, ' ')
s"$key -> $time, // count = $count, $opName "
}
.mkString("\\n")
// val total = opStat.values.foldLeft(0L) { (acc, item) => acc + item.sum }
s"""
|-----------
|$rows
|-----------
|$mcRows
|-----------
""".stripMargin
}
}
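// Illustrative usage sketch (not part of the original file): callers time an
// operation themselves and record the elapsed nanoseconds (complexityTableString
// divides by 1000 to report microseconds). `someOpCode` is a placeholder here.
//
//   val start = System.nanoTime()
//   // ... evaluate the operation identified by `someOpCode` ...
//   ComplexityTableStat.addOpTime(someOpCode, System.nanoTime() - start)
//   println(ComplexityTableStat.complexityTableString)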
| ScorexFoundation/sigmastate-interpreter | sigmastate/src/main/scala/sigmastate/utxo/ComplexityTableStat.scala | Scala | mit | 2,956 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.util
import org.scalatest.exceptions.TestFailedException
import org.apache.spark.SparkFunSuite
import org.apache.spark.ml.linalg.VectorUDT
import org.apache.spark.ml.util.SchemaUtils
import org.apache.spark.mllib.linalg.{Matrices, Vectors}
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.sql.types.{StructField, StructType}
class TestingUtilsSuite extends SparkFunSuite {
test("Comparing doubles using relative error.") {
assert(23.1 ~== 23.52 relTol 0.02)
assert(23.1 ~== 22.74 relTol 0.02)
assert(23.1 ~= 23.52 relTol 0.02)
assert(23.1 ~= 22.74 relTol 0.02)
assert(!(23.1 !~= 23.52 relTol 0.02))
assert(!(23.1 !~= 22.74 relTol 0.02))
// Should throw exception with message when test fails.
intercept[TestFailedException](23.1 !~== 23.52 relTol 0.02)
intercept[TestFailedException](23.1 !~== 22.74 relTol 0.02)
intercept[TestFailedException](23.1 ~== 23.63 relTol 0.02)
intercept[TestFailedException](23.1 ~== 22.34 relTol 0.02)
assert(23.1 !~== 23.63 relTol 0.02)
assert(23.1 !~== 22.34 relTol 0.02)
assert(23.1 !~= 23.63 relTol 0.02)
assert(23.1 !~= 22.34 relTol 0.02)
assert(!(23.1 ~= 23.63 relTol 0.02))
assert(!(23.1 ~= 22.34 relTol 0.02))
// Comparing against zero should fail the test and throw exception with message
// saying that the relative error is meaningless in this situation.
intercept[TestFailedException](0.1 ~== 0.0 relTol 0.032)
intercept[TestFailedException](0.1 ~= 0.0 relTol 0.032)
intercept[TestFailedException](0.1 !~== 0.0 relTol 0.032)
intercept[TestFailedException](0.1 !~= 0.0 relTol 0.032)
intercept[TestFailedException](0.0 ~== 0.1 relTol 0.032)
intercept[TestFailedException](0.0 ~= 0.1 relTol 0.032)
intercept[TestFailedException](0.0 !~== 0.1 relTol 0.032)
intercept[TestFailedException](0.0 !~= 0.1 relTol 0.032)
// Comparisons of numbers very close to zero.
assert(10 * Double.MinPositiveValue ~== 9.5 * Double.MinPositiveValue relTol 0.01)
assert(10 * Double.MinPositiveValue !~== 11 * Double.MinPositiveValue relTol 0.01)
assert(-Double.MinPositiveValue ~== 1.18 * -Double.MinPositiveValue relTol 0.012)
assert(-Double.MinPositiveValue ~== 1.38 * -Double.MinPositiveValue relTol 0.012)
}
test("Comparing doubles using absolute error.") {
assert(17.8 ~== 17.99 absTol 0.2)
assert(17.8 ~== 17.61 absTol 0.2)
assert(17.8 ~= 17.99 absTol 0.2)
assert(17.8 ~= 17.61 absTol 0.2)
assert(!(17.8 !~= 17.99 absTol 0.2))
assert(!(17.8 !~= 17.61 absTol 0.2))
// Should throw exception with message when test fails.
intercept[TestFailedException](17.8 !~== 17.99 absTol 0.2)
intercept[TestFailedException](17.8 !~== 17.61 absTol 0.2)
intercept[TestFailedException](17.8 ~== 18.01 absTol 0.2)
intercept[TestFailedException](17.8 ~== 17.59 absTol 0.2)
assert(17.8 !~== 18.01 absTol 0.2)
assert(17.8 !~== 17.59 absTol 0.2)
assert(17.8 !~= 18.01 absTol 0.2)
assert(17.8 !~= 17.59 absTol 0.2)
assert(!(17.8 ~= 18.01 absTol 0.2))
assert(!(17.8 ~= 17.59 absTol 0.2))
    // Comparisons of numbers very close to zero, and on both sides of zero
assert(
Double.MinPositiveValue ~== 4 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue)
assert(
Double.MinPositiveValue !~== 6 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue)
assert(
-Double.MinPositiveValue ~== 3 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue)
assert(
Double.MinPositiveValue !~== -4 * Double.MinPositiveValue absTol 5 * Double.MinPositiveValue)
}
test("Comparing vectors using relative error.") {
// Comparisons of two dense vectors
assert(Vectors.dense(Array(3.1, 3.5)) ~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01)
assert(Vectors.dense(Array(3.1, 3.5)) !~== Vectors.dense(Array(3.135, 3.534)) relTol 0.01)
assert(Vectors.dense(Array(3.1, 3.5)) ~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01)
assert(Vectors.dense(Array(3.1, 3.5)) !~= Vectors.dense(Array(3.135, 3.534)) relTol 0.01)
assert(!(Vectors.dense(Array(3.1, 3.5)) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01))
assert(!(Vectors.dense(Array(3.1, 3.5)) ~= Vectors.dense(Array(3.135, 3.534)) relTol 0.01))
assert(Vectors.dense(Array(3.1)) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01)
assert(Vectors.dense(Array.empty[Double]) !~= Vectors.dense(Array(3.130, 3.534)) relTol 0.01)
assert(Vectors.dense(Array(3.1)) !~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01)
assert(Vectors.dense(Array.empty[Double]) !~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01)
// Should throw exception with message when test fails.
intercept[TestFailedException](
Vectors.dense(Array(3.1, 3.5)) !~== Vectors.dense(Array(3.130, 3.534)) relTol 0.01)
intercept[TestFailedException](
Vectors.dense(Array(3.1, 3.5)) ~== Vectors.dense(Array(3.135, 3.534)) relTol 0.01)
intercept[TestFailedException](
Vectors.dense(Array(3.1)) ~== Vectors.dense(Array(3.535, 3.534)) relTol 0.01)
intercept[TestFailedException](
Vectors.dense(Array.empty[Double]) ~== Vectors.dense(Array(3.135)) relTol 0.01)
// Comparing against zero should fail the test and throw exception with message
// saying that the relative error is meaningless in this situation.
intercept[TestFailedException](
Vectors.dense(Array(3.1, 0.01)) ~== Vectors.dense(Array(3.13, 0.0)) relTol 0.01)
intercept[TestFailedException](
Vectors.dense(Array(3.1, 0.01)) ~== Vectors.sparse(2, Array(0), Array(3.13)) relTol 0.01)
// Comparisons of a sparse vector and a dense vector
assert(Vectors.dense(Array(3.1, 3.5)) ~==
Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01)
assert(Vectors.dense(Array(3.1, 3.5)) !~==
Vectors.sparse(2, Array(0, 1), Array(3.135, 3.534)) relTol 0.01)
assert(Vectors.dense(Array(3.1)) !~==
Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01)
assert(Vectors.dense(Array.empty[Double]) !~==
Vectors.sparse(2, Array(0, 1), Array(3.130, 3.534)) relTol 0.01)
}
test("Comparing vectors using absolute error.") {
// Comparisons of two dense vectors
assert(Vectors.dense(Array(3.1, 3.5, 0.0)) ~==
Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6)
assert(Vectors.dense(Array(3.1, 3.5, 0.0)) !~==
Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6)
assert(Vectors.dense(Array(3.1, 3.5, 0.0)) ~=
Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6)
assert(Vectors.dense(Array(3.1, 3.5, 0.0)) !~=
Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6)
assert(!(Vectors.dense(Array(3.1, 3.5, 0.0)) !~=
Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6))
assert(!(Vectors.dense(Array(3.1, 3.5, 0.0)) ~=
Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6))
assert(Vectors.dense(Array(3.1)) !~=
Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5)
assert(!(Vectors.dense(Array(3.1)) ~=
Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5))
assert(Vectors.dense(Array.empty[Double]) !~=
Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5)
assert(!(Vectors.dense(Array.empty[Double]) ~=
Vectors.dense(Array(3.1 + 1E-6, 3.5 + 2E-7)) absTol 1E-5))
assert(Vectors.dense(Array.empty[Double]) ~=
Vectors.dense(Array.empty[Double]) absTol 1E-5)
// Should throw exception with message when test fails.
intercept[TestFailedException](Vectors.dense(Array(3.1, 3.5, 0.0)) !~==
Vectors.dense(Array(3.1 + 1E-8, 3.5 + 2E-7, 1E-8)) absTol 1E-6)
intercept[TestFailedException](Vectors.dense(Array(3.1, 3.5, 0.0)) ~==
Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7, 1 + 1E-3)) absTol 1E-6)
intercept[TestFailedException](Vectors.dense(Array(3.1)) ~==
Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7)) absTol 1E-6)
intercept[TestFailedException](Vectors.dense(Array.empty[Double]) ~==
Vectors.dense(Array(3.1 + 1E-5, 3.5 + 2E-7)) absTol 1E-6)
// Comparisons of two sparse vectors
assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) ~==
Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-8, 2.4 + 1E-7)) absTol 1E-6)
assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-8, 2.4 + 1E-7)) ~==
Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6)
assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~==
Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-3, 2.4)) absTol 1E-6)
assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-3, 2.4)) !~==
Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6)
assert(Vectors.sparse(3, Array(0, 2), Array(3.1 + 1E-6, 2.4)) !~==
Vectors.sparse(1, Array(0), Array(3.1)) absTol 1E-3)
assert(Vectors.sparse(0, Array.empty[Int], Array.empty[Double]) !~==
Vectors.sparse(1, Array(0), Array(3.1)) absTol 1E-3)
// Comparisons of a dense vector and a sparse vector
assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) ~==
Vectors.dense(Array(3.1 + 1E-8, 0, 2.4 + 1E-7)) absTol 1E-6)
assert(Vectors.dense(Array(3.1 + 1E-8, 0, 2.4 + 1E-7)) ~==
Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) absTol 1E-6)
assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~==
Vectors.dense(Array(3.1, 1E-3, 2.4)) absTol 1E-6)
assert(Vectors.sparse(3, Array(0, 2), Array(3.1, 2.4)) !~==
Vectors.dense(Array(3.1)) absTol 1E-6)
assert(Vectors.dense(Array.empty[Double]) !~==
Vectors.sparse(3, Array(0, 2), Array(0, 2.4)) absTol 1E-6)
assert(Vectors.sparse(1, Array(0), Array(3.1)) !~==
Vectors.dense(Array(3.1, 3.2)) absTol 1E-6)
assert(Vectors.dense(Array(3.1)) !~==
Vectors.sparse(0, Array.empty[Int], Array.empty[Double]) absTol 1E-6)
}
test("Comparing Matrices using absolute error.") {
// Comparisons of two dense Matrices
assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~==
Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~=
Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~==
Matrices.dense(2, 2, Array(3.1 + 1E-5, 3.5 + 2E-6, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~=
Matrices.dense(2, 2, Array(3.1 + 1E-5, 3.5 + 2E-6, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~=
Matrices.dense(2, 2, Array(3.1 + 1E-5, 3.5 + 2E-6, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6))
assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~=
Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6))
assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~==
Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~=
Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.dense(0, 0, Array()) !~=
Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.dense(0, 0, Array()) !~==
Matrices.dense(2, 2, Array(3.1 + 1E-7, 3.5 + 2E-8, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
// Should throw exception with message when test fails.
intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~==
Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~==
Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9)
intercept[TestFailedException](Matrices.dense(2, 1, Array(3.1, 3.5)) ~==
Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-5)
intercept[TestFailedException](Matrices.dense(0, 0, Array()) ~==
Matrices.dense(2, 2, Array(3.1 + 1E-8, 3.5 + 2E-7, 3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-5)
// Comparisons of two sparse Matrices
assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~==
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9)
assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~==
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9)
assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5)) absTol 1E-9))
assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5)) absTol 1E-6))
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-9)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~==
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~==
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1 + 1E-8, 3.5 + 1E-7)) absTol 1E-6)
// Comparisons of a dense Matrix and a sparse Matrix
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~=
Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~==
Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-6)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~==
Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-9)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~==
Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-9)
assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~=
Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-9))
assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~=
Matrices.dense(2, 2, Array(3.1 + 1E-8, 0, 0, 3.5 + 1E-7)) absTol 1E-6))
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~=
Matrices.dense(2, 1, Array(3.1 + 1E-8, 0)) absTol 1E-6)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~==
Matrices.dense(2, 1, Array(3.1 + 1E-8, 0)) absTol 1E-6)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~==
Matrices.dense(0, 0, Array()) absTol 1E-6)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~=
Matrices.dense(0, 0, Array()) absTol 1E-6)
}
test("Comparing Matrices using relative error.") {
// Comparisons of two dense Matrices
assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~==
Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01)
assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~=
Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01)
assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~==
Matrices.dense(2, 2, Array(3.135, 3.534, 3.135, 3.534)) relTol 0.01)
assert(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~=
Matrices.dense(2, 2, Array(3.135, 3.534, 3.135, 3.534)) relTol 0.01)
assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~=
Matrices.dense(2, 2, Array(3.134, 3.535, 3.134, 3.535)) relTol 0.01))
assert(!(Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~=
Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01))
assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~==
Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01)
assert(Matrices.dense(2, 1, Array(3.1, 3.5)) !~=
Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01)
assert(Matrices.dense(0, 0, Array()) !~=
Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01)
assert(Matrices.dense(0, 0, Array()) !~==
Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01)
// Should throw exception with message when test fails.
intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) !~==
Matrices.dense(2, 2, Array(3.130, 3.534, 3.130, 3.534)) relTol 0.01)
intercept[TestFailedException](Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) ~==
Matrices.dense(2, 2, Array(3.135, 3.534, 3.135, 3.534)) relTol 0.01)
intercept[TestFailedException](Matrices.dense(2, 1, Array(3.1, 3.5)) ~==
Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01)
intercept[TestFailedException](Matrices.dense(0, 0, Array()) ~==
Matrices.dense(2, 2, Array(3.1, 3.5, 3.1, 3.5)) relTol 0.01)
// Comparisons of two sparse Matrices
assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~==
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.130, 3.534)) relTol 0.01)
assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.130, 3.534)) relTol 0.01)
assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.135, 3.534)) relTol 0.01)
assert(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~==
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.135, 3.534)) relTol 0.01)
assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) ~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.135, 3.534)) relTol 0.01))
assert(!(Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) !~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.130, 3.534)) relTol 0.01))
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~==
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01)
assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~==
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01)
assert(Matrices.sparse(0, 0, Array(1), Array(0), Array(0)) !~=
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(3.1, 3.5)) relTol 0.01)
// Comparisons of a dense Matrix and a sparse Matrix
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~=
Matrices.dense(2, 2, Array(3.130, 0, 0, 3.534)) relTol 0.01)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~==
Matrices.dense(2, 2, Array(3.130, 0, 0, 3.534)) relTol 0.01)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~=
Matrices.dense(2, 2, Array(3.135, 0, 0, 3.534)) relTol 0.01)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~==
Matrices.dense(2, 2, Array(3.135, 0, 0, 3.534)) relTol 0.01)
assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) ~=
Matrices.dense(2, 2, Array(3.135, 0, 0, 3.534)) relTol 0.01))
assert(!(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~=
Matrices.dense(2, 2, Array(3.130, 0, 0, 3.534)) relTol 0.01))
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~=
Matrices.dense(2, 1, Array(3.1, 0)) relTol 0.01)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~==
Matrices.dense(2, 1, Array(3.1, 0)) relTol 0.01)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~==
Matrices.dense(0, 0, Array()) relTol 0.01)
assert(Matrices.sparse(2, 2, Array(0, 1, 2), Array(0, 1), Array(3.1, 3.5)) !~=
Matrices.dense(0, 0, Array()) relTol 0.01)
}
test("SPARK-31400, catalogString distinguish Vectors in ml and mllib") {
val schema = StructType(Array[StructField] {
StructField("features", new org.apache.spark.mllib.linalg.VectorUDT)
})
val e = intercept[IllegalArgumentException] {
SchemaUtils.checkColumnType(schema, "features", new VectorUDT)
}
assert(e.getMessage.contains(
"org.apache.spark.mllib.linalg.VectorUDT:struct<type:tinyint,size:int,indices:array<int>"),
"dataType is not desired")
val normalSchema = StructType(Array[StructField] {
StructField("features", new VectorUDT)
})
SchemaUtils.checkColumnType(normalSchema, "features", new VectorUDT)
}
}
| maropu/spark | mllib/src/test/scala/org/apache/spark/mllib/util/TestingUtilsSuite.scala | Scala | apache-2.0 | 22,688 |
package org.jetbrains.plugins.scala.lang.types.existentialSimplification
package generated
class ExistentialSimplificationFirstRuleTest extends ExistentialSimplificationTestBase {
//This class was generated by build script, please don't change this
override def folderPath: String = super.folderPath + "firstRule/"
def testFirstRule = doTest
} | triggerNZ/intellij-scala | test/org/jetbrains/plugins/scala/lang/types/existentialSimplification/generated/ExistentialSimplificationFirstRuleTest.scala | Scala | apache-2.0 | 351 |
package js.util
/**
* State is a data structure that holds a function that returns a result of
* type R with an input-output state of type S.
*
* Aside: This is also known as the state monad.
*/
sealed class State[S,+R](run: S => (S,R)) {
def apply(s: S) = run(s)
def map[P](f: R => P): State[S,P] =
new State((s: S) => {
val (sp, r) = run(s)
(sp, f(r))
})
def flatMap[P](f: R => State[S,P]): State[S,P] =
new State((s: S) => {
val (sp, r) = run(s)
f(r)(sp) // same as f(r).apply(sp)
})
}
object State {
def apply[S]: State[S,S] = new State[S,S]({ s => (s, s) })
def insert[S,R](r: R): State[S,R] = apply map { _ => r }
def modify[S](f: S => S): State[S,Unit] = apply flatMap {
s => new State[S,Unit]({ _ => (f(s), ()) })
}
}
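
// A minimal usage sketch (added for illustration, not part of the original API):
// it threads an Int counter through a computation using the combinators above.
// The value below would evaluate to (11, 21): the final state is 11 and the
// result sums the state read before and after the increment (10 + 11).
//
//   val readTwice: State[Int, Int] =
//     for {
//       a <- State[Int]                 // read the current state
//       _ <- State.modify[Int](_ + 1)   // increment it
//       b <- State[Int]                 // read it again
//     } yield a + b
//
//   readTwice(10)  // == (11, 21)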
| mpgarate/ProgLang-Assignments | HW5/src/main/scala/js/util/State.scala | Scala | mit | 807 |
package backend
import logging.{CorrelatedLogging, CorrelatedWS, CorrelationContext}
import models.billing.CreateOrder
import play.api.libs.concurrent.Execution.Implicits._
import play.api.libs.json.Json
import scaldi.{Injectable, Injector}
class BillingBackend(implicit inj: Injector) extends Injectable with CorrelatedLogging {
private val correlatedWS = inject[CorrelatedWS]
private val serviceFailover = inject[ServiceFailover]
import backend.BillingBackend._
def createOrder(customerId: Long, cartId: String)(implicit collectionContext: CorrelationContext) =
serviceFailover.retry(serviceName, "/orders") {
url =>
val createOrder = CreateOrder(customerId, cartId)
correlatedWS.post(serviceName, url, Json.toJson(createOrder)).map {
response =>
if (response.status != 200)
throw new RuntimeException(s"post create order to billing service failed status=${response.status}")
response
}
}
}
object BillingBackend {
val serviceName = "billing-service"
} | leanovate/microzon-web | app/backend/BillingBackend.scala | Scala | mit | 1,055 |
package com.sksamuel.scapegoat.inspections
import com.sksamuel.scapegoat.{Inspection, InspectionContext, Inspector, Levels}
/**
* @author Stephen Samuel
*/
class PublicFinalizer
extends Inspection(
text = "PublicFinalizer",
defaultLevel = Levels.Info,
      description = "Checks for overridden finalizers that are public.",
      explanation = "Public finalizers should be avoided as finalizers should not be programmatically invoked."
) {
override def inspector(context: InspectionContext): Inspector =
new Inspector(context) {
import context.global._
override def postTyperTraverser =
new context.Traverser {
override def inspect(tree: Tree): Unit = {
tree match {
case DefDef(mods, TermName("finalize"), Nil, Nil, tpt, _)
if mods.isPublic && tpt.tpe <:< typeOf[Unit] =>
context.warn(tree.pos, self)
case _ => continue(tree)
}
}
}
}
}
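
// Illustrative sketch (an assumption added for clarity, not taken from the
// original sources): a public override of finalize is the kind of definition
// this inspection is meant to flag, whereas keeping the override protected
// avoids the warning.
//
//   class LeakyResource {
//     override def finalize(): Unit = println("cleaning up") // public finalizer
//   }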
| sksamuel/scalac-scapegoat-plugin | src/main/scala/com/sksamuel/scapegoat/inspections/PublicFinalizer.scala | Scala | apache-2.0 | 1,008 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server.epoch
import java.util.concurrent.locks.ReentrantReadWriteLock
import kafka.server.LogOffsetMetadata
import kafka.server.checkpoints.LeaderEpochCheckpoint
import org.apache.kafka.common.requests.EpochEndOffset.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET}
import kafka.utils.CoreUtils._
import kafka.utils.Logging
import org.apache.kafka.common.TopicPartition
import scala.collection.mutable.ListBuffer
trait LeaderEpochCache {
def assign(leaderEpoch: Int, offset: Long)
def latestEpoch(): Int
def endOffsetFor(epoch: Int): Long
def clearAndFlushLatest(offset: Long)
def clearAndFlushEarliest(offset: Long)
def clearAndFlush()
def clear()
}
/**
* Represents a cache of (LeaderEpoch => Offset) mappings for a particular replica.
*
* Leader Epoch = epoch assigned to each leader by the controller.
* Offset = offset of the first message in each epoch.
*
* @param leo a function that determines the log end offset
* @param checkpoint the checkpoint file
*/
class LeaderEpochFileCache(topicPartition: TopicPartition, leo: () => LogOffsetMetadata, checkpoint: LeaderEpochCheckpoint) extends LeaderEpochCache with Logging {
private val lock = new ReentrantReadWriteLock()
private var epochs: ListBuffer[EpochEntry] = inWriteLock(lock) { ListBuffer(checkpoint.read(): _*) }
/**
* Assigns the supplied Leader Epoch to the supplied Offset
* Once the epoch is assigned it cannot be reassigned
*
   * @param epoch the leader epoch being assigned
   * @param offset the start offset of the first message in the assigned epoch
*/
override def assign(epoch: Int, offset: Long): Unit = {
inWriteLock(lock) {
if (epoch >= 0 && epoch > latestEpoch && offset >= latestOffset) {
info(s"Updated PartitionLeaderEpoch. ${epochChangeMsg(epoch, offset)}. Cache now contains ${epochs.size} entries.")
epochs += EpochEntry(epoch, offset)
flush()
} else {
validateAndMaybeWarn(epoch, offset)
}
}
}
/**
* Returns the current Leader Epoch. This is the latest epoch
* which has messages assigned to it.
*
* @return
*/
override def latestEpoch(): Int = {
inReadLock(lock) {
if (epochs.isEmpty) UNDEFINED_EPOCH else epochs.last.epoch
}
}
/**
* Returns the End Offset for a requested Leader Epoch.
*
* This is defined as the start offset of the first Leader Epoch larger than the
* Leader Epoch requested, or else the Log End Offset if the latest epoch was requested.
*
   * During the upgrade phase, where existing messages may not have a leader epoch,
   * if requestedEpoch is less than the first epoch cached, UNDEFINED_EPOCH_OFFSET will be returned
   * so that the follower falls back to the High Water Mark.
*
   * @param requestedEpoch the leader epoch for which the end offset is requested
* @return offset
*/
override def endOffsetFor(requestedEpoch: Int): Long = {
inReadLock(lock) {
val offset =
if (requestedEpoch == latestEpoch) {
leo().messageOffset
}
else {
val subsequentEpochs = epochs.filter(e => e.epoch > requestedEpoch)
if (subsequentEpochs.isEmpty || requestedEpoch < epochs.head.epoch)
UNDEFINED_EPOCH_OFFSET
else
subsequentEpochs.head.startOffset
}
debug(s"Processed offset for epoch request for partition ${topicPartition} epoch:$requestedEpoch and returning offset $offset from epoch list of size ${epochs.size}")
offset
}
}
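
  // Worked example (illustrative, not from the original source): with cached
  // entries EpochEntry(1, 0) and EpochEntry(2, 100) and a log end offset of 200,
  //   endOffsetFor(2) == 200                     (latest epoch -> log end offset)
  //   endOffsetFor(1) == 100                     (start offset of the next larger epoch)
  //   endOffsetFor(0) == UNDEFINED_EPOCH_OFFSET  (requested epoch precedes the earliest entry)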
/**
* Removes all epoch entries from the store with start offsets greater than or equal to the passed offset.
*
   * @param offset entries with a start offset greater than or equal to this offset are removed
*/
override def clearAndFlushLatest(offset: Long): Unit = {
inWriteLock(lock) {
val before = epochs
if (offset >= 0 && offset <= latestOffset()) {
epochs = epochs.filter(entry => entry.startOffset < offset)
flush()
info(s"Cleared latest ${before.toSet.filterNot(epochs.toSet)} entries from epoch cache based on passed offset $offset leaving ${epochs.size} in EpochFile for partition $topicPartition")
}
}
}
/**
* Clears old epoch entries. This method searches for the oldest epoch < offset, updates the saved epoch offset to
* be offset, then clears any previous epoch entries.
*
* This method is exclusive: so clearEarliest(6) will retain an entry at offset 6.
*
* @param offset the offset to clear up to
*/
override def clearAndFlushEarliest(offset: Long): Unit = {
inWriteLock(lock) {
val before = epochs
if (offset >= 0 && earliestOffset() < offset) {
val earliest = epochs.filter(entry => entry.startOffset < offset)
if (earliest.size > 0) {
epochs = epochs --= earliest
//If the offset is less than the earliest offset remaining, add previous epoch back, but with an updated offset
if (offset < earliestOffset() || epochs.isEmpty)
new EpochEntry(earliest.last.epoch, offset) +=: epochs
flush()
info(s"Cleared earliest ${before.toSet.filterNot(epochs.toSet).size} entries from epoch cache based on passed offset $offset leaving ${epochs.size} in EpochFile for partition $topicPartition")
}
}
}
}
/**
* Delete all entries.
*/
override def clearAndFlush() = {
inWriteLock(lock) {
epochs.clear()
flush()
}
}
override def clear() = {
inWriteLock(lock) {
epochs.clear()
}
}
def epochEntries(): ListBuffer[EpochEntry] = {
epochs
}
private def earliestOffset(): Long = {
if (epochs.isEmpty) -1 else epochs.head.startOffset
}
private def latestOffset(): Long = {
if (epochs.isEmpty) -1 else epochs.last.startOffset
}
private def flush(): Unit = {
checkpoint.write(epochs)
}
  def epochChangeMsg(epoch: Int, offset: Long) = s"New: {epoch:$epoch, offset:$offset}, Current: {epoch:$latestEpoch, offset:$latestOffset} for Partition: $topicPartition"
def validateAndMaybeWarn(epoch: Int, offset: Long) = {
assert(epoch >= 0, s"Received a PartitionLeaderEpoch assignment for an epoch < 0. This should not happen. ${epochChangeMsg(epoch, offset)}")
if (epoch < latestEpoch())
warn(s"Received a PartitionLeaderEpoch assignment for an epoch < latestEpoch. " +
s"This implies messages have arrived out of order. ${epochChangeMsg(epoch, offset)}")
else if (offset < latestOffset())
warn(s"Received a PartitionLeaderEpoch assignment for an offset < latest offset for the most recent, stored PartitionLeaderEpoch. " +
s"This implies messages have arrived out of order. ${epochChangeMsg(epoch, offset)}")
}
}
// Mapping of epoch to the first offset of the subsequent epoch
case class EpochEntry(epoch: Int, startOffset: Long)
| ErikKringen/kafka | core/src/main/scala/kafka/server/epoch/LeaderEpochFileCache.scala | Scala | apache-2.0 | 7,523 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.io.File
import java.util.{List => JList}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.SparkContext
import org.apache.spark.api.java.{JavaRDD, JavaSparkContext}
private[spark] object PythonUtils {
/** Get the PYTHONPATH for PySpark, either from SPARK_HOME, if it is set, or from our JAR */
def sparkPythonPath: String = {
val pythonPath = new ArrayBuffer[String]
for (sparkHome <- sys.env.get("SPARK_HOME")) {
pythonPath += Seq(sparkHome, "python", "lib", "pyspark.zip").mkString(File.separator)
pythonPath += Seq(sparkHome, "python", "lib", "py4j-0.10.4-src.zip").mkString(File.separator)
}
pythonPath ++= SparkContext.jarOfObject(this)
pythonPath.mkString(File.pathSeparator)
}
/** Merge PYTHONPATHS with the appropriate separator. Ignores blank strings. */
def mergePythonPaths(paths: String*): String = {
paths.filter(_ != "").mkString(File.pathSeparator)
}
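
  // Illustrative example (paths are hypothetical); on a platform where
  // File.pathSeparator is ":" this yields "/opt/spark/python:/tmp/py4j.zip":
  //   mergePythonPaths("/opt/spark/python", "", "/tmp/py4j.zip")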
def generateRDDWithNull(sc: JavaSparkContext): JavaRDD[String] = {
sc.parallelize(List("a", null, "b"))
}
/**
* Convert list of T into seq of T (for calling API with varargs)
*/
def toSeq[T](vs: JList[T]): Seq[T] = {
vs.asScala
}
/**
* Convert list of T into a (Scala) List of T
*/
def toList[T](vs: JList[T]): List[T] = {
vs.asScala.toList
}
/**
* Convert list of T into array of T (for calling API with array)
*/
def toArray[T](vs: JList[T]): Array[T] = {
vs.toArray().asInstanceOf[Array[T]]
}
/**
* Convert java map of K, V into Map of K, V (for calling API with varargs)
*/
def toScalaMap[K, V](jm: java.util.Map[K, V]): Map[K, V] = {
jm.asScala.toMap
}
}
| Panos-Bletsos/spark-cost-model-optimizer | core/src/main/scala/org/apache/spark/api/python/PythonUtils.scala | Scala | apache-2.0 | 2,565 |
package io.transwarp.midas.utils
import java.sql.Connection
import java.util
object TablePropertiesUtils extends ITablePropertiesUtils {
override def getMetaValue(values: String): Map[String, String] = null
override def getMeta(connection: Connection,
database: String,
table: String): Map[String, Map[String, Map[String, String]]] = null
override def getMetaJava(connection: Connection,
database: String,
table: String):
util.Map[String, util.Map[String, util.Map[String, String]]] = null
override def getTableDesc(connection: Connection,
database: String,
table: String): Array[Table] = {
null
}
}
| transwarpio/rapidminer | transwarp-midas-api/src/main/scala/io/transwarp/midas/utils/TablePropertiesUtils.scala | Scala | gpl-3.0 | 776 |
/**
* SparklineData, Inc. -- http://www.sparklinedata.com/
*
* Scala based Audience Behavior APIs
*
* Copyright 2014-2015 SparklineData, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sparklinedata.analytics
import org.apache.spark.sql.{Column, SQLContext}
import org.apache.spark.sql.catalyst.expressions._
import org.joda.time.{Period, DateTime}
import org.sparklinedata.spark.dateTime.dsl.expressions._
import org.sparklinedata.analytics.utils.DataframeUtils._
/**
* Created by Jitender on 8/7/15.
*/
object Users extends BaseAnalytics with TrendAnalysis with SegmentAnalysis {
val userColName: String = "user_id"
val newUserColName: String = "sd_is_new_user"
val revUserColName : String = "sd_is_revenue_user"
val rankByTimeColName : String = "sd_rank_by_time_spent"
val rankBySessionCountColName : String = "sd_rank_by_session_count"
val rankByEventCountColName : String = "sd_rank_by_event_count"
val firstSeentAtColName = "sd_first_seen_at"
val userAggCol = AggregateOnCol(userColName, "Users", "countd")
val allUserAggCol = AggregateOnCol(userColName, "Users", "count")
val newUserAggCol = AggregateOnCol(newUserColName, "New Users", "sum")
val retUserAggCol = AggregateOnCol(newUserColName, "Returning Users", "countIf", 0)
val revUserAggCol = AggregateOnCol(revUserColName, "Revenue Users", "sum")
val convRateAggColExpr = revUserAggCol.colExprWOAlias.divide(allUserAggCol.colExprWOAlias).multiply(100)
val convRateAggCol = AggregateOnColExpr("sd_conv_rate", "ConversionRate", convRateAggColExpr)
def defaultAggrCols: Seq[AggregateOnCol] = Seq(userAggCol)
// engagement metrics
val userSessionCountColName = "sd_session_count"
val userTimeSpentColName = "sd_time_spent"
val userEventCountColName = "sd_event_count"
val userSessionCountAggCol = AggregateOnCol(userSessionCountColName, "Avg Count of Sessions", "avg")
//val userTimeSpentAggCol = AggregateOnCol(userTimeSpentColName, "Avg Time Spent", "timeInSecs")
val userTimeSpentAggCol = AggregateOnCol(userTimeSpentColName, "Avg Time Spent", "avg")
val userEventCountAggCol = AggregateOnCol(userEventCountColName, "Avg Count of Events", "avg")
def defaultEngagementAggrCols: Seq[AggregateOnCol] = Seq(userSessionCountAggCol,
userEventCountAggCol, userTimeSpentAggCol)
// behavioral metrics
val expr = "select concat(sd_year, '-', sd_month, '-', sd_day) as date, sum(sd_num_revenue_session) as rev_users, sum(sd_num_cart_session) as cart_users, sum(sd_num_video_session) as video_users from sd_user_metricsII"
val revSessionsColName = "sd_num_revenue_session"
val cartSessionsColName = "sd_num_cart_session"
val videoSessionsColName = "sd_num_video_session"
val revSessionsAggCol = AggregateOnCol(revSessionsColName, "Revenue Sessions", "sum")
val cartSessionsAggCol = AggregateOnCol(cartSessionsColName, "Cart Sessions", "sum")
val videoSessionsAggCol = AggregateOnCol(videoSessionsColName, "Video Sessions", "sum")
def defaultBehaviorAggrCols: Seq[AggregateOnCol] = Seq(revSessionsAggCol,
cartSessionsAggCol, videoSessionsAggCol)
/*
* load cube for user metrics analysis
*/
def loadCube(sqlContext : SQLContext, lookbackDays: Int = 60) : Unit = {
beforeAll(sqlContext)
val df = loadCubeInMemory(sqlContext, "sd_user_metrics")
//df.get.registerTempTable("sd_user_metrics")
val inLookbackPeriodExpr = getPeriodExpr(dateExpr,currentDate, lookbackDays)
/*
This step should go into ETL Stage II - tagging users as new or returning
Tag users as new users if they have never been seen before
Get user ids and when first seen at
*/
val lastNDaysUsers = sqlContext.sql(date"select user_id, min($dateInMillisExpr) as sd_first_seen_at from sd_user_metrics where $inLookbackPeriodExpr group by user_id")
lastNDaysUsers.registerTempTable("users_last_n_days")
    /* Tag the user as returning (sd_is_new_user = 0) when first seen is earlier than the
       daily (session) date, and as new otherwise; also fill nulls with "other".
    */
val query =
date"""
| select a.*, b.sd_first_seen_at,
| $woyExpr as sd_week,
| CASE WHEN b.sd_first_seen_at < $dateInMillisExpr THEN 0 ELSE 1 END AS sd_is_new_user,
| CASE WHEN a.sd_daily_revenue > 0 THEN 1 ELSE 0 END AS sd_is_revenue_user
| from sd_user_metrics a, users_last_n_days b where a.user_id=b.user_id
""".stripMargin
userMetricsDF = Some(sqlContext.sql(query).na.fill("other").cache())
println(s"\\tCube has ${userMetricsDF.get.count()} records.")
userMetricsDF.get.registerTempTable("sd_user_metricsII")
}
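
  /*
   * A hedged usage sketch (added for illustration, not from the original source).
   * The sqlContext value and the "sd_device_type" segment column are assumptions;
   * loadCube, trend and segment are the entry points defined in this object.
   *
   *   Users.loadCube(sqlContext, lookbackDays = 60)
   *   Users.trend(sqlContext, analysisPeriod = 7, includeNewUsersTrend = true)
   *   Users.segment(sqlContext, analysisPeriod = 7, segmentBy = Some("sd_device_type"))
   */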
/*
* trends
*/
def trend(sqlContext : SQLContext,
analysisPeriod : Int = 7,
periodType : String = "daily",
includeUsersTrend : Boolean = true,
includeNewUsersTrend : Boolean = false,
includeReturningUsersTrend : Boolean = false,
includeRevenueUsersTrend : Boolean = false,
movingAverage : Option[Int] = None,
segmentBy: Option[String] = None,
topNSegments : Int = 10,
filterOn: Option[String] = None,
filterOp: String = "eq",
filterVal: Option[Any] = None,
weekendOnly : Boolean = false,
verbose : Boolean = false
) : Unit = {
var aggregateOnCols = defaultAggrCols
if(includeNewUsersTrend) aggregateOnCols = aggregateOnCols :+ newUserAggCol
if(includeReturningUsersTrend) aggregateOnCols = aggregateOnCols :+ retUserAggCol
if(includeRevenueUsersTrend) aggregateOnCols = aggregateOnCols :+ revUserAggCol
if(!includeUsersTrend) aggregateOnCols = aggregateOnCols.filterNot(_.name == userColName)
val (resultDF,outputColNames) = trendG(userMetricsDF.get, sqlContext, aggregateOnCols, analysisPeriod, periodType, movingAverage,
segmentBy, topNSegments, filterOn, filterOp, filterVal, weekendOnly, verbose)
printTrendOutput(resultDF, outputColNames)
}
/*
* trend any
*/
def trendAny(sqlContext : SQLContext,
analysisPeriod : Int = 7,
aggregateOnCol: AggregateOnCol = userAggCol,
periodType : String = "daily",
movingAverage : Option[Int] = None,
segmentBy: Option[String] = None,
topNSegments : Int = 10,
filterOn: Option[String] = None,
filterOp: String = "eq",
filterVal: Option[Any] = None,
weekendOnly : Boolean = false,
verbose : Boolean = false
) : Unit = {
var aggregateOnCols = Seq(userAggCol, aggregateOnCol)
val (resultDF,outputColNames) = trendG(userMetricsDF.get, sqlContext, aggregateOnCols, analysisPeriod, periodType, movingAverage,
segmentBy, topNSegments, filterOn, filterOp, filterVal, weekendOnly, verbose)
printTrendOutput(resultDF, outputColNames)
}
/*
* segment values over a period
*/
def segment(sqlContext: SQLContext,
analysisPeriod: Int = 7,
colList : Seq[String] = Seq(newUserColName, revUserColName),
includeDefaults : Boolean = true,
segmentBy: Option[String] = None,
topNSegments : Int = 10,
filterOn: Option[String] = None,
filterOp: String = "eq",
filterVal: Option[Any] = None,
sortBy : Seq[String] = Seq(),
verbose: Boolean = false
) : Unit = {
var aggregateOnCols = colList.map(colName => AggregateOnCol(colName, colName, "sum"))
if(includeDefaults) aggregateOnCols = defaultAggrCols ++ aggregateOnCols
val (resultDF,outputColNames) = segmentG(userMetricsDF.get, sqlContext,aggregateOnCols, analysisPeriod,
segmentBy, topNSegments, filterOn, filterOp, filterVal, sortBy, verbose)
printSegmentOutput(resultDF, outputColNames)
}
/*
* segment any values over a period
*/
def segmentAny(sqlContext: SQLContext,
analysisPeriod: Int = 7,
aggregateOnCol: AggregateOnCol = revUserAggCol,
includeDefaults : Boolean = true,
segmentBy: Option[String] = None,
topNSegments : Int = 10,
filterOn: Option[String] = None,
filterOp: String = "eq",
filterVal: Option[Any] = None,
sortBy : Seq[String] = Seq(),
verbose: Boolean = false
) : Unit = {
val aggregateOnCols = Seq(aggregateOnCol)
val (resultDF,outputColNames) = segmentG(userMetricsDF.get, sqlContext,aggregateOnCols, analysisPeriod,
segmentBy, topNSegments, filterOn, filterOp, filterVal, sortBy, verbose)
printSegmentOutput(resultDF, outputColNames)
}
def getRankExpr(rankBy : String, rankOP : String, rank : Any) : Column = {
val rankCol = rankBy.trim.toLowerCase match {
case "time spent" => rankByTimeColName
case "session count" => rankBySessionCountColName
case "event count" => rankByEventCountColName
case _ => rankByTimeColName
}
val rankExpr = rankOP.toLowerCase match {
case "eq" => new Column(rankCol).eqNullSafe(rank)
case "geq" => new Column(rankCol).geq(rank)
case "leq" => new Column(rankCol).leq(rank)
case "lt" => new Column(rankCol).lt(rank)
case "gt" => new Column(rankCol).gt(rank)
case "like" => new Column(rankCol).like(rank.toString)
case _ => new Column(rankCol).eqNullSafe(rank)
}
rankExpr
}
/*
* Engagement metrics - average session count, time spent and events fired
* ranked using ntiles on session count, time spent and events fired
*/
def engagement(
sqlContext : SQLContext,
analysisPeriod : Int = 7,
forRevenueUsers : Boolean = false,
periodType : String = "daily",
sessionCountOnly : Boolean = false,
eventCountOnly : Boolean = false,
timeSpentOnly : Boolean = false,
movingAverage : Option[Int] = None,
segmentBy: Option[String] = None,
topNSegments : Int = 10,
filterOn: Option[String] = None,
filterOp: String = "eq",
filterVal: Option[Any] = None,
rank : Int = 1,
rankOp: String = "eq",
rankBy : Option[String] = Some("Time Spent"),
weekendOnly : Boolean = false,
verbose : Boolean = false
) : Unit = {
// Apply rank filter
val startingDF = rankBy match {
case None => userMetricsDF.get
case rankDim => filterDF(userMetricsDF.get, getRankExpr(rankBy.get, rankOp, rank))
}
// Apply paid users filter
val startingDFII = if (forRevenueUsers)
filterDF(userMetricsDF.get, getFilterExpr(revUserColName, "eq", 1))
else
startingDF
var aggregateOnCols = defaultEngagementAggrCols
if(sessionCountOnly) aggregateOnCols = aggregateOnCols.filter(_.name == userSessionCountColName)
if(eventCountOnly) aggregateOnCols = aggregateOnCols.filter(_.name == userEventCountColName)
if(timeSpentOnly) aggregateOnCols = aggregateOnCols.filter(_.name == userTimeSpentColName)
val (resultDF,outputColNames) = trendG(startingDFII, sqlContext, aggregateOnCols, analysisPeriod, periodType, movingAverage,
segmentBy, topNSegments, filterOn, filterOp, filterVal, weekendOnly, verbose)
printTrendOutput(resultDF, outputColNames)
}
/*
* behavior analysis
* what did users do - video sessions, add-to-cart sessions, revenue sessions
* exclusive behavior
*/
def behavior(sqlContext : SQLContext,
analysisPeriod : Int = 7,
forRevenueUsers : Boolean = false,
periodType : String = "daily",
revenueSessionsOnly : Boolean = false,
videoSessionsOnly : Boolean = false,
cartSessionsOnly : Boolean = false,
movingAverage : Option[Int] = None,
segmentBy: Option[String] = None,
topNSegments : Int = 10,
filterOn: Option[String] = None,
filterOp: String = "eq",
filterVal: Option[Any] = None,
rank : Int = 1,
rankOp: String = "eq",
rankBy : Option[String] = Some("Time Spent"),
weekendOnly : Boolean = false,
verbose : Boolean = false) : Unit = {
// Now apply rank filter
val startingDF = rankBy match {
case None => userMetricsDF.get
case rankDim => filterDF(userMetricsDF.get, getRankExpr(rankBy.get, rankOp, rank))
}
// Apply paid users filter
val startingDFII = if (forRevenueUsers)
filterDF(startingDF, getFilterExpr(revUserColName, "eq", 1))
else
startingDF
var aggregateOnCols = defaultBehaviorAggrCols
if(revenueSessionsOnly) aggregateOnCols = aggregateOnCols.filter(_.name == revSessionsColName)
if(videoSessionsOnly) aggregateOnCols = aggregateOnCols.filter(_.name == videoSessionsColName)
if(cartSessionsOnly) aggregateOnCols = aggregateOnCols.filter(_.name == cartSessionsColName)
val (resultDF,outputColNames) = trendG(startingDFII, sqlContext, aggregateOnCols, analysisPeriod, periodType, movingAverage,
segmentBy, topNSegments, filterOn, filterOp, filterVal, weekendOnly, verbose)
printTrendOutput(resultDF, outputColNames)
}
/*
* inclusive behavior analysis - users did collection of events
*/
def behaviorInclusive(sqlContext : SQLContext,
analysisPeriod : Int = 7,
forRevenueUsers : Boolean = false,
colList : Seq[String] = Seq(revSessionsColName, cartSessionsColName, videoSessionsColName),
periodType : String = "daily",
movingAverage : Option[Int] = None,
segmentBy: Option[String] = None,
topNSegments : Int = 10,
filterOn: Option[String] = None,
filterOp: String = "eq",
filterVal: Option[Any] = None,
rank : Int = 1,
rankOp: String = "eq",
rankBy : Option[String] = Some("Time Spent"),
weekendOnly : Boolean = false,
verbose : Boolean = false) : Unit = {
val filterCond = colList match { // all three, two or one
case Seq(a, b, c) => new Column(a).eqNullSafe(1).and(new Column(b).eqNullSafe(1)).and(new Column(c).eqNullSafe(1))
case Seq(a, b) => new Column(a).eqNullSafe(1).and(new Column(b).eqNullSafe(1))
case Seq(a) => new Column(a).eqNullSafe(1)
case _ => new Column(revSessionsColName).eqNullSafe(1)
}
val startingDF = sessionMetricsDF.get.filter(filterCond)
// Apply paid users filter
val startingDFII = if (forRevenueUsers)
filterDF(userMetricsDF.get, getFilterExpr(revUserColName, "eq", 1))
else
startingDF
val aggregateOnCols = Seq(userAggCol)
/* defaultBehaviorAggrCols.map{ case col : AggregateOnCol =>
if(colList.contains(col.name)) col else null}.toList */
val (resultDF,outputColNames) = trendG(startingDFII, sqlContext, aggregateOnCols, analysisPeriod, periodType, movingAverage,
segmentBy, topNSegments, filterOn, filterOp, filterVal, weekendOnly, verbose)
printTrendOutput(resultDF, outputColNames)
}
/*
* Cohort Conversion
*/
def cohort(
sqlContext : SQLContext,
startDaysAgo : Int = 14,
daysToConversionLimit : Int = 7,
filterOn: Option[String] = None,
filterOp: String = "eq",
filterVal: Option[Any] = None,
aggFilterOn : Option[String] = None,
aggFilterOp: String = "eq",
aggFilterVal: Option[Any] = None,
weekendOnly : Boolean = false,
verbose : Boolean = false
) : Unit = {
val aggregateOnCols = Seq(allUserAggCol, revUserAggCol, convRateAggCol)
// First seen at expressions
val firstSeenAtExpr = getDateExprFromMillis(firstSeentAtColName)
val firstSeenAtDayExpr = firstSeenAtExpr dayOfYear
val cohortStartDateExpr = getPriorDateExp(startDaysAgo)
// where clause - filter out
val isAfter = firstSeenAtExpr >= cohortStartDateExpr
val period = firstSeenAtExpr to firstSeenAtExpr + Period.days(daysToConversionLimit)
val inPeriod = period containsE dateExpr
val daysToConversionExpr = Subtract(doyExpr, firstSeenAtDayExpr)
val filterExpr = new Column(And(isAfter, inPeriod))
val filteredDF = if(filterOn.isDefined && filterVal.isDefined)
filterDF(userMetricsDF.get, filterExpr, getFilterExpr(filterOn.get, filterOp, filterVal.get))
else
filterDF(userMetricsDF.get, filterExpr)
// Group by clause
val groupByCols = dateColNames.map(new Column(_)) :+ new Column(daysToConversionExpr).alias("days_to_conversion")
val groupedDF = filteredDF.groupBy(groupByCols: _*)
// aggregate
val aggrMap = aggregateOnCols.map(ac => (ac.colExpr))
val aggrDF = aggDF(groupedDF, aggrMap)
// apply aggregate filter if any
val aggrDFII = aggFilterOn match {
case None => aggrDF
case rankDim => filterDF(aggrDF, getFilterExpr(aggFilterOn.get, aggFilterOp, aggFilterVal.getOrElse(10)))
}
val orderColNames = dateColNames :+ "days_to_conversion"
// order filter
val resultDF = orderDF(aggrDFII, orderColNames)
val sqlQuery =
date"""
|select sd_year, sd_month, sd_day, ($doyExpr - $firstSeenAtDayExpr) as days_to_conversion,
|sum(sd_is_revenue_user) as revenue_users, count(user_id) as all_users
|from sd_user_metricsII where $isAfter and $inPeriod
|group by sd_year, sd_month, sd_day, ($doyExpr - $firstSeenAtDayExpr)
|having revenue_users > 0 order by sd_year, sd_month, sd_day, days_to_conversion
""".stripMargin
if(verbose)
println(sqlQuery)
val outputColNames = Seq("Period", "DaysToConversion") ++ aggregateOnCols.map(ac => ac.alias)
printTrendOutput(resultDF, outputColNames)
}
} | cubefyre/audience-behavior-apis | analytics/src/main/scala/org/sparklinedata/analytics/UserAnalytics.scala | Scala | apache-2.0 | 18,929 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import java.util.HashMap
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.metric.SQLMetrics
/**
* :: DeveloperApi ::
* Groups input data by `groupingExpressions` and computes the `aggregateExpressions` for each
* group.
*
* @param partial if true then aggregation is done partially on local data without shuffling to
* ensure all values where `groupingExpressions` are equal are present.
* @param groupingExpressions expressions that are evaluated to determine grouping.
* @param aggregateExpressions expressions that are computed for each group.
* @param child the input data source.
*/
@DeveloperApi
case class Aggregate(
partial: Boolean,
groupingExpressions: Seq[Expression],
aggregateExpressions: Seq[NamedExpression],
child: SparkPlan)
extends UnaryNode {
override private[sql] lazy val metrics = Map(
"numInputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of input rows"),
"numOutputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of output rows"))
override def requiredChildDistribution: List[Distribution] = {
if (partial) {
UnspecifiedDistribution :: Nil
} else {
if (groupingExpressions == Nil) {
AllTuples :: Nil
} else {
ClusteredDistribution(groupingExpressions) :: Nil
}
}
}
override def output: Seq[Attribute] = aggregateExpressions.map(_.toAttribute)
/**
* An aggregate that needs to be computed for each row in a group.
*
* @param unbound Unbound version of this aggregate, used for result substitution.
* @param aggregate A bound copy of this aggregate used to create a new aggregation buffer.
* @param resultAttribute An attribute used to refer to the result of this aggregate in the final
* output.
*/
case class ComputedAggregate(
unbound: AggregateExpression1,
aggregate: AggregateExpression1,
resultAttribute: AttributeReference)
/** A list of aggregates that need to be computed for each group. */
private[this] val computedAggregates = aggregateExpressions.flatMap { agg =>
agg.collect {
case a: AggregateExpression1 =>
ComputedAggregate(
a,
BindReferences.bindReference(a, child.output),
AttributeReference(s"aggResult:$a", a.dataType, a.nullable)())
}
}.toArray
/** The schema of the result of all aggregate evaluations */
private[this] val computedSchema = computedAggregates.map(_.resultAttribute)
/** Creates a new aggregate buffer for a group. */
private[this] def newAggregateBuffer(): Array[AggregateFunction1] = {
val buffer = new Array[AggregateFunction1](computedAggregates.length)
var i = 0
while (i < computedAggregates.length) {
buffer(i) = computedAggregates(i).aggregate.newInstance()
i += 1
}
buffer
}
/** Named attributes used to substitute grouping attributes into the final result. */
private[this] val namedGroups = groupingExpressions.map {
case ne: NamedExpression => ne -> ne.toAttribute
case e => e -> Alias(e, s"groupingExpr:$e")().toAttribute
}
/**
* A map of substitutions that are used to insert the aggregate expressions and grouping
* expression into the final result expression.
*/
private[this] val resultMap =
(computedAggregates.map { agg => agg.unbound -> agg.resultAttribute } ++ namedGroups).toMap
/**
* Substituted version of aggregateExpressions expressions which are used to compute final
* output rows given a group and the result of all aggregate computations.
*/
private[this] val resultExpressions = aggregateExpressions.map { agg =>
agg.transform {
case e: Expression if resultMap.contains(e) => resultMap(e)
}
}
protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
val numInputRows = longMetric("numInputRows")
val numOutputRows = longMetric("numOutputRows")
if (groupingExpressions.isEmpty) {
child.execute().mapPartitions { iter =>
val buffer = newAggregateBuffer()
var currentRow: InternalRow = null
while (iter.hasNext) {
currentRow = iter.next()
numInputRows += 1
var i = 0
while (i < buffer.length) {
buffer(i).update(currentRow)
i += 1
}
}
val resultProjection = new InterpretedProjection(resultExpressions, computedSchema)
val aggregateResults = new GenericMutableRow(computedAggregates.length)
var i = 0
while (i < buffer.length) {
aggregateResults(i) = buffer(i).eval(EmptyRow)
i += 1
}
numOutputRows += 1
Iterator(resultProjection(aggregateResults))
}
} else {
child.execute().mapPartitions { iter =>
val hashTable = new HashMap[InternalRow, Array[AggregateFunction1]]
val groupingProjection = new InterpretedMutableProjection(groupingExpressions, child.output)
var currentRow: InternalRow = null
while (iter.hasNext) {
currentRow = iter.next()
numInputRows += 1
val currentGroup = groupingProjection(currentRow)
var currentBuffer = hashTable.get(currentGroup)
if (currentBuffer == null) {
currentBuffer = newAggregateBuffer()
hashTable.put(currentGroup.copy(), currentBuffer)
}
var i = 0
while (i < currentBuffer.length) {
currentBuffer(i).update(currentRow)
i += 1
}
}
new Iterator[InternalRow] {
private[this] val hashTableIter = hashTable.entrySet().iterator()
private[this] val aggregateResults = new GenericMutableRow(computedAggregates.length)
private[this] val resultProjection =
new InterpretedMutableProjection(
resultExpressions, computedSchema ++ namedGroups.map(_._2))
private[this] val joinedRow = new JoinedRow
override final def hasNext: Boolean = hashTableIter.hasNext
override final def next(): InternalRow = {
val currentEntry = hashTableIter.next()
val currentGroup = currentEntry.getKey
val currentBuffer = currentEntry.getValue
numOutputRows += 1
var i = 0
while (i < currentBuffer.length) {
// Evaluating an aggregate buffer returns the result. No row is required since we
// already added all rows in the group using update.
aggregateResults(i) = currentBuffer(i).eval(EmptyRow)
i += 1
}
resultProjection(joinedRow(aggregateResults, currentGroup))
}
}
}
}
}
}
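
// A hedged worked example (added for illustration, not from the original source)
// of the `partial` flag described in the scaladoc above. Grouping rows
// (a, 1), (a, 2), (b, 3) by the first column with SUM over the second:
//   - partial = true  (map side, per partition, no shuffle):
//       partition 1 [(a, 1), (a, 2)] => (a, 3);  partition 2 [(b, 3)] => (b, 3)
//   - partial = false (after shuffling on the grouping key):
//       the partial results are merged into the final rows (a, 3) and (b, 3)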
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/main/scala/org/apache/spark/sql/execution/Aggregate.scala | Scala | apache-2.0 | 7,866 |
package processes.monadTransformers.scalaz
import processes.PatchAssignmentSpec
class PlainSpec extends PatchAssignmentSpec(
"Scalaz plain monad transformers",
s => new Plain(s)
) | EECOLOR/scala-clean-code-patterns | src/test/scala/processes/monadTransformers/scalaz/PlainSpec.scala | Scala | mit | 189 |
package com.getjenny.starchat.services
import akka.actor.{Actor, ActorRef, Props}
import com.getjenny.starchat.SCActorSystem
import com.getjenny.starchat.entities.io.RefreshPolicy
import com.getjenny.starchat.services.esclient.crud.EsCrudBase
import com.getjenny.starchat.utils.Index
import org.elasticsearch.index.query.QueryBuilders
import scalaz.Scalaz._
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.util.Try
object CronDeleteInstanceService extends CronService {
case class DeleteInstanceResponse(indexName: String, instance: String, documentsDeleted: Long)
class DeleteInstanceActor(probeActor: Option[ActorRef] = None) extends Actor {
override def receive: Receive = {
case `tickMessage` =>
val deleteResponse = instanceRegistryService.getAllMarkedToDelete
.flatMap { case (registryEntryId, _) =>
val esLanguageSpecificIndexName = Index.esLanguageFromIndexName(registryEntryId, "")
systemIndexManagementService.indices
.filter(_.startsWith(esLanguageSpecificIndexName))
.map(registryEntryId -> _)
}.flatMap { case (id, indexName) => delete(id, indexName).map(response => id -> response) }
val instancesToDeleteFromRegistry = deleteResponse.groupBy(_._1)
.mapValues(_.map(_._2.documentsDeleted).sum)
.filter(_._2 === 0)
instancesToDeleteFromRegistry.foreach { case (id, _) =>
instanceRegistryService.markAsDeleted(List(id))
}
probeActor.foreach(ref => ref ! instancesToDeleteFromRegistry)
case m => log.error("Unexpected message in DeleteInstanceActor :{}", m)
}
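    // Deletes all documents of the given instance from the index; returns None when the delete fails.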
private[this] def delete(registryEntryId: String, indexName: String): Option[DeleteInstanceResponse] = {
val res = Try {
val instance = Index.instanceName(registryEntryId)
val crud = new EsCrudBase(clusterNodesService.elasticClient, indexName)
val delete = crud.delete(QueryBuilders.matchQuery("instance", instance), RefreshPolicy.`wait_for`)
log.info("Deleting data from instance: {} index: {} - doc deleted: {}", instance,
indexName, delete.getDeleted)
DeleteInstanceResponse(indexName, instance, delete.getDeleted)
}.toEither
.left.map { e =>
log.error(e, "Error during delete registry entry {} for in index {}", registryEntryId, indexName)
}
res.toOption
}
}
def scheduleAction(): Unit = {
if (instanceRegistryService.elasticClient.instanceRegistryDeleteFrequency > 0) {
val reloadDecisionTableActorRef =
SCActorSystem.system.actorOf(Props(new DeleteInstanceActor))
SCActorSystem.system.scheduler.scheduleWithFixedDelay(
0 seconds,
instanceRegistryService.elasticClient.instanceRegistryDeleteFrequency seconds,
reloadDecisionTableActorRef,
tickMessage)
}
}
object DeleteInstanceActor {
def props: Props = Props(new DeleteInstanceActor())
def props(actorProbe: ActorRef): Props = Props(new DeleteInstanceActor(Some(actorProbe)))
}
}
| GetJenny/starchat | src/main/scala/com/getjenny/starchat/services/CronDeleteInstanceService.scala | Scala | gpl-2.0 | 3,098 |
package org.imdex.tractor.internal
/**
* Created by a.tsukanov on 29.07.2016.
*/
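// Allocation-free wrapper (value class) around an actor's index.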
private[tractor] final class ActorIndex(private val index: Int) extends AnyVal
private[tractor] object ActorIndex {
def apply(index: Int): ActorIndex = new ActorIndex(index)
}
| Im-dex/trActor | tractor-actor/src/main/scala/org/imdex/tractor/internal/ActorIndex.scala | Scala | mit | 267 |
/*
* Sonar Scoverage Plugin
* Copyright (C) 2013 Rado Buransky
* [email protected]
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02
*/
package com.buransky.plugins.scoverage.xml.data
object XmlReportFile1 {
val scoverage104Data =
"""<?xml version="1.0" encoding="UTF-8"?>
|<scoverage
|statement-count="2" statements-invoked="1" statement-rate="50.00" branch-rate="100.00" version="1.0" timestamp="1430644814502">
| <packages>
| <package name="com.rr.test.sonar" statement-count="2" statements-invoked="1" statement-rate="50.00">
| <classes>
| <class
| name="MainClass" filename="/a1b2c3/workspace/sonar-test/src/main/scala/com/rr/test/sonar/MainClass.scala" statement-count="2" statements-invoked="1" statement-rate="50.00" branch-rate="100.00">
| <methods>
| <method
| name="com.rr.test.sonar/MainClass/times" statement-count="1" statements-invoked="0" statement-rate="0.00" branch-rate="0.00">
| <statements>
| <statement
| package="com.rr.test.sonar" class="MainClass" class-type="Class" top-level-class="MainClass" source="/a1b2c3/workspace/sonar-test/src/main/scala/com/rr/test/sonar/MainClass.scala" method="times" start="160" end="161" line="13" branch="false" invocation-count="0">
|</statement>
| <statement
| package="com.rr.test.sonar" class="MainClass" class-type="Class" top-level-class="MainClass" source="/a1b2c3/workspace/sonar-test/src/main/scala/com/rr/test/sonar/MainClass.scala" method="times" start="161" end="162" line="14" branch="true" invocation-count="0">
|</statement>
      |                    </statements>
|</method>
| <method
| name="com.rr.test.sonar/MainClass/plus" statement-count="1" statements-invoked="1" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="com.rr.test.sonar" class="MainClass" class-type="Class" top-level-class="MainClass" source="/a1b2c3/workspace/sonar-test/src/main/scala/com/rr/test/sonar/MainClass.scala" method="plus" start="132" end="133" line="12" branch="true" invocation-count="1">
|</statement>
| </statements>
|</method>
| </methods>
|</class>
| </classes>
| </package>
| </packages>
|</scoverage>
|""".stripMargin
val scoverage095Data =
"""<?xml version="1.0" encoding="UTF-8"?>
|<scoverage statement-rate="24.53" branch-rate="33.33" version="1.0" timestamp="1391478578154">
| <packages>
| <package name="aaa" statement-rate="26.00">
| <classes>
| <class name="MyServiceClientError" filename="/aaa/ErrorCode.scala" statement-rate="0.00" branch-rate="100.00">
| <methods>
| <method name="aaa/MyServiceClientError/<none>" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="MyServiceClientError" method="<none>" start="1425" line="51" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="0">
| MyServiceClientError.this.error("zipcodeinvalid")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="$anon" filename="/aaa/Graph.scala" statement-rate="0.00" branch-rate="100.00">
| <methods>
| <method name="aaa/$anon/apply" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="$anon" method="apply" start="526" line="16" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| 2
|</statement>
| <statement
| package="aaa" class="$anon" method="apply" start="600" line="17" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| 3
|</statement>
| <statement
| package="aaa" class="$anon" method="apply" start="655" line="18" symbol="scala.Some.apply" tree="Apply" branch="false" invocation-count="0">
| scala.Some.apply[String]("One")
|</statement>
| <statement
| package="aaa" class="$anon" method="apply" start="443" line="15" symbol="aaa.MakeRectangleModelFromFile.$anon.<init>" tree="Apply" branch="false" invocation-count="0">
| new $anon()
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="MyServiceLogicError" filename="/aaa/ErrorCode.scala" statement-rate="100.00" branch-rate="100.00">
| <methods>
| <method name="aaa/MyServiceLogicError/<none>" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="MyServiceLogicError" method="<none>" start="1686" line="59" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="1">
| MyServiceLogicError.this.error("logicfailed")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="StructuredErrorCode" filename="/aaa/ErrorCode.scala" statement-rate="64.29" branch-rate="50.00">
| <methods>
| <method name="aaa/StructuredErrorCode/toString" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="321" line="16" symbol="java.lang.Object.toString" tree="Apply" branch="false" invocation-count="4">
| StructuredErrorCode.this.parent.toString()
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="346" line="17" symbol="java.lang.Object.==" tree="Apply" branch="false" invocation-count="4">
| p.==("")
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="355" line="17" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="1">
| ""
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="355" line="17" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="1">
| {
| scoverage.Invoker.invoked(8, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ""
|}
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="363" line="17" symbol="java.lang.String.+" tree="Apply" branch="false" invocation-count="3">
| p.+("-")
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="363" line="17" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="3">
| {
| scoverage.Invoker.invoked(10, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| p.+("-")
|}
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="374" line="17" symbol="aaa.StructuredErrorCode.name" tree="Select" branch="false" invocation-count="4">
| StructuredErrorCode.this.name
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="341" line="17" symbol="java.lang.String.+" tree="Apply" branch="false" invocation-count="4">
| if ({
| scoverage.Invoker.invoked(7, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| p.==("")
|})
| {
| scoverage.Invoker.invoked(9, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| {
| scoverage.Invoker.invoked(8, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ""
| }
| }
|else
| {
| scoverage.Invoker.invoked(11, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| {
| scoverage.Invoker.invoked(10, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| p.+("-")
| }
| }.+({
| scoverage.Invoker.invoked(12, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| StructuredErrorCode.this.name
|})
|</statement>
| </statements>
| </method>
| <method name="aaa/StructuredErrorCode/is" statement-rate="0.00" branch-rate="0.00">
| <statements>
| <statement
| package="aaa" class="StructuredErrorCode" method="is" start="210" line="9" symbol="java.lang.Object.==" tree="Apply" branch="false" invocation-count="0">
| errorCode.==(this)
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="is" start="235" line="10" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| true
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="is" start="235" line="10" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="0">
| {
| scoverage.Invoker.invoked(2, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| true
|}
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="is" start="255" line="12" symbol="aaa.ErrorCode.is" tree="Apply" branch="false" invocation-count="0">
| StructuredErrorCode.this.parent.is(errorCode)
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="is" start="255" line="12" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="0">
| {
| scoverage.Invoker.invoked(4, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| StructuredErrorCode.this.parent.is(errorCode)
|}
|</statement>
| </statements>
| </method>
| <method name="aaa/StructuredErrorCode/error" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="StructuredErrorCode" method="error" start="433" line="20" symbol="aaa.StructuredErrorCode.apply" tree="Apply" branch="false" invocation-count="3">
| StructuredErrorCode.apply(name, this)
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="Demo" filename="/aaa/ErrorCode.scala" statement-rate="0.00" branch-rate="0.00">
| <methods>
| <method name="aaa/Demo/main" statement-rate="0.00" branch-rate="0.00">
| <statements>
| <statement
| package="aaa" class="Demo" method="main" start="1934" line="68" symbol="aaa.ClientError.required" tree="Select" branch="false" invocation-count="0">
| ClientError.required
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="1926" line="68" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println({
| scoverage.Invoker.invoked(25, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ClientError.required
|})
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="1999" line="69" symbol="aaa.ClientError.invalid" tree="Select" branch="false" invocation-count="0">
| ClientError.invalid
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="1991" line="69" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println({
| scoverage.Invoker.invoked(27, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ClientError.invalid
|})
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2055" line="70" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println(MySqlError)
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2125" line="71" symbol="aaa.MySqlError.syntax" tree="Select" branch="false" invocation-count="0">
| MySqlError.syntax
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2117" line="71" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println({
| scoverage.Invoker.invoked(30, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| MySqlError.syntax
|})
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2194" line="72" symbol="aaa.MyServiceLogicError.logicFailed" tree="Select" branch="false" invocation-count="0">
| MyServiceLogicError.logicFailed
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2186" line="72" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println({
| scoverage.Invoker.invoked(32, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| MyServiceLogicError.logicFailed
|})
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2275" line="74" symbol="aaa.ClientError.required" tree="Select" branch="false" invocation-count="0">
| ClientError.required
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2300" line="75" symbol="aaa.Demo.e" tree="Ident" branch="false" invocation-count="0">
| e
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2345" line="76" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println("required")
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2345" line="76" symbol="<nosymbol>" tree="Block" branch="false" invocation-count="0">
| {
| scoverage.Invoker.invoked(36, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| scala.this.Predef.println("required")
|}
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2399" line="77" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println("invalid")
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2399" line="77" symbol="<nosymbol>" tree="Block" branch="false" invocation-count="0">
| {
| scoverage.Invoker.invoked(38, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| scala.this.Predef.println("invalid")
|}
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2431" line="78" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| ()
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2431" line="78" symbol="<nosymbol>" tree="Block" branch="false" invocation-count="0">
| {
| scoverage.Invoker.invoked(40, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ()
|}
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2449" line="81" symbol="aaa.ErrorCode.is" tree="Apply" branch="false" invocation-count="0">
| MyServiceServerError.mongoDbError.is(ServerError)
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2505" line="82" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println("This is a server error")
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2505" line="82" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="0">
| {
| scoverage.Invoker.invoked(43, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| scala.this.Predef.println("This is a server error")
|}
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2445" line="81" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| ()
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2445" line="81" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="0">
| {
| scoverage.Invoker.invoked(45, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ()
|}
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="MySqlError" filename="/aaa/ErrorCode.scala" statement-rate="0.00" branch-rate="100.00">
| <methods>
| <method name="aaa/MySqlError/<none>" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="MySqlError" method="<none>" start="1097" line="42" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="0">
| MySqlError.this.error("syntax")
|</statement>
| <statement
| package="aaa" class="MySqlError" method="<none>" start="1132" line="43" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="0">
| MySqlError.this.error("connection")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="MyServiceServerError" filename="/aaa/ErrorCode.scala" statement-rate="100.00" branch-rate="100.00">
| <methods>
| <method name="aaa/MyServiceServerError/<none>" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="MyServiceServerError" method="<none>" start="1553" line="55" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="1">
| MyServiceServerError.this.error("mongodberror")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="RootError" filename="/aaa/ErrorCode.scala" statement-rate="50.00" branch-rate="100.00">
| <methods>
| <method name="aaa/RootError/<none>" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="RootError" method="<none>" start="715" line="28" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="1">
| ""
|</statement>
| </statements>
| </method>
| <method name="aaa/RootError/is" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="RootError" method="is" start="760" line="29" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| false
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="ServerError" filename="/aaa/ErrorCode.scala" statement-rate="100.00" branch-rate="100.00">
| <methods>
| <method name="aaa/ServerError/<none>" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="ServerError" method="<none>" start="994" line="38" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="1">
| ServerError.this.error("solar")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="ClientError" filename="/aaa/ErrorCode.scala" statement-rate="0.00" branch-rate="100.00">
| <methods>
| <method name="aaa/ClientError/<none>" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="ClientError" method="<none>" start="856" line="33" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="0">
| ClientError.this.error("required")
|</statement>
| <statement
| package="aaa" class="ClientError" method="<none>" start="890" line="34" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="0">
| ClientError.this.error("invalid")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| </classes>
| </package>
| <package name="bbb" statement-rate="0.00">
| <classes>
| <class name="Main" filename="/aaa/Graph.scala" statement-rate="0.00" branch-rate="100.00">
| <methods>
| <method name="bbb/Main/main" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="bbb" class="Main" method="main" start="791" line="30" symbol="aaa.MakeRectangleModelFromFile.apply" tree="Apply" branch="false" invocation-count="0">
| aaa.MakeRectangleModelFromFile.apply(null)
|</statement>
| <statement
| package="bbb" class="Main" method="main" start="875" line="31" symbol="scala.Any.isInstanceOf" tree="TypeApply" branch="false" invocation-count="0">
| x.isInstanceOf[Serializable]
|</statement>
| <statement
| package="bbb" class="Main" method="main" start="867" line="31" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println({
| scoverage.Invoker.invoked(52, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| x.isInstanceOf[Serializable]
|})
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| </classes>
| </package>
| </packages>
|</scoverage>
""".stripMargin
val dataWithoutDeclaration =
"""<scoverage statement-rate="24.53" branch-rate="33.33" version="1.0" timestamp="1391478578154">
| <packages>
| <package name="aaa" statement-rate="26.00">
| <classes>
| <class name="MyServiceClientError" filename="/aaa/ErrorCode.scala" statement-rate="0.00" branch-rate="100.00">
| <methods>
| <method name="aaa/MyServiceClientError/<none>" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="MyServiceClientError" method="<none>" start="1425" line="51" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="0">
| MyServiceClientError.this.error("zipcodeinvalid")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="$anon" filename="/aaa/Graph.scala" statement-rate="0.00" branch-rate="100.00">
| <methods>
| <method name="aaa/$anon/apply" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="$anon" method="apply" start="526" line="16" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| 2
|</statement>
| <statement
| package="aaa" class="$anon" method="apply" start="600" line="17" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| 3
|</statement>
| <statement
| package="aaa" class="$anon" method="apply" start="655" line="18" symbol="scala.Some.apply" tree="Apply" branch="false" invocation-count="0">
| scala.Some.apply[String]("One")
|</statement>
| <statement
| package="aaa" class="$anon" method="apply" start="443" line="15" symbol="aaa.MakeRectangleModelFromFile.$anon.<init>" tree="Apply" branch="false" invocation-count="0">
| new $anon()
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="MyServiceLogicError" filename="/aaa/ErrorCode.scala" statement-rate="100.00" branch-rate="100.00">
| <methods>
| <method name="aaa/MyServiceLogicError/<none>" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="MyServiceLogicError" method="<none>" start="1686" line="59" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="1">
| MyServiceLogicError.this.error("logicfailed")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="StructuredErrorCode" filename="/aaa/ErrorCode.scala" statement-rate="64.29" branch-rate="50.00">
| <methods>
| <method name="aaa/StructuredErrorCode/toString" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="321" line="16" symbol="java.lang.Object.toString" tree="Apply" branch="false" invocation-count="4">
| StructuredErrorCode.this.parent.toString()
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="346" line="17" symbol="java.lang.Object.==" tree="Apply" branch="false" invocation-count="4">
| p.==("")
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="355" line="17" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="1">
| ""
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="355" line="17" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="1">
| {
| scoverage.Invoker.invoked(8, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ""
|}
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="363" line="17" symbol="java.lang.String.+" tree="Apply" branch="false" invocation-count="3">
| p.+("-")
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="363" line="17" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="3">
| {
| scoverage.Invoker.invoked(10, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| p.+("-")
|}
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="374" line="17" symbol="aaa.StructuredErrorCode.name" tree="Select" branch="false" invocation-count="4">
| StructuredErrorCode.this.name
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="toString" start="341" line="17" symbol="java.lang.String.+" tree="Apply" branch="false" invocation-count="4">
| if ({
| scoverage.Invoker.invoked(7, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| p.==("")
|})
| {
| scoverage.Invoker.invoked(9, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| {
| scoverage.Invoker.invoked(8, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ""
| }
| }
|else
| {
| scoverage.Invoker.invoked(11, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| {
| scoverage.Invoker.invoked(10, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| p.+("-")
| }
| }.+({
| scoverage.Invoker.invoked(12, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| StructuredErrorCode.this.name
|})
|</statement>
| </statements>
| </method>
| <method name="aaa/StructuredErrorCode/is" statement-rate="0.00" branch-rate="0.00">
| <statements>
| <statement
| package="aaa" class="StructuredErrorCode" method="is" start="210" line="9" symbol="java.lang.Object.==" tree="Apply" branch="false" invocation-count="0">
| errorCode.==(this)
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="is" start="235" line="10" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| true
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="is" start="235" line="10" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="0">
| {
| scoverage.Invoker.invoked(2, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| true
|}
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="is" start="255" line="12" symbol="aaa.ErrorCode.is" tree="Apply" branch="false" invocation-count="0">
| StructuredErrorCode.this.parent.is(errorCode)
|</statement>
| <statement
| package="aaa" class="StructuredErrorCode" method="is" start="255" line="12" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="0">
| {
| scoverage.Invoker.invoked(4, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| StructuredErrorCode.this.parent.is(errorCode)
|}
|</statement>
| </statements>
| </method>
| <method name="aaa/StructuredErrorCode/error" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="StructuredErrorCode" method="error" start="433" line="20" symbol="aaa.StructuredErrorCode.apply" tree="Apply" branch="false" invocation-count="3">
| StructuredErrorCode.apply(name, this)
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="Demo" filename="/aaa/ErrorCode.scala" statement-rate="0.00" branch-rate="0.00">
| <methods>
| <method name="aaa/Demo/main" statement-rate="0.00" branch-rate="0.00">
| <statements>
| <statement
| package="aaa" class="Demo" method="main" start="1934" line="68" symbol="aaa.ClientError.required" tree="Select" branch="false" invocation-count="0">
| ClientError.required
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="1926" line="68" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println({
| scoverage.Invoker.invoked(25, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ClientError.required
|})
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="1999" line="69" symbol="aaa.ClientError.invalid" tree="Select" branch="false" invocation-count="0">
| ClientError.invalid
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="1991" line="69" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println({
| scoverage.Invoker.invoked(27, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ClientError.invalid
|})
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2055" line="70" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println(MySqlError)
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2125" line="71" symbol="aaa.MySqlError.syntax" tree="Select" branch="false" invocation-count="0">
| MySqlError.syntax
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2117" line="71" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println({
| scoverage.Invoker.invoked(30, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| MySqlError.syntax
|})
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2194" line="72" symbol="aaa.MyServiceLogicError.logicFailed" tree="Select" branch="false" invocation-count="0">
| MyServiceLogicError.logicFailed
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2186" line="72" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println({
| scoverage.Invoker.invoked(32, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| MyServiceLogicError.logicFailed
|})
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2275" line="74" symbol="aaa.ClientError.required" tree="Select" branch="false" invocation-count="0">
| ClientError.required
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2300" line="75" symbol="aaa.Demo.e" tree="Ident" branch="false" invocation-count="0">
| e
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2345" line="76" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println("required")
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2345" line="76" symbol="<nosymbol>" tree="Block" branch="false" invocation-count="0">
| {
| scoverage.Invoker.invoked(36, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| scala.this.Predef.println("required")
|}
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2399" line="77" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println("invalid")
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2399" line="77" symbol="<nosymbol>" tree="Block" branch="false" invocation-count="0">
| {
| scoverage.Invoker.invoked(38, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| scala.this.Predef.println("invalid")
|}
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2431" line="78" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| ()
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2431" line="78" symbol="<nosymbol>" tree="Block" branch="false" invocation-count="0">
| {
| scoverage.Invoker.invoked(40, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ()
|}
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2449" line="81" symbol="aaa.ErrorCode.is" tree="Apply" branch="false" invocation-count="0">
| MyServiceServerError.mongoDbError.is(ServerError)
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2505" line="82" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println("This is a server error")
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2505" line="82" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="0">
| {
| scoverage.Invoker.invoked(43, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| scala.this.Predef.println("This is a server error")
|}
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2445" line="81" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| ()
|</statement>
| <statement
| package="aaa" class="Demo" method="main" start="2445" line="81" symbol="<nosymbol>" tree="Block" branch="true" invocation-count="0">
| {
| scoverage.Invoker.invoked(45, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| ()
|}
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="MySqlError" filename="/aaa/ErrorCode.scala" statement-rate="0.00" branch-rate="100.00">
| <methods>
| <method name="aaa/MySqlError/<none>" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="MySqlError" method="<none>" start="1097" line="42" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="0">
| MySqlError.this.error("syntax")
|</statement>
| <statement
| package="aaa" class="MySqlError" method="<none>" start="1132" line="43" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="0">
| MySqlError.this.error("connection")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="MyServiceServerError" filename="/aaa/ErrorCode.scala" statement-rate="100.00" branch-rate="100.00">
| <methods>
| <method name="aaa/MyServiceServerError/<none>" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="MyServiceServerError" method="<none>" start="1553" line="55" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="1">
| MyServiceServerError.this.error("mongodberror")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="RootError" filename="/aaa/ErrorCode.scala" statement-rate="50.00" branch-rate="100.00">
| <methods>
| <method name="aaa/RootError/<none>" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="RootError" method="<none>" start="715" line="28" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="1">
| ""
|</statement>
| </statements>
| </method>
| <method name="aaa/RootError/is" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="RootError" method="is" start="760" line="29" symbol="<nosymbol>" tree="Literal" branch="false" invocation-count="0">
| false
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="ServerError" filename="/aaa/ErrorCode.scala" statement-rate="100.00" branch-rate="100.00">
| <methods>
| <method name="aaa/ServerError/<none>" statement-rate="100.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="ServerError" method="<none>" start="994" line="38" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="1">
| ServerError.this.error("solar")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| <class name="ClientError" filename="/aaa/ErrorCode.scala" statement-rate="0.00" branch-rate="100.00">
| <methods>
| <method name="aaa/ClientError/<none>" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="aaa" class="ClientError" method="<none>" start="856" line="33" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="0">
| ClientError.this.error("required")
|</statement>
| <statement
| package="aaa" class="ClientError" method="<none>" start="890" line="34" symbol="aaa.StructuredErrorCode.error" tree="Apply" branch="false" invocation-count="0">
| ClientError.this.error("invalid")
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| </classes>
| </package>
| <package name="bbb" statement-rate="0.00">
| <classes>
| <class name="Main" filename="/aaa/Graph.scala" statement-rate="0.00" branch-rate="100.00">
| <methods>
| <method name="bbb/Main/main" statement-rate="0.00" branch-rate="100.00">
| <statements>
| <statement
| package="bbb" class="Main" method="main" start="791" line="30" symbol="aaa.MakeRectangleModelFromFile.apply" tree="Apply" branch="false" invocation-count="0">
| aaa.MakeRectangleModelFromFile.apply(null)
|</statement>
| <statement
| package="bbb" class="Main" method="main" start="875" line="31" symbol="scala.Any.isInstanceOf" tree="TypeApply" branch="false" invocation-count="0">
| x.isInstanceOf[Serializable]
|</statement>
| <statement
| package="bbb" class="Main" method="main" start="867" line="31" symbol="scala.Predef.println" tree="Apply" branch="false" invocation-count="0">
| scala.this.Predef.println({
| scoverage.Invoker.invoked(52, "/a1b2c3/workspace/aaa/target/scala-2.10/scoverage.measurement");
| x.isInstanceOf[Serializable]
|})
|</statement>
| </statements>
| </method>
| </methods>
| </class>
| </classes>
| </package>
| </packages>
|</scoverage>
""".stripMargin
}
| RadoBuransky/sonar-scoverage-plugin | plugin/src/test/scala/com/buransky/plugins/scoverage/xml/data/XmlReportFile1.scala | Scala | lgpl-3.0 | 58,108 |
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.mysql.message.client
import java.nio.charset.Charset
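// Client reply to the server's initial handshake, carrying credentials, charset and optional database.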
case class HandshakeResponseMessage(
username: String,
charset: Charset,
seed: Array[Byte],
authenticationMethod: Option[String] = None,
password: Option[String] = None,
database: Option[String] = None
)
extends ClientMessage(ClientMessage.ClientProtocolVersion) | ilangostl/postgresql-async | mysql-async/src/main/scala/com/github/mauricio/async/db/mysql/message/client/HandshakeResponseMessage.scala | Scala | apache-2.0 | 1,253 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import scala.beans.{BeanInfo, BeanProperty}
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.ml.attribute._
import org.apache.spark.ml.linalg.{SparseVector, Vector, Vectors}
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest, MLTestingUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row}
class VectorIndexerSuite extends MLTest with DefaultReadWriteTest with Logging {
import testImplicits._
import VectorIndexerSuite.FeatureData
// identical, of length 3
@transient var densePoints1: DataFrame = _
@transient var sparsePoints1: DataFrame = _
@transient var densePoints1TestInvalid: DataFrame = _
@transient var sparsePoints1TestInvalid: DataFrame = _
@transient var point1maxes: Array[Double] = _
// identical, of length 2
@transient var densePoints2: DataFrame = _
@transient var sparsePoints2: DataFrame = _
// different lengths
@transient var badPoints: DataFrame = _
override def beforeAll(): Unit = {
super.beforeAll()
val densePoints1Seq = Seq(
Vectors.dense(1.0, 2.0, 0.0),
Vectors.dense(0.0, 1.0, 2.0),
Vectors.dense(0.0, 0.0, -1.0),
Vectors.dense(1.0, 3.0, 2.0))
val densePoints1SeqTestInvalid = densePoints1Seq ++ Seq(
Vectors.dense(10.0, 2.0, 0.0),
Vectors.dense(0.0, 10.0, 2.0),
Vectors.dense(1.0, 3.0, 10.0))
val sparsePoints1Seq = Seq(
Vectors.sparse(3, Array(0, 1), Array(1.0, 2.0)),
Vectors.sparse(3, Array(1, 2), Array(1.0, 2.0)),
Vectors.sparse(3, Array(2), Array(-1.0)),
Vectors.sparse(3, Array(0, 1, 2), Array(1.0, 3.0, 2.0)))
val sparsePoints1SeqTestInvalid = sparsePoints1Seq ++ Seq(
Vectors.sparse(3, Array(0, 1), Array(10.0, 2.0)),
Vectors.sparse(3, Array(1, 2), Array(10.0, 2.0)),
Vectors.sparse(3, Array(0, 1, 2), Array(1.0, 3.0, 10.0)))
point1maxes = Array(1.0, 3.0, 2.0)
val densePoints2Seq = Seq(
Vectors.dense(1.0, 1.0, 0.0, 1.0),
Vectors.dense(0.0, 1.0, 1.0, 1.0),
Vectors.dense(-1.0, 1.0, 2.0, 0.0))
val sparsePoints2Seq = Seq(
Vectors.sparse(4, Array(0, 1, 3), Array(1.0, 1.0, 1.0)),
Vectors.sparse(4, Array(1, 2, 3), Array(1.0, 1.0, 1.0)),
Vectors.sparse(4, Array(0, 1, 2), Array(-1.0, 1.0, 2.0)))
val badPointsSeq = Seq(
Vectors.sparse(2, Array(0, 1), Array(1.0, 1.0)),
Vectors.sparse(3, Array(2), Array(-1.0)))
// Sanity checks for assumptions made in tests
assert(densePoints1Seq.head.size == sparsePoints1Seq.head.size)
assert(densePoints2Seq.head.size == sparsePoints2Seq.head.size)
assert(densePoints1Seq.head.size != densePoints2Seq.head.size)
def checkPair(dvSeq: Seq[Vector], svSeq: Seq[Vector]): Unit = {
assert(dvSeq.zip(svSeq).forall { case (dv, sv) => dv.toArray === sv.toArray },
"typo in unit test")
}
checkPair(densePoints1Seq, sparsePoints1Seq)
checkPair(densePoints2Seq, sparsePoints2Seq)
densePoints1 = densePoints1Seq.map(FeatureData).toDF()
sparsePoints1 = sparsePoints1Seq.map(FeatureData).toDF()
densePoints1TestInvalid = densePoints1SeqTestInvalid.map(FeatureData).toDF()
sparsePoints1TestInvalid = sparsePoints1SeqTestInvalid.map(FeatureData).toDF()
densePoints2 = densePoints2Seq.map(FeatureData).toDF()
sparsePoints2 = sparsePoints2Seq.map(FeatureData).toDF()
badPoints = badPointsSeq.map(FeatureData).toDF()
}
private def getIndexer: VectorIndexer =
new VectorIndexer().setInputCol("features").setOutputCol("indexed")
test("params") {
ParamsSuite.checkParams(new VectorIndexer)
val model = new VectorIndexerModel("indexer", 1, Map.empty)
ParamsSuite.checkParams(model)
}
test("Cannot fit an empty DataFrame") {
val rdd = Array.empty[Vector].map(FeatureData).toSeq.toDF()
val vectorIndexer = getIndexer
intercept[IllegalArgumentException] {
vectorIndexer.fit(rdd)
}
}
test("Throws error when given RDDs with different size vectors") {
val vectorIndexer = getIndexer
val model = vectorIndexer.fit(densePoints1) // vectors of length 3
MLTestingUtils.checkCopyAndUids(vectorIndexer, model)
testTransformer[FeatureData](densePoints1, model, "indexed") { _ => }
testTransformer[FeatureData](sparsePoints1, model, "indexed") { _ => }
// If the data is local Dataset, it throws AssertionError directly.
withClue("Did not throw error when fit, transform were called on " +
"vectors of different lengths") {
testTransformerByInterceptingException[FeatureData](
densePoints2,
model,
"VectorIndexerModel expected vector of length 3 but found length 4",
"indexed")
}
// If the data is distributed Dataset, it throws SparkException
// which is the wrapper of AssertionError.
withClue("Did not throw error when fit, transform were called " +
"on vectors of different lengths") {
testTransformerByInterceptingException[FeatureData](
densePoints2.repartition(2),
model,
"VectorIndexerModel expected vector of length 3 but found length 4",
"indexed")
}
intercept[SparkException] {
vectorIndexer.fit(badPoints)
logInfo("Did not throw error when fitting vectors of different lengths in same RDD.")
}
}
test("Same result with dense and sparse vectors") {
def testDenseSparse(densePoints: DataFrame, sparsePoints: DataFrame): Unit = {
val denseVectorIndexer = getIndexer.setMaxCategories(2)
val sparseVectorIndexer = getIndexer.setMaxCategories(2)
val denseModel = denseVectorIndexer.fit(densePoints)
val sparseModel = sparseVectorIndexer.fit(sparsePoints)
val denseMap = denseModel.categoryMaps
val sparseMap = sparseModel.categoryMaps
assert(denseMap.keys.toSet == sparseMap.keys.toSet,
"Categorical features chosen from dense vs. sparse vectors did not match.")
assert(denseMap == sparseMap,
"Categorical feature value indexes chosen from dense vs. sparse vectors did not match.")
}
testDenseSparse(densePoints1, sparsePoints1)
testDenseSparse(densePoints2, sparsePoints2)
}
test("Builds valid categorical feature value index, transform correctly, check metadata") {
def checkCategoryMaps(
data: DataFrame,
maxCategories: Int,
categoricalFeatures: Set[Int]): Unit = {
val collectedData = data.collect().map(_.getAs[Vector](0))
val errMsg = s"checkCategoryMaps failed for input with maxCategories=$maxCategories," +
s" categoricalFeatures=${categoricalFeatures.mkString(", ")}"
try {
val vectorIndexer = getIndexer.setMaxCategories(maxCategories)
val model = vectorIndexer.fit(data)
val categoryMaps = model.categoryMaps
// Chose correct categorical features
assert(categoryMaps.keys.toSet === categoricalFeatures)
testTransformerByGlobalCheckFunc[FeatureData](data, model, "indexed") { rows =>
val transformed = rows.map { r => Tuple1(r.getAs[Vector](0)) }.toDF("indexed")
val indexedRDD: RDD[Vector] = transformed.rdd.map(_.getAs[Vector](0))
val featureAttrs = AttributeGroup.fromStructField(rows.head.schema("indexed"))
assert(featureAttrs.name === "indexed")
assert(featureAttrs.attributes.get.length === model.numFeatures)
categoricalFeatures.foreach { feature: Int =>
val origValueSet = collectedData.map(_(feature)).toSet
val targetValueIndexSet = Range(0, origValueSet.size).toSet
val catMap = categoryMaps(feature)
assert(catMap.keys.toSet === origValueSet) // Correct categories
assert(catMap.values.toSet === targetValueIndexSet) // Correct category indices
if (origValueSet.contains(0.0)) {
assert(catMap(0.0) === 0) // value 0 gets index 0
}
// Check transformed data
assert(indexedRDD.map(_(feature)).collect().toSet === targetValueIndexSet)
// Check metadata
val featureAttr = featureAttrs(feature)
assert(featureAttr.index.get === feature)
featureAttr match {
case attr: BinaryAttribute =>
assert(attr.values.get === origValueSet.toArray.sorted.map(_.toString))
case attr: NominalAttribute =>
assert(attr.values.get === origValueSet.toArray.sorted.map(_.toString))
assert(attr.isOrdinal.get === false)
case _ =>
throw new RuntimeException(errMsg + s". Categorical feature $feature failed" +
s" metadata check. Found feature attribute: $featureAttr.")
}
}
// Check numerical feature metadata.
Range(0, model.numFeatures).filter(feature => !categoricalFeatures.contains(feature))
.foreach { feature: Int =>
val featureAttr = featureAttrs(feature)
featureAttr match {
case attr: NumericAttribute =>
assert(featureAttr.index.get === feature)
case _ =>
throw new RuntimeException(errMsg + s". Numerical feature $feature failed" +
s" metadata check. Found feature attribute: $featureAttr.")
}
}
}
} catch {
case e: org.scalatest.exceptions.TestFailedException =>
logError(errMsg)
throw e
}
}
checkCategoryMaps(densePoints1, maxCategories = 2, categoricalFeatures = Set(0))
checkCategoryMaps(densePoints1, maxCategories = 3, categoricalFeatures = Set(0, 2))
checkCategoryMaps(densePoints2, maxCategories = 2, categoricalFeatures = Set(1, 3))
}
test("handle invalid") {
for ((points, pointsTestInvalid) <- Seq((densePoints1, densePoints1TestInvalid),
(sparsePoints1, sparsePoints1TestInvalid))) {
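      // "error" must reject unseen category values, "skip" must drop those rows,
      // and "keep" must index them as an extra category.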
val vectorIndexer = getIndexer.setMaxCategories(4).setHandleInvalid("error")
val model = vectorIndexer.fit(points)
testTransformerByInterceptingException[FeatureData](
pointsTestInvalid,
model,
"VectorIndexer encountered invalid value",
"indexed")
val vectorIndexer1 = getIndexer.setMaxCategories(4).setHandleInvalid("skip")
val model1 = vectorIndexer1.fit(points)
val expected = Seq(
Vectors.dense(1.0, 2.0, 0.0),
Vectors.dense(0.0, 1.0, 2.0),
Vectors.dense(0.0, 0.0, 1.0),
Vectors.dense(1.0, 3.0, 2.0))
testTransformerByGlobalCheckFunc[FeatureData](pointsTestInvalid, model1, "indexed") { rows =>
assert(rows.map(_(0)) == expected)
}
testTransformerByGlobalCheckFunc[FeatureData](points, model1, "indexed") { rows =>
assert(rows.map(_(0)) == expected)
}
val vectorIndexer2 = getIndexer.setMaxCategories(4).setHandleInvalid("keep")
val model2 = vectorIndexer2.fit(points)
testTransformerByGlobalCheckFunc[FeatureData](pointsTestInvalid, model2, "indexed") { rows =>
assert(rows.map(_(0)) == expected ++ Array(
Vectors.dense(2.0, 2.0, 0.0),
          Vectors.dense(0.0, 4.0, 2.0),
Vectors.dense(1.0, 3.0, 3.0)))
}
}
}
test("Maintain sparsity for sparse vectors") {
def checkSparsity(data: DataFrame, maxCategories: Int): Unit = {
val points = data.collect().map(_.getAs[Vector](0))
val vectorIndexer = getIndexer.setMaxCategories(maxCategories)
val model = vectorIndexer.fit(data)
testTransformerByGlobalCheckFunc[FeatureData](data, model, "indexed") { rows =>
points.zip(rows.map(_(0))).foreach {
case (orig: SparseVector, indexed: SparseVector) =>
assert(orig.indices.length == indexed.indices.length)
case _ => throw new UnknownError("Unit test has a bug in it.") // should never happen
}
}
}
checkSparsity(sparsePoints1, maxCategories = 2)
checkSparsity(sparsePoints2, maxCategories = 2)
}
test("Preserve metadata") {
// For continuous features, preserve name and stats.
val featureAttributes: Array[Attribute] = point1maxes.zipWithIndex.map { case (maxVal, i) =>
NumericAttribute.defaultAttr.withName(i.toString).withMax(maxVal)
}
val attrGroup = new AttributeGroup("features", featureAttributes)
val densePoints1WithMeta =
densePoints1.select(densePoints1("features").as("features", attrGroup.toMetadata()))
val vectorIndexer = getIndexer.setMaxCategories(2)
val model = vectorIndexer.fit(densePoints1WithMeta)
// Check that ML metadata are preserved.
testTransformerByGlobalCheckFunc[FeatureData](densePoints1WithMeta, model, "indexed") { rows =>
val transAttributes: Array[Attribute] =
AttributeGroup.fromStructField(rows.head.schema("indexed")).attributes.get
featureAttributes.zip(transAttributes).foreach { case (orig, trans) =>
assert(orig.name === trans.name)
(orig, trans) match {
case (orig: NumericAttribute, trans: NumericAttribute) =>
assert(orig.max.nonEmpty && orig.max === trans.max)
case _ =>
// do nothing
// TODO: Once input features marked as categorical are handled correctly, check that here.
}
}
}
}
test("VectorIndexer read/write") {
val t = new VectorIndexer()
.setInputCol("myInputCol")
.setOutputCol("myOutputCol")
.setMaxCategories(30)
testDefaultReadWrite(t)
}
test("VectorIndexerModel read/write") {
val categoryMaps = Map(0 -> Map(0.0 -> 0, 1.0 -> 1), 1 -> Map(0.0 -> 0, 1.0 -> 1,
2.0 -> 2, 3.0 -> 3), 2 -> Map(0.0 -> 0, -1.0 -> 1, 2.0 -> 2))
val instance = new VectorIndexerModel("myVectorIndexerModel", 3, categoryMaps)
val newInstance = testDefaultReadWrite(instance)
assert(newInstance.numFeatures === instance.numFeatures)
assert(newInstance.categoryMaps === instance.categoryMaps)
}
}
private[feature] object VectorIndexerSuite {
@BeanInfo
case class FeatureData(@BeanProperty features: Vector)
}
| bravo-zhang/spark | mllib/src/test/scala/org/apache/spark/ml/feature/VectorIndexerSuite.scala | Scala | apache-2.0 | 14,991 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.datacompaction
import scala.collection.JavaConverters._
import org.apache.spark.sql.common.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.metadata.{AbsoluteTableIdentifier, CarbonTableIdentifier}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.statusmanager.SegmentStatusManager
import org.apache.carbondata.core.util.CarbonProperties
/**
* FT for compaction scenario where major compaction will only compact the segments which are
* present at the time of triggering the compaction.
*/
class MajorCompactionStopsAfterCompaction extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("drop table if exists stopmajor")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "mm/dd/yyyy")
sql(
"CREATE TABLE IF NOT EXISTS stopmajor (country String, ID decimal(7,4), date Timestamp, name " +
"String, " +
"phonetype String, serialname String, salary Int) STORED BY 'org.apache.carbondata" +
".format'"
)
val csvFilePath1 = s"$resourcesPath/compaction/compaction1.csv"
val csvFilePath2 = s"$resourcesPath/compaction/compaction2.csv"
val csvFilePath3 = s"$resourcesPath/compaction/compaction3.csv"
sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE stopmajor OPTIONS" +
"('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
)
sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE stopmajor OPTIONS" +
"('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
)
// compaction will happen here.
sql("alter table stopmajor compact 'major'"
)
Thread.sleep(2000)
sql("LOAD DATA LOCAL INPATH '" + csvFilePath1 + "' INTO TABLE stopmajor OPTIONS" +
"('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
)
sql("LOAD DATA LOCAL INPATH '" + csvFilePath2 + "' INTO TABLE stopmajor OPTIONS" +
"('DELIMITER'= ',', 'QUOTECHAR'= '\"')"
)
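    // Wait (with retries) until the compacted segment "0.1" is visible before the tests run.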
if (checkCompactionCompletedOrNot("0.1")) {
}
}
/**
* Check if the compaction is completed or not.
*
* @param requiredSeg
* @return
*/
def checkCompactionCompletedOrNot(requiredSeg: String): Boolean = {
var status = false
var noOfRetries = 0
while (!status && noOfRetries < 10) {
val identifier = new AbsoluteTableIdentifier(
CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
new CarbonTableIdentifier(
CarbonCommonConstants.DATABASE_DEFAULT_NAME, "stopmajor", noOfRetries + "")
)
val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)
val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
// segments.foreach(seg =>
// System.out.println( "valid segment is =" + seg)
// )
if (!segments.contains(requiredSeg)) {
// wait for 2 seconds for compaction to complete.
        System.out.println("sleeping for 2 seconds.")
Thread.sleep(2000)
noOfRetries += 1
}
else {
status = true
}
}
return status
}
/**
* Test whether major compaction is not included in minor compaction.
*/
test("delete merged folder and check segments") {
// delete merged segments
sql("clean files for table stopmajor")
val identifier = new AbsoluteTableIdentifier(
CarbonProperties.getInstance.getProperty(CarbonCommonConstants.STORE_LOCATION),
new CarbonTableIdentifier(CarbonCommonConstants.DATABASE_DEFAULT_NAME, "stopmajor", "rrr")
)
val segmentStatusManager: SegmentStatusManager = new SegmentStatusManager(identifier)
// merged segment should not be there
val segments = segmentStatusManager.getValidAndInvalidSegments.getValidSegments.asScala.toList
assert(segments.contains("0.1"))
assert(!segments.contains("0.2"))
assert(!segments.contains("0"))
assert(!segments.contains("1"))
assert(segments.contains("2"))
assert(segments.contains("3"))
}
override def afterAll {
sql("drop table if exists stopmajor")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
}
}
| ksimar/incubator-carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/datacompaction/MajorCompactionStopsAfterCompaction.scala | Scala | apache-2.0 | 5,145 |
package org.clulab
import java.io.{File, FileWriter}
import scala.language.reflectiveCalls
package object brat {
type Closeable = { def close(): Unit }
def using[A <: Closeable, B](param: A)(f: A => B): B =
try { f(param) } finally { param.close() }
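  // Illustrative usage (not part of the original file): the loan pattern above closes the
  // resource even if the body throws, e.g.
  //   val firstLine = using(io.Source.fromFile("data.txt")) { src => src.getLines().next() }
  // where "data.txt" is a hypothetical file name.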
def readFile(filename: String): String =
using (io.Source.fromFile(filename)) {
source => source.mkString
}
def readFile(file: File): String =
using (io.Source.fromFile(file)) {
source => source.mkString
}
def writeFile(file: File, text: String) {
using (new FileWriter(file)) {
writer => writer.write(text)
}
}
def writeFile(filename: String, text: String) {
writeFile(new File(filename), text)
}
} | clulab/odin-brat | src/main/scala/org/clulab/brat/package.scala | Scala | apache-2.0 | 722 |
/*
* Copyright ActionML, LLC under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* ActionML licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.actionml
import java.util
import grizzled.slf4j.Logger
import org.apache.predictionio.data.storage._
/*
//import org.json4s.JsonAST.{JField, JString}
import org.json4s._
import org.json4s.native.JsonMethods._
import org.json4s.JValue
//import org.json4s.jackson.JsonMethods._
//import org.json4s.JsonDSL._
*/
import org.json4s.JValue
import org.json4s.JsonAST._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
//import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.GetRequest
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest
import org.elasticsearch.action.admin.indices.exists.indices.IndicesExistsRequest
import org.elasticsearch.action.admin.indices.refresh.RefreshRequest
import org.elasticsearch.common.settings.{ ImmutableSettings, Settings }
import org.joda.time.DateTime
import org.json4s.jackson.JsonMethods._
import org.elasticsearch.spark._
import org.elasticsearch.search.SearchHits
import com.actionml.helpers.{ ItemID, ItemProps }
/** Elasticsearch notes:
* 1) every query clause will affect scores unless it has a constant_score and boost: 0
* 2) the Spark index writer is fast but must assemble all data for the index before the write occurs
 * 3) many operations must be followed by a refresh before the action takes effect--sort of like a transaction commit
* 4) to use like a DB you must specify that the index of fields are `not_analyzed` so they won't be lowercased,
* stemmed, tokenized, etc. Then the values are literal and must match exactly what is in the query (no analyzer)
 *    Finally, the correlator fields should have norms: true to enable norms; this makes the score equal to the sum
* of dot products divided by the length of each vector. This is the definition of "cosine" similarity.
* Todo: norms may not be the best, should experiment to know for sure.
 *  5) the client (the transport client for < ES5 or the REST client for >= ES5) should have a timeout set since
* the default is very long, several seconds.
*/
/** Defines methods to use on Elasticsearch.*/
object EsClient {
@transient lazy val logger: Logger = Logger[this.type]
/* This returns 2 incompatible objects of TransportClient and RestClient
private lazy val client = if (Storage.getConfig("ELASTICSEARCH5").nonEmpty)
new elasticsearch5.StorageClient(Storage.getConfig("ELASTICSEARCH5").get).client
else if (Storage.getConfig("ELASTICSEARCH").nonEmpty)
new elasticsearch.StorageClient(Storage.getConfig("ELASTICSEARCH").get).client
else
throw new IllegalStateException("No Elasticsearch client configuration detected, check your pio-env.sh for" +
"proper configuration settings")
*/
/** Gets the client for the right version of Elasticsearch. The ES timeout for the Transport client is
* set in elasticsearch.yml with transport.tcp.connect_timeout: 200ms or something like that
*/
private lazy val client = if (Storage.getConfig("ELASTICSEARCH").nonEmpty) {
new elasticsearch.StorageClient(Storage.getConfig("ELASTICSEARCH").get).client
} else {
throw new IllegalStateException("No Elasticsearch client configuration detected, check your pio-env.sh for" +
"proper configuration settings")
}
/** Delete all data from an instance but do not commit it. Until the "refresh" is done on the index
* the changes will not be reflected.
*
* @param indexName will delete all types under this index, types are not used by the UR
* @param refresh
* @return true if all is well
*/
def deleteIndex(indexName: String, refresh: Boolean = false): Boolean = {
//val debug = client.connectedNodes()
if (client.admin().indices().exists(new IndicesExistsRequest(indexName)).actionGet().isExists) {
val delete = client.admin().indices().delete(new DeleteIndexRequest(indexName)).actionGet()
if (!delete.isAcknowledged) {
logger.warn(s"Index $indexName wasn't deleted, but may have quietly failed.")
} else {
// now refresh to get it 'committed'
// todo: should do this after the new index is created so no index downtime
if (refresh) refreshIndex(indexName)
}
true
} else {
logger.warn(s"Elasticsearch index: $indexName wasn't deleted because it didn't exist. This may be an error.")
false
}
}
/** Creates a new empty index in Elasticsearch and initializes mappings for fields that will be used
*
* @param indexName elasticsearch name
* @param indexType names the type of index, usually use the item name
* @param fieldNames ES field names
* @param typeMappings indicates which ES fields are to be not_analyzed without norms
* @param refresh should the index be refreshed so the create is committed
* @return true if all is well
*/
def createIndex(
indexName: String,
indexType: String,
fieldNames: List[String],
typeMappings: Map[String, (String, Boolean)] = Map.empty,
refresh: Boolean = false): Boolean = {
if (!client.admin().indices().exists(new IndicesExistsRequest(indexName)).actionGet().isExists) {
var mappings = """
|{
| "properties": {
""".stripMargin.replace("\\n", "")
def mappingsField(`type`: String, `normsEnabled`: Boolean) = {
s"""
| : {
| "type": "${`type`}",
| "index": "not_analyzed",
| "norms" : {
| "enabled" : "${`normsEnabled`}"
| }
| },
""".stripMargin.replace("\\n", "")
}
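      // Illustrative (not from the original source): for an assumed field "category" declared as
      // ("string", true) in typeMappings, the fragment appended below is roughly
      //   category : { "type": "string", "index": "not_analyzed", "norms" : { "enabled" : "true" } },
      // i.e. a literal (non-analyzed) field with norms enabled for cosine-style scoring.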
val mappingsTail = """
| "id": {
| "type": "string",
| "index": "not_analyzed",
| "norms" : {
| "enabled" : false
| }
| }
| }
|}
""".stripMargin.replace("\\n", "")
fieldNames.foreach { fieldName =>
if (typeMappings.contains(fieldName))
mappings += (fieldName + mappingsField(typeMappings(fieldName)._1, typeMappings(fieldName)._2))
else // unspecified fields are treated as not_analyzed strings
mappings += (fieldName + (mappingsField("string", false)))
}
mappings += mappingsTail // "id" string is not_analyzed and does not use norms
logger.info(s"Mappings for the index: $mappings")
val cir = new CreateIndexRequest(indexName).mapping(indexType, mappings)
val create = client.admin().indices().create(cir).actionGet()
if (!create.isAcknowledged) {
logger.warn(s"Index $indexName wasn't created, but may have quietly failed.")
} else {
// now refresh to get it 'committed'
// todo: should do this after the new index is created so no index downtime
if (refresh) refreshIndex(indexName)
}
true
} else {
logger.warn(s"Elasticsearch index: $indexName wasn't created because it already exists. This may be an error.")
false
}
}
/** Commits any pending changes to the index */
def refreshIndex(indexName: String): Unit = {
client.admin().indices().refresh(new RefreshRequest(indexName)).actionGet()
}
/** Create new index and hot-swap the new after it's indexed and ready to take over, then delete the old */
def hotSwap(
alias: String,
typeName: String,
indexRDD: RDD[Map[String, Any]],
fieldNames: List[String],
typeMappings: Map[String, (String, Boolean)] = Map.empty): Unit = {
// get index for alias, change a char, create new one with new id and index it, swap alias and delete old one
val aliasMetadata = client.admin().indices().prepareGetAliases(alias).get().getAliases
val newIndex = alias + "_" + DateTime.now().getMillis.toString
logger.trace(s"Create new index: $newIndex, $typeName, $fieldNames, $typeMappings")
createIndex(newIndex, typeName, fieldNames, typeMappings)
val newIndexURI = "/" + newIndex + "/" + typeName
indexRDD.saveToEs(newIndexURI, Map("es.mapping.id" -> "id"))
//refreshIndex(newIndex) //appears to not be needed
if (!aliasMetadata.isEmpty
&& aliasMetadata.get(alias) != null
&& aliasMetadata.get(alias).get(0) != null) { // was alias so remove the old one
//append the DateTime to the alias to create an index name
val oldIndex = aliasMetadata.get(alias).get(0).getIndexRouting
client.admin().indices().prepareAliases()
.removeAlias(oldIndex, alias)
.addAlias(newIndex, alias)
.execute().actionGet()
deleteIndex(oldIndex) // now can safely delete the old one since it's not used
} else { // todo: could be more than one index with 'alias' so
// no alias so add one
//to clean up any indexes that exist with the alias name
val indices = util.Arrays.asList(client.admin().indices().prepareGetIndex().get().indices()).get(0)
if (indices.contains(alias)) {
//refreshIndex(alias)
deleteIndex(alias) // index named like the new alias so delete it
}
// slight downtime, but only for one case of upgrading the UR engine from v0.1.x to v0.2.0+
client.admin().indices().prepareAliases()
.addAlias(newIndex, alias)
.execute().actionGet()
}
// clean out any old indexes that were the product of a failed train?
val indices = util.Arrays.asList(client.admin().indices().prepareGetIndex().get().indices()).get(0)
indices.map { index =>
if (index.contains(alias) && index != newIndex) deleteIndex(index) //clean out any old orphaned indexes
}
}
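  /* Illustrative flow (the alias name "items" is assumed, not from the original source):
   * indexRDD is written to a fresh index named items_<millis>; the alias "items" is then
   * atomically re-pointed from the previous items_<oldMillis> index to the new one and the
   * old index is deleted, so readers of "items" never see a partially built index. */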
/** Performs a search using the JSON query String
*
   * @param query the JSON query string parsable by Elasticsearch
* @param indexName the index to search
* @return a [PredictedResults] collection
*/
def search(query: String, indexName: String, correlators: Seq[String]): Option[SearchHits] = {
val sr = client.prepareSearch(indexName).setSource(query).get()
if (!sr.isTimedOut) {
Some(sr.getHits)
    } else { // ask for ranked items like popular
val rr = client.prepareSearch(indexName).setSource(rankedResults(query, correlators)).get()
if (!rr.isTimedOut) {
logger.warn("Elasticsearch timed out during a query so returning ranked items, not user, item, or" +
s" itemSet based. Query is now: ${rankedResults(query, correlators)}")
Some(rr.getHits)
} else {
None
}
}
}
/** sorry, a little hacky, remove item, user, and/or itemset from query so all the bizrules
* are unchanged but the query will run fast returning ranked results like popular.
   *  Todo: a better way is to pass in a fallback query
*/
def rankedResults(query: String, correlators: Seq[String]): String = {
var newQuery = query
for (correlator <- correlators) {
// way hacky, should use the removal or replacement functions but can't quite see how to do it
      // Todo: Warning this will have problems if the correlator name can be interpreted as a regex
newQuery = newQuery.replace(correlator, "__null__") // replaces no matter where in the string, seems dangerous
/* removeField example that leaves an invalid query
newQuery = compact(render(parse(newQuery).removeField { // filter one at a time, there must be a better way
case JField(`correlator`, _) => true
case _ => false
}))
*/
}
newQuery
}
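  // Illustrative (hypothetical correlator name): with correlators Seq("purchase"), a clause such as
  //   {"terms": {"purchase": ["item-1", "item-2"]}}
  // becomes {"terms": {"__null__": ["item-1", "item-2"]}}, which matches nothing but leaves the
  // boost/filter (bizrule) structure of the query intact.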
/** Gets the "source" field of an Elasticsearch document
*
* @param indexName index that contains the doc/item
* @param typeName type name used to construct ES REST URI
* @param doc for UR the item id
* @return source [java.util.Map] of field names to any valid field values or null if empty
*/
def getSource(indexName: String, typeName: String, doc: String): util.Map[String, AnyRef] = {
client.prepareGet(indexName, typeName, doc)
.execute()
.actionGet().getSource
}
/*
public Set<String> getIndicesFromAliasName(String aliasName) {
IndicesAdminClient iac = client.admin().indices();
ImmutableOpenMap<String, List<AliasMetaData>> map = iac.getAliases(new GetAliasesRequest(aliasName))
.actionGet().getAliases();
final Set<String> allIndices = new HashSet<>();
map.keysIt().forEachRemaining(allIndices::add);
return allIndices;
}
*/
def getIndexName(alias: String): Option[String] = {
val allIndicesMap = client.admin().indices().getAliases(new GetAliasesRequest(alias)).actionGet().getAliases
if (allIndicesMap.size() == 1) { // must be a 1-1 mapping of alias <-> index
var indexName: String = ""
val itr = allIndicesMap.keysIt()
while (itr.hasNext)
indexName = itr.next()
Some(indexName) // the one index the alias points to
} else {
// delete all the indices that are pointed to by the alias, they can't be used
logger.warn("There is no 1-1 mapping of index to alias so deleting the old indexes that are referenced by the " +
s"alias: $alias. This may have been caused by a crashed or stopped `pio train` operation so try running it again.")
if (!allIndicesMap.isEmpty) {
val i = allIndicesMap.keys().toArray.asInstanceOf[Array[String]]
for (indexName <- i) {
deleteIndex(indexName, refresh = true)
}
}
None // if more than one abort, need to clean up bad aliases
}
}
def getRDD(
alias: String,
typeName: String)(implicit sc: SparkContext): RDD[(ItemID, ItemProps)] = {
getIndexName(alias)
.map(index => sc.esJsonRDD(alias + "/" + typeName) map { case (itemId, json) => itemId -> DataMap(json).fields })
.getOrElse(sc.emptyRDD)
}
}
| Raj-JainHC/universal-recommender | src/main/scala/EsClient.scala | Scala | apache-2.0 | 14,686 |
object nqueens {
def queens(n: Int): Set[List[Int]] = {
def placeQueens(k: Int): Set[List[Int]] =
      if (k == 0) Set(List())
else
for {
queens <- placeQueens(k - 1)
col <- 0 until n
          if isSafe(col, queens)
} yield col :: queens
placeQueens(n)
}
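  // Illustrative (assumed result, small case): queens(4) yields the two solutions
  // List(2, 0, 3, 1) and List(1, 3, 0, 2), read with the most recently placed queen's
  // column first.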
def isSafe(col: Int, queens: List[Int]): Boolean = {
val row = queens.length
val queensWithRow = (row - 1 to 0 by -1) zip queens
    queensWithRow forall {
case (r, c) => col != c && math.abs(col - c) != row - r
}
}
def show(queens: List[Int]) = {
val lines =
for (col <- queens.reverse)
yield Vector.fill(queens.length)("* ").updated(col, "X ").mkString
"\\n" + (lines mkString "\\n")
}
(queens(8) map show) mkString "\\n"
}
| tobal/scala-course | examples/nqueens.scala | Scala | gpl-3.0 | 907 |
package org.broadinstitute.dsde.firecloud.dataaccess
import akka.actor.ActorSystem
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import akka.http.scaladsl.model.Uri.Query
import akka.http.scaladsl.model.{StatusCodes, Uri}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.Materializer
import org.broadinstitute.dsde.firecloud.model.ModelJsonProtocol._
import org.broadinstitute.dsde.firecloud.model._
import org.broadinstitute.dsde.firecloud.utils.RestJsonClient
import org.broadinstitute.dsde.firecloud.webservice.UserApiService
import org.broadinstitute.dsde.firecloud.{FireCloudConfig, FireCloudException, FireCloudExceptionWithErrorReport}
import org.broadinstitute.dsde.rawls.model.ErrorReport
import org.broadinstitute.dsde.workbench.util.health.SubsystemStatus
import spray.json.DefaultJsonProtocol._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
/**
* Created by mbemis on 10/21/16.
*/
class HttpThurloeDAO ( implicit val system: ActorSystem, implicit val executionContext: ExecutionContext, implicit val materializer: Materializer )
extends ThurloeDAO with RestJsonClient with SprayJsonSupport {
override def getAllKVPs(forUserId: String, callerToken: WithAccessToken): Future[Option[ProfileWrapper]] = {
wrapExceptions {
val req = userAuthedRequest(Get(UserApiService.remoteGetAllURL.format(forUserId)), useFireCloudHeader = true, label = Some("HttpThurloeDAO.getAllKVPs"))(callerToken)
req flatMap { response =>
response.status match {
case StatusCodes.OK => Unmarshal(response).to[ProfileWrapper].map(Option(_))
case StatusCodes.NotFound => Future.successful(None)
case _ => throw new FireCloudException("Unable to get user KVPs from profile service")
}
}
}
}
override def getAllUserValuesForKey(key: String): Future[Map[String, String]] = {
val queryUri = Uri(UserApiService.remoteGetQueryURL).withQuery(Query(("key"->key)))
wrapExceptions {
adminAuthedRequest(Get(queryUri), false, true, label = Some("HttpThurloeDAO.getAllUserValuesForKey")).flatMap(x => Unmarshal(x).to[Seq[ThurloeKeyValue]]).map { tkvs =>
val resultOptions = tkvs.map { tkv => (tkv.userId, tkv.keyValuePair.flatMap { kvp => kvp.value }) }
val actualResultsOnly = resultOptions collect { case (Some(firecloudSubjId), Some(thurloeValue)) => (firecloudSubjId, thurloeValue) }
actualResultsOnly.toMap
}
}
}
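  // Illustrative (hypothetical key name): getAllUserValuesForKey("contactEmail") returns a map
  // from each user's subject id to the stored email, for every user that has that key set.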
/**
* Save KVPs for myself - the KVPs will be saved to the same user that authenticates the call.
* @param userInfo contains the userid for which to save KVPs and that user's auth token
* @param keyValues the KVPs to save
* @return success/failure of save
*/
override def saveKeyValues(userInfo: UserInfo, keyValues: Map[String, String]): Future[Try[Unit]] =
saveKeyValues(userInfo.id, userInfo, keyValues)
/**
* Save KVPs for a different user - the KVPs will be saved to the "forUserId" user,
* but the call to Thurloe will be authenticated as the "callerToken" user.
*
* @param forUserId the userid of the user for which to save KVPs
* @param callerToken auth token of the user making the call
* @return success/failure of save
*/
override def saveKeyValues(forUserId: String, callerToken: WithAccessToken, keyValues: Map[String, String]): Future[Try[Unit]] = {
val thurloeKeyValues = ThurloeKeyValues(Option(forUserId), Option(keyValues.map { case (key, value) => FireCloudKeyValue(Option(key), Option(value)) }.toSeq))
wrapExceptions {
userAuthedRequest(Post(UserApiService.remoteSetKeyURL, thurloeKeyValues), compressed = false, useFireCloudHeader = true, label = Some("HttpThurloeDAO.saveKeyValues"))(callerToken) map { response =>
if(response.status.isSuccess) Try(())
else Try(throw new FireCloudException(s"Unable to update user profile"))
}
}
}
override def saveProfile(userInfo: UserInfo, profile: BasicProfile): Future[Unit] = {
val profilePropertyMap = profile.propertyValueMap
saveKeyValues(userInfo, profilePropertyMap).map(_ => ())
}
private def wrapExceptions[T](codeBlock: => Future[T]): Future[T] = {
codeBlock.recover {
case t: Throwable => {
throw new FireCloudExceptionWithErrorReport(ErrorReport.apply(StatusCodes.InternalServerError, t))
}
}
}
override def bulkUserQuery(userIds: List[String], keySelection: List[String]): Future[List[ProfileWrapper]] = {
val userIdParams:List[(String,String)] = userIds.map(("userId", _))
val keyParams:List[(String,String)] = keySelection.map(("key", _))
val allQueryParams = keyParams ++ userIdParams
val queryUri = Uri(UserApiService.remoteGetQueryURL).withQuery(Query(allQueryParams.toMap))
// default uri length for Spray - which Thurloe uses - is 2048 chars
assert(queryUri.toString().length < 2048, s"generated url is too long at ${queryUri.toString().length} chars.")
val req = adminAuthedRequest(Get(queryUri), useFireCloudHeader = true,label = Some("HttpThurloeDAO.bulkUserQuery"))
req flatMap { response =>
response.status match {
case StatusCodes.OK =>
val profileKVPsF:Future[List[ProfileKVP]] = Unmarshal(response).to[List[ProfileKVP]]
val groupedByUserF:Future[Map[String, List[ProfileKVP]]] = profileKVPsF.map(x => x.groupBy(_.userId))
groupedByUserF.map{ groupedByUser =>
groupedByUser.map {
case (userId: String, kvps: List[ProfileKVP]) => ProfileWrapper(userId, kvps.map(_.keyValuePair))
}.toList
}
case _ => throw new FireCloudException(s"Unable to execute bulkUserQuery from profile service: ${response.status} $response")
}
}
}
override def status: Future[SubsystemStatus] = {
val thurloeStatus = unAuthedRequestToObject[ThurloeStatus](Get(Uri(FireCloudConfig.Thurloe.baseUrl).withPath(Uri.Path("/status"))), useFireCloudHeader = true)
thurloeStatus map { thurloeStatus =>
thurloeStatus.status match {
case "up" => SubsystemStatus(ok = true, None)
case "down" => SubsystemStatus(ok = false, thurloeStatus.error.map(List(_)))
}
}
}
}
| broadinstitute/firecloud-orchestration | src/main/scala/org/broadinstitute/dsde/firecloud/dataaccess/HttpThurloeDAO.scala | Scala | bsd-3-clause | 6,255 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs105.boxes
import uk.gov.hmrc.ct.accounts.frs105.retriever.Frs105AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
case class AC66(value: Option[Int]) extends CtBoxIdentifier(name = "Provision for liabilities (current PoA)")
with CtOptionalInteger
with Input
with ValidatableBox[Frs105AccountsBoxRetriever]
with Debit {
override def validate(boxRetriever: Frs105AccountsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateMoney(value, min = 0)
)
}
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs105/boxes/AC66.scala | Scala | apache-2.0 | 1,125 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules
import org.apache.calcite.rel.core.RelFactories
import org.apache.calcite.rel.rules._
import org.apache.calcite.tools.{RuleSet, RuleSets}
import org.apache.flink.table.plan.nodes.logical._
import org.apache.flink.table.plan.rules.common._
import org.apache.flink.table.plan.rules.dataSet._
import org.apache.flink.table.plan.rules.datastream._
import org.apache.flink.table.plan.rules.logical.{ExtendedAggregateExtractProjectRule, _}
object FlinkRuleSets {
/**
* Convert sub-queries before query decorrelation.
*/
val TABLE_SUBQUERY_RULES: RuleSet = RuleSets.ofList(
SubQueryRemoveRule.FILTER,
SubQueryRemoveRule.PROJECT,
SubQueryRemoveRule.JOIN)
/**
* Expand plan by replacing references to tables into a proper plan sub trees. Those rules
* can create new plan nodes.
*/
val EXPAND_PLAN_RULES: RuleSet = RuleSets.ofList(
LogicalCorrelateToTemporalTableJoinRule.INSTANCE,
TableScanRule.INSTANCE)
val POST_EXPAND_CLEAN_UP_RULES: RuleSet = RuleSets.ofList(
EnumerableToLogicalTableScan.INSTANCE)
val LOGICAL_OPT_RULES: RuleSet = RuleSets.ofList(
// push a filter into a join
FilterJoinRule.FILTER_ON_JOIN,
// push filter into the children of a join
FilterJoinRule.JOIN,
// push filter through an aggregation
FilterAggregateTransposeRule.INSTANCE,
// push filter through set operation
FilterSetOpTransposeRule.INSTANCE,
// push project through set operation
ProjectSetOpTransposeRule.INSTANCE,
// aggregation and projection rules
AggregateProjectMergeRule.INSTANCE,
AggregateProjectPullUpConstantsRule.INSTANCE,
// push a projection past a filter or vice versa
ProjectFilterTransposeRule.INSTANCE,
FilterProjectTransposeRule.INSTANCE,
// push a projection to the children of a join
// push all expressions to handle the time indicator correctly
new ProjectJoinTransposeRule(PushProjector.ExprCondition.FALSE, RelFactories.LOGICAL_BUILDER),
// merge projections
ProjectMergeRule.INSTANCE,
// remove identity project
ProjectRemoveRule.INSTANCE,
// reorder sort and projection
SortProjectTransposeRule.INSTANCE,
ProjectSortTransposeRule.INSTANCE,
// join rules
JoinPushExpressionsRule.INSTANCE,
// remove union with only a single child
UnionEliminatorRule.INSTANCE,
// convert non-all union into all-union + distinct
UnionToDistinctRule.INSTANCE,
// remove aggregation if it does not aggregate and input is already distinct
AggregateRemoveRule.INSTANCE,
// push aggregate through join
AggregateJoinTransposeRule.EXTENDED,
// aggregate union rule
AggregateUnionAggregateRule.INSTANCE,
// reduce aggregate functions like AVG, STDDEV_POP etc.
AggregateReduceFunctionsRule.INSTANCE,
WindowAggregateReduceFunctionsRule.INSTANCE,
// remove unnecessary sort rule
SortRemoveRule.INSTANCE,
// prune empty results rules
PruneEmptyRules.AGGREGATE_INSTANCE,
PruneEmptyRules.FILTER_INSTANCE,
PruneEmptyRules.JOIN_LEFT_INSTANCE,
PruneEmptyRules.JOIN_RIGHT_INSTANCE,
PruneEmptyRules.PROJECT_INSTANCE,
PruneEmptyRules.SORT_INSTANCE,
PruneEmptyRules.UNION_INSTANCE,
// calc rules
FilterCalcMergeRule.INSTANCE,
ProjectCalcMergeRule.INSTANCE,
FilterToCalcRule.INSTANCE,
ProjectToCalcRule.INSTANCE,
CalcMergeRule.INSTANCE,
// scan optimization
PushProjectIntoTableSourceScanRule.INSTANCE,
PushFilterIntoTableSourceScanRule.INSTANCE,
// unnest rule
LogicalUnnestRule.INSTANCE,
// translate to flink logical rel nodes
FlinkLogicalAggregate.CONVERTER,
FlinkLogicalWindowAggregate.CONVERTER,
FlinkLogicalOverWindow.CONVERTER,
FlinkLogicalCalc.CONVERTER,
FlinkLogicalCorrelate.CONVERTER,
FlinkLogicalIntersect.CONVERTER,
FlinkLogicalJoin.CONVERTER,
FlinkLogicalTemporalTableJoin.CONVERTER,
FlinkLogicalMinus.CONVERTER,
FlinkLogicalSort.CONVERTER,
FlinkLogicalUnion.CONVERTER,
FlinkLogicalValues.CONVERTER,
FlinkLogicalTableSourceScan.CONVERTER,
FlinkLogicalTableFunctionScan.CONVERTER,
FlinkLogicalMatch.CONVERTER,
FlinkLogicalTableAggregate.CONVERTER,
FlinkLogicalWindowTableAggregate.CONVERTER
)
/**
* RuleSet to do rewrite on FlinkLogicalRel
*/
val LOGICAL_REWRITE_RULES: RuleSet = RuleSets.ofList(
// Rule that splits python ScalarFunctions from join conditions
SplitPythonConditionFromJoinRule.INSTANCE,
// Rule that splits python ScalarFunctions from
// java/scala ScalarFunctions in correlate conditions
SplitPythonConditionFromCorrelateRule.INSTANCE,
CalcMergeRule.INSTANCE,
PythonCalcSplitRule.SPLIT_CONDITION,
PythonCalcSplitRule.SPLIT_PROJECT,
PythonCalcSplitRule.PUSH_CONDITION,
PythonCalcSplitRule.REWRITE_PROJECT
)
/**
* RuleSet to normalize plans for batch / DataSet execution
*/
val DATASET_NORM_RULES: RuleSet = RuleSets.ofList(
ProjectToWindowRule.PROJECT,
// Transform grouping sets
DecomposeGroupingSetRule.INSTANCE,
// Transform window to LogicalWindowAggregate
DataSetLogicalWindowAggregateRule.INSTANCE,
WindowPropertiesRule.INSTANCE,
WindowPropertiesHavingRule.INSTANCE,
// expand distinct aggregate to normal aggregate with groupby
AggregateExpandDistinctAggregatesRule.JOIN,
ExtendedAggregateExtractProjectRule.INSTANCE,
// simplify expressions rules
ReduceExpressionsRule.FILTER_INSTANCE,
ReduceExpressionsRule.PROJECT_INSTANCE,
ReduceExpressionsRule.CALC_INSTANCE,
ReduceExpressionsRule.JOIN_INSTANCE,
// merge a cascade of predicates to IN or NOT_IN
ConvertToNotInOrInRule.IN_INSTANCE,
ConvertToNotInOrInRule.NOT_IN_INSTANCE
)
/**
* RuleSet to optimize plans for batch / DataSet execution
*/
val DATASET_OPT_RULES: RuleSet = RuleSets.ofList(
// translate to Flink DataSet nodes
DataSetWindowAggregateRule.INSTANCE,
DataSetAggregateRule.INSTANCE,
DataSetDistinctRule.INSTANCE,
DataSetCalcRule.INSTANCE,
DataSetPythonCalcRule.INSTANCE,
DataSetJoinRule.INSTANCE,
DataSetSingleRowJoinRule.INSTANCE,
DataSetScanRule.INSTANCE,
DataSetUnionRule.INSTANCE,
DataSetIntersectRule.INSTANCE,
DataSetMinusRule.INSTANCE,
DataSetSortRule.INSTANCE,
DataSetValuesRule.INSTANCE,
DataSetCorrelateRule.INSTANCE,
BatchTableSourceScanRule.INSTANCE
)
/**
* RuleSet to normalize plans for stream / DataStream execution
*/
val DATASTREAM_NORM_RULES: RuleSet = RuleSets.ofList(
// Transform window to LogicalWindowAggregate
DataStreamLogicalWindowAggregateRule.INSTANCE,
WindowPropertiesRule.INSTANCE,
WindowPropertiesHavingRule.INSTANCE,
ExtendedAggregateExtractProjectRule.INSTANCE,
// simplify expressions rules
ReduceExpressionsRule.FILTER_INSTANCE,
ReduceExpressionsRule.PROJECT_INSTANCE,
ReduceExpressionsRule.CALC_INSTANCE,
ProjectToWindowRule.PROJECT,
// merge a cascade of predicates to IN or NOT_IN
ConvertToNotInOrInRule.IN_INSTANCE,
ConvertToNotInOrInRule.NOT_IN_INSTANCE
)
/**
* RuleSet to optimize plans for stream / DataStream execution
*/
val DATASTREAM_OPT_RULES: RuleSet = RuleSets.ofList(
// translate to DataStream nodes
DataStreamSortRule.INSTANCE,
DataStreamGroupAggregateRule.INSTANCE,
DataStreamOverAggregateRule.INSTANCE,
DataStreamGroupWindowAggregateRule.INSTANCE,
DataStreamCalcRule.INSTANCE,
DataStreamScanRule.INSTANCE,
DataStreamUnionRule.INSTANCE,
DataStreamValuesRule.INSTANCE,
DataStreamCorrelateRule.INSTANCE,
DataStreamWindowJoinRule.INSTANCE,
DataStreamJoinRule.INSTANCE,
DataStreamTemporalTableJoinRule.INSTANCE,
StreamTableSourceScanRule.INSTANCE,
DataStreamMatchRule.INSTANCE,
DataStreamTableAggregateRule.INSTANCE,
DataStreamGroupWindowTableAggregateRule.INSTANCE,
DataStreamPythonCalcRule.INSTANCE
)
/**
* RuleSet to decorate plans for stream / DataStream execution
*/
val DATASTREAM_DECO_RULES: RuleSet = RuleSets.ofList(
// retraction rules
DataStreamRetractionRules.DEFAULT_RETRACTION_INSTANCE,
DataStreamRetractionRules.UPDATES_AS_RETRACTION_INSTANCE,
DataStreamRetractionRules.ACCMODE_INSTANCE
)
}
| gyfora/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/rules/FlinkRuleSets.scala | Scala | apache-2.0 | 9,175 |
/**
* Copyright 2012 Mikio Hokari
*
* Licensed under the Apache License, Version 2.0 (the "License");
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
*
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an "AS IS" BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
* */
package com.archerial.queryexp
import com.archerial._
import com.archerial.utils.{Rel,TreeRel}
import com.pokarim.pprinter._
import com.pokarim.pprinter.exts.ToDocImplicits._
trait AbstractQueryExp4Tree {
this:QueryExp => ;
lazy val tableNodeList :List[TableExp] =
QueryExpTools.getTableExps(this).filter(_ != UnitTable)
lazy val table2children:TreeRel[TableExp] =
Rel.gen(tableNodeList)(_.dependentParents).inverse.asTree(rootTable)
lazy val rootTable:TableExp =
tableNodeList.headOption.getOrElse(UnitTable)
lazy val col2table:Rel[ColExp,TableExp] =
Rel.gen[ColExp,TableExp](colList)((x:ColExp) => x.tables)
lazy val col2tableOM:Rel[ColExp,TableExp] =
Rel.gen[ColExp,TableExp](colListOM)((x:ColExp) => x.tables)
lazy val colList :List[ColExp] =
QueryExpTools.colNodeList(this).distinct.toList
lazy val colListOM :List[ColExp] =
QueryExpTools.colNodeListOM(this).distinct.toList
}
trait AbstractQueryExp extends AbstractQueryExp4Tree{
this:QueryExp => ;
def eval(sp:QueryExp => List[TableTree]=SimpleGenTrees.gen)(implicit connection: java.sql.Connection):SeqValue = {
val trees = sp(this)
val colInfo = TreeColInfo(col2table,trees)
val getter = RowsGetter(colInfo,this)(connection)
val vs = eval(
UnitTable.pk,
VList(Seq(UnitValue)),
getter)
vs
}
def constants:Seq[ConstantQueryExp] = Nil
def row2value(row:Row ): Value
def getSQL(map: TableIdMap):String
def toShortString:String = toString
def eval(colExp:ColExp, values:SeqValue, getter:RowsGetter ): SeqValue
def evalCol(colExp:ColExp):ColExp = colExp
def getDependentCol():Stream[ColExp]
}
| pokarim/archerial | src/main/scala/com/archerial/queryexp/AbstractQueryExp.scala | Scala | apache-2.0 | 2,257 |
package sample.cluster.stats
import scala.concurrent.duration._
import scala.concurrent.forkjoin.ThreadLocalRandom
import akka.actor.Actor
import akka.actor.ActorSystem
import akka.actor.Address
import akka.actor.PoisonPill
import akka.actor.Props
import akka.actor.RelativeActorPath
import akka.actor.RootActorPath
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._
import akka.cluster.MemberStatus
import com.typesafe.config.ConfigFactory
object StatsSample {
def main(args: Array[String]): Unit = {
if (args.isEmpty) {
startup(Seq("2551", "2552", "0"))
StatsSampleClient.main(Array.empty)
} else {
startup(args)
}
}
def startup(ports: Seq[String]): Unit = {
ports foreach { port ⇒
// Override the configuration of the port when specified as program argument
val config = ConfigFactory.parseString(s"akka.remote.netty.tcp.port=" + port).withFallback(
ConfigFactory.parseString("akka.cluster.roles = [compute]")).
withFallback(ConfigFactory.load("stats1"))
val system = ActorSystem("ClusterSystem", config)
system.actorOf(Props[StatsWorker], name = "statsWorker")
system.actorOf(Props[StatsService], name = "statsService")
}
}
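  // Illustrative: startup(Seq("2551", "2552", "0")) brings up two nodes on the fixed seed ports
  // 2551 and 2552 plus one node on a random free port (port "0" lets the runtime pick one).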
}
object StatsSampleClient {
def main(args: Array[String]): Unit = {
// note that client is not a compute node, role not defined
val system = ActorSystem("ClusterSystem")
system.actorOf(Props(classOf[StatsSampleClient], "/user/statsService"), "client")
}
}
class StatsSampleClient(servicePath: String) extends Actor {
val cluster = Cluster(context.system)
val servicePathElements = servicePath match {
case RelativeActorPath(elements) ⇒ elements
case _ ⇒ throw new IllegalArgumentException(
"servicePath [%s] is not a valid relative actor path" format servicePath)
}
import context.dispatcher
val tickTask = context.system.scheduler.schedule(2.seconds, 2.seconds, self, "tick")
var nodes = Set.empty[Address]
override def preStart(): Unit = {
cluster.subscribe(self, classOf[MemberEvent], classOf[ReachabilityEvent])
}
override def postStop(): Unit = {
cluster.unsubscribe(self)
tickTask.cancel()
}
def receive = {
case "tick" if nodes.nonEmpty ⇒
// just pick any one
val address = nodes.toIndexedSeq(ThreadLocalRandom.current.nextInt(nodes.size))
val service = context.actorSelection(RootActorPath(address) / servicePathElements)
service ! StatsJob("this is the text that will be analyzed")
case result: StatsResult ⇒
println(result)
case failed: JobFailed ⇒
println(failed)
case state: CurrentClusterState ⇒
nodes = state.members.collect {
case m if m.hasRole("compute") && m.status == MemberStatus.Up ⇒ m.address
}
case MemberUp(m) if m.hasRole("compute") ⇒ nodes += m.address
case other: MemberEvent ⇒ nodes -= other.member.address
case UnreachableMember(m) ⇒ nodes -= m.address
case ReachableMember(m) if m.hasRole("compute") ⇒ nodes += m.address
}
}
| kalmanb/akka-examples | src/main/scala/cluster/StatsSample.scala | Scala | apache-2.0 | 3,131 |
/*
* Copyright 2016-2018 Michal Harish, [email protected]
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.amient.affinity.avro.record
import java.lang
import java.nio.ByteBuffer
import java.util.UUID
import io.amient.affinity.core.util.ByteUtils
import org.apache.avro.Schema
import org.apache.avro.generic.GenericData.EnumSymbol
import org.apache.avro.generic.{GenericData, GenericFixed, IndexedRecord}
import org.apache.avro.util.Utf8
import scala.collection.JavaConverters._
trait AvroExtractors {
def extract(datum: Any, schemas: List[Schema]): AnyRef = {
def typeIsAllowed(t: Schema.Type): Option[Schema] = schemas.find(_.getType == t)
object AvroBoolean {
def unapply(b: Boolean): Option[java.lang.Boolean] = {
typeIsAllowed(Schema.Type.BOOLEAN) map (_ => new java.lang.Boolean(b))
}
}
object AvroByte {
def unapply(b: Byte): Option[java.lang.Integer] = {
typeIsAllowed(Schema.Type.INT) map (_ => new Integer(b.toInt))
}
}
object AvroInt {
def unapply(i: Int): Option[java.lang.Integer] = {
typeIsAllowed(Schema.Type.INT) map (_ => new java.lang.Integer(i))
}
}
object AvroLong {
def unapply(l: Long): Option[java.lang.Long] = {
typeIsAllowed(Schema.Type.LONG) map (_ => new java.lang.Long(l))
}
}
object AvroFloat {
def unapply(f: Float): Option[java.lang.Float] = {
typeIsAllowed(Schema.Type.FLOAT) map (_ => new java.lang.Float(f))
}
}
object AvroDouble {
def unapply(d: Double): Option[java.lang.Double] = {
typeIsAllowed(Schema.Type.DOUBLE) map (_ => new java.lang.Double(d))
}
}
object AvroBytes {
def unapply(b: Array[Byte]): Option[java.nio.ByteBuffer] = {
typeIsAllowed(Schema.Type.BYTES) map (_ => ByteBuffer.wrap(b))
}
}
object AvroString {
def unapply(s: Any): Option[String] = {
typeIsAllowed(Schema.Type.STRING) map { _ =>
s match {
case str: String => str
case utf: Utf8 => utf.toString
}
}
}
}
object AvroArray {
def unapply(i: Iterable[Any]): Option[(java.lang.Iterable[AnyRef])] = {
typeIsAllowed(Schema.Type.ARRAY) map {
schema =>
val javaList = new java.util.LinkedList[AnyRef]
i.foreach(el => javaList.add(extract(el, List(schema.getElementType))))
javaList
}
}
}
object AvroMap {
def unapply(m: Map[_, _]): Option[(java.util.Map[String, AnyRef])] = {
typeIsAllowed(Schema.Type.MAP) map { schema =>
val javaMap = new java.util.LinkedHashMap[String, AnyRef]()
m.foreach {
case (k: String, v) => javaMap.put(k, extract(v, List(schema.getValueType)))
case (k, v) => javaMap.put(k.toString, extract(v, List(schema.getValueType)))
}
javaMap
}
}
}
object AvroUnion {
def unapply(u: Any): Option[AnyRef] = {
schemas.map(_.getType) match {
case Schema.Type.UNION :: Nil =>
val us = schemas(0).getTypes
if (us.size == 2 && (us.get(0).getType == Schema.Type.NULL || us.get(1).getType == Schema.Type.NULL)) {
//mapping option[T] to a (null, T) union
u match {
case null => Some(null.asInstanceOf[AnyRef])
case None => Some(null.asInstanceOf[AnyRef])
case Some(w) => Some(extract(w, us.asScala.toList))
case w => Some(extract(w, us.asScala.toList))
}
} else {
val n = u.getClass.getSimpleName
for (s: Schema <- us.asScala) {
if (s.getName == n) {
return Some(extract(u, List(s)))
}
}
throw new IllegalArgumentException("Union doesn't have any type mapped to: " + u)
}
case _ => None
}
}
}
object AvroEnum {
def unapply(s: Any): Option[EnumSymbol] = {
typeIsAllowed(Schema.Type.ENUM) map (schema => new EnumSymbol(schema, s))
}
}
object AvroFixed {
def unapply(f: Any): Option[GenericFixed] = {
typeIsAllowed(Schema.Type.FIXED) flatMap {
case schema if schema.getProp("logicalType") == "int" || f.isInstanceOf[Int] =>
Some(new GenericData.Fixed(schema, ByteUtils.intValue(f.asInstanceOf[Int])))
case schema if schema.getProp("logicalType") == "long" || f.isInstanceOf[Long] =>
Some(new GenericData.Fixed(schema, ByteUtils.longValue(f.asInstanceOf[Long])))
case schema if schema.getProp("logicalType") == "uuid" || f.isInstanceOf[UUID] =>
Some(new GenericData.Fixed(schema, ByteUtils.uuid(f.asInstanceOf[UUID])))
case schema if schema.getProp("logicalType") == "string" || f.isInstanceOf[String] =>
val result: Array[Byte] = AvroRecord.stringToFixed(f.asInstanceOf[String], schema.getFixedSize)
Some(new GenericData.Fixed(schema, result))
case schema if f.isInstanceOf[Array[Byte]] =>
val result: Array[Byte] = f.asInstanceOf[Array[Byte]]
require(result.length == schema.getFixedSize, s"fixed binary bytes must have exactly ${schema.getFixedSize} length")
Some(new GenericData.Fixed(schema, result))
case _ => None
}
}
}
datum match {
case r: IndexedRecord if typeIsAllowed(Schema.Type.RECORD).isDefined => r
case null if typeIsAllowed(Schema.Type.NULL).isDefined => null
case null => throw new IllegalArgumentException("Illegal null value for schemas: " + schemas.map(_.getType).mkString(","))
case None if typeIsAllowed(Schema.Type.NULL).isDefined => null
case Some(x) => extract(x, schemas)
case AvroEnum(e: EnumSymbol) => e
case AvroBoolean(b: lang.Boolean) => b
case AvroByte(b: Integer) => b
case AvroInt(i: Integer) => i
case AvroLong(l: lang.Long) => l
case AvroFloat(f: lang.Float) => f
case AvroDouble(d: lang.Double) => d
case AvroString(s: String) => s
case AvroBytes(b: java.nio.ByteBuffer) => b
case AvroArray(i: lang.Iterable[AnyRef]) => i
case AvroUnion(u) => u
case AvroFixed(b: GenericData.Fixed) => b
case AvroMap(m) => m
case ref: AnyRef if typeIsAllowed(Schema.Type.RECORD).isDefined => AvroRecord.toAvroGeneric(ref)
case x => throw new IllegalArgumentException(s"Unsupported avro extraction for ${x.getClass} and the following schemas: { ${schemas.mkString(",")} }")
}
}
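  // Illustrative (sketch, not from the original source): with an INT schema,
  //   extract(Some(42), List(Schema.create(Schema.Type.INT)))
  // unwraps the Option and yields java.lang.Integer(42); a null datum is only legal when one of
  // the supplied schemas has type NULL.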
}
| amient/affinity | avro/src/main/scala/io/amient/affinity/avro/record/AvroExtractors.scala | Scala | apache-2.0 | 7,432 |
package at.logic.gapt.proofs.lk
import at.logic.gapt.expr._
import at.logic.gapt.expr.fol._
import at.logic.gapt.expr.fol.FOLMatchingAlgorithm
import at.logic.gapt.proofs.lk.base._
import at.logic.gapt.proofs.occurrences.FormulaOccurrence
import at.logic.gapt.proofs.proofs.BinaryRuleTypeA
import at.logic.gapt.utils.ds.trees.BinaryTree
case object InductionRuleType extends BinaryRuleTypeA
/**
* Binary induction rule:
*
* Γ |- Δ, A[0] A[x], Π |- Λ, A[s(x)]
* -----------------------------------------(ind)
* Γ, Π |- Δ, Λ, A[t]
*
*/
object InductionRule {
private val zero = FOLConst( "0" )
private def s( t: FOLTerm ) = FOLFunction( "s", List( t ) )
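  // Illustrative: with these helpers the numeral 2 is written s( s( zero ) ), i.e. the successor
  // function applied twice to the constant 0.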
/**
* Constructs a proof ending with an induction rule.
*
* @param s1 The left subproof. The succedent of its end sequent has to contain A[0].
   * @param s2 The right subproof. Its end sequent must contain A[x] in the antecedent and A[s(x)] in the succedent.
* @param term1oc The occurrence of A[0] in the succedent of s1.
* @param term2oc The occurrence of A[x] in the antecedent of s2.
* @param term3oc The occurrence of A[s(x)] in the succedent of s2.
* @param term TODO: Find a good description for this
* @return A proof ending with an induction rule. Its main formula will be A[term].
*/
def apply( s1: LKProof, s2: LKProof, term1oc: FormulaOccurrence, term2oc: FormulaOccurrence, term3oc: FormulaOccurrence, term: FOLTerm ) = {
val ( occZero, occX, occSx ) = getTerms( s1, s2, term1oc, term2oc, term3oc )
val ( aZero, aX, aSx ) = ( occZero.formula.asInstanceOf[FOLFormula], occX.formula.asInstanceOf[FOLFormula], occSx.formula.asInstanceOf[FOLFormula] )
// Find a FOLSubstitution for A[x] and A[0], if possible.
val sub1 = FOLMatchingAlgorithm.matchTerms( aX, aZero ) match {
case Some( s ) => s
case None => throw new LKRuleCreationException( "Formula " + aX + " can't be matched to formula " + aZero + "." )
}
// Find a substitution for A[x] and A[Sx], if possible.
val sub2 = FOLMatchingAlgorithm.matchTerms( aX, aSx ) match {
case Some( s ) => s
case None => throw new LKRuleCreationException( "Formula " + aX + " can't be matched to formula " + aSx + "." )
}
val x = ( sub1.folmap ++ sub2.folmap ).collect { case ( v, e ) if v != e => v }.headOption.getOrElse {
throw new LKRuleCreationException( "Cannot determine induction variable." )
}
// Some safety checks
if ( ( sub1.domain.toSet - x ).exists( v => sub1( v ) != v ) )
throw new LKRuleCreationException( "Formula " + aX + " can't be matched to formula " + aZero + " by substituting a single variable." )
    if ( ( sub2.domain.toSet - x ).exists( v => sub2( v ) != v ) )
throw new LKRuleCreationException( "Formula " + aX + " can't be matched to formula " + aSx + " by substituting a single variable." )
val sX = s( x )
if ( sub1( x ) != zero )
throw new LKRuleCreationException( sub1 + " doesn't replace " + x + " by 0." )
if ( sub2( x ) != sX )
throw new LKRuleCreationException( sub2 + " doesn't replace " + x + " by " + sX + "." )
// Test the eigenvariable condition
if ( ( s2.root.antecedent.filterNot( _ == occX ) ++ s2.root.succedent.filterNot( _ == occSx ) ) map ( _.formula.asInstanceOf[FOLFormula] ) flatMap freeVariables.apply contains x )
throw new LKRuleCreationException( "Eigenvariable condition not satisified for sequent " + s2.root + " and variable " + x + "." )
// Construct the main formula
val mainSub = FOLSubstitution( x, term )
val main = mainSub( aX )
// Construct the primary formula occurrence
val prinOcc = occX.factory.createFormulaOccurrence( main, List( occZero, occX, occSx ) )
// Construct the new sequent
val ant = createContext( s1.root.antecedent ++ s2.root.antecedent.filterNot( _ == occX ) )
val suc = createContext( s1.root.succedent.filterNot( _ == occZero ) ++ s2.root.succedent.filterNot( _ == occSx ) )
val newSeq = OccSequent( ant, prinOcc +: suc )
// Construct the new proof
new BinaryTree[OccSequent]( newSeq, s1, s2 ) with BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas with SubstitutionTerm {
def rule = InductionRuleType
def aux = List( occZero ) :: List( occX, occSx ) :: Nil
def prin = List( prinOcc )
def subst = term
override def name = "ind"
}
}
/**
* Convenience constructor that finds appropriate formula occurrences on its own.
*/
def apply( s1: LKProof, s2: LKProof, inductionBase: FOLFormula, inductionHypo: FOLFormula, inductionStep: FOLFormula, term: FOLTerm ): BinaryTree[OccSequent] with BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas = {
val term1oc = s1.root.succedent find ( _.formula == inductionBase ) match {
case None => throw new LKRuleCreationException( "Formula " + inductionBase + " not found in " + s1.root.succedent + "." )
case Some( o ) => o
}
val term2oc = s2.root.antecedent find ( _.formula == inductionHypo ) match {
case None => throw new LKRuleCreationException( "Formula " + inductionHypo + " not found in " + s2.root.antecedent + "." )
case Some( o ) => o
}
val term3oc = s2.root.succedent find ( _.formula == inductionStep ) match {
case None => throw new LKRuleCreationException( "Formula " + inductionStep + " not found in " + s2.root.succedent + "." )
case Some( o ) => o
}
apply( s1, s2, term1oc, term2oc, term3oc, term )
}
def unapply( proof: LKProof ) = {
if ( proof.rule == InductionRuleType ) {
val r = proof.asInstanceOf[BinaryLKProof with AuxiliaryFormulas with PrincipalFormulas with SubstitutionTerm]
val ( ( base :: Nil ) :: ( step1 :: step2 :: Nil ) :: Nil ) = r.aux
val ( p1 :: Nil ) = r.prin
val term = r.subst
Some( ( r.uProof1, r.uProof2, r.root, base, step1, step2, p1, term.asInstanceOf[FOLTerm] ) )
} else None
}
private def getTerms( s1: LKProof, s2: LKProof, occ1: FormulaOccurrence, occ2: FormulaOccurrence, occ3: FormulaOccurrence ): ( FormulaOccurrence, FormulaOccurrence, FormulaOccurrence ) = {
val occZero = s1.root.succedent.find( _ == occ1 ) match {
case None => throw new LKRuleCreationException( "Occurrence " + occ1 + " could not be found in " + s1.root.succedent + "." )
case Some( o ) => o
}
val occX = s2.root.antecedent.find( _ == occ2 ) match {
case None => throw new LKRuleCreationException( "Occurrence " + occ2 + " could not be found in " + s2.root.antecedent + "." )
case Some( o ) => o
}
val occSx = s2.root.succedent.find( _ == occ3 ) match {
case None => throw new LKRuleCreationException( "Occurrence " + occ3 + " could not be found in " + s2.root.succedent + "." )
case Some( o ) => o
}
( occZero, occX, occSx )
}
}
| loewenheim/gapt | src/main/scala/at/logic/gapt/proofs/lk/inductionRules.scala | Scala | gpl-3.0 | 6,928 |
package com.datastax.spark.connector.cql
import java.net.{InetAddress, NetworkInterface}
import java.nio.ByteBuffer
import java.util.{Collection => JCollection, Iterator => JIterator}
import com.datastax.driver.core._
import com.datastax.driver.core.policies.LoadBalancingPolicy
import com.datastax.spark.connector.util.Logging
import scala.collection.JavaConversions._
import scala.util.Random
/** Selects local node first and then nodes in local DC in random order. Never selects nodes from other DCs.
* For writes, if a statement has a routing key set, this LBP is token aware - it prefers the nodes which
* are replicas of the computed token to the other nodes. */
class LocalNodeFirstLoadBalancingPolicy(contactPoints: Set[InetAddress], localDC: Option[String] = None,
shuffleReplicas: Boolean = true) extends LoadBalancingPolicy with Logging {
import LocalNodeFirstLoadBalancingPolicy._
private var nodes = Set.empty[Host]
private var dcToUse = ""
private val random = new Random
private var clusterMetadata: Metadata = _
override def distance(host: Host): HostDistance =
if (host.getDatacenter == dcToUse) {
sameDCHostDistance(host)
} else {
// this insures we keep remote hosts out of our list entirely, even when we get notified of newly joined nodes
HostDistance.IGNORED
}
override def init(cluster: Cluster, hosts: JCollection[Host]) {
nodes = hosts.toSet
// use explicitly set DC if available, otherwise see if all contact points have same DC
// if so, use that DC; if not, throw an error
dcToUse = localDC.getOrElse(determineDataCenter(contactPoints, nodes))
clusterMetadata = cluster.getMetadata
}
private def tokenUnawareQueryPlan(query: String, statement: Statement): JIterator[Host] = {
sortNodesByStatusAndProximity(dcToUse, nodes).iterator
}
private def findReplicas(keyspace: String, partitionKey: ByteBuffer): Set[Host] = {
clusterMetadata.getReplicas(Metadata.quote(keyspace), partitionKey).toSet
.filter(host => host.isUp && distance(host) != HostDistance.IGNORED)
}
private def tokenAwareQueryPlan(keyspace: String, statement: Statement): JIterator[Host] = {
assert(keyspace != null)
assert(statement.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE) != null)
val replicas = findReplicas(keyspace,
statement.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE))
val (localReplica, otherReplicas) = replicas.partition(isLocalHost)
lazy val maybeShuffled = if (shuffleReplicas) random.shuffle(otherReplicas.toIndexedSeq) else otherReplicas
lazy val otherHosts = tokenUnawareQueryPlan(keyspace, statement).toIterator
.filter(host => !replicas.contains(host) && distance(host) != HostDistance.IGNORED)
(localReplica.iterator #:: maybeShuffled.iterator #:: otherHosts #:: Stream.empty).flatten.iterator
}
override def newQueryPlan(loggedKeyspace: String, statement: Statement): JIterator[Host] = {
val keyspace = if (statement.getKeyspace == null) loggedKeyspace else statement.getKeyspace
if (statement.getRoutingKey(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE) == null || keyspace == null)
tokenUnawareQueryPlan(keyspace, statement)
else
tokenAwareQueryPlan(keyspace, statement)
}
override def onAdd(host: Host) {
// The added host might be a "better" version of a host already in the set.
// The nodes added in the init call don't have DC and rack set.
// Therefore we want to really replace the object now, to get full information on DC:
nodes -= host
nodes += host
logInfo(s"Added host ${host.getAddress.getHostAddress} (${host.getDatacenter})")
}
override def onRemove(host: Host) {
nodes -= host
logInfo(s"Removed host ${host.getAddress.getHostAddress} (${host.getDatacenter})")
}
override def close() = { }
override def onUp(host: Host) = { }
override def onDown(host: Host) = { }
private def sameDCHostDistance(host: Host) =
if (isLocalHost(host))
HostDistance.LOCAL
else
HostDistance.REMOTE
private def dcs(hosts: Set[Host]) =
hosts.filter(_.getDatacenter != null).map(_.getDatacenter).toSet
}
object LocalNodeFirstLoadBalancingPolicy {
private val random = new Random
private val localAddresses =
NetworkInterface.getNetworkInterfaces.flatMap(_.getInetAddresses).toSet
/** Returns true if given host is local host */
def isLocalHost(host: Host): Boolean = {
val hostAddress = host.getAddress
hostAddress.isLoopbackAddress || localAddresses.contains(hostAddress)
}
/** Sorts nodes in the following order:
* 1. local node in a given DC
* 2. live nodes in a given DC
* 3. the rest of nodes in a given DC
*
* Nodes within a group are ordered randomly. Nodes from other DCs are not included. */
def sortNodesByStatusAndProximity(dc: String, hostsToSort: Set[Host]): Seq[Host] = {
val grouped = hostsToSort.groupBy {
case host if host.getDatacenter != dc => None
case host if !host.isUp => Some(2)
case host if !isLocalHost(host) => Some(1)
case _ => Some(0)
} - None
grouped.toSeq.sortBy(_._1.get).flatMap {
case (_, hosts) => random.shuffle(hosts).toSeq
}
}
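  // Illustrative example (hypothetical hosts): for dc = "dc1", a set containing the local node,
  // one remote but live dc1 node, one down dc1 node and one dc2 node sorts as
  // [local, live remote, down]; the dc2 host is dropped entirely.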
/** Returns a common data center name of the given contact points.
*
* For each contact point there must be a [[Host]] in `allHosts` collection in order to determine its data center
* name. If contact points belong to more than a single data center, an [[IllegalArgumentException]] is thrown.
*/
def determineDataCenter(contactPoints: Set[InetAddress], allHosts: Set[Host]): String = {
val dcs = allHosts
.filter(host => contactPoints.contains(host.getAddress))
.flatMap(host => Option(host.getDatacenter))
assert(dcs.nonEmpty, "There are no contact points in the given set of hosts")
require(dcs.size == 1, s"Contact points contain multiple data centers: ${dcs.mkString(", ")}")
dcs.head
}
}
| shashwat7/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/cql/LocalNodeFirstLoadBalancingPolicy.scala | Scala | apache-2.0 | 6,107 |
package com.twitter.scrooge
import com.twitter.scrooge.adapt.thrift._
import com.twitter.scrooge.adapt.thrift.TestStructUnion.{First, Second}
import java.nio.ByteBuffer
import org.scalacheck.{Arbitrary, Gen}
import org.scalacheck.Arbitrary.arbitrary
package object adapt {
implicit val binaryArbitrary: Arbitrary[ByteBuffer] = Arbitrary {
for {
bytes <- Arbitrary.arbitrary[Array[Byte]]
offset <- Gen.choose(0, bytes.length)
len <- Gen.choose(0, bytes.length - offset)
} yield {
ByteBuffer.wrap(bytes, offset, len)
}
}
implicit val testStructArbitrary: Arbitrary[TestStruct] = Arbitrary {
for {
boolField <- arbitrary[Boolean]
byteField <- arbitrary[Byte]
shortField <- arbitrary[Short]
intField <- arbitrary[Int]
longField <- arbitrary[Long]
doubleField <- arbitrary[Double]
stringField <- arbitrary[String]
binaryField <- arbitrary[ByteBuffer]
optionalField <- arbitrary[Option[Boolean]]
listField <- arbitrary[Seq[Boolean]]
setField <- arbitrary[Set[Boolean]]
mapField <- arbitrary[Map[Boolean, Boolean]]
annotatedId <- arbitrary[Long]
tpe <- arbitrary[String]
klass <- arbitrary[Option[String]]
optionalField2 <- arbitrary[Option[String]]
optionalFieldWithDefaultValue <- arbitrary[String]
negativeField <- arbitrary[Boolean]
snakeCase <- arbitrary[Boolean]
endOffset <- arbitrary[Boolean]
} yield TestStruct(
boolField,
byteField,
shortField,
intField,
longField,
doubleField,
stringField,
binaryField,
optionalField,
listField,
setField,
mapField,
annotatedId,
tpe,
klass,
optionalField2,
optionalFieldWithDefaultValue,
negativeField,
snakeCase,
endOffset
)
}
implicit val testNestedStructArbitrary: Arbitrary[TestNestedStruct] = Arbitrary {
for {
field <- arbitrary[TestStruct]
tpe <- arbitrary[TestStruct]
klass <- arbitrary[Option[TestStruct]]
optionalField <- arbitrary[Option[TestStruct]]
seqField <- arbitrary[Seq[TestStruct]]
setField <- arbitrary[Set[TestStruct]]
mapField <- arbitrary[Map[TestStruct, TestStruct]]
} yield TestNestedStruct(
field,
tpe,
klass,
optionalField,
seqField,
setField,
mapField
)
}
implicit val testEmptyStructArbitrary: Arbitrary[TestEmptyStruct] = Arbitrary {
Gen.const(TestEmptyStruct())
}
implicit val testDefaultsStructArbitrary: Arbitrary[TestDefaultsStruct] = Arbitrary {
for {
boolField <- arbitrary[Boolean]
shortField <- arbitrary[Short]
intField <- arbitrary[Int]
} yield TestDefaultsStruct(boolField, shortField, intField)
}
implicit val testOptionalFieldNoDefaultArbitrary: Arbitrary[TestOptionalFieldNoDefault] =
Arbitrary {
for {
boolField <- arbitrary[Boolean]
intField <- arbitrary[Option[Int]]
} yield TestOptionalFieldNoDefault(boolField, intField)
}
implicit val testRequiredFieldArbitrary: Arbitrary[TestRequiredField] = Arbitrary {
for {
requiredField <- arbitrary[Boolean]
optionalField <- arbitrary[Option[String]]
} yield TestRequiredField(requiredField, optionalField)
}
implicit val testPassthroughFieldsArbitrary: Arbitrary[TestPassthroughFields] = Arbitrary {
for {
field <- arbitrary[String]
} yield TestPassthroughFields(field)
}
implicit val testRequiredDefaultsStructArbitrary: Arbitrary[TestRequiredDefaultsStruct] =
Arbitrary {
for {
stringField <- arbitrary[String]
listField <- arbitrary[Seq[String]]
} yield TestRequiredDefaultsStruct(stringField, listField)
}
implicit val testStructUnionArbitrary: Arbitrary[TestStructUnion] = Arbitrary {
for {
first <- arbitrary[TestStruct]
second <- arbitrary[TestStruct]
union <- Gen.oneOf(First(first), Second(second))
} yield union
}
}
| twitter/scrooge | scrooge-adaptive/src/test/scala/com/twitter/scrooge/adapt/package.scala | Scala | apache-2.0 | 4,048 |
package com.peterpotts.snake.mapreduce
import com.peterpotts.snake.predicate.Extractor
case class ValueExtractor(value: Any) extends Extractor[Any] {
def apply(argument: Any) = value
override def toString() = value.toString
}
| peterpotts/snake | src/main/scala/com/peterpotts/snake/mapreduce/ValueExtractor.scala | Scala | mit | 233 |
/*
* This software is licensed under the GNU Affero General Public License, quoted below.
*
* This file is a part of PowerAPI.
*
* Copyright (C) 2011-2014 Inria, University of Lille 1.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of
* the License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI.
*
* If not, please consult http://www.gnu.org/licenses/agpl-3.0.html.
*/
package org.powerapi.module.libpfm.cycles
import java.util.concurrent.TimeUnit
import com.typesafe.config.Config
import org.powerapi.core.{Configuration, ConfigValue}
import scala.collection.JavaConversions._
import scala.concurrent.duration.DurationLong
import scala.concurrent.duration.FiniteDuration
/**
* Main configuration.
*
* @author <a href="mailto:[email protected]">Maxime Colmant</a>
*/
trait LibpfmCoreCyclesFormulaConfiguration extends Configuration {
lazy val cyclesThreadName: String = load { _.getString("powerapi.libpfm.formulae.cycles-thread") } match {
case ConfigValue(value) => value
case _ => "CPU_CLK_UNHALTED:THREAD_P"
}
lazy val cyclesRefName: String = load { _.getString("powerapi.libpfm.formulae.cycles-ref") } match {
case ConfigValue(value) => value
case _ => "CPU_CLK_UNHALTED:REF_P"
}
lazy val formulae: Map[Double, List[Double]] = load { conf =>
(for (item: Config <- conf.getConfigList("powerapi.libpfm.formulae.cycles"))
yield (item.getDouble("coefficient"), item.getDoubleList("formula").map(_.toDouble).toList)).toMap
} match {
case ConfigValue(values) => values
case _ => Map()
}
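  // Hedged sketch of the HOCON layout implied by the loaders above; the numeric
  // values are placeholders, only the structure is taken from the code:
  //
  //   powerapi.libpfm.formulae.cycles = [
  //     { coefficient = 12.0, formula = [ 85.0, 1.4e-8 ] }
  //     { coefficient = 22.0, formula = [ 95.0, 1.1e-8 ] }
  //   ]
  //   powerapi.sampling.interval = 1s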
lazy val samplingInterval: FiniteDuration = load { _.getDuration("powerapi.sampling.interval", TimeUnit.NANOSECONDS) } match {
case ConfigValue(value) => value.nanoseconds
case _ => 1l.seconds
}
}
| rouvoy/powerapi | powerapi-core/src/main/scala/org/powerapi/module/libpfm/cycles/LibpfmCoreCyclesFormulaConfiguration.scala | Scala | agpl-3.0 | 2,267 |
package com.sksamuel.elastic4s.alias
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest
trait AliasActionDefinition {
def build: IndicesAliasesRequest.AliasActions
}
| ulric260/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/alias/AliasActionDefinition.scala | Scala | apache-2.0 | 193 |
package com.twitter.finagle.redis.protocol
import com.twitter.finagle.util.BufReader
import com.twitter.io.Buf
import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer
/**
* Thread-safe, stateful, asynchronous Redis decoder.
*/
private[redis] final class StageDecoder(init: Stage) {
private[this] final class Acc(
var n: Long,
val replies: ListBuffer[Reply],
val finish: List[Reply] => Reply)
import Stage._
private[this] var reader = BufReader(Buf.Empty)
private[this] var stack = List.empty[Acc]
private[this] var current = init
/**
   * Returns a [[Reply]] or `null` if there is not enough data in the
* underlying buffer.
*
* @note Passing `Buf.Empty` to this function means "decode from whatever
* is in the underlying buffer so far".
*/
def absorb(buf: Buf): Reply = synchronized {
// Absorb the new buffer.
reader = BufReader(reader.readAll().concat(buf))
// Decode the next reply if possible.
decodeNext(current)
}
// Tries its best to decode the next _full_ reply or returns `null` if
// there is not enough data in the input buffer.
@tailrec
private[this] def decodeNext(stage: Stage): Reply = stage(reader) match {
case NextStep.Incomplete =>
// The decoder is starving so we capture the current state
// and fail-fast with `null`.
current = stage
null
case NextStep.Goto(nextStage) => decodeNext(nextStage)
case NextStep.Emit(reply) =>
stack match {
case Nil =>
// We finish decoding of a single reply so reset the state.
current = init
reply
case acc :: rest if acc.n == 1 =>
stack = rest
acc.replies += reply
decodeNext(Stage.const(NextStep.Emit(acc.finish(acc.replies.toList))))
case acc :: _ =>
acc.n -= 1
acc.replies += reply
decodeNext(init)
}
case NextStep.Accumulate(n, finish) =>
stack = new Acc(n, ListBuffer.empty[Reply], finish) :: stack
decodeNext(init)
}
}
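// Hedged usage sketch (not part of the original file). `Reply.decode` is a
// hypothetical initial Stage; the point is that `absorb` is fed raw network
// chunks and then re-invoked with Buf.Empty to drain replies already buffered:
//
//   val decoder = new StageDecoder(Reply.decode)
//   def onRead(chunk: Buf): Unit = {
//     var reply = decoder.absorb(chunk)
//     while (reply != null) {
//       handle(reply)
//       reply = decoder.absorb(Buf.Empty)
//     }
//   }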
| spockz/finagle | finagle-redis/src/main/scala/com/twitter/finagle/redis/protocol/StageDecoder.scala | Scala | apache-2.0 | 2,066 |
package one.lockstep.multilock.server
import one.lockstep.multilock.protocol.message._
import one.lockstep.util.codec._
import one.lockstep.util.crypto._
import scodec.Codec
import scodec.codecs._
case class LockState(body: LockState.Body, // the main body of the state
                     pendingInboundRequestOpt: Option[AggregateMessage] = None, // the latest request processed but waiting for a response from the outbound server
                     pendingOutboundRequestOpt: Option[AggregateMessage] = None, // the latest request sent to an upstream server and not yet responded to
lastOutboundResponseOpt: Option[Message] = None, // the latest response sent to a downstream server
backloggedInboundRequestOpt: Option[AggregateMessage] = None // the next not yet handled request
)
object LockState {
case class Body(currentTimestamp: Long, // timestamp of the last valid request
lastEnrollmentRequestAt: Long = 0, // timestamp of last valid enrollment request
lastUnlockRequestAt: Long = 0, // timestamp of last valid unlock request
enrolledSealSeriesIdDigest: Digest, // digest of last enrolled seal series id
badUnlockAttempts: Int = 0, // number of successive unlock attempts with incorrect passcode
sealCreatedAt: Long = 0, // timestamp of creation of the latest accepted seal
sealIdsOpt: Option[SealIdentifiers] = None,
sealKeyPair: KeyPair, // 2 key-pairs, enabling routine key rotation
nextSealKeyPair: KeyPair)
case class SealIdentifiers(sealIdDigest: Digest, sealSeriesIdDigest: Digest)
private lazy val privKeyCodec: Codec[PrivateKey] = sized(byteLimit(), bytes).as[PrivateKey]
private lazy val keyPairCodec: Codec[KeyPair] = (privKeyCodec :: Codec[PublicKey]).as[KeyPair]
private lazy val sealIdsCodec: Codec[SealIdentifiers] = (Codec[Digest] :: Codec[Digest]).as[SealIdentifiers]
// todo complete this
// implicit lazy val codec: Codec[LockState] = (int64 :: int64 :: int64 :: Codec[Digest] :: int32 :: int64 ::
// optional(bool(8), sealIdsCodec) :: keyPairCodec :: keyPairCodec).as[LockState]
// implicit lazy val protocol = Protocol[LockState](Label("multilock_state"), Versions.current, since = Versions.v1)
}
| lockstep-one/vault | monolock-server/src/main/scala/one/lockstep/multilock/server/LockState.scala | Scala | agpl-3.0 | 2,411 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openchai.web.pong
//import org.openchai.spark.p2p.UpdaterIF.Weights
//import org.openchai.spark.p2p.Weights
//
//trait Params
//
//trait PongParams extends Params
//
//trait Solver {
// def solve(params: Params, inWeights: Weights): Weights
//}
//
//class PongSolver(val params: PongParams) extends Solver {
// override def solve(params: Params, inWeights: Weights): Weights = {
// inWeights.map { case arr =>
// arr.map { r => r * 1.1 }
// }
// }
//}
| javadba/p2p | src/main/scala/org/openchai/web/pong/PongSolver.scala | Scala | apache-2.0 | 1,280 |
package com.arcusys.learn.liferay.services
import com.arcusys.learn.liferay.LiferayClasses.{LPermissionChecker, LUser}
import com.liferay.portal.kernel.security.auth.PrincipalThreadLocal
import com.liferay.portal.kernel.security.permission.{PermissionCheckerFactoryUtil, PermissionThreadLocal}
import com.liferay.portal.kernel.service.UserLocalServiceUtil
/**
* Created by asemenov on 22.01.15.
*/
object PermissionHelper {
def getPermissionChecker(): LPermissionChecker = {
PermissionThreadLocal.getPermissionChecker
}
def getPermissionChecker(user: LUser): LPermissionChecker = {
PermissionCheckerFactoryUtil.create(user)
}
def preparePermissionChecker(userId: Long): Unit = {
val user = UserLocalServiceUtil.getUserById(userId)
preparePermissionChecker(user)
}
def preparePermissionChecker(user: LUser): Unit = {
val permissionChecker = PermissionCheckerFactoryUtil.create(user)
PermissionThreadLocal.setPermissionChecker(permissionChecker)
PrincipalThreadLocal.setName(user.getUserId)
}
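  // Hedged usage sketch (the surrounding context is an assumption):
  //   PermissionHelper.preparePermissionChecker(userId)
  //   // ...Liferay service calls made on this thread afterwards will see the
  //   // PermissionChecker and principal name installed above...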
}
| arcusys/Valamis | learn-liferay700-services/src/main/scala/com/arcusys/learn/liferay/services/PermissionHelper.scala | Scala | gpl-3.0 | 1,048 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen
import org.apache.flink.table.planner.codegen.CodeGenUtils.boxedTypeTermForType
import org.apache.flink.table.runtime.typeutils.TypeCheckUtils
import org.apache.flink.table.types.logical.LogicalType
/**
* Describes a generated expression.
*
* @param resultTerm term to access the result of the expression
* @param nullTerm boolean term that indicates if expression is null
* @param code code necessary to produce resultTerm and nullTerm
* @param resultType type of the resultTerm
 * @param literalValue None if the expression is not literal. Otherwise it represents the
* original object of the literal.
*/
case class GeneratedExpression(
resultTerm: String,
nullTerm: String,
code: String,
resultType: LogicalType,
literalValue: Option[Any] = None) {
/**
    * Indicates a constant expression which does not reference the input and can thus be used
    * in the member area (e.g. as a constructor parameter of a reusable instance)
*
* @return true if the expression is literal
*/
def literal: Boolean = literalValue.isDefined
/**
* Copy result term to target term if the reference is changed.
* Note: We must ensure that the target can only be copied out, so that its object is definitely
* a brand new reference, not the object being re-used.
* @param target the target term that cannot be assigned a reusable reference.
* @return code.
*/
def copyResultTermToTargetIfChanged(ctx: CodeGeneratorContext, target: String): String = {
if (TypeCheckUtils.isMutable(resultType)) {
val typeTerm = boxedTypeTermForType(resultType)
val serTerm = ctx.addReusableTypeSerializer(resultType)
s"""
|if ($target != $resultTerm) {
| $target = (($typeTerm) $serTerm.copy($resultTerm));
|}
""".stripMargin
} else {
s"$target = $resultTerm;"
}
}
/**
* Deep copy the generated expression.
*
* NOTE: Please use this method when the result will be buffered.
* This method makes sure a new object/data is created when the type is mutable.
*/
def deepCopy(ctx: CodeGeneratorContext): GeneratedExpression = {
// only copy when type is mutable
if (TypeCheckUtils.isMutable(resultType)) {
// if the type need copy, it must be a boxed type
val typeTerm = boxedTypeTermForType(resultType)
val serTerm = ctx.addReusableTypeSerializer(resultType)
val newResultTerm = ctx.addReusableLocalVariable(typeTerm, "field")
val newCode =
s"""
|$code
|$newResultTerm = $resultTerm;
|if (!$nullTerm) {
| $newResultTerm = ($typeTerm) ($serTerm.copy($newResultTerm));
|}
""".stripMargin
GeneratedExpression(newResultTerm, nullTerm, newCode, resultType, literalValue)
} else {
this
}
}
}
object GeneratedExpression {
val ALWAYS_NULL = "true"
val NEVER_NULL = "false"
val NO_CODE = ""
}
| lincoln-lil/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/GeneratedExpression.scala | Scala | apache-2.0 | 3,814 |
package org.ndc.ndc
class TwoShingles {
def fromTokens(tokens: Array[String]): Array[String] = {
    if (tokens.length <= 1) {
      tokens
    } else {
(for (position <- Range(0, tokens.length - 1))
yield tokens(position) + tokens(position + 1)).toArray
}
}
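  // Illustrative expectations (not from the original source):
  //   fromTokens(Array("a", "b", "c")) == Array("ab", "bc")
  //   fromTokens(Array("a")) == Array("a") // inputs of length <= 1 pass through unchanged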
} | NikolajLeischner/near_duplicates | src/main/scala/org/ndc/ndc/TwoShingles.scala | Scala | mit | 306 |
/*
* Copyright 2017 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.featran.transformers
import com.spotify.featran.{FeatureBuilder, FeatureRejection, FlatReader, FlatWriter}
import scala.collection.SortedMap
import scala.collection.mutable.{Map => MMap, Set => MSet}
/** Weighted label. Can also be thought of as a weighted value in a named sparse vector. */
case class WeightedLabel(name: String, value: Double)
/**
* Transform a collection of weighted categorical features to columns of weight sums, with at most N
* values.
*
* Weights of the same labels in a row are summed instead of 1.0 as is the case with the normal
* [[NHotEncoder]].
*
* Missing values are either transformed to zero vectors or encoded as a missing value.
*
* When using aggregated feature summary from a previous session, unseen labels are either
* transformed to zero vectors or encoded as `__unknown__` (if `encodeMissingValue` is true) and
 * [[FeatureRejection.Unseen]] rejections are reported.
*/
object NHotWeightedEncoder extends SettingsBuilder {
/** Create a new [[NHotWeightedEncoder]] instance. */
def apply(
name: String,
encodeMissingValue: Boolean = false
): Transformer[Seq[WeightedLabel], Set[String], SortedMap[String, Int]] =
new NHotWeightedEncoder(name, encodeMissingValue)
/**
* Create a new [[NHotWeightedEncoder]] from a settings object
* @param setting
* Settings object
*/
def fromSettings(
setting: Settings
): Transformer[Seq[WeightedLabel], Set[String], SortedMap[String, Int]] = {
val encodeMissingValue = setting.params("encodeMissingValue").toBoolean
NHotWeightedEncoder(setting.name, encodeMissingValue)
}
}
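// Hedged illustration (not from the original source) of the weight summing: a row
//   Seq(WeightedLabel("a", 0.5), WeightedLabel("a", 0.25), WeightedLabel("b", 1.0))
// contributes 0.75 to the "<name>_a" column and 1.0 to the "<name>_b" column,
// whereas a plain NHotEncoder would emit 1.0 for both.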
private[featran] class NHotWeightedEncoder(name: String, encodeMissingValue: Boolean)
extends BaseHotEncoder[Seq[WeightedLabel]](name, encodeMissingValue) {
import MissingValue.MissingValueToken
def addMissingValue(
fb: FeatureBuilder[_],
unseen: MSet[String],
keys: Seq[String],
unseenWeight: Double
): Unit =
if (keys.isEmpty) {
fb.add(name + '_' + MissingValueToken, 1.0)
} else if (unseen.isEmpty) {
fb.skip()
} else {
fb.add(name + '_' + MissingValueToken, unseenWeight)
}
override def prepare(a: Seq[WeightedLabel]): Set[String] =
Set(a.map(_.name): _*)
override def buildFeatures(
a: Option[Seq[WeightedLabel]],
c: SortedMap[String, Int],
fb: FeatureBuilder[_]
): Unit = a match {
case Some(xs) =>
val weights = MMap.empty[String, Double].withDefaultValue(0.0)
xs.foreach(x => weights(x.name) += x.value)
var unseenWeight = 0.0
val keys = weights.keySet.toList.sorted
var prev = -1
val unseen = MSet[String]()
keys.foreach { key =>
c.get(key) match {
case Some(curr) =>
val gap = curr - prev - 1
if (gap > 0) fb.skip(gap)
fb.add(name + '_' + key, weights(key))
prev = curr
case None =>
unseen += key
unseenWeight += weights(key)
}
}
val gap = c.size - prev - 1
if (gap > 0) fb.skip(gap)
if (encodeMissingValue) {
addMissingValue(fb, unseen, keys, unseenWeight)
}
if (unseen.nonEmpty) {
fb.reject(this, FeatureRejection.Unseen(unseen.toSet))
}
case None => addMissingItem(c, fb)
}
override def flatRead[T: FlatReader]: T => Option[Any] = FlatReader[T].readWeightedLabel(name)
override def flatWriter[T](implicit fw: FlatWriter[T]): Option[Seq[WeightedLabel]] => fw.IF =
fw.writeWeightedLabel(name)
}
| spotify/featran | core/src/main/scala/com/spotify/featran/transformers/NHotWeightedEncoder.scala | Scala | apache-2.0 | 4,155 |
/*
* Copyright (C) 2020 MapRoulette contributors (see CONTRIBUTORS.md).
* Licensed under the Apache License, Version 2.0 (see LICENSE).
*/
package org.maproulette.framework.mixins
import org.maproulette.framework.psql.filter._
import org.maproulette.framework.psql.Query
import org.maproulette.framework.model.Task
import org.maproulette.Config
/**
* LeaderboardMixin provides methods to setup query filters for
* searching/building the leaderboard.
*/
trait LeaderboardMixin {
/**
* Returns the SQL to sum a user's status actions for ranking purposes
**/
def scoreSumSQL(config: Config): String = {
s"""SUM(CASE sa.status
WHEN ${Task.STATUS_FIXED} THEN ${config.taskScoreFixed}
WHEN ${Task.STATUS_FALSE_POSITIVE} THEN ${config.taskScoreFalsePositive}
WHEN ${Task.STATUS_ALREADY_FIXED} THEN ${config.taskScoreAlreadyFixed}
WHEN ${Task.STATUS_TOO_HARD} THEN ${config.taskScoreTooHard}
WHEN ${Task.STATUS_SKIPPED} THEN ${config.taskScoreSkipped}
ELSE 0
END)"""
}
/**
* Returns the SQL to sum review status actions for ranking purposes
**/
def reviewScoreSumSQL(config: Config): String = {
s"""SUM(CASE review_status
WHEN ${Task.REVIEW_STATUS_APPROVED} THEN 1
WHEN ${Task.REVIEW_STATUS_ASSISTED} THEN 1
WHEN ${Task.REVIEW_STATUS_REJECTED} THEN 1
WHEN ${Task.REVIEW_STATUS_DISPUTED} THEN 0
ELSE 0
END)"""
}
/**
* Returns the SQL to sum a user's number of completed tasks
**/
def tasksSumSQL(): String = {
s"""COALESCE(SUM(CASE sa.status
WHEN ${Task.STATUS_FIXED} THEN 1
WHEN ${Task.STATUS_FALSE_POSITIVE} THEN 1
WHEN ${Task.STATUS_ALREADY_FIXED} THEN 1
WHEN ${Task.STATUS_TOO_HARD} THEN 1
WHEN ${Task.STATUS_SKIPPED} THEN 0
ELSE 0
END), 0)"""
}
/**
* Returns the SQL to sum a user's number of tasks they reviewed
* Note: Disputed tasks in the task_review_history do not count
* since there will already be an entry for their original task review.
**/
def reviewSumSQL(): String = {
s"""COALESCE(SUM(CASE review_status
WHEN ${Task.REVIEW_STATUS_APPROVED} THEN 1
WHEN ${Task.REVIEW_STATUS_ASSISTED} THEN 1
WHEN ${Task.REVIEW_STATUS_REJECTED} THEN 1
WHEN ${Task.REVIEW_STATUS_DISPUTED} THEN 0
ELSE 0
END), 0)"""
}
/**
* Returns the SQL to sum a user's average time spent per task
**/
def timeSpentSQL(): String = {
s"""COALESCE(SUM(tasks.completed_time_spent) /
SUM(CASE
WHEN tasks.completed_time_spent > 0 THEN 1
ELSE 0
END), 0)"""
}
/**
* Returns the SQL to sum a user's average time spent per review
**/
def reviewTimeSpentSQL(): String = {
val avgReviewTime =
"CAST(EXTRACT(epoch FROM (task_review_history.reviewed_at - task_review_history.review_started_at)) * 1000 AS INT)"
s"""COALESCE(SUM(${avgReviewTime}) /
SUM(CASE
WHEN (${avgReviewTime}) > 0 THEN 1
ELSE 0
END), 0)"""
}
/**
* Returns the SQL to sum a user's number of tasks by review status
**/
def reviewStatusSumSQL(reviewStatus: Int): String = {
s"""COALESCE(SUM(CASE review_status
WHEN ${reviewStatus} THEN 1
ELSE 0
END), 0)"""
}
/**
* Returns the SQL to sum how many reviews were additional reviews
**/
def additionalReviewsSumSQL(): String = {
s"""COALESCE(SUM(CASE
WHEN original_reviewer IS NOT NULL THEN 1
ELSE 0
END), 0)"""
}
}
| mgcuthbert/maproulette2 | app/org/maproulette/framework/mixins/LeaderboardMixin.scala | Scala | apache-2.0 | 3,754 |
package io.flow.delta.actors.functions
import io.flow.delta.actors.SupervisorResult
import io.flow.delta.lib.config.Defaults
import io.flow.test.utils.FlowPlaySpec
class SetDesiredStateSpec extends FlowPlaySpec with db.Helpers {
"no-op if no tags" in {
val build = upsertBuild()
SetDesiredState.run(build, Defaults.EcsBuildConfig).map(_ must be(SupervisorResult.Checkpoint("Project does not have any tags")))
}
"sets desired state to latest tag" in {
val project = createProject()
val build = upsertBuild(project)
// tag1
createTag(createTagForm(project).copy(name = "0.0.1"))
SetDesiredState.run(build, Defaults.EcsBuildConfig).map(_ must be(SupervisorResult.Change("Desired state changed to: 0.0.1: 2 instances")))
// tag2
createTag(createTagForm(project).copy(name = "0.0.2"))
SetDesiredState.run(build, Defaults.EcsBuildConfig).map(_ must be(SupervisorResult.Change("Desired state changed to: 0.0.2: 2 instances")))
// No-op if no change
SetDesiredState.run(build, Defaults.EcsBuildConfig).map(_ must be(SupervisorResult.Ready("Desired versions remain: 0.0.2")))
}
"once set, desired state does not reset number of instances" in {
    // let ECS manage the number of instances on a go-forward basis.
val project = createProject()
val build = upsertBuild(project)
// tag1
createTag(createTagForm(project).copy(name = "0.0.1"))
SetDesiredState.run(build, Defaults.EcsBuildConfig).map(_ must be(SupervisorResult.Change("Desired state changed to: 0.0.1: 2 instances")))
setLastState(build, "0.0.1", 10)
// No-op if no change
SetDesiredState.run(build, Defaults.EcsBuildConfig).map(_ must be(SupervisorResult.Ready("Desired versions remain: 0.0.1")))
}
}
| flowcommerce/delta | api/test/actors/functions/SetDesiredStateSpec.scala | Scala | mit | 1,754 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rich
import java.util.regex.Pattern
import htsjdk.samtools.{ Cigar, CigarElement, CigarOperator, TextCigarCodec }
import org.bdgenomics.adam.models.{ Attribute, ReferencePosition, ReferenceRegion }
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.util._
import org.bdgenomics.formats.avro.AlignmentRecord
import scala.collection.immutable.NumericRange
object RichAlignmentRecord {
val CIGAR_CODEC: TextCigarCodec = TextCigarCodec.getSingleton
val ILLUMINA_READNAME_REGEX = "[a-zA-Z0-9]+:[0-9]:([0-9]+):([0-9]+):([0-9]+).*".r
val cigarPattern = Pattern.compile("([0-9]+)([MIDNSHPX=])")
/**
* Parses a CIGAR string, and returns the aligned length with respect to the
* reference genome (i.e. skipping clipping, padding, and insertion operators)
*
* @param cigar The CIGAR string whose reference length is to be measured
* @return A non-negative integer, the sum of the MDNX= operators in the CIGAR string.
*/
def referenceLengthFromCigar(cigar: String): Int = {
val m = cigarPattern.matcher(cigar)
var i = 0
var len: Int = 0
while (i < cigar.length) {
if (m.find(i)) {
val op = m.group(2)
if ("MDNX=".indexOf(op) != -1) {
len += m.group(1).toInt
}
} else {
return len
}
i = m.end()
}
len
}
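  // Illustrative expectations (not from the original source):
  //   referenceLengthFromCigar("10M2I3D") == 13 // M and D consume reference bases, I does not
  //   referenceLengthFromCigar("5S20M") == 20   // soft clips are skipped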
def apply(record: AlignmentRecord) = {
new RichAlignmentRecord(record)
}
implicit def recordToRichRecord(record: AlignmentRecord): RichAlignmentRecord = new RichAlignmentRecord(record)
implicit def richRecordToRecord(record: RichAlignmentRecord): AlignmentRecord = record.record
}
class IlluminaOptics(val tile: Long, val x: Long, val y: Long) {}
class RichAlignmentRecord(val record: AlignmentRecord) {
lazy val referenceLength: Int = RichAlignmentRecord.referenceLengthFromCigar(record.getCigar.toString)
lazy val readRegion = ReferenceRegion(this)
  // Returns the quality scores as an array of Phred-scaled integer scores (qual char - 33)
lazy val qualityScores: Array[Int] = {
record.getQual.toString.toCharArray.map(q => q - 33)
}
// Parse the tags ("key:type:value" triples)
lazy val tags: Seq[Attribute] = AttributeUtils.parseAttributes(record.getAttributes.toString)
// Parses the readname to Illumina optics information
lazy val illuminaOptics: Option[IlluminaOptics] = {
try {
val RichAlignmentRecord.ILLUMINA_READNAME_REGEX(tile, x, y) = record.getReadName
Some(new IlluminaOptics(tile.toInt, x.toInt, y.toInt))
} catch {
case e: MatchError => None
}
}
lazy val samtoolsCigar: Cigar = {
RichAlignmentRecord.CIGAR_CODEC.decode(record.getCigar.toString)
}
// Returns the MdTag if the read is mapped, None otherwise
lazy val mdTag: Option[MdTag] = {
if (record.getReadMapped && record.getMismatchingPositions != null) {
Some(MdTag(record.getMismatchingPositions, record.getStart))
} else {
None
}
}
private def isClipped(el: CigarElement) = {
el.getOperator == CigarOperator.SOFT_CLIP ||
el.getOperator == CigarOperator.HARD_CLIP
}
// Returns the position of the unclipped end if the read is mapped, None otherwise
lazy val unclippedEnd: Option[Long] = {
if (record.getReadMapped) {
Some(samtoolsCigar.getCigarElements.reverse.takeWhile(isClipped).foldLeft(record.getEnd) {
(pos, cigarEl) => pos + cigarEl.getLength
})
} else {
None
}
}
// Returns the position of the unclipped start if the read is mapped, None otherwise.
lazy val unclippedStart: Option[Long] = {
if (record.getReadMapped) {
Some(samtoolsCigar.getCigarElements.takeWhile(isClipped).foldLeft(record.getStart) {
(pos, cigarEl) => pos - cigarEl.getLength
})
} else {
None
}
}
// Return the 5 prime position.
def fivePrimePosition: Option[Long] = {
if (record.getReadMapped) {
if (record.getReadNegativeStrand) unclippedEnd else unclippedStart
} else {
None
}
}
// Does this read overlap with the given reference position?
def overlapsReferencePosition(pos: ReferencePosition): Option[Boolean] = {
readRegion.map(_.contains(pos))
}
// Does this read mismatch the reference at the given reference position?
def isMismatchAtReferencePosition(pos: ReferencePosition): Option[Boolean] = {
if (mdTag.isEmpty || !overlapsReferencePosition(pos).get) {
None
} else {
mdTag.map(!_.isMatch(pos))
}
}
// Does this read mismatch the reference at the given offset within the read?
def isMismatchAtReadOffset(offset: Int): Option[Boolean] = {
// careful about offsets that are within an insertion!
if (referencePositions.isEmpty) {
None
} else {
readOffsetToReferencePosition(offset).flatMap(pos => isMismatchAtReferencePosition(pos))
}
}
def getReferenceContext(readOffset: Int, referencePosition: Long, cigarElem: CigarElement, elemOffset: Int): ReferenceSequenceContext = {
val position = if (ReferencePosition.mappedPositionCheck(record)) {
Some(new ReferencePosition(record.getContig.getContigName.toString, referencePosition))
} else {
None
}
def getReferenceBase(cigarElement: CigarElement, refPos: Long, readPos: Int): Option[Char] = {
mdTag.flatMap(tag => {
cigarElement.getOperator match {
case CigarOperator.M =>
if (!tag.isMatch(refPos)) {
tag.mismatchedBase(refPos)
} else {
Some(record.getSequence()(readPos))
}
case CigarOperator.D =>
// if a delete, get from the delete pool
tag.deletedBase(refPos)
case _ => None
}
})
}
val referenceBase = getReferenceBase(cigarElem, referencePosition, readOffset)
ReferenceSequenceContext(position, referenceBase, cigarElem, elemOffset)
}
lazy val referencePositions: Seq[Option[ReferencePosition]] = referenceContexts.map(ref => ref.flatMap(_.pos))
lazy val referenceContexts: Seq[Option[ReferenceSequenceContext]] = {
if (record.getReadMapped) {
val resultTuple = samtoolsCigar.getCigarElements.foldLeft((unclippedStart.get, List[Option[ReferenceSequenceContext]]()))((runningPos, elem) => {
// runningPos is a tuple, the first element holds the starting position of the next CigarOperator
// and the second element is the list of positions up to this point
val op = elem.getOperator
val currentRefPos = runningPos._1
val resultAccum = runningPos._2
val advanceReference = op.consumesReferenceBases || op == CigarOperator.S
val newRefPos = currentRefPos + (if (advanceReference) elem.getLength else 0)
val resultParts: Seq[Option[ReferenceSequenceContext]] =
if (op.consumesReadBases) {
val range = NumericRange(currentRefPos, currentRefPos + elem.getLength, 1L)
range.zipWithIndex.map(kv =>
if (advanceReference)
Some(getReferenceContext(resultAccum.size + kv._2, kv._1, elem, kv._2))
else None)
} else {
Seq.empty
}
(newRefPos, resultAccum ++ resultParts)
})
val results = resultTuple._2
results.toIndexedSeq
} else {
qualityScores.map(t => None)
}
}
def readOffsetToReferencePosition(offset: Int): Option[ReferencePosition] = {
if (record.getReadMapped) {
referencePositions(offset)
} else {
None
}
}
def readOffsetToReferenceSequenceContext(offset: Int): Option[ReferenceSequenceContext] = {
if (record.getReadMapped) {
referenceContexts(offset)
} else {
None
}
}
}
| tomwhite/adam | adam-core/src/main/scala/org/bdgenomics/adam/rich/RichAlignmentRecord.scala | Scala | apache-2.0 | 8,509 |
package dotty.tools
package dotc
package util
import scala.collection.mutable.ArrayBuffer
import dotty.tools.io._
import annotation.tailrec
import java.util.regex.Pattern
import java.io.IOException
import Chars._
import ScriptSourceFile._
import Positions._
import java.util.Optional
object ScriptSourceFile {
  @sharable private val headerPattern = Pattern.compile("""^(::)?!#.*(\r|\n|\r\n)""", Pattern.MULTILINE)
private val headerStarts = List("#!", "::#!")
def apply(file: AbstractFile, content: Array[Char]) = {
/** Length of the script header from the given content, if there is one.
* The header begins with "#!" or "::#!" and ends with a line starting
* with "!#" or "::!#".
*/
val headerLength =
if (headerStarts exists (content startsWith _)) {
val matcher = headerPattern matcher content.mkString
if (matcher.find) matcher.end
else throw new IOException("script file does not close its header with !# or ::!#")
} else 0
new SourceFile(file, content drop headerLength) {
override val underlying = new SourceFile(file, content)
}
}
}
case class SourceFile(file: AbstractFile, content: Array[Char]) extends interfaces.SourceFile {
def this(_file: AbstractFile) = this(_file, _file.toCharArray)
def this(sourceName: String, cs: Seq[Char]) = this(new VirtualFile(sourceName), cs.toArray)
def this(file: AbstractFile, cs: Seq[Char]) = this(file, cs.toArray)
/** Tab increment; can be overridden */
def tabInc = 8
override def name = file.name
override def path = file.path
override def jfile = Optional.ofNullable(file.file)
override def equals(that : Any) = that match {
case that : SourceFile => file.path == that.file.path && start == that.start
case _ => false
}
override def hashCode = file.path.## + start.##
def apply(idx: Int) = content.apply(idx)
val length = content.length
/** true for all source files except `NoSource` */
def exists: Boolean = true
/** The underlying source file */
def underlying: SourceFile = this
/** The start of this file in the underlying source file */
def start = 0
def atPos(pos: Position): SourcePosition =
if (pos.exists) SourcePosition(underlying, pos)
else NoSourcePosition
def isSelfContained = underlying eq this
/** Map a position to a position in the underlying source file.
* For regular source files, simply return the argument.
*/
def positionInUltimateSource(position: SourcePosition): SourcePosition =
SourcePosition(underlying, position.pos shift start)
private def isLineBreak(idx: Int) =
if (idx >= length) false else {
val ch = content(idx)
// don't identify the CR in CR LF as a line break, since LF will do.
if (ch == CR) (idx + 1 == length) || (content(idx + 1) != LF)
else isLineBreakChar(ch)
}
private def calculateLineIndices(cs: Array[Char]) = {
val buf = new ArrayBuffer[Int]
buf += 0
for (i <- 0 until cs.length) if (isLineBreak(i)) buf += i + 1
    buf += cs.length // sentinel, so that the offset-to-line search below works smoothly
buf.toArray
}
private lazy val lineIndices: Array[Int] = calculateLineIndices(content)
/** Map line to offset of first character in line */
def lineToOffset(index : Int): Int = lineIndices(index)
/** A cache to speed up offsetToLine searches to similar lines */
private var lastLine = 0
/** Convert offset to line in this source file
* Lines are numbered from 0
*/
def offsetToLine(offset: Int): Int = {
lastLine = Util.bestFit(lineIndices, lineIndices.length, offset, lastLine)
lastLine
}
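  // Hedged example (not from the original source): for content "ab\ncd" the computed
  // lineIndices are Array(0, 3, 5), so offsetToLine(1) == 0, offsetToLine(3) == 1 and
  // startOfLine(4) == 3 (the offset of 'c' on the second line).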
/** The index of the first character of the line containing position `offset` */
def startOfLine(offset: Int): Int = {
require(offset >= 0)
lineToOffset(offsetToLine(offset))
}
/** The start index of the line following the one containing position `offset` */
def nextLine(offset: Int): Int =
lineToOffset(offsetToLine(offset) + 1 min lineIndices.length - 1)
/** The content of the line containing position `offset` */
def lineContent(offset: Int): String =
content.slice(startOfLine(offset), nextLine(offset)).mkString
/** The column corresponding to `offset`, starting at 0 */
def column(offset: Int): Int = {
var idx = startOfLine(offset)
var col = 0
while (idx != offset) {
      col += (if (content(idx) == '\t') (tabInc - col) % tabInc else 1)
idx += 1
}
col
}
override def toString = file.toString
}
@sharable object NoSource extends SourceFile("<no source>", Nil) {
override def exists = false
}
| densh/dotty | src/dotty/tools/dotc/util/SourceFile.scala | Scala | bsd-3-clause | 4,636 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar
import slamdata.Predef._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import java.util.UUID
import org.specs2.mutable.Specification
import fs2.Stream
import cats.effect.{IO, Sync, Timer}
import cats.effect.laws.util.TestContext
import cats.kernel.Hash
import cats.implicits._
object RateLimiterSpec extends Specification {
implicit def executionContext: ExecutionContext = ExecutionContext.Implicits.global
implicit val timer: Timer[IO] = IO.timer(executionContext)
def freshKey: IO[UUID] = IO.delay(UUID.randomUUID())
"rate limiter" should {
"output events with real time" >> {
"one event in one window" in {
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID]).unsafeRunSync()
val RateLimiterEffects(limit, _) =
key.flatMap(k => rl(k, 1, 1.seconds)).unsafeRunSync()
val back = Stream.eval_(limit) ++ Stream.emit(1)
back.compile.toList.unsafeRunSync() mustEqual(List(1))
}
"two events in one window" in {
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID]).unsafeRunSync()
val RateLimiterEffects(limit, _) =
key.flatMap(k => rl(k, 2, 1.seconds)).unsafeRunSync()
val back =
Stream.eval_(limit) ++ Stream.emit(1) ++
Stream.eval_(limit) ++ Stream.emit(2)
back.compile.toList.unsafeRunSync() mustEqual(List(1, 2))
}
"two events in two windows" in {
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID]).unsafeRunSync()
val RateLimiterEffects(limit, _) =
key.flatMap(k => rl(k, 1, 1.seconds)).unsafeRunSync()
val back =
Stream.eval_(limit) ++ Stream.emit(1) ++
Stream.eval_(limit) ++ Stream.emit(2)
back.compile.toList.unsafeRunSync() mustEqual(List(1, 2))
}
"events from two keys" in {
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID]).unsafeRunSync()
val RateLimiterEffects(limit1, _) =
key.flatMap(k => rl(k, 1, 1.seconds)).unsafeRunSync()
val RateLimiterEffects(limit2, _) =
key.flatMap(k => rl(k, 1, 1.seconds)).unsafeRunSync()
val back1 =
Stream.eval_(limit1) ++ Stream.emit(1) ++
Stream.eval_(limit1) ++ Stream.emit(2)
val back2 =
Stream.eval_(limit2) ++ Stream.emit(3) ++
Stream.eval_(limit2) ++ Stream.emit(4)
back1.compile.toList.unsafeRunSync() mustEqual(List(1, 2))
back2.compile.toList.unsafeRunSync() mustEqual(List(3, 4))
}
}
"output events with simulated time" >> {
"one event per second" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val RateLimiterEffects(limit, _) =
key.flatMap(k => rl(k, 1, 1.seconds)).unsafeRunSync()
var a: Int = 0
val run =
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
run.unsafeRunAsyncAndForget()
a mustEqual(1)
ctx.tick(1.seconds)
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(3)
ctx.tick(1.seconds)
a mustEqual(4)
}
"one event per two seconds" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val RateLimiterEffects(limit, _) =
key.flatMap(k => rl(k, 1, 2.seconds)).unsafeRunSync()
var a: Int = 0
val run =
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
run.unsafeRunAsyncAndForget()
a mustEqual(1)
ctx.tick(1.seconds)
a mustEqual(1)
ctx.tick(1.seconds)
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(3)
ctx.tick(1.seconds)
a mustEqual(3)
ctx.tick(1.seconds)
a mustEqual(4)
ctx.tick(1.seconds)
a mustEqual(4)
}
"two events per second" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val RateLimiterEffects(limit, _) =
key.flatMap(k => rl(k, 2, 1.seconds)).unsafeRunSync()
var a: Int = 0
val run =
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
run.unsafeRunAsyncAndForget()
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(4)
ctx.tick(1.seconds)
a mustEqual(6)
}
"three events per second" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val RateLimiterEffects(limit, _) =
key.flatMap(k => rl(k, 3, 1.seconds)).unsafeRunSync()
var a: Int = 0
val run =
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
run.unsafeRunAsyncAndForget()
a mustEqual(3)
ctx.tick(1.seconds)
a mustEqual(6)
ctx.tick(1.seconds)
a mustEqual(8)
}
"with a caution of 0.75" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](0.75, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val RateLimiterEffects(limit, _) =
key.flatMap(k => rl(k, 4, 1.seconds)).unsafeRunSync()
var a: Int = 0
val run =
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
run.unsafeRunAsyncAndForget()
a mustEqual(3)
ctx.tick(1.seconds)
a mustEqual(6)
ctx.tick(1.seconds)
a mustEqual(8)
}
"do not overwrite configs (use existing config)" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val k1 = key.unsafeRunSync()
val RateLimiterEffects(limit1, _) = rl(k1, 2, 1.seconds).unsafeRunSync()
val RateLimiterEffects(limit2, _) = rl(k1, 3, 1.seconds).unsafeRunSync()
var a: Int = 0
val run =
limit2 >> IO.delay(a += 1) >>
limit2 >> IO.delay(a += 1) >>
limit2 >> IO.delay(a += 1) >>
limit2 >> IO.delay(a += 1) >>
limit2 >> IO.delay(a += 1) >>
limit2 >> IO.delay(a += 1)
run.unsafeRunAsyncAndForget()
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(4)
ctx.tick(1.seconds)
a mustEqual(6)
}
"support two keys on the same schedule" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val RateLimiterEffects(limit1, _) =
key.flatMap(k => rl(k, 2, 1.seconds)).unsafeRunSync()
val RateLimiterEffects(limit2, _) =
key.flatMap(k => rl(k, 3, 1.seconds)).unsafeRunSync()
var a1: Int = 0
var a2: Int = 0
val run1 =
limit1 >> IO.delay(a1 += 1) >>
limit1 >> IO.delay(a1 += 1) >>
limit1 >> IO.delay(a1 += 1) >>
limit1 >> IO.delay(a1 += 1) >>
limit1 >> IO.delay(a1 += 1) >>
limit1 >> IO.delay(a1 += 1)
val run2 =
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1)
run1.unsafeRunAsyncAndForget()
run2.unsafeRunAsyncAndForget()
a1 mustEqual(2)
a2 mustEqual(3)
ctx.tick(1.seconds)
a1 mustEqual(4)
a2 mustEqual(6)
ctx.tick(1.seconds)
a1 mustEqual(6)
a2 mustEqual(8)
}
"support two keys on different schedules" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val RateLimiterEffects(limit1, _) =
key.flatMap(k => rl(k, 2, 1.seconds)).unsafeRunSync()
val RateLimiterEffects(limit2, _) =
key.flatMap(k => rl(k, 2, 2.seconds)).unsafeRunSync()
var a1: Int = 0
var a2: Int = 0
val run1 =
limit1 >> IO.delay(a1 += 1) >>
limit1 >> IO.delay(a1 += 1) >>
limit1 >> IO.delay(a1 += 1) >>
limit1 >> IO.delay(a1 += 1) >>
limit1 >> IO.delay(a1 += 1) >>
limit1 >> IO.delay(a1 += 1)
val run2 =
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1) >>
limit2 >> IO.delay(a2 += 1)
run1.unsafeRunAsyncAndForget()
run2.unsafeRunAsyncAndForget()
a1 mustEqual(2)
a2 mustEqual(2)
ctx.tick(1.seconds)
a1 mustEqual(4)
a2 mustEqual(2)
ctx.tick(1.seconds)
a1 mustEqual(6)
a2 mustEqual(4)
ctx.tick(1.seconds)
a1 mustEqual(6)
a2 mustEqual(4)
ctx.tick(1.seconds)
a1 mustEqual(6)
a2 mustEqual(6)
}
}
"respect backoff effect" >> {
"backoff for key that hasn't been limited" in {
val ctx = TestContext()
val updater = new TestRateLimitUpdater[UUID]
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, updater)(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val k1 = key.unsafeRunSync()
val RateLimiterEffects(limit, backoff) = rl(k1, 1, 1.seconds).unsafeRunSync()
var a: Int = 0
val run =
backoff >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
run.unsafeRunAsyncAndForget()
a mustEqual(0)
updater.waits must containTheSameElementsAs(List(k1))
ctx.tick(1.seconds)
a mustEqual(1)
updater.waits must containTheSameElementsAs(List(k1, k1))
ctx.tick(1.seconds)
a mustEqual(2)
updater.waits must containTheSameElementsAs(List(k1, k1, k1))
ctx.tick(1.seconds)
a mustEqual(3)
updater.waits must containTheSameElementsAs(List(k1, k1, k1))
}
"backoff state for known key" in {
val ctx = TestContext()
val updater = new TestRateLimitUpdater[UUID]
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, updater)(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val k1 = key.unsafeRunSync()
val RateLimiterEffects(limit, backoff) = rl(k1, 1, 1.seconds).unsafeRunSync()
var a: Int = 0
val run =
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
backoff >>
limit >> IO.delay(a += 1)
run.unsafeRunAsyncAndForget()
a mustEqual(1)
updater.waits must containTheSameElementsAs(List(k1))
ctx.tick(1.seconds)
a mustEqual(2)
updater.waits must containTheSameElementsAs(List(k1, k1))
ctx.tick(1.seconds)
a mustEqual(3)
updater.waits must containTheSameElementsAs(List(k1, k1))
}
}
"handle wait request" >> {
"wait for unknown key has no effect" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val wait = key.flatMap(k => rl.wait(k, 2.seconds))
val effectsF = key.flatMap(k => rl(k, 1, 1.seconds))
var a: Int = 0
val run =
(wait >> effectsF) flatMap {
case RateLimiterEffects(limit, _) =>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
}
run.unsafeRunAsyncAndForget()
a mustEqual(1)
ctx.tick(1.seconds)
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(3)
}
"wait for known but unused key" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val k: UUID = key.unsafeRunSync()
val wait = rl.wait(k, 2.seconds)
val effectsF = rl(k, 1, 1.seconds)
var a: Int = 0
val run =
(wait >> effectsF) flatMap {
case RateLimiterEffects(limit, _) =>
wait >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
}
run.unsafeRunAsyncAndForget()
a mustEqual(0)
ctx.tick(1.seconds)
a mustEqual(0)
ctx.tick(1.seconds)
a mustEqual(1)
ctx.tick(1.seconds)
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(3)
}
"wait state for known key" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val k: UUID = key.unsafeRunSync()
val wait = rl.wait(k, 2.seconds)
val effectsF = rl(k, 1, 1.seconds)
var a: Int = 0
val run =
effectsF flatMap {
case RateLimiterEffects(limit, _) =>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
wait >>
limit >> IO.delay(a += 1)
}
run.unsafeRunAsyncAndForget()
a mustEqual(1)
ctx.tick(1.seconds)
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(3)
}
}
"handle plus one request" >> {
"modify state for unknown key" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val key1 = key.unsafeRunSync()
val key2 = key.unsafeRunSync()
val plusOne = rl.plusOne(key1)
val effectsF = rl(key2, 1, 1.seconds)
var a: Int = 0
val run =
effectsF flatMap {
case RateLimiterEffects(limit, _) =>
plusOne >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
}
run.unsafeRunAsyncAndForget()
a mustEqual(1)
ctx.tick(1.seconds)
a mustEqual(2)
}
"modify state for known key" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val key1 = key.unsafeRunSync()
val plusOne = rl.plusOne(key1)
val effectsF = rl(key1, 1, 1.seconds)
var a: Int = 0
val run =
effectsF flatMap {
case RateLimiterEffects(limit, _) =>
plusOne >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
}
run.unsafeRunAsyncAndForget()
a mustEqual(0)
ctx.tick(1.seconds)
a mustEqual(1)
ctx.tick(1.seconds)
a mustEqual(2)
}
}
"handle configure request" >> {
"add config for unknown key" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val key1: UUID = key.unsafeRunSync()
val configure = rl.configure(key1, RateLimiterConfig(2, 1.seconds))
val effectsF = rl(key1, 1, 1.seconds)
var a: Int = 0
val run =
(configure >> effectsF) flatMap {
case RateLimiterEffects(limit, _) =>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
}
run.unsafeRunAsyncAndForget()
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(4)
}
"ignore config added for known key" in {
val ctx = TestContext()
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, NoopRateLimitUpdater[IO, UUID])(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val key1: UUID = key.unsafeRunSync()
val configure = rl.configure(key1, RateLimiterConfig(2, 1.seconds))
val effectsF = rl(key1, 1, 1.seconds)
var a: Int = 0
val run =
effectsF flatMap {
case RateLimiterEffects(limit, _) =>
configure >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1) >>
limit >> IO.delay(a += 1)
}
run.unsafeRunAsyncAndForget()
a mustEqual(1)
ctx.tick(1.seconds)
a mustEqual(2)
ctx.tick(1.seconds)
a mustEqual(3)
ctx.tick(1.seconds)
a mustEqual(4)
}
}
"send updates through the updater" >> {
"one event per second" in {
val ctx = TestContext()
val updater = new TestRateLimitUpdater[UUID]
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, updater)(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val k1 = key.unsafeRunSync()
val RateLimiterEffects(limit, _) = rl(k1, 1, 1.seconds).unsafeRunSync()
updater.configs must containTheSameElementsAs(List(k1))
(limit >> limit >> limit).unsafeRunAsyncAndForget()
updater.plusOnes must containTheSameElementsAs(List(k1))
updater.waits must containTheSameElementsAs(List(k1))
ctx.tick(1.seconds)
updater.plusOnes must containTheSameElementsAs(List(k1, k1))
updater.waits must containTheSameElementsAs(List(k1, k1))
ctx.tick(1.seconds)
updater.plusOnes must containTheSameElementsAs(List(k1, k1, k1))
updater.waits must containTheSameElementsAs(List(k1, k1))
}
"two events per second" in {
val ctx = TestContext()
val updater = new TestRateLimitUpdater[UUID]
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, updater)(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val k1 = key.unsafeRunSync()
val RateLimiterEffects(limit, _) = rl(k1, 2, 1.seconds).unsafeRunSync()
updater.configs must containTheSameElementsAs(List(k1))
(limit >> limit >> limit >> limit).unsafeRunAsyncAndForget()
updater.plusOnes must containTheSameElementsAs(List(k1, k1))
updater.waits must containTheSameElementsAs(List(k1))
ctx.tick(1.seconds)
updater.plusOnes must containTheSameElementsAs(List(k1, k1, k1, k1))
updater.waits must containTheSameElementsAs(List(k1))
}
"two keys" in {
val ctx = TestContext()
val updater = new TestRateLimitUpdater[UUID]
val RateLimiting(rl, key) =
RateLimiter[IO, UUID](1.0, freshKey, updater)(Sync[IO], ctx.timer[IO], Hash[UUID]).unsafeRunSync()
val k1 = key.unsafeRunSync()
val k2 = key.unsafeRunSync()
val RateLimiterEffects(limit1, _) = rl(k1, 1, 1.seconds).unsafeRunSync()
val RateLimiterEffects(limit2, _) = rl(k2, 2, 1.seconds).unsafeRunSync()
updater.configs must containTheSameElementsAs(List(k1, k2))
(limit1 >> limit1 >> limit1 >> limit1).unsafeRunAsyncAndForget()
(limit2 >> limit2 >> limit2 >> limit2).unsafeRunAsyncAndForget()
updater.plusOnes must containTheSameElementsAs(List(k1, k2, k2))
updater.waits must containTheSameElementsAs(List(k1, k2))
ctx.tick(1.seconds)
updater.plusOnes must containTheSameElementsAs(List(k1, k2, k2, k1, k2, k2))
updater.waits must containTheSameElementsAs(List(k1, k2, k1))
ctx.tick(1.seconds)
updater.plusOnes must containTheSameElementsAs(List(k1, k2, k2, k1, k2, k2, k1))
updater.waits must containTheSameElementsAs(List(k1, k2, k1, k1))
ctx.tick(1.seconds)
updater.plusOnes must containTheSameElementsAs(List(k1, k2, k2, k1, k2, k2, k1, k1))
updater.waits must containTheSameElementsAs(List(k1, k2, k1, k1))
}
}
}
}
| slamdata/quasar | foundation/src/test/scala/quasar/RateLimiterSpec.scala | Scala | apache-2.0 | 23,156 |
package pl.touk.nussknacker.engine.flink.test
import com.typesafe.scalalogging.LazyLogging
import org.apache.flink.api.common.{JobExecutionResult, JobID, JobStatus}
import org.apache.flink.configuration._
import org.apache.flink.runtime.execution.ExecutionState
import org.apache.flink.runtime.executiongraph.AccessExecutionGraph
import org.apache.flink.runtime.jobgraph.JobGraph
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.streaming.api.graph.StreamGraph
import org.apache.flink.util.OptionalFailure
import org.scalactic.source.Position
import org.scalatest.{Assertion, Matchers}
import org.scalatest.concurrent.Eventually
import pl.touk.nussknacker.engine.flink.test.FlinkMiniClusterHolder.AdditionalEnvironmentConfig
import scala.collection.JavaConverters._
class MiniClusterExecutionEnvironment(flinkMiniClusterHolder: FlinkMiniClusterHolder, userFlinkClusterConfig: Configuration, envConfig: AdditionalEnvironmentConfig) extends StreamExecutionEnvironment
with LazyLogging with Matchers {
  // Warning: this method assumes that there will be one job for all checks inside the action. We highly recommend
  // executing the job once per test class and then running many concurrent scenarios based on their own unique keys in the input.
  // Running multiple parallel instances of a job in one test class can cause data from sources to be stolen between those instances.
def withJobRunning[T](jobName: String)(actionToInvokeWithJobRunning: => T): T = {
val executionResult = executeAndWaitForStart(jobName)
try {
actionToInvokeWithJobRunning
} finally {
stopJob(jobName, executionResult)
}
}
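  // Hedged usage sketch (everything except withJobRunning is an assumption):
  //   env.withJobRunning("detect-frauds") {
  //     kafkaClient.sendMessage(inputTopic, testRecord)
  //     kafkaClient.verifyOutput(outputTopic, expectedRecord)
  //   }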
def stopJob[T](jobName: String, executionResult: JobExecutionResult): Unit = {
stopJob(jobName, executionResult.getJobID)
}
def stopJob[T](jobName: String, jobID: JobID): Unit = {
flinkMiniClusterHolder.cancelJob(jobID)
waitForJobState(jobID, jobName, ExecutionState.CANCELED, ExecutionState.FINISHED, ExecutionState.FAILED)()
cleanupGraph()
}
def executeAndWaitForStart[T](jobName: String): JobExecutionResult = {
val res = execute(jobName)
waitForStart(res.getJobID, jobName)()
res
}
def executeAndWaitForFinished[T](jobName: String)(patience: Eventually.PatienceConfig = envConfig.defaultWaitForStatePatience): JobExecutionResult = {
val res = execute(jobName)
waitForJobState(res.getJobID, jobName, ExecutionState.FINISHED)(patience)
res
}
def waitForStart(jobID: JobID, name: String)(patience: Eventually.PatienceConfig = envConfig.defaultWaitForStatePatience): Unit = {
waitForJobState(jobID, name, ExecutionState.RUNNING, ExecutionState.FINISHED)(patience)
}
def waitForJobState(jobID: JobID, name: String, expectedState: ExecutionState*)(patience: Eventually.PatienceConfig = envConfig.defaultWaitForStatePatience): Unit = {
Eventually.eventually {
val executionGraph = flinkMiniClusterHolder.getExecutionGraph(jobID).get()
assertJobInitialized(executionGraph)
val executionVertices = executionGraph.getAllExecutionVertices.asScala
      val notInExpectedState = executionVertices.filterNot(v => expectedState.contains(v.getExecutionState))
      assert(notInExpectedState.isEmpty, s"Some vertices of $name are not yet in one of the expected states (${expectedState.mkString(", ")}): ${notInExpectedState.map(rs => s"${rs.getTaskNameWithSubtaskIndex} - ${rs.getExecutionState}")}")
}(patience, implicitly[Position])
}
def waitForFail(jobID: JobID, name: String)(patience: Eventually.PatienceConfig = envConfig.defaultWaitForStatePatience): Unit = {
waitForJobState(jobID, name, ExecutionState.CANCELED, ExecutionState.FAILED)(patience)
}
//Protected, to be overridden in Flink < 1.13 compatibility layer
protected def assertJobInitialized(executionGraph: AccessExecutionGraph): Assertion = {
assert(executionGraph.getState != JobStatus.INITIALIZING)
}
override def execute(streamGraph: StreamGraph): JobExecutionResult = {
val jobGraph: JobGraph = streamGraph.getJobGraph
logger.debug("Running job on local embedded Flink flinkMiniCluster cluster")
jobGraph.getJobConfiguration.addAll(userFlinkClusterConfig)
    // Is the classloader that is passed here the right one?
val jobId = flinkMiniClusterHolder.submitJob(jobGraph)
new JobExecutionResult(jobId, 0, new java.util.HashMap[String, OptionalFailure[AnyRef]]())
}
def cancel(jobId: JobID): Unit =
flinkMiniClusterHolder.cancelJob(jobId)
  // This *has* to be done between tests, otherwise the next .execute() will also execute the current operators
def cleanupGraph(): Unit = {
transformations.clear()
}
}
| TouK/nussknacker | engine/flink/test-utils/src/main/scala/pl/touk/nussknacker/engine/flink/test/MiniClusterExecutionEnvironment.scala | Scala | apache-2.0 | 4,581 |
package io.flow.build
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class ApplicationSpec extends AnyFunSpec with Matchers {
it("parse valid strings") {
Application.parse("flow/user") should be(Some(Application("flow", "user", "latest")))
Application.parse(" flow/user ") should be(Some(Application("flow", "user", "latest")))
Application.parse(" flow / user ") should be(Some(Application("flow", "user", "latest")))
Application.parse(" flow / user:1.2.3 ") should be(Some(Application("flow", "user", "1.2.3")))
}
it("parse invalid strings") {
Application.parse("flow") should be(None)
Application.parse(" ") should be(None)
Application.parse("flow/user/bar") should be(None)
}
}
| flowcommerce/api-build | src/test/scala/io/flow/ApplicationSpec.scala | Scala | mit | 769 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.activemq.apollo.util.path
import java.util.HashSet
import java.util.Set
import java.util.SortedSet
import java.util.TreeSet
/**
* A Map-like data structure allowing values to be indexed by
* {@link String} and retrieved by path - supporting both *
* and > style of wildcard as well as composite paths. <br>
 * This class assumes that the index changes rarely but that fast lookup into
 * the index is required, so it maintains a pre-calculated index for
 * path steps. Looking up the values for "TEST.*" or "*.TEST" will therefore be
 * pretty fast. <br>
 * Looking up a value could return a single value or a List of matching
 * values if a wildcard or composite path is used.
*
* @version $Revision: 1.3 $
*/
class PathMap[Value] {
  /**
   * Looks up the value(s) matching the given path key. For simple
   * paths this is typically a set containing one single value; for wildcards
   * or composite paths it will typically contain several matching
   * values.
   *
   * @param key the path to look up
   * @return a Set of matching values, or an empty set if there are no
   *         matching values.
   */
  def get(key: Path): Set[Value] = findWildcardMatches(key)
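  // Illustrative wildcard-lookup sketch (the `parse` helper below is hypothetical; constructing
  // Path instances is not shown in this file):
  //
  //   val map = new PathMap[String]
  //   map.put(parse("TEST.FOO"), "a")
  //   map.get(parse("TEST.*"))   // yields a set containing "a"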
def put(key: Path, value: Value): Unit = {
root.add(key, 0, value)
}
/**
* Removes the value from the associated path
*/
  def remove(path: Path, value: Value): Boolean = root.remove(path, 0, value)
def getRootNode = root
  protected def findWildcardMatches(path: Path): Set[Value] = {
    val answer = new HashSet[Value]
    root.appendMatchingValues(answer, path, 0)
    answer
  }
  /**
   * Removes all values associated with the given path and returns them.
   *
   * @param key the path whose values should be removed
   * @return the set of values that were removed
   */
  def removeAll(key: Path): Set[Value] = {
    val rc = new HashSet[Value]
    root.removeAll(rc, key, 0)
    rc
  }
/**
* Returns the value which matches the given path or null if there is
* no matching value. If there are multiple values, the results are sorted
* and the last item (the biggest) is returned.
*
* @param path the path to find the value for
* @return the largest matching value or null if no value matches
*/
  def chooseValue(path: Path): Value = {
    val set: Set[Value] = get(path)
    if ((set == null) || set.isEmpty) {
      return null.asInstanceOf[Value]
    }
    val first: Value = set.iterator().next()
    if (set.size() == 1 || !first.isInstanceOf[java.lang.Comparable[_]]) {
      return first
    }
    val sortedSet: SortedSet[Value] = new TreeSet[Value](set)
    sortedSet.last
  }
private final val root = new PathMapNode[Value](null)
} | chirino/activemq-apollo | apollo-util/src/main/scala/org/apache/activemq/apollo/util/path/PathMap.scala | Scala | apache-2.0 | 3,487 |
package com.github.frankivo
import utest.{TestSuite, Tests, test}
object TestTrappedRainWater extends TestSuite {
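  // These cases exercise the classic "trapping rain water" puzzle: given a row of bar heights,
  // TrappedRainWater.getAmount returns how much water would be held between the bars.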
val tests: Tests = Tests {
test("Example1") {
val result = TrappedRainWater.getAmount(Array(2, 1, 4, 1, 2, 5))
val expected = 6
assert(expected == result)
}
test("Example2") {
val result = TrappedRainWater.getAmount(Array(3, 1, 3, 1, 1, 5))
val expected = 6
assert(expected == result)
}
}
}
| frankivo/dailyprogrammer | perl79/src/test/scala/com/github/frankivo/TestTrappedRainWater.scala | Scala | gpl-3.0 | 483 |
// Project: angulate2 (https://github.com/jokade/angulate2)
// Description:
// Copyright (c) 2016 Johannes.Kastner <[email protected]>
// Distributed under the MIT License (see included LICENSE file)
package angulate2.core
import angulate2.internal.ClassDecorator
import scala.annotation.StaticAnnotation
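// Scala-side counterpart of Angular's @Output decorator; the optional bindingPropertyName
// mirrors Angular's alias argument. The constructor deliberately discards its argument, since
// the annotation is presumably only inspected at compile time (note the ClassDecorator import)
// rather than at runtime.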
class Output extends StaticAnnotation {
def this(bindingPropertyName: String) = this()
}
| jokade/angulate2 | bindings/src/main/scala/angulate2/core/Output.scala | Scala | mit | 422 |
package com.rlazoti.urlshortener.bin
import com.rlazoti.urlshortener.web.services.RoutingService
import com.rlazoti.urlshortener.web.filters.HandleExceptions
import com.twitter.finagle.builder.Server
import com.twitter.finagle.builder.ServerBuilder
import com.twitter.finagle.http.Http
import java.net.InetSocketAddress
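// Entry point wiring: composes the exception-handling filter with the routing service and
// binds the resulting Finagle HTTP service to port 10000.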
object StartServer {
val routingService = new RoutingService
val handleExceptions = new HandleExceptions
val server: Server = ServerBuilder()
.codec(Http())
.bindTo(new InetSocketAddress(10000))
.name("URL Shortener")
.build(handleExceptions andThen routingService)
} | rlazoti/url-shortener | src/main/scala/com/rlazoti/urlshortener/bin/StartServer.scala | Scala | mit | 614 |
package im.actor.server.dialog.group
import akka.actor.{ ActorRef, ActorSystem, Props }
import akka.contrib.pattern.{ ClusterSharding, ShardRegion }
object GroupDialogRegion {
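  // Cluster-sharding routing: each GroupDialogCommand is addressed to the entry identified by
  // its group id, and entries are spread over a fixed number of shards (groupId % 100).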
private val idExtractor: ShardRegion.IdExtractor = {
case c: GroupDialogCommand ⇒ (c.dialogId.groupId.toString, c)
}
private val shardResolver: ShardRegion.ShardResolver = msg ⇒ msg match {
case c: GroupDialogCommand ⇒ (c.dialogId.groupId % 100).toString // TODO: configurable
}
val typeName = "GroupDialog"
private def start(props: Option[Props])(implicit system: ActorSystem): GroupDialogRegion =
GroupDialogRegion(ClusterSharding(system).start(
typeName = typeName,
entryProps = props,
idExtractor = idExtractor,
shardResolver = shardResolver
))
def start()(implicit system: ActorSystem): GroupDialogRegion = start(Some(GroupDialog.props))
def startProxy()(implicit system: ActorSystem): GroupDialogRegion =
start(None)
}
case class GroupDialogRegion(ref: ActorRef)
| dsaved/africhat-platform-0.1 | actor-server/actor-core/src/main/scala/im/actor/server/dialog/group/GroupDialogRegion.scala | Scala | mit | 1,019 |
package facebook
object FacebookFixture {
def miniTimeline = """
{
"data": [
{
"id": "644378010_10151239069468011",
"from": {
"name": "Olivier Clavel",
"id": "547334298"
},
"to": {
"data": [
{
"name": "Nicolas Martinez",
"id": "644378010"
}
]
},
"message": "Tiens tiens tiens, j'en apprend des bien bonnes ! Une petite graine planqu\\u00e9e dans un coin, tu pars en vacances sans surveiller, ca germe en douce.... et pof ! un champs de VMs ! Fait gaffe Marty tu perds la main ! ",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/644378010/posts/10151239069468011"
},
{
"name": "Like",
"link": "https://www.facebook.com/644378010/posts/10151239069468011"
}
],
"type": "status",
"status_type": "wall_post",
"created_time": "2012-11-10T15:33:09+0000",
"updated_time": "2012-11-10T15:33:09+0000",
"likes": {
"data": [
{
"name": "Vincent Therry",
"id": "1255939193"
}
],
"count": 1
},
"comments": {
"count": 0
}
}
]
}
"""
def timeline = """
{
"data": [
{
"id": "644378010_10151239069468011",
"from": {
"name": "Olivier Clavel",
"id": "547334298"
},
"to": {
"data": [
{
"name": "Nicolas Martinez",
"id": "644378010"
}
]
},
"message": "Tiens tiens tiens, j'en apprend des bien bonnes ! Une petite graine planqu\\u00e9e dans un coin, tu pars en vacances sans surveiller, ca germe en douce.... et pof ! un champs de VMs ! Fait gaffe Marty tu perds la main ! ",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/644378010/posts/10151239069468011"
},
{
"name": "Like",
"link": "https://www.facebook.com/644378010/posts/10151239069468011"
}
],
"type": "status",
"status_type": "wall_post",
"created_time": "2012-11-10T15:33:09+0000",
"updated_time": "2012-11-10T15:33:09+0000",
"likes": {
"data": [
{
"name": "Vincent Therry",
"id": "1255939193"
}
],
"count": 1
},
"comments": {
"count": 0
}
},
{
"id": "303282426390498_471401779578343",
"from": {
"name": "R.I.P Megaupload.",
"category": "Cause",
"id": "303282426390498"
},
"message": "Paranormal Activity 4\\nAvec Katie Featherston, Kathryn Newton, Matt Shively.\\n\\nAvertissement : des sc\\u00e8nes, des propos ou des images peuvent heurter la sensibilit\\u00e9 des spectateurs",
"picture": "https://fbexternal-a.akamaihd.net/safe_image.php?d=AQDwXp-G2M30Y3U1&w=90&h=90&url=http\\u00253A\\u00252F\\u00252Fimages.allocine.fr\\u00252Fr_160_240\\u00252Fb_1_d6d6d6\\u00252Fmedias\\u00252Fnmedia\\u00252F18\\u00252F92\\u00252F36\\u00252F27\\u00252F20250875.jpg",
"link": "http://cinetube.pl/cinetube/paranormal-activity-4/",
"name": "Paranormal Activity 4 | Cinetube : films gratuits en streaming",
"caption": "cinetube.pl",
"description": "Avec Katie Featherston, Kathryn Newton, Matt Shively.\\n\\nAvertissement : des sc\\u00e8nes, des propos ou des images peuvent heurter la sensibilit\\u00e9 des spectateurs\\n\\nSynopsis :\\nLe quatri\\u00e8me volet de la saga Paranormal Activity.\\n\\nH\\u00e9bergeur : Youwatch",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yD/r/aS8ecmYRys0.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/303282426390498/posts/471401779578343"
},
{
"name": "Like",
"link": "https://www.facebook.com/303282426390498/posts/471401779578343"
}
],
"type": "link",
"status_type": "shared_story",
"created_time": "2012-11-10T15:17:36+0000",
"updated_time": "2012-11-10T15:39:56+0000",
"likes": {
"data": [
{
"name": "Laurent Cousseau",
"id": "1192245454"
},
{
"name": "Sims Giliens",
"id": "1387403592"
},
{
"name": "Audrey Sanders",
"id": "100002531618946"
},
{
"name": "Guillaume Duchesne",
"id": "571716852"
}
],
"count": 12
},
"comments": {
"data": [
{
"id": "303282426390498_471401779578343_5065250",
"from": {
"name": "Emilie Malli",
"id": "526066052"
},
"message": "Yeah !",
"created_time": "2012-11-10T15:21:03+0000"
},
{
"id": "303282426390498_471401779578343_5065350",
"from": {
"name": "Nicolas Souchet",
"id": "1293321002"
},
"message": "le pire des 4...",
"created_time": "2012-11-10T15:39:11+0000",
"likes": 1
},
{
"id": "303282426390498_471401779578343_5065355",
"from": {
"name": "Brice Boyer",
"id": "1489162197"
},
"message": "ca fonctionne pas :(",
"created_time": "2012-11-10T15:39:56+0000",
"likes": 2
}
],
"count": 3
}
},
{
"id": "20069718524_10151125264438525",
"from": {
"name": "R\\u00e9mi GAILLARD",
"category": "Actor/director",
"id": "20069718524"
},
"story": "R\\u00e9mi GAILLARD updated their cover photo.",
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-prn1/539602_10151125264378525_1321793325_s.jpg",
"link": "https://www.facebook.com/photo.php?fbid=10151125264378525&set=a.10150583340308525.380594.20069718524&type=1&relevant_count=1",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yz/r/StEh3RhPvjk.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/20069718524/posts/10151125264438525"
},
{
"name": "Like",
"link": "https://www.facebook.com/20069718524/posts/10151125264438525"
}
],
"type": "photo",
"object_id": "10151125264378525",
"created_time": "2012-11-10T15:01:34+0000",
"updated_time": "2012-11-10T15:01:34+0000",
"likes": {
"data": [
{
"name": "Gioele Andreolli",
"id": "100003333583082"
},
{
"name": "Dimitri Linares",
"id": "780181939"
},
{
"name": "Pat Knight",
"id": "100001349356784"
},
{
"name": "Elliot Saucier",
"id": "100001884250625"
}
],
"count": 1216
},
"comments": {
"count": 47
}
},
{
"id": "1001271820_367966653296185",
"from": {
"name": "Rudolph F\\u00e9licit\\u00e9",
"id": "1001271820"
},
"story": "Rudolph F\\u00e9licit\\u00e9 shared a link.",
"picture": "https://fbexternal-a.akamaihd.net/safe_image.php?d=AQAqnhN0ZJQjE1DO&w=90&h=90&url=http\\u00253A\\u00252F\\u00252Fwww.nokenny.com\\u00252FIMG\\u00252F201211101017065_mini.jpeg",
"link": "http://www.nokenny.com/fun/658",
"name": "Logique f\\u00e9minine",
"caption": "www.nokenny.com",
"description": "NoKenny : votre dose quotidienne d'humour en images et vid\\u00e9os",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yD/r/aS8ecmYRys0.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/1001271820/posts/367966653296185"
},
{
"name": "Like",
"link": "https://www.facebook.com/1001271820/posts/367966653296185"
}
],
"type": "link",
"status_type": "shared_story",
"created_time": "2012-11-10T14:58:29+0000",
"updated_time": "2012-11-10T14:58:29+0000",
"likes": {
"data": [
{
"name": "Seb Sebson",
"id": "100001552920967"
},
{
"name": "Florence Borquet",
"id": "100004191066955"
}
],
"count": 2
},
"comments": {
"count": 0
}
},
{
"id": "139895946066051_429172847138358",
"from": {
"name": "Norman fait des vid\\u00e9os",
"category": "Actor/director",
"id": "139895946066051"
},
"message": "Tr\\u00e8s fier de vous annoncer mon tout 1er film !! r\\u00e9alis\\u00e9 par Maurice Barth\\u00e9lemy :) C'est pour fin janvier 2013... juste apr\\u00e8s la fin du monde !",
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-ash4/486370_429172083805101_1305729581_s.jpg",
"link": "https://www.facebook.com/photo.php?fbid=429172083805101&set=a.163178217071157.40873.139895946066051&type=1&relevant_count=1",
"name": "Presse \\u00e0 scandale",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yz/r/StEh3RhPvjk.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/139895946066051/posts/429172847138358"
},
{
"name": "Like",
"link": "https://www.facebook.com/139895946066051/posts/429172847138358"
}
],
"type": "photo",
"status_type": "added_photos",
"object_id": "429172083805101",
"created_time": "2012-11-10T14:21:59+0000",
"updated_time": "2012-11-10T14:21:59+0000",
"shares": {
"count": 68
},
"likes": {
"data": [
{
"name": "Cindy Hoffman",
"id": "100004077413572"
},
{
"name": "Louise Grau",
"id": "100002841084098"
},
{
"name": "Chlo\\u00e9 Terri\\u00e9",
"id": "100000677792937"
},
{
"name": "Daria Santoni",
"id": "100003191679257"
}
],
"count": 8459
},
"comments": {
"count": 351
}
},
{
"id": "138422716191215_486205071412976",
"from": {
"name": "La France a un incroyable talent",
"category": "Tv show",
"id": "138422716191215"
},
"message": "Quels candidats auriez-vous s\\u00e9lectionn\\u00e9s lors de la derni\\u00e8re \\u00e9mission ? Donnez votre avis sur le lien suivant =>http://www.m6.fr/emission-la_france_a_un_incroyable_talent/sondage/quels-talents-auriez-vous-selectionnes-7389.html",
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-snc7/406846_486205054746311_1156917799_s.jpg",
"link": "https://www.facebook.com/photo.php?fbid=486205054746311&set=a.169226466444173.35482.138422716191215&type=1&relevant_count=1",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yz/r/StEh3RhPvjk.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/138422716191215/posts/486205071412976"
},
{
"name": "Like",
"link": "https://www.facebook.com/138422716191215/posts/486205071412976"
}
],
"type": "photo",
"status_type": "added_photos",
"object_id": "486205054746311",
"created_time": "2012-11-10T13:54:28+0000",
"updated_time": "2012-11-10T13:54:28+0000",
"likes": {
"data": [
{
"name": "Eleonore Rapp",
"id": "1388315611"
},
{
"name": "S\\u00e9verine Boudouin",
"id": "100000887495230"
},
{
"name": "Nicole Tournesol Kuan",
"id": "1285995566"
},
{
"name": "Tribout Oceane",
"id": "100003189196939"
}
],
"count": 230
},
"comments": {
"count": 34
}
},
{
"id": "122695097770591_497044343668996",
"from": {
"name": "Decathlon France",
"category": "Retail and consumer merchandise",
"id": "122695097770591"
},
"message": "C'est parti pour le Vend\\u00e9e Globe ! Est ce que vous allez suivre cette 7\\u00e8me \\u00e9dition ? Suivez l'\\u00e9v\\u00e9nement i\\u00e7i : http://www.vendeeglobe.org/fr/",
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-ash4/374508_497044333668997_1528632177_s.jpg",
"link": "https://www.facebook.com/photo.php?fbid=497044333668997&set=a.157400717633362.30592.122695097770591&type=1&relevant_count=1",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yz/r/StEh3RhPvjk.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/122695097770591/posts/497044343668996"
},
{
"name": "Like",
"link": "https://www.facebook.com/122695097770591/posts/497044343668996"
}
],
"place": {
"id": "108583222500110",
"name": "Les Sables-d'Olonne",
"location": {
"latitude": 46.499129476796,
"longitude": -1.7811899809826
}
},
"type": "photo",
"status_type": "added_photos",
"object_id": "497044333668997",
"created_time": "2012-11-10T13:46:22+0000",
"updated_time": "2012-11-10T15:02:02+0000",
"shares": {
"count": 11
},
"likes": {
"data": [
{
"name": "Christophe Duhamel",
"id": "100000172644378"
},
{
"name": "Emmanuel Sautebin",
"id": "100003414063949"
},
{
"name": "Brigitte Errami-Milhamont",
"id": "100000379503023"
},
{
"name": "Guillaume Cr\\u00e9pin",
"id": "100004169610285"
}
],
"count": 156
},
"comments": {
"data": [
{
"id": "122695097770591_497044343668996_90861514",
"from": {
"name": "Olivier Windwehr",
"id": "833739928"
},
"message": "Je remets pas en cause les capacit\\u00e9s, qualit\\u00e9s, comp\\u00e9tences humaines et techniques de ce qui va \\u00eatre accompli, mais pour moi c'est un non \\u00e9v\\u00e9nement auquel je suis compl\\u00e8tement herm\\u00e9tique. Chacun son truc :) ",
"created_time": "2012-11-10T13:51:46+0000",
"likes": 1
},
{
"id": "122695097770591_497044343668996_90861538",
"from": {
"name": "Virginie Bonaventure",
"id": "1717836912"
},
"message": "On en sera!!!! Allez Bureau Vall\\u00e9e et bon vent \\u00e0 la seule femme de cette course : Samantha Davies!!!",
"created_time": "2012-11-10T14:09:21+0000",
"likes": 1
},
{
"id": "122695097770591_497044343668996_90861622",
"from": {
"name": "Laurent Dherbecourt",
"id": "100000329760953"
},
"message": "bon courage",
"created_time": "2012-11-10T15:02:02+0000"
}
],
"count": 3
}
},
{
"id": "14696440021_10152266083570022",
"from": {
"name": "Mozilla Firefox",
"category": "Product/service",
"id": "14696440021"
},
"message": "In less than 20 years the Web has changed our lives. How as the Internet changed your life?",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/14696440021/posts/10152266083570022"
},
{
"name": "Like",
"link": "https://www.facebook.com/14696440021/posts/10152266083570022"
}
],
"type": "status",
"status_type": "mobile_status_update",
"created_time": "2012-11-10T13:42:47+0000",
"updated_time": "2012-11-10T13:42:47+0000",
"shares": {
"count": 38
},
"likes": {
"data": [
{
"name": "Shosho Miya",
"id": "100002386051795"
},
{
"name": "Syed Abu Dahir Ali",
"id": "100001790398085"
},
{
"name": "Robin Tiongson",
"id": "100001631580311"
},
{
"name": "Author Deborah Lane",
"id": "100000052780336"
}
],
"count": 2231
},
"comments": {
"count": 552
}
},
{
"id": "128943287142362_124318551055997",
"from": {
"name": "La Geekerie",
"category": "Company",
"id": "128943287142362"
},
"message": "http://lageekerie.com/mag/la-bande-annonce-de-world-war-z/",
"picture": "https://fbexternal-a.akamaihd.net/safe_image.php?d=AQCAmQUGp75dSAHO&w=90&h=90&url=http\\u00253A\\u00252F\\u00252Flageekerie.com\\u00252Fmag\\u00252Fwp-content\\u00252Fuploads\\u00252F2012\\u00252F11\\u00252Fworld-war-z-look.jpg",
"link": "http://lageekerie.com/mag/la-bande-annonce-de-world-war-z/",
"name": "Bande-annonce de World War Z",
"caption": "lageekerie.com",
"description": " Alors que son tournage a \\u00e9t\\u00e9 notoirement perturb\\u00e9 entre multiples r\\u00e9\\u00e9critures du sc\\u00e9nario, date de sortie repouss\\u00e9e et conflit entre Brad Pit ",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yD/r/aS8ecmYRys0.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/128943287142362/posts/124318551055997"
},
{
"name": "Like",
"link": "https://www.facebook.com/128943287142362/posts/124318551055997"
}
],
"type": "link",
"status_type": "shared_story",
"created_time": "2012-11-10T13:32:43+0000",
"updated_time": "2012-11-10T13:32:43+0000",
"shares": {
"count": 1
},
"likes": {
"data": [
{
"name": "Holly Shyt",
"id": "1137647608"
},
{
"name": "Kris Stark",
"id": "100000101874324"
},
{
"name": "William Faust Lef\\u00e8vre",
"id": "1347236197"
},
{
"name": "St\\u00e9phane Wantiez",
"id": "698421819"
}
],
"count": 12
},
"comments": {
"count": 0
}
},
{
"id": "658009908_385811558163608",
"from": {
"name": "Hugues Zugus Pedreno",
"id": "658009908"
},
"message": "Bien jou\\u00e9 l'agglo !",
"picture": "https://fbexternal-a.akamaihd.net/safe_image.php?d=AQATChiCg8TVTJa9&w=90&h=90&url=http\\u00253A\\u00252F\\u00252Fimages.midilibre.fr\\u00252Fimages\\u00252F2012\\u00252F11\\u00252F09\\u00252Fune-delegation-de-commercants-s-est-presentee-hier-a-la_472646_510x255.jpg",
"link": "http://www.midilibre.fr/2012/11/09/la-ou-ils-payaient-385-eur-on-leur-reclame-2-546-eur,591640.php",
"name": "Taxes : les commer\\u00e7ants payaient 385 \\u20ac, on leur r\\u00e9clame 2 546 \\u20ac",
"caption": "www.midilibre.fr",
"description": "Les commer\\u00e7ants de l'Agglo sont fous de rage \\u00e0 la suite d'une augmentation de taxe. Pour une petite entreprise, le montant moyen annuel d\\u2019imposition passe de 385 \\u20ac \\u00e0 2 546 \\u20ac. Un v\\u00e9ritable choc...",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yD/r/aS8ecmYRys0.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/658009908/posts/385811558163608"
},
{
"name": "Like",
"link": "https://www.facebook.com/658009908/posts/385811558163608"
}
],
"type": "link",
"status_type": "shared_story",
"application": {
"name": "Links",
"id": "2309869772"
},
"created_time": "2012-11-10T13:08:58+0000",
"updated_time": "2012-11-10T13:08:58+0000",
"likes": {
"data": [
{
"name": "Aurelie Pedreno",
"id": "1611251722"
}
],
"count": 1
},
"comments": {
"count": 0
}
},
{
"id": "100002361816174_373992392689485",
"from": {
"name": "Richard Von Sternberg",
"id": "100002361816174"
},
"icon": "https://fbcdn-photos-a.akamaihd.net/photos-ak-snc7/v85005/77/405037082898329/app_101_405037082898329_1432244304.png",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/100002361816174/posts/373992392689485"
},
{
"name": "Like",
"link": "https://www.facebook.com/100002361816174/posts/373992392689485"
},
{
"name": "S'inscrire \\u00e0 Spotify",
"link": "http://www.spotify.com/redirect/download-social"
}
],
"type": "link",
"status_type": "app_created_story",
"application": {
"name": "Spotify",
"namespace": "get-spotify",
"id": "174829003346"
},
"created_time": "2012-11-10T13:06:16+0000",
"updated_time": "2012-11-10T13:06:16+0000",
"comments": {
"count": 0
}
},
{
"id": "222008437823118_372543432834497",
"from": {
"name": "Yann Barth\\u00e8s",
"category": "Public figure",
"id": "222008437823118"
},
"to": {
"data": [
{
"name": "Gangnam Style",
"category": "Music video",
"id": "323448221082896"
}
]
},
"message": "Quand Catherine et Liliane d\\u00e9couvrent le Gangnam Style...",
"message_tags": {
"41": [
{
"id": "323448221082896",
"name": "Gangnam Style",
"type": "page",
"offset": 41,
"length": 13
}
]
},
"picture": "https://fbexternal-a.akamaihd.net/safe_image.php?d=AQCtFKiQyB9U5V3o&w=130&h=130&url=http\\u00253A\\u00252F\\u00252Fmedia.canal-plus.com\\u00252Fwwwplus\\u00252Fimage\\u00252F4\\u00252F12\\u00252F6\\u00252FLE_PETIT_JOURNAL_LA_REVUE_DE_PRESSE_DE_CATHERINE_ET_ELIANE_121109_CAN_294409_image_H.jpg",
"link": "http://www.canalplus.fr/c-divertissement/pid3351-le-petit-journal.html?vid=762039",
"source": "http://player.canalplus.fr/embed/flash/player.swf?param=facebook&videoId=762039",
"name": "Le Petit Journal du 09/11 - La revue de presse de Catherine et Liliane",
"description": "Retrouvez une fois par semaine une revue de presse d\\u00e9cal\\u00e9e !",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yj/r/v2OnaTyTQZE.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/222008437823118/posts/372543432834497"
},
{
"name": "Like",
"link": "https://www.facebook.com/222008437823118/posts/372543432834497"
}
],
"type": "video",
"status_type": "shared_story",
"created_time": "2012-11-10T13:05:12+0000",
"updated_time": "2012-11-10T13:05:12+0000",
"shares": {
"count": 78
},
"likes": {
"data": [
{
"name": "Sylvain E Kouassi",
"id": "100001921413225"
},
{
"name": "Ch\\u00e9rie Coco Strazzieri",
"id": "1184356078"
},
{
"name": "Marcia Arm",
"id": "1088051718"
},
{
"name": "Andr\\u00e9a Mimi",
"id": "100000635215198"
}
],
"count": 298
},
"comments": {
"count": 38
}
},
{
"id": "205852812128_462301370500698",
"from": {
"name": "Oreille malade",
"category": "Health/wellness website",
"id": "205852812128"
},
"message": "Quoi de neuf cette semaine ? 10/11/2012",
"picture": "https://fbexternal-a.akamaihd.net/safe_image.php?d=AQCMUnp1yxX2O6Aa&w=90&h=90&url=http\\u00253A\\u00252F\\u00252Fstatic.ladepeche.fr\\u00252Fcontent\\u00252Fphoto\\u00252Fbiz\\u00252F2012\\u00252F11\\u00252F05\\u00252F201211051446_w350.jpg",
"link": "http://eepurl.com/rCTMz",
"name": "Quoi de neuf cette semaine ? 10/11/2012",
"caption": "us2.campaign-archive2.com",
"description": "\\"thank you for your e-mail and your interest in our tinnitus-therapy. ANM is a very young company that launched the CR\\u00ae-Neurostimulation-Therapy 2 \\u00bd year ago in Germany. [\\u2026]",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yD/r/aS8ecmYRys0.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/205852812128/posts/462301370500698"
},
{
"name": "Like",
"link": "https://www.facebook.com/205852812128/posts/462301370500698"
}
],
"type": "link",
"status_type": "shared_story",
"application": {
"name": "MailChimp",
"namespace": "mailchimp",
"id": "100265896690345"
},
"created_time": "2012-11-10T13:03:28+0000",
"updated_time": "2012-11-10T13:03:28+0000",
"likes": {
"data": [
{
"name": "S\\u00e9verine Leclercq",
"id": "581478874"
}
],
"count": 1
},
"comments": {
"count": 0
}
},
{
"id": "122898471086693_285018851617392",
"from": {
"name": "Le Petit Journal",
"category": "Tv show",
"id": "122898471086693"
},
"to": {
"data": [
{
"name": "Barack Obama",
"category": "Politician",
"id": "6815841748"
}
]
},
"message": "Catherine et Liliane reviennent sur l'\\u00e9lection de Barack Obama dans leur revue de presse hebdomadaire !",
"message_tags": {
"50": [
{
"id": "6815841748",
"name": "Barack Obama",
"type": "page",
"offset": 50,
"length": 12
}
]
},
"picture": "https://fbexternal-a.akamaihd.net/safe_image.php?d=AQCtFKiQyB9U5V3o&w=130&h=130&url=http\\u00253A\\u00252F\\u00252Fmedia.canal-plus.com\\u00252Fwwwplus\\u00252Fimage\\u00252F4\\u00252F12\\u00252F6\\u00252FLE_PETIT_JOURNAL_LA_REVUE_DE_PRESSE_DE_CATHERINE_ET_ELIANE_121109_CAN_294409_image_H.jpg",
"link": "http://www.canalplus.fr/c-divertissement/pid3351-le-petit-journal.html?vid=762039",
"source": "http://player.canalplus.fr/embed/flash/player.swf?param=facebook&videoId=762039",
"name": "Le Petit Journal du 09/11 - La revue de presse de Catherine et Liliane",
"description": "Retrouvez une fois par semaine une revue de presse d\\u00e9cal\\u00e9e !",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yj/r/v2OnaTyTQZE.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/122898471086693/posts/285018851617392"
},
{
"name": "Like",
"link": "https://www.facebook.com/122898471086693/posts/285018851617392"
}
],
"type": "video",
"status_type": "shared_story",
"created_time": "2012-11-10T13:02:46+0000",
"updated_time": "2012-11-10T16:00:47+0000",
"shares": {
"count": 36
},
"likes": {
"data": [
{
"name": "J\\u00e9r\\u00e9my Dupont",
"id": "1190807569"
},
{
"name": "Alexandre Prvst",
"id": "100004461606279"
},
{
"name": "Jack Oneil",
"id": "100004450600996"
},
{
"name": "Emmanuelle Delasalle",
"id": "789612631"
}
],
"count": 230
},
"comments": {
"data": [
{
"id": "122898471086693_285018851617392_1261655",
"from": {
"name": "Zo\\u00e9 Sfez",
"id": "695695920"
},
"message": "Compliqu\\u00e9 de les suivre , il faut d'une part maitriser les r\\u00e9f\\u00e9rences, s'adapter au rythme qui est tr\\u00e8s rapide, et ensuite appr\\u00e9cier cet humour du huiti\\u00e8me degr\\u00e9. Mais pour moi c'est l'un des concepts les plus brillants et les plus dr\\u00f4les du PAF.",
"created_time": "2012-11-10T15:49:16+0000"
},
{
"id": "122898471086693_285018851617392_1261680",
"from": {
"name": "Mihoub Melon P\\u00e9p\\u00e9n\\u00e9",
"id": "625272524"
},
"message": "Bravo pour toi et ta maitrise si tu trouve que c'est le meilleur humour du \\" PAF \\", tu connais pas les guignols alors :) ils mettent \\u00e7a le vendredi histoire de meubler l'\\u00e9mission parceque c'est pas du direct.",
"created_time": "2012-11-10T16:00:47+0000"
}
],
"count": 25
}
},
{
"id": "1453312287_4533446901581",
"from": {
"name": "Aude Corbi",
"id": "1453312287"
},
"message": "Ma merveille, mon Karolin, ma Dou' \\u003C3",
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-prn1/63130_4533446741577_514010385_s.jpg",
"link": "https://www.facebook.com/photo.php?fbid=4533446741577&set=a.4152631021422.167821.1453312287&type=1&relevant_count=1",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yx/r/og8V99JVf8G.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/1453312287/posts/4533446901581"
},
{
"name": "Like",
"link": "https://www.facebook.com/1453312287/posts/4533446901581"
}
],
"type": "photo",
"status_type": "added_photos",
"object_id": "4533446741577",
"created_time": "2012-11-10T11:20:52+0000",
"updated_time": "2012-11-10T11:51:15+0000",
"likes": {
"data": [
{
"name": "Elodie Massit",
"id": "1185668427"
},
{
"name": "Alexia Abram",
"id": "1107144799"
},
{
"name": "Michele Corbi",
"id": "1264268746"
},
{
"name": "M\\u00e9lanie Jacob",
"id": "759712569"
}
],
"count": 10
},
"comments": {
"data": [
{
"id": "1453312287_4533446901581_2761489",
"from": {
"name": "Emmanuelle Alcina",
"id": "100000888871221"
},
"message": "la plus belle \\u003C3",
"created_time": "2012-11-10T11:35:25+0000"
},
{
"id": "1453312287_4533446901581_2761494",
"from": {
"name": "Emmanuelle Alcina",
"id": "100000888871221"
},
"message": "je t'ai pique la photo!!!!",
"created_time": "2012-11-10T11:36:51+0000"
},
{
"id": "1453312287_4533446901581_2761510",
"from": {
"name": "Elodie Massit",
"id": "1185668427"
},
"message": "Magnifique ta morue :)",
"created_time": "2012-11-10T11:51:15+0000"
}
],
"count": 3
}
},
{
"id": "742154418_10151249587829419",
"from": {
"name": "Aur\\u00e9lien Kohler",
"id": "742154418"
},
"story": "Aur\\u00e9lien Kohler added a new photo.",
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-prn1/12693_10151249587794419_1904309148_s.jpg",
"link": "https://www.facebook.com/photo.php?fbid=10151249587794419&set=p.10151249587794419&type=1&relevant_count=1",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yz/r/StEh3RhPvjk.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/742154418/posts/10151249587829419"
},
{
"name": "Like",
"link": "https://www.facebook.com/742154418/posts/10151249587829419"
}
],
"type": "photo",
"status_type": "added_photos",
"object_id": "10151249587794419",
"application": {
"name": "iOS",
"id": "213546525407071"
},
"created_time": "2012-11-10T10:05:44+0000",
"updated_time": "2012-11-10T10:05:44+0000",
"likes": {
"data": [
{
"name": "Rami Bin Moslih",
"id": "569715836"
},
{
"name": "Julie Vignolle",
"id": "1296901161"
}
],
"count": 2
},
"comments": {
"count": 0
}
},
{
"id": "100001012614817_461908520519564",
"from": {
"name": "Celine Gea Milhet",
"id": "100001012614817"
},
"story": "Celine Gea Milhet added a new photo.",
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-ash3/522422_461908507186232_1850801_s.jpg",
"link": "https://www.facebook.com/photo.php?fbid=461908507186232&set=a.460458997331183.103074.100001012614817&type=1&relevant_count=1",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yz/r/StEh3RhPvjk.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/100001012614817/posts/461908520519564"
},
{
"name": "Like",
"link": "https://www.facebook.com/100001012614817/posts/461908520519564"
}
],
"type": "photo",
"status_type": "added_photos",
"object_id": "461908507186232",
"created_time": "2012-11-10T09:59:39+0000",
"updated_time": "2012-11-10T13:50:30+0000",
"likes": {
"data": [
{
"name": "Audrey Gueton",
"id": "1058204318"
},
{
"name": "Melinda Guilbert",
"id": "1543400289"
},
{
"name": "Laure P\\u00e9m\\u00e9ant-ouros",
"id": "1442381730"
},
{
"name": "Annie Assaleix",
"id": "1509752415"
}
],
"count": 8
},
"comments": {
"data": [
{
"id": "100001012614817_461908520519564_1355336",
"from": {
"name": "Sabine Coge",
"id": "100000362795669"
},
"message": "en vacances ?",
"created_time": "2012-11-10T10:38:58+0000"
},
{
"id": "100001012614817_461908520519564_1355395",
"from": {
"name": "Marie-h\\u00e9l\\u00e8ne Barri\\u00e8re",
"id": "100000216816801"
},
"message": "vous allez ou com \\u00e7a petites cachotti\\u00e8re ???",
"created_time": "2012-11-10T11:50:50+0000"
},
{
"id": "100001012614817_461908520519564_1355514",
"from": {
"name": "Celine Gea Milhet",
"id": "100001012614817"
},
"message": "C'\\u00e9tait fin octobre les filles.",
"created_time": "2012-11-10T13:48:40+0000"
},
{
"id": "100001012614817_461908520519564_1355515",
"from": {
"name": "Sabine Coge",
"id": "100000362795669"
},
"message": "ok",
"created_time": "2012-11-10T13:49:33+0000"
},
{
"id": "100001012614817_461908520519564_1355516",
"from": {
"name": "Celine Gea Milhet",
"id": "100001012614817"
},
"message": "oups fin septembre !!",
"created_time": "2012-11-10T13:50:30+0000",
"likes": 1
}
],
"count": 5
}
},
{
"id": "128943287142362_450039685032719",
"from": {
"name": "La Geekerie",
"category": "Company",
"id": "128943287142362"
},
"message": "http://lageekerie.com/mag/maquillage-comics-geek-glamour/\\r\\n\\r\\nMaquillage Comics : quand le geek devient glamour !",
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-snc7/311423_450039675032720_1599944291_s.jpg",
"link": "https://www.facebook.com/photo.php?fbid=450039675032720&set=a.129623390407685.9984.128943287142362&type=1&relevant_count=1",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yz/r/StEh3RhPvjk.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/128943287142362/posts/450039685032719"
},
{
"name": "Like",
"link": "https://www.facebook.com/128943287142362/posts/450039685032719"
}
],
"type": "photo",
"status_type": "added_photos",
"object_id": "450039675032720",
"created_time": "2012-11-10T09:37:15+0000",
"updated_time": "2012-11-10T09:37:15+0000",
"shares": {
"count": 2
},
"likes": {
"data": [
{
"name": "Marie Bert",
"id": "1152897409"
},
{
"name": "Milie Bounette",
"id": "100002285140048"
},
{
"name": "Aur\\u00e9lia Castella",
"id": "1204128035"
},
{
"name": "S\\u00e9verine Braamichoukette Adams",
"id": "846884795"
}
],
"count": 70
},
"comments": {
"count": 0
}
},
{
"id": "113890368647890_447875111916079",
"from": {
"name": "BERNARD WERBER OFFICIEL",
"category": "Author",
"id": "113890368647890"
},
"message": "Aujourd'hui, samedi 10 nov, je pars pour le Salon du Livre de Brives. Donc RDV l\\u00e0 bas pour ceux qui sont dans le coin...",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/113890368647890/posts/447875111916079"
},
{
"name": "Like",
"link": "https://www.facebook.com/113890368647890/posts/447875111916079"
}
],
"type": "status",
"status_type": "mobile_status_update",
"created_time": "2012-11-10T06:32:09+0000",
"updated_time": "2012-11-10T06:32:09+0000",
"shares": {
"count": 2
},
"likes": {
"data": [
{
"name": "Dominique Plante",
"id": "824864164"
},
{
"name": "Nathalie Houbart",
"id": "1200025029"
},
{
"name": "Justine M\\u00e9noire",
"id": "100001158047738"
},
{
"name": "Marjorie Micheu",
"id": "1252963422"
}
],
"count": 282
},
"comments": {
"count": 53
}
},
{
"id": "100000470052866_547106201981710",
"from": {
"name": "Vivien Grampeau Denizart",
"id": "100000470052866"
},
"message": "dernier album de Christina... \\u003C3 !!",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/100000470052866/posts/547106201981710"
},
{
"name": "Like",
"link": "https://www.facebook.com/100000470052866/posts/547106201981710"
}
],
"type": "status",
"status_type": "mobile_status_update",
"created_time": "2012-11-10T06:28:14+0000",
"updated_time": "2012-11-10T06:28:14+0000",
"comments": {
"count": 0
}
},
{
"id": "1185668427_4779668048035",
"from": {
"name": "Elodie Massit",
"id": "1185668427"
},
"message": "Pourquoi le monde est peupl\\u00e9 de gens faux et int\\u00e9ress\\u00e9 ? Pk? Pk? Pk? .... C'est nul...",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/1185668427/posts/4779668048035"
},
{
"name": "Like",
"link": "https://www.facebook.com/1185668427/posts/4779668048035"
}
],
"type": "status",
"status_type": "mobile_status_update",
"application": {
"name": "Facebook for iPhone",
"namespace": "fbiphone",
"id": "6628568379"
},
"created_time": "2012-11-10T06:23:58+0000",
"updated_time": "2012-11-10T13:18:47+0000",
"likes": {
"data": [
{
"name": "Claire Muller",
"id": "1675753060"
},
{
"name": "Sophie Maniez",
"id": "1353098081"
},
{
"name": "Fanny Bernard",
"id": "1177693303"
},
{
"name": "Angel Manning",
"id": "100000717527625"
}
],
"count": 15
},
"comments": {
"data": [
{
"id": "1185668427_4779668048035_5592700",
"from": {
"name": "Lucie-faye Mory",
"id": "745901759"
},
"message": "Tout a fais daccord avec toi",
"created_time": "2012-11-10T07:16:23+0000",
"likes": 1
},
{
"id": "1185668427_4779668048035_5592860",
"from": {
"name": "Monique Brun Massit",
"id": "100000677544538"
},
"message": "ma pauvre ch\\u00e9rie nous sommes dans un monde de charognards et de faux-culs !!! les gens bien sont tr\\u00e8s rares et tr\\u00e8s pr\\u00e9cieux .... mais trace ton chemin et ne perds pas ton temps avec ceux qui n'en valent pas la peine , tu es trop bien pour eux ! bisous",
"created_time": "2012-11-10T08:45:35+0000",
"likes": 3
},
{
"id": "1185668427_4779668048035_5593284",
"from": {
"name": "Amandine Aubert",
"id": "1624694148"
},
"message": "Malheureusement, on en fait tous les frais un jour ou lautre. Ms tt le monde nest pas comme \\u00e7a:)",
"created_time": "2012-11-10T13:18:47+0000",
"likes": 2
}
],
"count": 3
}
},
{
"id": "5510679098_10151225166484099",
"from": {
"name": "Startup Weekend",
"category": "Non-profit organization",
"id": "5510679098"
},
"to": {
"data": [
{
"name": "Global Startup Battle",
"category": "Non-profit organization",
"id": "162695083755807"
}
]
},
"message": "Global Startup Battle is taking over the world right now! 'Like' this is if you're at an event right now having an amazing time with some awesome people!\\r\\n\\r\\nResources for following #GSB2012 on Twitter:\\r\\n\\r\\n1. http://www.tweetbeam.com/show?id=f40jT\\r\\n2. http://tagboard.com/gsb2012\\r\\n3. http://owl.li/faSIE",
"message_tags": {
"0": [
{
"id": "162695083755807",
"name": "Global Startup Battle",
"type": "page",
"offset": 0,
"length": 21
}
]
},
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-ash3/178968_10151225166469099_2006989381_s.png",
"link": "https://www.facebook.com/photo.php?fbid=10151225166469099&set=a.10150145730999099.301829.5510679098&type=1&relevant_count=1",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yz/r/StEh3RhPvjk.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/5510679098/posts/10151225166484099"
},
{
"name": "Like",
"link": "https://www.facebook.com/5510679098/posts/10151225166484099"
}
],
"type": "photo",
"status_type": "added_photos",
"object_id": "10151225166469099",
"created_time": "2012-11-10T01:46:28+0000",
"updated_time": "2012-11-10T01:46:28+0000",
"shares": {
"count": 12
},
"likes": {
"data": [
{
"name": "Luis A. Mercado",
"id": "512056846"
},
{
"name": "Zineb Rharrasse",
"id": "735584891"
},
{
"name": "Sergio P Ferreira",
"id": "1551815947"
},
{
"name": "Andrew Angus",
"id": "731860625"
}
],
"count": 32
},
"comments": {
"count": 0
}
},
{
"id": "82771544063_10151192200819064",
"from": {
"name": "Avatar",
"category": "Movie",
"id": "82771544063"
},
"message": "Here is a picture of Jorge Omastott in his Avatar form. What would your Avatar look like?",
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-prn1/552390_10151192200764064_409479464_s.jpg",
"link": "https://www.facebook.com/photo.php?fbid=10151192200764064&set=a.131878604063.108571.82771544063&type=1&relevant_count=1",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yz/r/StEh3RhPvjk.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/82771544063/posts/10151192200819064"
},
{
"name": "Like",
"link": "https://www.facebook.com/82771544063/posts/10151192200819064"
}
],
"type": "photo",
"status_type": "added_photos",
"object_id": "10151192200764064",
"created_time": "2012-11-10T01:41:09+0000",
"updated_time": "2012-11-10T01:41:09+0000",
"shares": {
"count": 923
},
"likes": {
"data": [
{
"name": "Adrian G\\u0105dek",
"id": "100003344997476"
},
{
"name": "Zoran Golic",
"id": "1505951802"
},
{
"name": "Rahon Rick Chard",
"id": "100000965576511"
},
{
"name": "Catarina Pascoal",
"id": "100000591831638"
}
],
"count": 25899
},
"comments": {
"count": 919
}
},
{
"id": "100002611743226_345873455509710",
"from": {
"name": "Florian-Pierre Zanardi",
"id": "100002611743226"
},
"message": "\\"Le spectacle de l\\u2019homme, \\u2013 quel vomitif ! L\\u2019amour, \\u2013 une rencontre de deux salives\\u2026 Tous les sentiments puisent leur absolu dans la mis\\u00e8re des glandes. Il n\\u2019est de noblesse que dans la n\\u00e9gation de l\\u2019existence, dans un sourire qui surplombe des paysages an\\u00e9antis.\\"",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/100002611743226/posts/345873455509710"
},
{
"name": "Like",
"link": "https://www.facebook.com/100002611743226/posts/345873455509710"
}
],
"type": "status",
"status_type": "mobile_status_update",
"created_time": "2012-11-10T01:37:58+0000",
"updated_time": "2012-11-10T08:04:54+0000",
"likes": {
"data": [
{
"name": "Pierre Montagnon",
"id": "664246510"
},
{
"name": "Amia Jacqueline",
"id": "1142204918"
}
],
"count": 2
},
"comments": {
"data": [
{
"id": "100002611743226_345873455509710_1845573",
"from": {
"name": "Elden Knight",
"id": "593498972"
},
"message": "PTDR ! C'est de qui encore, ces \\u00e2neries ?... un vieux c\\u00e9libataire aigri, s\\u00fbrement... :-)",
"created_time": "2012-11-10T07:21:16+0000"
},
{
"id": "100002611743226_345873455509710_1845599",
"from": {
"name": "Florian-Pierre Zanardi",
"id": "100002611743226"
},
"message": "Ah, tu es mon spectacle de la soir\\u00e9e... :)",
"created_time": "2012-11-10T07:31:46+0000"
},
{
"id": "100002611743226_345873455509710_1845670",
"from": {
"name": "Elden Knight",
"id": "593498972"
},
"message": "J'aimerais bien... :-(",
"created_time": "2012-11-10T08:04:54+0000"
}
],
"count": 3
}
},
{
"id": "129459787073480_508226612530127",
"from": {
"name": "Developpeurs",
"category": "Product/service",
"id": "129459787073480"
},
"message": "Un grand MERCI \\u00e0 tous les d\\u00e9veloppeurs pour cette belle conf\\u00e9rence avec Steve Ballmer ! Nous avons publi\\u00e9 quelques photos de ce super moment avec vous.",
"picture": "https://fbcdn-photos-a.akamaihd.net/hphotos-ak-prn1/69500_508226602530128_472284777_s.png",
"link": "https://www.facebook.com/photo.php?fbid=508226602530128&set=a.136468186372640.20217.129459787073480&type=1&relevant_count=1",
"icon": "https://s-static.ak.facebook.com/rsrc.php/v2/yz/r/StEh3RhPvjk.gif",
"actions": [
{
"name": "Comment",
"link": "https://www.facebook.com/129459787073480/posts/508226612530127"
},
{
"name": "Like",
"link": "https://www.facebook.com/129459787073480/posts/508226612530127"
}
],
"type": "photo",
"status_type": "added_photos",
"object_id": "508226602530128",
"created_time": "2012-11-10T00:03:56+0000",
"updated_time": "2012-11-10T11:13:36+0000",
"likes": {
"data": [
{
"name": "Thomas Nigro",
"id": "1175138076"
},
{
"name": "G\\u00fcnther Valentin",
"id": "1557343625"
},
{
"name": "Alexandre Equoy",
"id": "566239512"
},
{
"name": "Maria-Isabelle Galbert",
"id": "100001919200107"
}
],
"count": 26
},
"comments": {
"data": [
{
"id": "129459787073480_508226612530127_1548429",
"from": {
"name": "Nathanael Marchand",
"id": "666051040"
},
"message": "A chaque fois qu'on vient, on repart avec des \\u00e9toiles plein les yeux ! :D",
"created_time": "2012-11-10T00:21:24+0000",
"likes": 2
},
{
"id": "129459787073480_508226612530127_1549346",
"from": {
"name": "Olivier Jacques",
"id": "100000796404842"
},
"message": "C'\\u00e9tait excellent. Quelle \\u00e9nergie ce M. Ballmer ! Encore plus fort qu'\\u00e0 Seattle.",
"created_time": "2012-11-10T11:13:36+0000",
"likes": 1
}
],
"count": 2
}
}
],
"paging": {
"previous": "https://graph.facebook.com/666077625/home?access_token=AAAAAAITEghMBAIN9ZBubCcUqbifmctXXSnxhPr2Qhydkx9glQ2HHKw0VxtRlbpngjEcZCRcGLeAZBH0mw1T3ZBURPxAHkGPLrRIslbmIsQZDZD&limit=25&since=1352561589&__previous=1",
"next": "https://graph.facebook.com/666077625/home?access_token=AAAAAAITEghMBAIN9ZBubCcUqbifmctXXSnxhPr2Qhydkx9glQ2HHKw0VxtRlbpngjEcZCRcGLeAZBH0mw1T3ZBURPxAHkGPLrRIslbmIsQZDZD&limit=25&until=1352505835"
}
}
"""
} | Froggies/Skimbo | test/parser/facebook/FacebookFixture.scala | Scala | agpl-3.0 | 59,048 |
package example.akkawschat
import java.util.Date
import akka.actor.ActorSystem
import akka.http.scaladsl.model.ws.{ Message, TextMessage }
import akka.http.scaladsl.server.Directives
import akka.stream.scaladsl.Flow
import shared.Protocol
import shared.Protocol._
import upickle.default._
import scala.concurrent.duration._
import scala.util.Failure
class Webservice(implicit system: ActorSystem) extends Directives {
val theChat = Chat.create(system)
import system.dispatcher
system.scheduler.schedule(15.second, 15.second) {
theChat.injectMessage(ChatMessage(sender = "clock", s"Bling! The time is ${new Date().toString}."))
}
def route =
get {
pathSingleSlash {
getFromResource("web/index.html")
} ~
      // Scala.js puts them in the root of the resource directory by default,
      // so that's where we pick them up
path("frontend-launcher.js")(getFromResource("frontend-launcher.js")) ~
path("frontend-fastopt.js")(getFromResource("frontend-fastopt.js")) ~
path("chat") {
parameter('name) { name ⇒
handleWebSocketMessages(websocketChatFlow(sender = name))
}
}
} ~
getFromResourceDirectory("web")
def websocketChatFlow(sender: String): Flow[Message, Message, Any] =
Flow[Message]
.collect {
case TextMessage.Strict(msg) ⇒ msg // unpack incoming WS text messages...
        // This will lose (ignore) messages that are not received in one chunk (which is
        // unlikely because chat messages are small, but absolutely possible).
        // FIXME: We need to handle TextMessage.Streamed as well.
}
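      // One possible way to address the FIXME above (an illustrative sketch only, not part of the
      // original code): concatenate the frames of a streamed message before handing it on. This
      // would require an implicit Materializer in scope and `mapAsync` instead of `collect`, e.g.
      //
      //   .mapAsync(1) {
      //     case TextMessage.Strict(msg)      ⇒ Future.successful(msg)
      //     case TextMessage.Streamed(frames) ⇒ frames.runFold("")(_ + _)
      //   }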
.via(theChat.chatFlow(sender)) // ... and route them through the chatFlow ...
.map {
case msg: Protocol.Message ⇒
TextMessage.Strict(write(msg)) // ... pack outgoing messages into WS JSON messages ...
}
      .via(reportErrorsFlow) // ... then log any processing errors to stdout
def reportErrorsFlow[T]: Flow[T, T, Any] =
Flow[T]
.watchTermination()((_, f) => f.onComplete {
case Failure(cause) =>
println(s"WS stream failed with $cause")
case _ => // ignore regular completion
})
}
| yukihirai0505/tutorial-program | programming/scala/websocket/backend/src/main/scala/example/akkawschat/Webservice.scala | Scala | mit | 2,217 |
package org.pinky.representation
import java.io.{OutputStream, OutputStreamWriter}
import javax.servlet.ServletContext
import com.google.inject.Inject
import org.fusesource.scalate.TemplateEngine
import org.fusesource.scalate.support.TemplateFinder
/**
* HTML representation which uses Scalate to do its heavy lifting.
*
* @param ctx ServletContext instance
* @author [email protected]
*/
class HtmlRepresentationScalate @Inject()(ctx: ServletContext) extends Representation {
val roots: List[String] = ctx.getRealPath("template") :: Nil
val defaultExtension: Option[String] = None
protected def engine0 =
Some(new TemplateEngine).map {
e =>
e.templateDirectories = roots
e.allowReload = true
e.allowCaching = false
e
}.get
lazy val engine = engine0
private lazy val finder = new TemplateFinder(engine)
def write(data: Map[String, AnyRef], out: OutputStream): Unit =
Some(new OutputStreamWriter(out)).map {
writer =>
val path = data.get("template").map(_.asInstanceOf[String]).map {
p => if (p.contains(".")) p else "%s%s".format(p, defaultExtension.map(".%s".format(_)).getOrElse(""))
}.getOrElse(throw new IllegalArgumentException("no 'template' key found"))
writer.write(engine.layout(finder.findTemplate(path).getOrElse(throw new Exception("failed to find template %s".format(path))), data))
writer.flush
}
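  // Minimal usage sketch (illustrative only; the servlet context, template name and data map
  // below are hypothetical):
  //
  //   val html = new HtmlRepresentationScalate(servletContext)
  //   html.write(Map("template" -> "index.ssp", "title" -> "Hello"), response.getOutputStream)
  //
  // The "template" entry selects a file under the webapp's template/ directory; when it carries
  // no extension, defaultExtension (if overridden) is appended.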
}
| d5nguyenvan/pinky | src/main/scala/org/pinky/representation/HtmlRepresentationScalate.scala | Scala | bsd-3-clause | 1,429 |