Dataset columns:
  code        string  (length 5 – 1M)
  repo_name   string  (length 5 – 109)
  path        string  (length 6 – 208)
  language    string  (1 value)
  license     string  (15 values)
  size        int64   (5 – 1M)
//: ----------------------------------------------------------------------------
//: Copyright (C) 2015 Verizon. All Rights Reserved.
//:
//:   Licensed under the Apache License, Version 2.0 (the "License");
//:   you may not use this file except in compliance with the License.
//:   You may obtain a copy of the License at
//:
//:       http://www.apache.org/licenses/LICENSE-2.0
//:
//:   Unless required by applicable law or agreed to in writing, software
//:   distributed under the License is distributed on an "AS IS" BASIS,
//:   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//:   See the License for the specific language governing permissions and
//:   limitations under the License.
//:
//: ----------------------------------------------------------------------------
package funnel
package chemist

import scalaz.==>>
import java.net.URI

object Sharding {

  type Distribution = Flask ==>> Set[Target]

  object Distribution {
    def empty: Distribution = ==>>()
    def empty(flasks: Seq[Flask]) =
      ==>>.fromList(flasks.toList.map(f => f -> Set.empty[Target]))
  }

  /**
   * obtain a list of flasks ordered by flasks with the least
   * assigned work first.
   */
  def shards(d: Distribution): IndexedSeq[Flask] =
    sorted(d).map(_._1).toIndexedSeq

  /**
   * sort the current distribution by the size of the url
   * set currently assigned to the index flask. Resulting
   * snapshot is ordered by flasks with least assigned
   * work first.
   */
  def sorted(d: Distribution): Seq[(Flask, Set[Target])] =
    d.toList.sortBy(_._2.size)

  /**
   * dump out the current snapshot of how chemist believes work
   * has been assigned to flasks.
   */
  def snapshot(d: Distribution): Map[FlaskID, Map[ClusterName, List[URI]]] =
    d.toList.map { case (i, s) =>
      i.id -> s.groupBy(_.cluster).mapValues(_.toList.map(_.uri))
    }.toMap

  /**
   * obtain the entire set of what chemist views as the
   * distributed world of urls.
   */
  def targets(d: Distribution): Set[Target] =
    d.values match {
      case Nil   => Set.empty[Target]
      case other => other.reduceLeft(_ ++ _)
    }

  /**
   * Given a set of inputs, check against the current known set of urls
   * that we're not already monitoring the inputs (thus ensuring that
   * the cluster is not having duplicated monitoring items)
   */
  private[chemist] def deduplicate(next: Set[Target])(d: Distribution): Set[Target] = {
    // get the full list of targets we currently know about
    val existing = targets(d)

    // determine if any of the supplied urls are existing targets
    val delta = next.map(_.uri) &~ existing.map(_.uri)

    // having computed the targets that we actually care about,
    // rehydrate a `Set[Target]` from the given `Set[SafeURL]`
    delta.foldLeft(Set.empty[Target]) { (a, b) =>
      a ++ next.filter(_.uri == b)
    }
  }
}
neigor/funnel
chemist/src/main/scala/Sharding.scala
Scala
apache-2.0
2,881
package frameless

import org.apache.spark.sql.Row
import org.apache.spark.sql.types.StructType
import shapeless.{HList, LabelledGeneric}
import shapeless.test.illTyped
import org.scalatest.matchers.should.Matchers

case class UnitsOnly(a: Unit, b: Unit)

case class TupleWithUnits(u0: Unit, _1: Int, u1: Unit, u2: Unit, _2: String, u3: Unit)

object TupleWithUnits {
  def apply(_1: Int, _2: String): TupleWithUnits = TupleWithUnits((), _1, (), (), _2, ())
}

case class OptionalNesting(o: Option[TupleWithUnits])

object RecordEncoderTests {
  case class A(x: Int)
  case class B(a: Seq[A])
  case class C(b: B)
}

class RecordEncoderTests extends TypedDatasetSuite with Matchers {
  test("Unable to encode products made from units only") {
    illTyped("""TypedEncoder[UnitsOnly]""")
  }

  test("Dropping fields") {
    def dropUnitValues[L <: HList](l: L)(implicit d: DropUnitValues[L]): d.Out = d(l)
    val fields = LabelledGeneric[TupleWithUnits].to(TupleWithUnits(42, "something"))
    dropUnitValues(fields) shouldEqual LabelledGeneric[(Int, String)].to((42, "something"))
  }

  test("Representation skips units") {
    assert(TypedEncoder[(Int, String)].catalystRepr == TypedEncoder[TupleWithUnits].catalystRepr)
  }

  test("Serialization skips units") {
    val df = session.createDataFrame(Seq((1, "one"), (2, "two")))
    val ds = df.as[TupleWithUnits](TypedExpressionEncoder[TupleWithUnits])
    val tds = TypedDataset.create(Seq(TupleWithUnits(1, "one"), TupleWithUnits(2, "two")))

    df.collect shouldEqual tds.toDF.collect
    ds.collect.toSeq shouldEqual tds.collect.run
  }

  test("Empty nested record value becomes null on serialization") {
    val ds = TypedDataset.create(Seq(OptionalNesting(Option.empty)))
    val df = ds.toDF
    df.na.drop.count shouldBe 0
  }

  test("Empty nested record value becomes none on deserialization") {
    val rdd = sc.parallelize(Seq(Row(null)))
    val schema = TypedEncoder[OptionalNesting].catalystRepr.asInstanceOf[StructType]
    val df = session.createDataFrame(rdd, schema)
    val ds = TypedDataset.createUnsafe(df)(TypedEncoder[OptionalNesting])
    ds.firstOption.run.get.o.isEmpty shouldBe true
  }

  test("Deeply nested optional values have correct deserialization") {
    val rdd = sc.parallelize(Seq(Row(true, Row(null, null))))
    type NestedOptionPair = X2[Boolean, Option[X2[Option[Int], Option[String]]]]
    val schema = TypedEncoder[NestedOptionPair].catalystRepr.asInstanceOf[StructType]
    val df = session.createDataFrame(rdd, schema)
    val ds = TypedDataset.createUnsafe(df)(TypedEncoder[NestedOptionPair])
    ds.firstOption.run.get shouldBe X2(true, Some(X2(None, None)))
  }

  test("Nesting with collection") {
    import RecordEncoderTests._
    val obj = C(B(Seq(A(1))))
    val rdd = sc.parallelize(Seq(obj))
    val ds = session.createDataset(rdd)(TypedExpressionEncoder[C])
    ds.collect.head shouldBe obj
  }
}
adelbertc/frameless
dataset/src/test/scala/frameless/RecordEncoderTests.scala
Scala
apache-2.0
2,914
def evenGT5(v: Vector[Int]) = {
  var result = Vector[Int]()
  for {
    n <- v
    if n > 5
    if n % 2 == 0
  } {
    result = result :+ n
  }
  result
}

val v = Vector(1, 2, 3, 4, 5, 6, 7, 8, 9)
println(evenGT5(v))

def oddGT5Refactored(v: Vector[Int]) = {
  val result = for {
    n <- v
    if n > 5
    if n % 2 != 0
  } yield n
  result
}

def oddGT5Refactored2(v: Vector[Int]) = {
  for {
    n <- v
    if n > 5
    isOdd = (n % 2 != 0)
    if (isOdd)
  } yield n
}

println(oddGT5Refactored2(v))

def oddGT5Refactored3(v: Vector[Int]) = {
  for {
    n <- v
    if n > 5
    isOdd = (n % 2 != 0)
    if (isOdd)
  } yield {
    val u = n * 10
    u + 2
  }
}

println(oddGT5Refactored3(v))

def yielding4(v: Vector[Int]) = {
  for {
    n <- v
    if n > 5
    isOdd = (n % 2 != 0)
    if (isOdd)
  } yield {
    for (u <- Range(0, n)) yield u
  }
}

println(yielding4(v))
mkoltsov/AtomicScala
Basics/Comprehensions.scala
Scala
mit
962
import scala.quoted.Expr

object Macro {
  inline def foo(inline n: Int): Int = ${ 'n }
}
som-snytt/dotty
tests/pos/quote-lift-inline-params-b.scala
Scala
apache-2.0
94
/*
 * Copyright 2016 Actian Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.actian.spark_vector.datastream

import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should._
import org.scalatest.PrivateMethodTester

import com.actian.spark_vector.test.IntegrationTest
import com.actian.spark_vector.vector.{VectorJDBC, VectorFixture, ColumnMetadata}

@IntegrationTest
class VectorEndpointTest extends AnyFunSuite with Matchers with VectorFixture with PrivateMethodTester {
  test("iivwtable_datastreams contains host and/or qhost") {
    VectorJDBC.withJDBC(connectionProps) { cxn =>
      noException shouldBe thrownBy {
        VectorEndpoint.fromDataStreamsTable(cxn)
      }
    }
  }

  test("check extractHostColumnName") {
    val extractHostColumnName = PrivateMethod[String]('extractHostColumnName)
    val result_qHost = Seq(ColumnMetadata("qhost", "", false, 0, 0), ColumnMetadata("host", "", false, 0, 0), ColumnMetadata("test", "", false, 0, 0))
    val result_qHost2 = Seq(ColumnMetadata("host", "", false, 0, 0), ColumnMetadata("qhost", "", false, 0, 0))
    val result_qHost3 = Seq(ColumnMetadata("qhost", "", false, 0, 0))
    val result_Host = Seq(ColumnMetadata("host", "", false, 0, 0))
    val result_Exc = Seq(ColumnMetadata("test", "", false, 0, 0))

    VectorEndpoint invokePrivate extractHostColumnName(result_qHost) shouldEqual "qhost"
    VectorEndpoint invokePrivate extractHostColumnName(result_qHost2) shouldEqual "qhost"
    VectorEndpoint invokePrivate extractHostColumnName(result_qHost3) shouldEqual "qhost"
    VectorEndpoint invokePrivate extractHostColumnName(result_Host) shouldEqual "host"
    an[IllegalStateException] shouldBe thrownBy {
      VectorEndpoint invokePrivate extractHostColumnName(result_Exc)
    }
  }
}
ActianCorp/spark-vector
src/test/scala/com/actian/spark_vector/datastream/VectorEndpointTest.scala
Scala
apache-2.0
2,399
package com.github.diegopacheco.scala.pocs.pipes

import scala.util.chaining._
import scala.language.implicitConversions

object PipesMainApp extends App {

  def plus1(i: Int) = i + 1
  def double(i: Int) = i * 2
  def square(i: Int) = i * i

  val x = 1.pipe(plus1).pipe(double).pipe(square)
  println(s"x is == ${x}")

  val x2 = 1.pipe(plus1)
    .pipe(double)
    .tap(res => println(s"DEBUG: x = $res"))
  println(s"x2 is == ${x2}")

  implicit class Piper[A](val a: A) {
    import scala.util.chaining._
    implicit def |||[B](f: (A) => B): B = a.pipe(f)
  }

  val x3 = 1 ||| plus1 ||| double ||| square
  println(s"x3 is == ${x3}")
}
diegopacheco/scala-playground
scala-2.13-pipes-fun/src/main/scala/com/github/diegopacheco/scala/pocs/pipes/PipesMainApp.scala
Scala
unlicense
660
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.security.auth import java.util import java.util.concurrent.locks.ReentrantReadWriteLock import kafka.common.{NotificationHandler, ZkNodeChangeNotificationListener} import org.apache.zookeeper.Watcher.Event.KeeperState import kafka.network.RequestChannel.Session import kafka.server.KafkaConfig import kafka.utils.CoreUtils.{inReadLock, inWriteLock} import kafka.utils._ import org.I0Itec.zkclient.{IZkStateListener, ZkClient} import org.apache.kafka.common.security.auth.KafkaPrincipal import scala.collection.JavaConverters._ import org.apache.log4j.Logger object SimpleAclAuthorizer { //optional override zookeeper cluster configuration where acls will be stored, if not specified acls will be stored in //same zookeeper where all other kafka broker info is stored. val ZkUrlProp = "authorizer.zookeeper.url" val ZkConnectionTimeOutProp = "authorizer.zookeeper.connection.timeout.ms" val ZkSessionTimeOutProp = "authorizer.zookeeper.session.timeout.ms" //List of users that will be treated as super users and will have access to all the resources for all actions from all hosts, defaults to no super users. val SuperUsersProp = "super.users" //If set to true when no acls are found for a resource , authorizer allows access to everyone. Defaults to false. val AllowEveryoneIfNoAclIsFoundProp = "allow.everyone.if.no.acl.found" /** * The root acl storage node. Under this node there will be one child node per resource type (Topic, Cluster, ConsumerGroup). * under each resourceType there will be a unique child for each resource instance and the data for that child will contain * list of its acls as a json object. Following gives an example: * * <pre> * /kafka-acl/Topic/topic-1 => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]} * /kafka-acl/Cluster/kafka-cluster => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]} * /kafka-acl/ConsumerGroup/group-1 => {"version": 1, "acls": [ { "host":"host1", "permissionType": "Allow","operation": "Read","principal": "User:alice"}]} * </pre> */ val AclZkPath = "/kafka-acl" //notification node which gets updated with the resource name when acl on a resource is changed. val AclChangedZkPath = "/kafka-acl-changes" //prefix of all the change notificiation sequence node. 
val AclChangedPrefix = "acl_changes_" } class SimpleAclAuthorizer extends Authorizer with Logging { private val authorizerLogger = Logger.getLogger("kafka.authorizer.logger") private var superUsers = Set.empty[KafkaPrincipal] private var shouldAllowEveryoneIfNoAclIsFound = false private var zkClient: ZkClient = null private var aclChangeListener: ZkNodeChangeNotificationListener = null private val aclCache = new scala.collection.mutable.HashMap[Resource, Set[Acl]] private val lock = new ReentrantReadWriteLock() /** * Guaranteed to be called before any authorize call is made. */ override def configure(javaConfigs: util.Map[String, _]) { val configs = javaConfigs.asScala val props = new java.util.Properties() configs foreach { case (key, value) => props.put(key, value.toString) } val kafkaConfig = KafkaConfig.fromProps(props) superUsers = configs.get(SimpleAclAuthorizer.SuperUsersProp).collect { case str: String if str.nonEmpty => str.split(",").map(s => KafkaPrincipal.fromString(s.trim)).toSet }.getOrElse(Set.empty[KafkaPrincipal]) shouldAllowEveryoneIfNoAclIsFound = configs.get(SimpleAclAuthorizer.AllowEveryoneIfNoAclIsFoundProp).map(_.toString.toBoolean).getOrElse(false) val zkUrl = configs.getOrElse(SimpleAclAuthorizer.ZkUrlProp, kafkaConfig.zkConnect).toString val zkConnectionTimeoutMs = configs.getOrElse(SimpleAclAuthorizer.ZkConnectionTimeOutProp, kafkaConfig.zkConnectionTimeoutMs).toString.toInt val zkSessionTimeOutMs = configs.getOrElse(SimpleAclAuthorizer.ZkSessionTimeOutProp, kafkaConfig.zkSessionTimeoutMs).toString.toInt zkClient = ZkUtils.createZkClient(zkUrl, zkConnectionTimeoutMs, zkSessionTimeOutMs) ZkUtils.makeSurePersistentPathExists(zkClient, SimpleAclAuthorizer.AclZkPath) loadCache() ZkUtils.makeSurePersistentPathExists(zkClient, SimpleAclAuthorizer.AclChangedZkPath) aclChangeListener = new ZkNodeChangeNotificationListener(zkClient, SimpleAclAuthorizer.AclChangedZkPath, SimpleAclAuthorizer.AclChangedPrefix, AclChangedNotificaitonHandler) aclChangeListener.init() zkClient.subscribeStateChanges(ZkStateChangeListener) } override def authorize(session: Session, operation: Operation, resource: Resource): Boolean = { val principal: KafkaPrincipal = session.principal val host = session.host val acls = getAcls(resource) //check if there is any Deny acl match that would disallow this operation. val denyMatch = aclMatch(session, operation, resource, principal, host, Deny, acls) //if principal is allowed to read or write we allow describe by default, the reverse does not apply to Deny. val ops = if (Describe == operation) Set[Operation](operation, Read, Write) else Set[Operation](operation) //now check if there is any allow acl that will allow this operation. val allowMatch = ops.exists(operation => aclMatch(session, operation, resource, principal, host, Allow, acls)) //we allow an operation if a user is a super user or if no acls are found and user has configured to allow all users //when no acls are found or if no deny acls are found and at least one allow acls matches. 
val authorized = isSuperUser(operation, resource, principal, host) || isEmptyAclAndAuthorized(operation, resource, principal, host, acls) || (!denyMatch && allowMatch) logAuditMessage(principal, authorized, operation, resource, host) authorized } def isEmptyAclAndAuthorized(operation: Operation, resource: Resource, principal: KafkaPrincipal, host: String, acls: Set[Acl]): Boolean = { if (acls.isEmpty) { authorizerLogger.debug(s"No acl found for resource $resource, authorized = $shouldAllowEveryoneIfNoAclIsFound") shouldAllowEveryoneIfNoAclIsFound } else false } def isSuperUser(operation: Operation, resource: Resource, principal: KafkaPrincipal, host: String): Boolean = { if (superUsers.exists( _ == principal)) { authorizerLogger.debug(s"principal = $principal is a super user, allowing operation without checking acls.") true } else false } private def aclMatch(session: Session, operations: Operation, resource: Resource, principal: KafkaPrincipal, host: String, permissionType: PermissionType, acls: Set[Acl]): Boolean = { acls.find ( acl => acl.permissionType == permissionType && (acl.principal == principal || acl.principal == Acl.WildCardPrincipal) && (operations == acl.operation || acl.operation == All) && (acl.host == host || acl.host == Acl.WildCardHost) ).map { acl: Acl => authorizerLogger.debug(s"operation = $operations on resource = $resource from host = $host is $permissionType based on acl = $acl") true }.getOrElse(false) } override def addAcls(acls: Set[Acl], resource: Resource) { if (acls != null && acls.nonEmpty) { val updatedAcls = getAcls(resource) ++ acls val path = toResourcePath(resource) if (ZkUtils.pathExists(zkClient, path)) ZkUtils.updatePersistentPath(zkClient, path, Json.encode(Acl.toJsonCompatibleMap(updatedAcls))) else ZkUtils.createPersistentPath(zkClient, path, Json.encode(Acl.toJsonCompatibleMap(updatedAcls))) updateAclChangedFlag(resource) updateCache(resource, updatedAcls) } } override def removeAcls(aclsTobeRemoved: Set[Acl], resource: Resource): Boolean = { if (ZkUtils.pathExists(zkClient, toResourcePath(resource))) { val existingAcls = getAcls(resource) val filteredAcls = existingAcls.filter((acl: Acl) => !aclsTobeRemoved.contains(acl)) val aclNeedsRemoval = (existingAcls != filteredAcls) if (aclNeedsRemoval) { val path: String = toResourcePath(resource) if (filteredAcls.nonEmpty) ZkUtils.updatePersistentPath(zkClient, path, Json.encode(Acl.toJsonCompatibleMap(filteredAcls))) else ZkUtils.deletePath(zkClient, toResourcePath(resource)) updateAclChangedFlag(resource) updateCache(resource, filteredAcls) } aclNeedsRemoval } else false } override def removeAcls(resource: Resource): Boolean = { if (ZkUtils.pathExists(zkClient, toResourcePath(resource))) { ZkUtils.deletePath(zkClient, toResourcePath(resource)) updateAclChangedFlag(resource) updateCache(resource, Set.empty[Acl]) true } else false } override def getAcls(resource: Resource): Set[Acl] = { inReadLock(lock) { aclCache.get(resource).getOrElse(Set.empty[Acl]) } } private def getAclsFromZk(resource: Resource): Set[Acl] = { val aclJson = ZkUtils.readDataMaybeNull(zkClient, toResourcePath(resource))._1 aclJson.map(Acl.fromJson).getOrElse(Set.empty) } override def getAcls(principal: KafkaPrincipal): Map[Resource, Set[Acl]] = { aclCache.mapValues { acls => acls.filter(_.principal == principal) }.filter { case (_, acls) => acls.nonEmpty }.toMap } private def loadCache() { var acls = Set.empty[Acl] val resourceTypes = ZkUtils.getChildren(zkClient, SimpleAclAuthorizer.AclZkPath) for (rType <- resourceTypes) { val 
resourceType = ResourceType.fromString(rType) val resourceTypePath = SimpleAclAuthorizer.AclZkPath + "/" + resourceType.name val resourceNames = ZkUtils.getChildren(zkClient, resourceTypePath) for (resourceName <- resourceNames) { acls = getAclsFromZk(Resource(resourceType, resourceName.toString)) updateCache(new Resource(resourceType, resourceName), acls) } } } private def updateCache(resource: Resource, acls: Set[Acl]) { inWriteLock(lock) { if (acls.nonEmpty) aclCache.put(resource, acls) else aclCache.remove(resource) } } def toResourcePath(resource: Resource): String = { SimpleAclAuthorizer.AclZkPath + "/" + resource.resourceType + "/" + resource.name } private def logAuditMessage(principal: KafkaPrincipal, authorized: Boolean, operation: Operation, resource: Resource, host: String) { val permissionType = if (authorized) "Allowed" else "Denied" authorizerLogger.debug(s"Principal = $principal is $permissionType Operation = $operation from host = $host on resource = $resource") } private def updateAclChangedFlag(resource: Resource) { ZkUtils.createSequentialPersistentPath(zkClient, SimpleAclAuthorizer.AclChangedZkPath + "/" + SimpleAclAuthorizer.AclChangedPrefix, resource.toString) } object AclChangedNotificaitonHandler extends NotificationHandler { override def processNotification(notificationMessage: String) { val resource: Resource = Resource.fromString(notificationMessage) val acls = getAclsFromZk(resource) updateCache(resource, acls) } } object ZkStateChangeListener extends IZkStateListener { override def handleNewSession() { aclChangeListener.processAllNotifications } override def handleSessionEstablishmentError(error: Throwable) { fatal("Could not establish session with zookeeper", error) } override def handleStateChanged(state: KeeperState) { //no op } } }
robort/kafka
core/src/main/scala/kafka/security/auth/SimpleAclAuthorizer.scala
Scala
apache-2.0
12,429
package org.hrscala.sbt

object First extends App {
  println("=" * 100)
  println("Hello from First")
  println("-" * 100)
}
HRScala/its-so-sbt
40-custom-tasks/first/src/main/scala/org/hrscala/sbt/First.scala
Scala
unlicense
126
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.repl.amaterasu.runners.spark import java.io.ByteArrayOutputStream import java.util import org.apache.amaterasu.common.execution.actions.Notifier import org.apache.amaterasu.common.logging.Logging import org.apache.amaterasu.common.runtime.Environment import org.apache.amaterasu.sdk.AmaterasuRunner import org.apache.amaterasu.executor.runtime.AmaContext import org.apache.spark.sql.{Dataset, SparkSession} import scala.collection.mutable import scala.collection.JavaConverters._ import scala.io.Source import scala.tools.nsc.interpreter.{IMain, Results} class ResHolder(var value: Any) class SparkScalaRunner(var env: Environment, var jobId: String, var interpreter: IMain, var outStream: ByteArrayOutputStream, var spark: SparkSession, var notifier: Notifier, var jars: Seq[String]) extends Logging with AmaterasuRunner { private def scalaOptionError(msg: String): Unit = { notifier.error("", msg) } override def getIdentifier = "scala" val holder = new ResHolder(null) override def executeSource(actionSource: String, actionName: String, exports: util.Map[String, String]): Unit = { //notifier.info(s"actionSource ${actionSource}, actionName ${actionName}") val source = Source.fromString(actionSource) interpretSources(source, actionName, exports.asScala.toMap) } def interpretSources(source: Source, actionName: String, exports: Map[String, String]): Unit = { notifier.info(s"================= started action $actionName =================") for (line <- source.getLines()) { if (!line.isEmpty) { outStream.reset() log.debug(line) if (line.startsWith("import")) { interpreter.interpret(line) } else { val intresult = interpreter.interpret(line) val result = interpreter.prevRequestList.last.lineRep.call("$result") // intresult: Success, Error, etc // result: the actual result (RDD, df, etc.) 
for caching // outStream.toString gives you the error message intresult match { case Results.Success => log.debug("Results.Success") notifier.success(line) val resultName = interpreter.prevRequestList.last.termNames.last if (exports.contains(resultName.toString)) { val format = exports(resultName.toString) if (result != null) { result match { case ds: Dataset[_] => log.debug(s"persisting DataFrame: $resultName") interpreter.interpret(s"""$resultName.write.mode(SaveMode.Overwrite).format("$format").save("${env.workingDir}/$jobId/$actionName/$resultName")""") log.debug(s"persisted DataFrame: $resultName") case _ => println(result) } } } case Results.Error => log.debug("Results.Error") val err = outStream.toString notifier.error(line, err) throw new Exception(err) case Results.Incomplete => log.debug("Results.Incomplete") } } } } notifier.info(s"================= finished action $actionName =================") } def initializeAmaContext(env: Environment): Unit = { // setting up some context :) val sc = this.spark.sparkContext val sqlContext = this.spark.sqlContext interpreter.interpret("import scala.util.control.Exception._") interpreter.interpret("import org.apache.spark.{ SparkContext, SparkConf }") interpreter.interpret("import org.apache.spark.sql.SQLContext") interpreter.interpret("import org.apache.spark.sql.{ Dataset, SparkSession }") interpreter.interpret("import org.apache.spark.sql.SaveMode") interpreter.interpret("import org.apache.amaterasu.executor.runtime.AmaContext") interpreter.interpret("import org.apache.amaterasu.common.runtime.Environment") // creating a map (_contextStore) to hold the different spark contexts // in th REPL and getting a reference to it interpreter.interpret("var _contextStore = scala.collection.mutable.Map[String, AnyRef]()") val contextStore = interpreter.prevRequestList.last.lineRep.call("$result").asInstanceOf[mutable.Map[String, AnyRef]] AmaContext.init(spark, jobId, env) // populating the contextStore contextStore.put("sc", sc) contextStore.put("sqlContext", sqlContext) contextStore.put("env", env) contextStore.put("spark", spark) contextStore.put("ac", AmaContext) interpreter.interpret("val sc = _contextStore(\\"sc\\").asInstanceOf[SparkContext]") interpreter.interpret("val sqlContext = _contextStore(\\"sqlContext\\").asInstanceOf[SQLContext]") interpreter.interpret("val env = _contextStore(\\"env\\").asInstanceOf[Environment]") interpreter.interpret("val spark = _contextStore(\\"spark\\").asInstanceOf[SparkSession]") interpreter.interpret("val AmaContext = _contextStore(\\"ac\\").asInstanceOf[AmaContext]") interpreter.interpret("import sqlContext.implicits._") // initializing the AmaContext println(s"""AmaContext.init(sc, sqlContext ,"$jobId")""") } } object SparkScalaRunner extends Logging { def apply(env: Environment, jobId: String, spark: SparkSession, outStream: ByteArrayOutputStream, notifier: Notifier, jars: Seq[String]): SparkScalaRunner = { new SparkScalaRunner(env, jobId, SparkRunnerHelper.getOrCreateScalaInterperter(outStream, jars), outStream, spark, notifier, jars) } }
nadav-har-tzvi/amaterasu
executor/src/main/scala/org/apache/spark/repl/amaterasu/runners/spark/SparkScalaRunner.scala
Scala
apache-2.0
6,619
/* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ package viper.silicon.supporters.functions import org.slf4s.Logging import viper.silver.ast import viper.silver.ast.utility.Functions import viper.silver.verifier.errors.{PostconditionViolated, ContractNotWellformed, FunctionNotWellformed} import viper.silicon.supporters.PredicateSupporter import viper.silicon._ import viper.silicon.interfaces.decider.Decider import viper.silicon.interfaces.state.factoryUtils.Ø import viper.silicon.interfaces._ import viper.silicon.interfaces.state._ import viper.silicon.state.{IdentifierFactory,ListBackedHeap, DefaultContext, SymbolConvert} import viper.silicon.state.terms import viper.silicon.state.terms._ import viper.silicon.state.terms.predef.`?s` import viper.silicon.SymbExLogger trait FunctionSupporter[H <: Heap[H]] extends VerificationUnit[H, ast.Function] object FunctionSupporter { def limitedVersion(function: HeapDepFun): HeapDepFun = { val id = function.id.withSuffix("%", "limited") HeapDepFun(id, function.argSorts, function.resultSort) } def statelessVersion(function: HeapDepFun): Fun = { val id = function.id.withSuffix("%", "stateless") Fun(id, function.argSorts.tail, terms.sorts.Bool) } } trait FunctionSupporterProvider[ST <: Store[ST], H <: Heap[H], S <: State[ST, H, S]] { this: Logging with Evaluator[ST, H, S, DefaultContext[H]] with Producer[ST, H, S, DefaultContext[H]] with Consumer[ST, H, S, DefaultContext[H]] => private type C = DefaultContext[H] private type AxiomGenerator = () => Quantification protected val config: Config protected val decider: Decider[ST, H, S, C] protected val stateFactory: StateFactory[ST, H, S] protected val symbolConverter: SymbolConvert protected val identifierFactory: IdentifierFactory protected val predicateSupporter: PredicateSupporter[ST, H, S, C] import decider.fresh import stateFactory._ private case class Phase1Data(σPre: S, πPre: Set[Term], cPre: C) object functionsSupporter extends FunctionSupporter[H] { private var program: ast.Program = null private var functionData: Map[ast.Function, FunctionData] = null private val expressionTranslator = new HeapAccessReplacingExpressionTranslator(symbolConverter, fresh) def analyze(program: ast.Program) { this.program = program val heights = Functions.heights(program).toSeq.sortBy(_._2).reverse functionData = toMap( heights.map { case (func, height) => val quantifiedFields = toSet(ast.utility.QuantifiedPermissions.quantifiedFields(func, program)) val data = new FunctionData(func, height, quantifiedFields, program)(symbolConverter, expressionTranslator, identifierFactory, pred => predicateSupporter.data(pred), config) func -> data}) /* TODO: FunctionData and HeapAccessReplacingExpressionTranslator depend * on each other. Refactor s.t. this delayed assignment is no * longer needed. 
*/ expressionTranslator.functionData = functionData } def units = functionData.keys.toSeq def sorts: Set[Sort] = Set.empty def declareSorts(): Unit = { /* No sorts need to be declared */ } def declareSymbols(): Unit = { decider.prover.logComment("Declaring program functions") functionData.values foreach { data => decider.prover.declare(FunctionDecl(data.function)) decider.prover.declare(FunctionDecl(data.limitedFunction)) decider.prover.declare(FunctionDecl(data.statelessFunction)) data.fvfGenerators.values foreach (fvfGen => decider.prover.declare(FunctionDecl(fvfGen))) } decider.prover.logComment("Snapshot variable to be used during function verification") decider.prover.declare(ConstDecl(`?s`)) } def verify(function: ast.Function, c: DefaultContext[H]): Seq[VerificationResult] = { val comment = ("-" * 10) + " FUNCTION " + function.name + ("-" * 10) log.debug(s"\n\n$comment\n") decider.prover.logComment(comment) SymbExLogger.insertMember(function, Σ(Ø, Ø, Ø), decider.π, c.asInstanceOf[DefaultContext[ListBackedHeap]]) val data = functionData(function) data.formalArgs.values foreach (v => decider.prover.declare(ConstDecl(v))) decider.prover.declare(ConstDecl(data.formalResult)) Seq(handleFunction(function, c)) } private def handleFunction(function: ast.Function, c0: DefaultContext[H]): VerificationResult = { val data = functionData(function) val c = c0.copy(quantifiedVariables = c0.quantifiedVariables ++ data.arguments, functionRecorder = ActualFunctionRecorder(data)) /* Phase 1: Check well-definedness of the specifications */ checkSpecificationWelldefinedness(function, c) match { case (result1: FatalResult, _) => data.verificationFailures = data.verificationFailures :+ result1 result1 case (result1, phase1data) => decider.prover.assume(data.limitedAxiom) decider.prover.assume(data.triggerAxiom) data.postAxiom foreach decider.prover.assume if (function.body.isEmpty) result1 else { /* Phase 2: Verify the function's postcondition */ val result2 = verify(function, phase1data, program) result2 match { case fatalResult: FatalResult => data.verificationFailures = data.verificationFailures :+ fatalResult case _ => data.definitionalAxiom foreach decider.prover.assume } result1 && result2 } } } private def checkSpecificationWelldefinedness(function: ast.Function, c: C) : (VerificationResult, Seq[Phase1Data]) = { val comment = ("-" * 5) + " Well-definedness of specifications " + ("-" * 5) log.debug(s"\n\n$comment\n") decider.prover.logComment(comment) val data = functionData(function) val pres = function.pres val posts = function.posts val γ = Γ(data.formalArgs + (function.result -> data.formalResult)) val σ = Σ(γ, Ø, Ø) var phase1Data: Seq[Phase1Data] = Vector.empty var recorders: Seq[FunctionRecorder] = Vector.empty val result = decider.locally { val preMark = decider.setPathConditionMark() produces(σ, sort => `?s`.convert(sort), pres, ContractNotWellformed, c)((σ1, c1) => { phase1Data :+= Phase1Data(σ1, decider.pcs.after(preMark).assumptions, c1) produces(σ1, sort => `?s`.convert(sort), posts, ContractNotWellformed, c1)((_, c2) => { recorders :+= c2.functionRecorder Success()})})} data.advancePhase(recorders) (result, phase1Data) } private def verify(function: ast.Function, phase1data: Seq[Phase1Data], program: ast.Program) : VerificationResult = { val comment = ("-" * 10) + " FUNCTION " + function.name + " (verify) " + ("-" * 10) log.debug(s"\n\n$comment\n") decider.prover.logComment(comment) val data = functionData(function) val posts = function.posts val body = function.body.get /* NOTE: Only 
non-abstract functions are expected! */ val postconditionViolated = (offendingNode: ast.Exp) => PostconditionViolated(offendingNode, function) var recorders: Seq[FunctionRecorder] = Vector.empty val result = phase1data.foldLeft(Success(): VerificationResult) { case (fatalResult: FatalResult, _) => fatalResult case (intermediateResult, p1d) => intermediateResult && decider.locally { decider.assume(p1d.πPre) eval(p1d.σPre, body, FunctionNotWellformed(function), p1d.cPre)((tBody, c1) => { decider.assume(data.formalResult === tBody) consumes( p1d.σPre, posts, postconditionViolated, c1)((_, _, c2) => { recorders :+= c2.functionRecorder Success()})})}} data.advancePhase(recorders) result } def emitAxioms(): Unit = { /* No axioms need to be emitted (before function verification starts) */ } /* Lifetime */ def start(): Unit = { functionData = Map.empty } def reset() { program = null functionData = Map.empty } def stop() {} } }
sccblom/vercors
viper/silicon/src/main/scala/supporters/functions/FunctionSupporter.scala
Scala
mpl-2.0
8,962
import java.io._ import com.omegaup._ import com.omegaup.data._ import com.omegaup.runner._ import com.omegaup.libinteractive.idl.Parser import com.omegaup.libinteractive.target.Options import com.omegaup.libinteractive.target.Command import org.slf4j._ import org.scalatest.BeforeAndAfterAll import org.scalatest.FreeSpec import org.scalatest.Matchers import org.scalatest.matchers.BeMatcher import org.scalatest.matchers.MatchResult case class CaseResult(name: String, status: String, output: Option[String], score: Option[Double], meta: Map[String, String]) class OKMatcher(output: String) extends BeMatcher[CaseResult] with Matchers { def apply(result: CaseResult) = MatchResult( result.status == "OK" && result.output.getOrElse("") == output, "for case `" + result.name + "', status was " + result.status + " and " + output + " was " + result.output.getOrElse("(None)"), "for case `" + result.name + "' status was " + result.status + " and expecting `" + output + "', got `" + result.output.getOrElse("(None)") + "'" ) } class StatusMatcher(status: String) extends BeMatcher[CaseResult] with Matchers { def apply(result: CaseResult) = MatchResult( result.status == status, "for case `" + result.name + "', " + status + " was " + result.status, "for case `" + result.name + "' expecting " + status + ", got " + result.status ) } class ScoreMatcher(score: Double) extends BeMatcher[CaseResult] with Matchers { def apply(result: CaseResult) = MatchResult( !result.score.isEmpty && approximately(result.score.get, score), "for case `" + result.name + "', " + score + " was " + result.score, "for case `" + result.name + "' expecting " + score + ", got " + result.score ) def approximately(a: Double, b: Double) = Math.abs(a - b) <= Math.abs(1e-4 * a) } trait CaseMatchers { def OK(output: String): BeMatcher[CaseResult] = new OKMatcher(output) val JudgeError = new StatusMatcher("JE") val MemoryLimitExceeded = new StatusMatcher("ML") val OutputLimitExceeded = new StatusMatcher("OL") val RestrictedFunction = new StatusMatcher("FO") val RuntimeError = new StatusMatcher("RE") val Signal = new StatusMatcher("SG") val TimeLimitExceeded = new StatusMatcher("TO") def Scored(score: Double): BeMatcher[CaseResult] = new ScoreMatcher(score) } class CallbackListener(expectations: Map[String, CaseResult => Unit]) extends Object with RunCaseCallback with Matchers { var output: Option[String] = None def apply(filename: String, length: Long, stream: InputStream): Unit = { val caseName = FileUtil.removeExtension(filename) expectations should contain key (caseName) if (filename.endsWith(".out")) { output = Some(FileUtil.read(stream)) } else if (filename.endsWith(".meta")) { val meta = MetaFile.load(new InputStreamReader(stream)) val score = (if (meta.contains("score")) { Some(meta("score").toDouble) } else { None }) expectations(caseName)(CaseResult(caseName, meta("status"), output, score, meta.toMap)) output = None } } } object NullRunCaseCallback extends Object with RunCaseCallback { def apply(filename: String, length: Long, stream: InputStream): Unit = {} } class CompileSpec extends FreeSpec with Matchers with CaseMatchers with BeforeAndAfterAll with ContextMixin { private implicit val runner = new Runner("test", Minijail) override def beforeAll() = { import java.util.zip._ val root = new File("test-env") if (root.exists()) { FileUtil.deleteDirectory(root.getCanonicalPath) } val compileRoot = new File(root, "compile") root.mkdir compileRoot.mkdir config = config.copy( runner = RunnerConfig( preserve = true ), common = CommonConfig( 
roots = RootsConfig( compile = compileRoot.getCanonicalPath ), paths = PathsConfig( minijail = "/var/lib/minijail" ) ) ) ctx = new Context(config) Logging.init } def compileTest(message: CompileInputMessage) (expectations: CompileOutputMessage => Unit) (implicit runner: Runner) = { val compileOutput = runner.compile(message) expectations(compileOutput) } def runTest(message: CompileInputMessage, cases: ((String, String), CaseResult => Unit)*) (implicit runner: Runner) = { val compileOutput = runner.compile(message) compileOutput.status should equal ("ok") compileOutput.token should not equal None runner.run(RunInputMessage(compileOutput.token.get, cases = Some( cases.map(c => new CaseData(c._1._1, c._1._2)).toList )), new CallbackListener( cases.map(c => c._1._1 -> c._2).toMap )) } def runTestWithCustomMessage(message: CompileInputMessage, runInputMessage: RunInputMessage, cases: ((String, String), CaseResult => Unit)*) (implicit runner: Runner) = { val compileOutput = runner.compile(message) compileOutput.status should equal ("ok") compileOutput.token should not equal None runner.run(runInputMessage.copy( token = compileOutput.token.get, cases = Some(cases.map(c => new CaseData(c._1._1, c._1._2)).toList) ), new CallbackListener( cases.map(c => c._1._1 -> c._2).toMap )) } "Compile error" - { "for C" - { "should trigger in trivial cases" in { compileTest(CompileInputMessage("c", List(("Main.c", "foo")))) { _.status should equal ("compile error") } } "should trigger when main is missing" in { compileTest(CompileInputMessage("c", List(("Main.c", "#include<stdio.h>")))) { _.status should equal ("compile error") } } "should trigger when incluiding a forbidden file" in { compileTest(CompileInputMessage("c", List(("Main.c", "#include</dev/random>")))) { _.status should equal ("compile error") } } } "for C++" - { "should trigger in trivial cases" in { compileTest(CompileInputMessage("cpp", List(("Main.cpp", "foo")))) { _.status should equal ("compile error") } } "should trigger when main is missing" in { compileTest(CompileInputMessage("cpp", List(("Main.cpp", "#include<stdio.h>")))) { _.status should equal ("compile error") } } "should trigger when incluiding a forbidden file" in { compileTest(CompileInputMessage("cpp", List(("Main.cpp", "#include</dev/random>")))) { _.status should equal ("compile error") } } } "for Java" - { "should tigger in trivial cases" in { compileTest(CompileInputMessage("java", List(("Main.java", "foo")))) { test => { test.status should equal ("compile error") test.error should not equal (Some("Class should be called \"Main\".")) }} } "should trigger when class is not called Main" in { compileTest(CompileInputMessage("java", List(("Main.java", """ class Foo { public static void main(String[] args) { System.out.println("Hello, World!\n"); } } """)))) { test => { test.status should equal ("compile error") test.error should equal (Some("Class should be called \"Main\".")) }} } } "for Karel" - { "should trigger for trivial cases" in { compileTest(CompileInputMessage("kj", List(("Main.kj", "foo")))) { _.status should equal ("compile error") } } "should not trigger with a correct program" in { compileTest(CompileInputMessage("kj", List(("Main.kj", """ class program { program() { while(notFacingEast) turnleft(); pickbeeper(); turnoff(); } } """)))) { _.status should equal ("ok") } } } } "Status" - { "should be correctly assigned in C" in { runTest( CompileInputMessage("c", List(("Main.c", """ #include<stdio.h> #include<stdlib.h> int main() { int x; (void)scanf("%d", &x); switch 
(x) { case 0: printf("Hello, World!\n"); break; case 1: while(1); break; case 2: fork(); break; case 3: while(1) { void* mem = malloc(1024*1024); memset(mem, -1, 1024 * 1024); } break; case 4: while(1) printf("trololololo\n"); break; case 5: printf("%d", *(int*)(x-6)); break; case 6: printf("%d", 1/(x-6)); break; case 7: return 1; } return 0; } """))), ("ok", "0") -> { _ should be (OK("Hello, World!")) }, ("tle", "1") -> { _ should be (TimeLimitExceeded) }, ("rfe", "2") -> { result => { result should be (RestrictedFunction) result.meta should contain key ("syscall") result.meta("syscall") should (be ("fork") or be("clone")) }}, ("mle", "3") -> { _ should be (MemoryLimitExceeded) }, ("ole", "4") -> { _ should be (OutputLimitExceeded) }, ("segfault", "5") -> { _ should be (Signal) }, ("zerodiv", "6") -> { _ should be (RuntimeError) } ) } "should be correctly assigned in C++" in { runTest( CompileInputMessage("cpp", List(("Main.cpp", """ #include<iostream> #include<stdlib.h> #include<cstring> #include<unistd.h> using namespace std; int main() { int x; cin >> x; switch (x) { case 0: cout << "Hello, World!" << endl; break; case 1: while(1); break; case 2: fork(); break; case 3: while(1) { void* mem = malloc(1024*1024); memset(mem, -1, 1024 * 1024); } break; case 4: while(1) cout << "trololololo" << endl; break; case 5: cout << *reinterpret_cast<int*>(x-5) << endl; break; case 6: cout << 1/(x-6) << endl; break; case 8: return 1; } return 0; } """))), ("ok", "0") -> { _ should be (OK("Hello, World!")) }, ("tle", "1") -> { _ should be (TimeLimitExceeded) }, ("rfe", "2") -> { result => { result should be (RestrictedFunction) result.meta should contain key ("syscall") result.meta("syscall") should (be ("fork") or be("clone")) }}, ("mle", "3") -> { _ should be (MemoryLimitExceeded) }, ("ole", "4") -> { _ should be (OutputLimitExceeded) }, ("segfault", "5") -> { _ should be (Signal) }, ("zerodiv", "6") -> { _ should be (RuntimeError) } ) } "should be correctly assigned in Java" in { runTest( CompileInputMessage("java", List(("Main.java", """ import java.io.*; import java.util.*; class Main { public static void main(String[] args) throws Exception { Scanner in = new Scanner(System.in); List l = new ArrayList(); switch (in.nextInt()){ case 0: System.out.println("Hello, World!\n"); break; case 1: while(true) {} case 2: Runtime.getRuntime().exec("/bin/ls").waitFor(); break; case 3: while(true) { l.add(new ArrayList(1024*1024)); } case 4: while(true) { System.out.println("trololololo"); } case 5: System.out.println(l.get(0)); break; case 6: System.out.println(1 / (int)(Math.sin(0.1))); break; case 7: System.exit(1); break; } } } """))), ("ok", "0") -> { _ should be (OK("Hello, World!")) }, ("tle", "1") -> { _ should be (TimeLimitExceeded) }, // ("rfe", "2") -> { _ should be (RestrictedFunction) }, // ("mle", "3") -> { _ should be (MemoryLimitExceeded) }, // ("ole", "4") -> { _ should be (OutputLimitExceeded) }, ("segfault", "5") -> { _ should be (RuntimeError) }, ("zerodiv", "6") -> { _ should be (RuntimeError) } ) } "should return TLE when exceeding overall time limit" in { runTestWithCustomMessage( CompileInputMessage("java", List(("Main.java", """ import java.io.*; import java.util.*; class Main { public static void main(String[] args) throws Exception { Thread.sleep(700); System.out.println("OK"); } } """))), RunInputMessage(null, overallWallTimeLimit = 1000L), ("ok", "") -> { _ should be (OK("OK")) }, ("tle", "") -> { _ should be (TimeLimitExceeded) } ) } } "Exploits" - { "should be prevented" - { 
"x86 forkbomb" in { runTest( CompileInputMessage("cpp", List(("Main.cpp", """ int main() { (*(void (*)())"\x6a\x02\x58\xcd\x80\xeb\xf9")(); } """))), ("x86_forkbomb", "") -> { r: CaseResult => r should be (RestrictedFunction) } ) } "x86_64 forkbomb" in { runTest( CompileInputMessage("cpp", List(("Main.cpp", """ int main() { (*(void (*)())"\x48\x31\xc0\xb0\x39\xcd\x80\xeb\xfa")(); } """))), ("x86_64_forkbomb", "") -> { r: CaseResult => r should be (RestrictedFunction) } ) } "Java6 parse double bug in compiler: CVE-2010-4476" in { compileTest(CompileInputMessage("java", List(("Main.java", """ class Main { public static void main(String[] args) { double d = 2.2250738585072012e-308; System.out.println("Value: " + d); } } """)))) { _.status should equal ("ok") } } "Java6 parse double bug in runtime: CVE-2010-4476" in { runTest( CompileInputMessage("java", List(("Main.java", """ class Main { public static void main(String[] args) { double d = Double.parseDouble("2.2250738585072012e-308"); System.out.println("Value: " + d); } } """))), ("ok", "") -> { r: CaseResult => r should be (OK("Value: 2.2250738585072014E-308")) } ) } "2^200 error messages" in { compileTest(CompileInputMessage("c", List(("Main.c", """ #include "Main.c" #include "Main.c" """)))) { _.error should not equal (None) } } } } "Custom validators" - { "should return JE when no validator present" in { compileTest(CompileInputMessage("c", List(("Main.c", """ #include<stdio.h> #include<stdlib.h> int main() { printf("100\n"); return 0; } """)), Some("c"))) { _.status should equal ("judge error") } } "should return JE when validator is invalid" in { compileTest(CompileInputMessage("c", List(("Main.c", """ #include<stdio.h> #include<stdlib.h> int main() { printf("100\n"); return 0; } """)), Some("c"), Some(List(("Main.c", "foo"))))) { _.status should equal ("judge error") } } "should report 0 points when printing 0" in { runTest( CompileInputMessage("c", List(("Main.c", """ #include<stdio.h> #include<stdlib.h> int main() { printf("100\n"); return 0; } """)), Some("c"), Some(List(("Main.c", """ #include<stdio.h> #include<stdlib.h> int main() { printf("0\n"); return 0; } """)))), ("zero", "0") -> { r: CaseResult => r should be (OK("100")) } ) } "should return JE when printing something non-numeric" in { runTest( CompileInputMessage("c", List(("Main.c", """ #include<stdio.h> #include<stdlib.h> int main() { printf("100\n"); return 0; } """)), Some("c"), Some(List(("Main.c", """ #include<stdio.h> #include<stdlib.h> int main() { printf("foo\n"); return 0; } """)))), ("je", "0") -> { r: CaseResult => r should be (JudgeError) } ) } "should work correctly in C" in { runTest( CompileInputMessage("c", List(("Main.c", """ #include<stdio.h> #include<stdlib.h> int main() { double a, b; scanf("%lf %lf", &a, &b); printf("%lf\n", a + b); return 0; } """)), Some("c"), Some(List(("Main.c", """ #include<stdio.h> #include<stdlib.h> int main() { FILE* data = fopen("data.in", "r"); double a, b, answer, user; (void)fscanf(data, "%lf %lf", &a, &b); (void)scanf("%lf", &user); answer = a*a + b*b; printf("%lf\n", 1.0 / (1.0 + (answer - user) * (answer - user))); return 0; } """)))), ("one", "1 1\n") -> { _ should be (Scored(1.0)) }, ("zero", "0 0\n") -> { _ should be (Scored(1.0)) }, ("two", "2 2\n") -> { _ should be (Scored(0.058824)) }, ("half", "0.5 0.5\n") -> { _ should be (Scored(0.8)) } ) } "should work correctly in Python" in { runTest( CompileInputMessage("cpp", List(("Main.cpp", """ #include<iostream> int main() { double a, b; std::cin >> a >> b; std::cout 
<< a*a + b*b << std::endl; return 0; } """)), Some("py"), Some(List(("Main.py", """ data = open("data.in", "r") a, b = map(float, data.readline().strip().split()) user = float(raw_input().strip()) answer = a**2 + b**2 print 1.0 / (1.0 + (answer - user)**2) """)))), ("one", "1 1\n") -> { _ should be (Scored(1.0)) }, ("zero", "0 0\n") -> { _ should be (Scored(1.0)) }, ("two", "2 2\n") -> { _ should be (Scored(1.0)) }, ("half", "0.5 0.5\n") -> { _ should be (Scored(1.0)) } ) } } "libinteractive" - { "should compile and run" in { val parser = new Parser val runner = new Runner("test", Minijail) val interactive = InteractiveDescription( """ interface Main {}; interface summer { int summer(int a, int b); }; """, parentLang = "cpp", childLang = "cpp", moduleName = "summer" ) val idl = parser.parse(interactive.idlSource) val test1 = runner.compile(CompileInputMessage("cpp", List(("summer.cpp", """ #include "summer.h" int summer(int a, int b) { return a + b; } """), ("Main.cpp", """ #include <cstdio> #include "summer.h" using namespace std; int main() { int a, b; scanf("%d %d\n", &a, &b); printf("%d\n", summer(a, b)); } """)), interactive = Some(interactive))) test1.status should equal ("ok") test1.token should not equal None runner.run( RunInputMessage( token = test1.token.get, cases= Some(List( new CaseData("three", "1 2\n") )), interactive = Some(InteractiveRuntimeDescription( main = idl.main.name, interfaces = idl.interfaces.map(_.name), parentLang = interactive.parentLang )) ), NullRunCaseCallback ) } } }
omegaup/backend
runner/src/test/scala/CompileTests.scala
Scala
bsd-2-clause
20,714
package at.forsyte.apalache.tla.bmcmt.rules

import at.forsyte.apalache.tla.bmcmt._
import at.forsyte.apalache.tla.bmcmt.rules.aux.PowSetCtor
import at.forsyte.apalache.tla.lir.OperEx
import at.forsyte.apalache.tla.lir.oper.{BmcOper, TlaSetOper}

/**
 * This rule expands a powerset, that is, SUBSET S for a set S. In the future, it might also expand a set of functions,
 * that is, [S -> T], but this does not seem to be practical.
 *
 * @author Igor Konnov
 */
class SetExpandRule(rewriter: SymbStateRewriter) extends RewritingRule {
  override def isApplicable(symbState: SymbState): Boolean = {
    symbState.ex match {
      case OperEx(BmcOper.expand, OperEx(TlaSetOper.SUBSET, _))    => true
      case OperEx(BmcOper.expand, OperEx(TlaSetOper.funSet, _, _)) => true
      case _                                                       => false
    }
  }

  override def apply(state: SymbState): SymbState = {
    state.ex match {
      case ex @ OperEx(BmcOper.expand, OperEx(TlaSetOper.funSet, _, _)) =>
        throw new RewriterException("Trying to expand a set of functions. This will blow up the solver.", ex)

      case ex @ OperEx(BmcOper.expand, OperEx(TlaSetOper.SUBSET, basesetEx)) =>
        var nextState = rewriter.rewriteUntilDone(state.setRex(basesetEx))
        new PowSetCtor(rewriter).confringo(nextState, nextState.asCell)

      case e @ _ =>
        throw new RewriterException("%s is not applicable to %s".format(getClass.getSimpleName, e), state.ex)
    }
  }
}
konnov/dach
tla-bmcmt/src/main/scala/at/forsyte/apalache/tla/bmcmt/rules/SetExpandRule.scala
Scala
apache-2.0
1,440
package views

import com.micronautics.cache._
import play.api.i18n.Lang
import play.twirl.api._

import scala.language.implicitConversions
import java.io.File
import play.api.Environment

trait PFViewImplicits {
  implicit def pfViewToHtml(pfView: PFView): String = Html(pfView.toString())
  implicit def pfViewToString(pfView: PFView): String = pfView.toString()

  implicit def sbToString(sb: StringBuilder): String = sb.toString()
  implicit def appendableToString(appendable: HtmlFormat.Appendable): String = appendable.toString()
  //implicit def htmlToString(html: Html): String = html.toString()

  implicit def sbToHtml(sb: StringBuilder): Html = Html(sb.toString())
  implicit def stringToHtml(string: String): Html = Html(string)
}

object PFView {
  def apply(block: => Any)(implicit env: Environment): String = {
    val pfView = new PFView()
    pfView.++(block.toString)
    pfView.toString()
  }
}

class PFView(implicit env: Environment) extends PFViewImplicits {
  implicit val sb = new StringBuilder("")

  /** Side effect: appends contents of String to PFView's StringBuffer. */
  // TODO incorporate https://gist.github.com/javierfs89/eca13fa3429af26b9ac9
  @inline def ++(s: => String=""): StringBuilder = sb.append(s)

  @inline private val includeFileInline = (baseDir: String, path: String) =>
    includeFileFn(path) {
      case (fileName, fileType) =>
        env.getExistingFile(s"$baseDir${ File.separator }$fileName$fileType")
    }

  private val memoizedIncludeFile = (baseDir: String, path: String) =>
    Memoizer(includeFileInline(baseDir, path))

  /** Include a local file if it exists; cache results for relative filePaths.
    * Side effect: appends contents of file to PFView's StringBuffer.
    * @param filePath
    * @param baseDir can be relative or absolute. Default is to look in the Play app's `public` directory */
  def includeFile(filePath: String, baseDir: String = "public", memoize: Boolean=true): String = {
    val path: String = s"$baseDir${ File.separator }$filePath"
    if (filePath.startsWith(File.separator) || !memoize)
      includeFileInline(baseDir, path)
    else
      memoizedIncludeFile(baseDir, path).toString
  }

  @inline def includeLocalizedFileInline(lang: Lang): ((String, String)) => StringBuilder = tuple2 => {
    val (filePath: String, baseDir: String) = tuple2
    includeFileFn(s"$baseDir${ File.separator }$filePath") {
      case (fileName, fileType) =>
        val l10n = "_" + lang.language
        val l10nCountry = if (lang.country.nonEmpty) "_" + lang.language + "-" + lang.country else l10n
        // Retrieve the file with the current language & country, or just the generic language version, or just the originally specified version
        env.getExistingFile(s"$baseDir/$fileName$l10nCountry$fileType")
          .orElse(env.getExistingFile(s"$baseDir/$fileName$l10n$fileType"))
          .orElse(env.getExistingFile(s"$baseDir/$filePath"))
    }
  }

  private def memoizedIncludeLocalizedFile(lang: Lang) = Memoizer(includeLocalizedFileInline(lang))

  /** Include a local file, using a localized version if it exists.
    * For example, specify `filePath` `blah.html` and `lang` `en-US` to search for `blah_en-US.html` with a fallback to `blah_en.html` and then `blah.html`.
    * Side effect: appends contents of file to PFView's StringBuffer.
    * @param filePath can be a generic i18n path.
    * @param baseDir Default is to look in the Play app's `public` directory
    * @param lang Language to consider for filePath l10n; does not need to contain a country code */
  def includeLocalizedFile(filePath: String,
                           baseDir: String = "public",
                           memoize: Boolean=true)(implicit lang: Lang=Lang("en")): StringBuilder = {
    if (filePath.startsWith(File.separator) || !memoize)
      includeLocalizedFileInline(lang)((filePath, baseDir))
    else
      memoizedIncludeLocalizedFile(lang)((filePath, baseDir))
  }

  /** Include a local file if it exists. File contents are memoized if file is local.
    * Side effect: appends contents of file to PFView's StringBuffer.
    * @param path can be a generic i18n path, either absolute or relative.
    * @param fn Function2 accepts (fileName, fileType) => Option[File]; performs whatever magic is required */
  def includeFileFn(path: String)(fn: ((String, String)) => Option[File]): StringBuilder = {
    val lastDotIndex = path.lastIndexOf(".")
    val nameType: (String, String) = if (lastDotIndex >= 0) path.splitAt(lastDotIndex) else (path, "")
    val maybeFile: Option[File] = fn(nameType)
    val result = maybeFile.map { file: File =>
      import scala.io.Source.fromFile
      val file2 = if (file.isDirectory) new File(file, "index.html") else file
      val content = fromFile(file2).mkString
      content
    }.getOrElse(s"""PFVIew file failed to include '$path'""")
    sb.append(result)
  }

  /** Include the contents of a URL; relative URLs are not supported.
    * @param url String representation of URL to fetch
    * @param encoding defaults to UTF-8 */
  def includeUrl(url: String, encoding: String="UTF-8"): StringBuilder = sb.append(try {
    scala.io.Source.fromURL(url, encoding: String).mkString
  } catch {
    case e: Exception =>
      s"""PFVIew URL include failed; ${ e.getClass.getName }: ${ e.getMessage } for $url with encoding $encoding"""
  })

  /** Side effect: appends contents of thenClause to PFView's StringBuffer if predicate is true. */
  @inline def unIf(predicate: Boolean)(thenClause: => String): String =
    if (predicate) thenClause else ""

  /** Side effect: appends contents of thenClause to PFView's StringBuffer if predicate is true. */
  @inline def If(predicate: Boolean)(thenClause: => String): String = unIf (predicate) (thenClause)

  /** @return contents of StringBuffer as Html */
  def toHtml = Html(toString)

  /** @return contents of StringBuffer as String */
  override def toString: String = sb.toString()
}
mslinn/PFView
src/main/scala-2.11/views/PFView.scala
Scala
mit
5,914
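A minimal usage sketch for the PFView class above (not part of the library itself): it assumes an implicit play.api.Environment supplied by the host Play application, and the GreetingView name is purely illustrative.

import play.api.Environment
import views.PFView

// Illustrative subclass; `env` is an assumed Environment provided by the enclosing Play app.
class GreetingView(who: String)(implicit env: Environment) extends PFView {
  ++("<h1>Hello, ")
  ++(who)
  ++("</h1>")
}

// new GreetingView("world").toHtml  // Html("<h1>Hello, world</h1>")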
package bifrost.network

import scala.util.Random

trait SendingStrategy {
  def choose(peers: Seq[ConnectedPeer]): Seq[ConnectedPeer]
}

object SendToRandom extends SendingStrategy {
  override def choose(peers: Seq[ConnectedPeer]): Seq[ConnectedPeer] =
    peers.nonEmpty match {
      case true  => Seq(peers(Random.nextInt(peers.length)))
      case false => Seq()
    }
}

case object Broadcast extends SendingStrategy {
  override def choose(peers: Seq[ConnectedPeer]): Seq[ConnectedPeer] = peers
}

case class BroadcastExceptOf(exceptOf: Seq[ConnectedPeer]) extends SendingStrategy {
  override def choose(peers: Seq[ConnectedPeer]): Seq[ConnectedPeer] =
    peers.filterNot(exceptOf.contains)
}

case class SendToPeer(chosenPeer: ConnectedPeer) extends SendingStrategy {
  override def choose(peers: Seq[ConnectedPeer]): Seq[ConnectedPeer] = Seq(chosenPeer)
}

case class SendToPeers(chosenPeers: Seq[ConnectedPeer]) extends SendingStrategy {
  override def choose(peers: Seq[ConnectedPeer]): Seq[ConnectedPeer] = chosenPeers
}

case class SendToRandomFromChosen(chosenPeers: Seq[ConnectedPeer]) extends SendingStrategy {
  override def choose(peers: Seq[ConnectedPeer]): Seq[ConnectedPeer] =
    Seq(chosenPeers(Random.nextInt(chosenPeers.length)))
}
Topl/Project-Bifrost
src/main/scala/bifrost/network/SendingStrategy.scala
Scala
mpl-2.0
1,285
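A brief behavioural sketch of the strategies above, not part of the Bifrost sources; the ConnectedPeer values are assumed to be supplied by the caller, since their construction is not shown here.

import bifrost.network._

object SendingStrategyExample {
  // p1, p2, p3 are hypothetical peers provided by the caller.
  def demo(p1: ConnectedPeer, p2: ConnectedPeer, p3: ConnectedPeer): Unit = {
    val peers = Seq(p1, p2, p3)
    assert(Broadcast.choose(peers) == peers)                        // everyone
    assert(SendToPeer(p2).choose(peers) == Seq(p2))                 // a single fixed peer
    assert(BroadcastExceptOf(Seq(p1)).choose(peers) == Seq(p2, p3)) // everyone except p1
    assert(SendToRandom.choose(peers).size == 1)                    // exactly one random peer
  }
}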
package com.outr.arango.query.dsl

import com.outr.arango.Ref
import com.outr.arango.query.Query

sealed trait CollectWith {
  def value: String
}

object CollectWith {
  object Count extends CollectWith {
    override def value: String = "COUNT"
  }

  class Partial(collectWith: CollectWith) {
    def INTO(ref: Ref): Unit = {
      val context = QueryBuilderContext()
      val query = Query(
        s"COLLECT WITH ${collectWith.value} INTO ${context.name(ref)}"
      )
      context.addQuery(query)
    }
  }
}
outr/scarango
driver/src/main/scala/com/outr/arango/query/dsl/CollectWith.scala
Scala
mit
516
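A hedged sketch of how the fragment above slots into the scarango query DSL: `totalRef` is an assumed Ref, and the call is presumed to run inside an active QueryBuilderContext, as the surrounding DSL would normally guarantee.

import com.outr.arango.Ref
import com.outr.arango.query.dsl.CollectWith

object CollectWithExample {
  // `totalRef` is hypothetical; a real query would obtain it from the enclosing DSL.
  def countInto(totalRef: Ref): Unit = {
    // Appends: COLLECT WITH COUNT INTO <name of totalRef>
    new CollectWith.Partial(CollectWith.Count).INTO(totalRef)
  }
}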
package fr.inria.hopla.visitorCase.HtmlTag

/**
 * Created by JIN Benli on 26/03/14.
 */
object Tags extends Tags

trait Tags {
  val html = "html".tag
  val head = "head".tag
  val body = "body".tag
  val h1 = "h1".tag
}
rouvoy/hopla
hopla-toolchain/src/main/scala/fr/inria/hopla/VisitorCase/HtmlTag/Tags.scala
Scala
apache-2.0
223
package japgolly.scalajs.react.component import scala.scalajs.js import japgolly.scalajs.react.internal._ import japgolly.scalajs.react.{Callback, Children, CtorType, PropsChildren, vdom, raw => RAW} import scala.scalajs.js.| object Js extends JsBaseComponentTemplate[RAW.React.ComponentClassP] { def apply[P <: js.Object, C <: Children, S <: js.Object] (raw: js.Any) (implicit s: CtorType.Summoner[P, C], where: sourcecode.FullName, line: sourcecode.Line): Component[P, S, s.CT] = { InspectRaw.assertIsComponent(raw, "JsComponent", where, line) force[P, C, S](raw)(s) } def force[P <: js.Object, C <: Children, S <: js.Object](raw: js.Any)(implicit s: CtorType.Summoner[P, C]): Component[P, S, s.CT] = { val rc = raw.asInstanceOf[RAW.React.ComponentClass[P, S]] component[P, C, S](rc)(s) } // =================================================================================================================== type RawMounted[P <: js.Object, S <: js.Object] = RAW.React.Component[P, S] type Component[P <: js.Object, S <: js.Object, CT[-p, +u] <: CtorType[p, u]] = ComponentWithRawType[P, S, RawMounted[P, S], CT] type Unmounted[P <: js.Object, S <: js.Object] = UnmountedWithRawType[P, S, RawMounted[P, S]] type Mounted [P <: js.Object, S <: js.Object] = MountedWithRawType [P, S, RawMounted[P, S]] type ComponentWithFacade[P <: js.Object, S <: js.Object, F <: js.Object, CT[-p, +u] <: CtorType[p, u]] = ComponentWithRawType[P, S, RawMounted[P, S] with F, CT] type UnmountedWithFacade[P <: js.Object, S <: js.Object, F <: js.Object] = UnmountedWithRawType[P, S, RawMounted[P, S] with F] type MountedWithFacade[P <: js.Object, S <: js.Object, F <: js.Object] = MountedWithRawType[P, S, RawMounted[P, S] with F] type ComponentWithRawType[P <: js.Object, S <: js.Object, R <: RawMounted[P, S], CT[-p, +u] <: CtorType[p, u]] = ComponentRoot[P, CT, UnmountedWithRawType[P, S, R]] type UnmountedWithRawType[P <: js.Object, S <: js.Object, R <: RawMounted[P, S]] = UnmountedRoot[P, MountedWithRawType[P, S, R]] type MountedWithRawType[P <: js.Object, S <: js.Object, R <: RawMounted[P, S]] = MountedRoot[Effect.Id, P, S, R] private def readDisplayName(a: RAW.HasDisplayName): String = a.displayName.getOrElse("") override protected val rawComponentDisplayName: RAW.React.ComponentClassUntyped => String = readDisplayName // =================================================================================================================== sealed trait UnmountedSimple[P, M] extends Generic.UnmountedSimple[P, M] { override def mapUnmountedProps[P2](f: P => P2): UnmountedSimple[P2, M] override def mapMounted[M2](f: M => M2): UnmountedSimple[P, M2] override type Raw <: RAW.React.ComponentElement[_ <: js.Object] override final def displayName = readDisplayName(raw.`type`) } sealed trait UnmountedWithRoot[P1, M1, P0 <: js.Object, M0] extends UnmountedSimple[P1, M1] with Generic.UnmountedWithRoot[P1, M1, P0, M0] { override final type Root = UnmountedRoot[P0, M0] override final type Raw = RAW.React.ComponentElement[P0] override def mapUnmountedProps[P2](f: P1 => P2): UnmountedWithRoot[P2, M1, P0, M0] override def mapMounted[M2](f: M1 => M2): UnmountedWithRoot[P1, M2, P0, M0] } type UnmountedRoot[P <: js.Object, M] = UnmountedWithRoot[P, M, P, M] def unmountedRoot[P <: js.Object, S <: js.Object, M](r: RAW.React.ComponentElement[P], m: RAW.React.Component[P, S] => M): UnmountedRoot[P, M] = new UnmountedRoot[P, M] { override def root = this override val raw = r override val mountRaw = m.asInstanceOf[RAW.React.ComponentUntyped => M] // TODO Do 
better override val vdomElement = vdom.VdomElement(raw) override def key = jsNullToOption(raw.key) override def ref = jsNullToOption(raw.ref) override def props = raw.props override def propsChildren = PropsChildren.fromRawProps(raw.props) override def mapUnmountedProps[P2](f: P => P2) = mappedU(this)(f, identityFn) override def mapMounted[M2](f: M => M2) = mappedU(this)(identityFn, f) } private def mappedU[P2, M2, P1, M1, P0 <: js.Object, M0](from: UnmountedWithRoot[P1, M1, P0, M0]) (mp: P1 => P2, mm: M1 => M2) : UnmountedWithRoot[P2, M2, P0, M0] = new UnmountedWithRoot[P2, M2, P0, M0] { override def root = from.root override def props = mp(from.props) override val raw = from.raw override def vdomElement = from.vdomElement override def key = from.key override def ref = from.ref override def propsChildren = from.propsChildren override def mapUnmountedProps[P3](f: P2 => P3) = mappedU(from)(f compose mp, mm) override def mapMounted[M3](f: M2 => M3) = mappedU(from)(mp, f compose mm) override val mountRaw = mm compose from.mountRaw } // =================================================================================================================== sealed trait MountedSimple[F[_], P, S, R <: RawMounted[_ <: js.Object, _ <: js.Object]] extends Generic.MountedSimple[F, P, S] { override type WithEffect[F2[_]] <: MountedSimple[F2, P, S, R] override type WithMappedProps[P2] <: MountedSimple[F, P2, S, R] override type WithMappedState[S2] <: MountedSimple[F, P, S2, R] override final type Raw = R override final def displayName = readDisplayName(raw.constructor) def addFacade[T <: js.Object]: MountedSimple[F, P, S, R with T] // def getDefaultProps: Props // def getInitialState: js.Object | Null // def render(): React.Element } sealed trait MountedWithRoot[F[_], P1, S1, R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object] extends MountedSimple[F, P1, S1, R] with Generic.MountedWithRoot[F, P1, S1, P0, S0] { override final type Root = MountedRoot[F, P0, S0, R] override final type WithEffect[F2[_]] = MountedWithRoot[F2, P1, S1, R, P0, S0] override final type WithMappedProps[P2] = MountedWithRoot[F, P2, S1, R, P0, S0] override final type WithMappedState[S2] = MountedWithRoot[F, P1, S2, R, P0, S0] final def withRawType[R2 <: RawMounted[P0, S0]]: MountedWithRoot[F, P1, S1, R2, P0, S0] = this.asInstanceOf[MountedWithRoot[F, P1, S1, R2, P0, S0]] override final def addFacade[T <: js.Object]: MountedWithRoot[F, P1, S1, R with T, P0, S0] = withRawType[R with T] } type MountedRoot[F[_], P <: js.Object, S <: js.Object, R <: RawMounted[P, S]] = MountedWithRoot[F, P, S, R, P, S] def mountedRoot[P <: js.Object, S <: js.Object, R <: RawMounted[P, S]](r: R): MountedRoot[Effect.Id, P, S, R] = new Template.MountedWithRoot[Effect.Id, P, S] with MountedRoot[Effect.Id, P, S, R] { override implicit def F = Effect.idInstance override def root = this override val raw = r override def props = raw.props override def propsChildren = PropsChildren.fromRawProps(raw.props) override def state = raw.state override def getDOMNode = Generic.MountedDomNode.force(RAW.ReactDOM.findDOMNode(raw)) override def setState(state: S, callback: Callback): Unit = raw.setState(state, callback.toJsFn) override def modState(mod: S => S, callback: Callback): Unit = { val jsFn1 = mod: js.Function1[S, S] val jsFn2 = jsFn1.asInstanceOf[js.Function2[S, P, S | Null]] raw.modState(jsFn2, callback.toJsFn) } override def modState(mod: (S, P) => S, callback: Callback): Unit = { val jsFn1 = mod: js.Function2[S, P, S] val jsFn2 = 
jsFn1.asInstanceOf[js.Function2[S, P, S | Null]] raw.modState(jsFn2, callback.toJsFn) } override def setStateOption(state: Option[S], callback: Callback): Unit = setState(state getOrElse null.asInstanceOf[S], callback) override def modStateOption(mod: S => Option[S], callback: Callback): Unit = modState(mod(_) getOrElse null.asInstanceOf[S], callback) override def modStateOption(mod: (S, P) => Option[S], callback: Callback): Unit = modState(mod(_, _) getOrElse null.asInstanceOf[S], callback) override def forceUpdate(callback: Callback): Unit = raw.forceUpdate(callback.toJsFn) override type Mapped[F1[_], P1, S1] = MountedWithRoot[F1, P1, S1, R, P, S] override def mapped[F[_], P1, S1](mp: P => P1, ls: Lens[S, S1])(implicit ft: Effect.Trans[Effect.Id, F]) = mappedM(this)(mp, ls) } private def mappedM[F[_], P2, S2, P1, S1, R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object] (from: MountedWithRoot[Effect.Id, P1, S1, R, P0, S0]) (mp: P1 => P2, ls: Lens[S1, S2]) (implicit ft: Effect.Trans[Effect.Id, F]) : MountedWithRoot[F, P2, S2, R, P0, S0] = new Template.MountedMapped[F, P2, S2, P1, S1, P0, S0](from)(mp, ls) with MountedWithRoot[F, P2, S2, R, P0, S0] { override def root = from.root.withEffect[F] override val raw = from.raw override type Mapped[F3[_], P3, S3] = MountedWithRoot[F3, P3, S3, R, P0, S0] override def mapped[F3[_], P3, S3](mp: P1 => P3, ls: Lens[S1, S3])(implicit ft: Effect.Trans[Effect.Id, F3]) = mappedM(from)(mp, ls)(ft) } // =================================================================================================================== def component[P <: js.Object, C <: Children, S <: js.Object](rc: RAW.React.ComponentClass[P, S])(implicit s: CtorType.Summoner[P, C]): Component[P, S, s.CT] = componentRoot[P, s.CT, Unmounted[P, S]](rc, s.pf.rmap(s.summon(rc))(unmounted))(s.pf) def unmounted[P <: js.Object, S <: js.Object](r: RAW.React.ComponentElement[P]): Unmounted[P, S] = unmountedRoot(r, mounted) def mounted[P <: js.Object, S <: js.Object](r: RawMounted[P, S]): Mounted[P, S] = mountedRoot(r) // =================================================================================================================== type ComponentMapped[F[_], P1, S1, CT1[-p, +u] <: CtorType[p, u], R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object, CT0[-p, +u] <: CtorType[p, u]] = ComponentWithRoot[P1, CT1, UnmountedMapped[F, P1, S1, R, P0, S0], P0, CT0, UnmountedWithRawType[P0, S0, R]] type UnmountedMapped[F[_], P1, S1, R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object] = UnmountedWithRoot[P1, MountedMapped[F, P1, S1, R, P0, S0], P0, MountedWithRawType[P0, S0, R]] type MountedMapped[F[_], P1, S1, R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object] = MountedWithRoot[F, P1, S1, R, P0, S0] implicit def toJsUnmountedOps[F[_], P1, S1, R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object](x: UnmountedMapped[F, P1, S1, R, P0, S0]): JsUnmountedOps[F, P1, S1, R, P0, S0] = new JsUnmountedOps(x) implicit def toJsComponentOps[F[_], P1, S1, CT1[-p, +u] <: CtorType[p, u], R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object, CT0[-p, +u] <: CtorType[p, u]](x: ComponentMapped[F, P1, S1, CT1, R, P0, S0, CT0]): JsComponentOps[F, P1, S1, CT1, R, P0, S0, CT0] = new JsComponentOps(x) // Scala bug requires special help for the Effect.Id case implicit def toJsUnmountedOpsI[P1, S1, R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object](x: UnmountedMapped[Effect.Id, P1, S1, R, P0, S0]): JsUnmountedOps[Effect.Id, P1, S1, R, P0, S0] = new JsUnmountedOps(x) implicit def 
toJsComponentOpsI[P1, S1, CT1[-p, +u] <: CtorType[p, u], R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object, CT0[-p, +u] <: CtorType[p, u]](x: ComponentMapped[Effect.Id, P1, S1, CT1, R, P0, S0, CT0]): JsComponentOps[Effect.Id, P1, S1, CT1, R, P0, S0, CT0] = new JsComponentOps(x) final class JsUnmountedOps[F[_], P1, S1, R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object](private val self: UnmountedMapped[F, P1, S1, R, P0, S0]) extends AnyVal { def withRawType[R2 <: RawMounted[P0, S0]]: UnmountedMapped[F, P1, S1, R2, P0, S0] = self.asInstanceOf[UnmountedMapped[F, P1, S1, R2, P0, S0]] def addFacade[T <: js.Object]: UnmountedMapped[F, P1, S1, R with T, P0, S0] = withRawType[R with T] def mapProps[P2](f: P1 => P2): UnmountedMapped[F, P2, S1, R, P0, S0] = self.mapUnmountedProps(f).mapMounted(_ mapProps f) def xmapState[S2](f: S1 => S2)(g: S2 => S1): UnmountedMapped[F, P1, S2, R, P0, S0] = self.mapMounted(_.xmapState(f)(g)) def zoomState[S2](get: S1 => S2)(set: S2 => S1 => S1): UnmountedMapped[F, P1, S2, R, P0, S0] = self.mapMounted(_.zoomState(get)(set)) } final class JsComponentOps[F[_], P1, S1, CT1[-p, +u] <: CtorType[p, u], R <: RawMounted[P0, S0], P0 <: js.Object, S0 <: js.Object, CT0[-p, +u] <: CtorType[p, u]](private val self: ComponentMapped[F, P1, S1, CT1, R, P0, S0, CT0]) extends AnyVal { def withRawType[R2 <: RawMounted[P0, S0]]: ComponentMapped[F, P1, S1, CT1, R2, P0, S0, CT0] = self.asInstanceOf[ComponentMapped[F, P1, S1, CT1, R2, P0, S0, CT0]] def addFacade[T <: js.Object]: ComponentMapped[F, P1, S1, CT1, R with T, P0, S0, CT0] = withRawType[R with T] def xmapProps[P2](f: P1 => P2)(g: P2 => P1): ComponentMapped[F, P2, S1, CT1, R, P0, S0, CT0] = self.cmapCtorProps(g).mapUnmounted(_ mapProps f) def xmapState[S2](f: S1 => S2)(g: S2 => S1): ComponentMapped[F, P1, S2, CT1, R, P0, S0, CT0] = self.mapUnmounted(_.xmapState(f)(g)) def zoomState[S2](get: S1 => S2)(set: S2 => S1 => S1): ComponentMapped[F, P1, S2, CT1, R, P0, S0, CT0] = self.mapUnmounted(_.zoomState(get)(set)) def mapMounted[M2](f: MountedMapped[F, P1, S1, R, P0, S0] => M2) = self.mapUnmounted(_ mapMounted f) } }
matthughes/scalajs-react
core/src/main/scala/japgolly/scalajs/react/component/Js.scala
Scala
apache-2.0
14,389
package org.phenoscape.owl

import org.phenoscape.owl.Vocab._
import org.phenoscape.scowl._
import org.semanticweb.owlapi.apibinding.OWLManager
import org.semanticweb.owlapi.model._
import org.semanticweb.owlapi.model.parameters.Imports

import java.io.File
import scala.jdk.CollectionConverters._

object ReverseDevelopsFromRuleGenerator {
  //FIXME rename to reverse absence rules

  def main(args: Array[String]): Unit = {
    val manager = OWLManager.createOWLOntologyManager
    val ontology = manager.loadOntologyFromOntologyDocument(new File(args(0)))
    val developsFromOntology = generateDevelopsFromRules(ontology)
    manager.saveOntology(developsFromOntology, IRI.create(new File(args(1))))
  }

  def generateDevelopsFromRules(ontology: OWLOntology): OWLOntology = {
    val manager = ontology.getOWLOntologyManager
    val newIRI = ontology.getOntologyID.getOntologyIRI.toString + "/reverse_develops_from_rules.owl"
    val newAxioms = ontology.getClassesInSignature(Imports.EXCLUDED).asScala flatMap createRules
    manager.createOntology(newAxioms.toSet[OWLAxiom].asJava, IRI.create(newIRI))
  }

  def createRules(ontClass: OWLClass): Set[OWLSubClassOfAxiom] =
    Set(
      (has_part some (DEVELOPS_FROM some ontClass)) SubClassOf (has_part some ontClass),
      (has_part some (part_of some ontClass)) SubClassOf (has_part some ontClass)
    )
}
phenoscape/phenoscape-owl-tools
src/main/scala/org/phenoscape/owl/ReverseDevelopsFromRuleGenerator.scala
Scala
mit
1,369
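A small isolated sketch of createRules (not part of the Phenoscape sources): the class IRI is a placeholder, and the result is the pair of reverse develops-from/part-of SubClassOf axioms generated for that class.

import org.phenoscape.owl.ReverseDevelopsFromRuleGenerator
import org.semanticweb.owlapi.apibinding.OWLManager
import org.semanticweb.owlapi.model.IRI

object ReverseDevelopsFromExample {
  private val factory = OWLManager.getOWLDataFactory
  // Placeholder IRI purely for illustration.
  private val eye = factory.getOWLClass(IRI.create("http://example.org/eye"))
  // Two SubClassOf axioms relating has_part, DEVELOPS_FROM and part_of for the class.
  val rules = ReverseDevelopsFromRuleGenerator.createRules(eye)
}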
import scala.io._

class InnoString(str: String) {
  private var name = str

  // Reflect the current (possibly concatenated) value rather than the original argument.
  override def toString = name

  def concat(arg: String) = {
    name = name + arg
    println(this.name)
  }

  def +(that: InnoString) = new InnoString(this.name + that.name)

  def ==(that: InnoString) = new InnoString((this.name == that.name).toString)

  def isPalindrome() = this.name == this.name.reverse
}

object ex3 {
  def main(args: Array[String]): Unit = {
    val instance = new InnoString("Inno")
    instance.concat("Vaccer")
    val instance2 = new InnoString("ABCBA")
    println(instance + instance2)
    println(instance == instance2)
    println(instance2.isPalindrome)
  }
}
devmukul44/Scala_Starter
ex3.scala
Scala
mit
649
/*
Copyright 2014 Twitter, Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.thrift.macros

import com.twitter.bijection.macros.MacroGenerated
import com.twitter.scalding.serialization.JavaStreamEnrichments._
import com.twitter.scalding.serialization.OrderedSerialization
import java.io._
import org.scalatest.Matchers

object TestHelper extends Matchers {
  def isMg[T](t: T): T = {
    t shouldBe a[MacroGenerated]
    t
  }

  def rt[T](t: T)(implicit orderedBuffer: OrderedSerialization[T]) = {
    val r = deserializeSeq(40, serializeSeq((0 until 40).map(_ => t)))
    assert(r.distinct.size == 1)
    r.head
  }

  def oBufCompare[T](a: T, b: T)(implicit obuf: OrderedSerialization[T]): Int =
    obuf.compare(a, b)

  def deserializeSeq[T](items: Int, buf: InputStream)(implicit
      orderedBuffer: OrderedSerialization[T]
  ): Seq[T] =
    (0 until items).map { _ => orderedBuffer.read(buf).get }.toList

  def serialize[T](t: T)(implicit orderedBuffer: OrderedSerialization[T]): InputStream =
    serializeSeq(List(t))

  def serializeSeq[T](t: Seq[T])(implicit orderedBuffer: OrderedSerialization[T]): InputStream = {
    val baos = new ByteArrayOutputStream
    t.foreach { e => orderedBuffer.write(baos, e) }
    baos.toInputStream
  }

  def rawCompare[T](a: T, b: T)(implicit obuf: OrderedSerialization[T]): Int =
    obuf.compareBinary(serialize(a), serialize(b)).unsafeToInt

  def checkManyExplicit[T](i: List[T])(implicit obuf: OrderedSerialization[T]) = {
    val serializedA = serializeSeq(i)
    val serializedB = serializeSeq(i)
    (0 until i.size).foreach { _ =>
      assert(obuf.compareBinary(serializedA, serializedB).unsafeToInt === 0)
    }
  }

  def compareSerialized[T](a: T, b: T)(implicit
      orderedBuffer: OrderedSerialization[T]
  ): OrderedSerialization.Result = {
    val bufA = serializeSeq[T]((0 until 20).map(_ => a))
    val bufB = serializeSeq[T]((0 until 20).map(_ => b))
    val r = (0 until 20).map { _ => orderedBuffer.compareBinary(bufA, bufB) }
    if (r.distinct.size == 1) r.head
    else sys.error("Results are inconsistent.." + r)
  }
}
twitter/scalding
scalding-thrift-macros/src/test/scala/com/twitter/scalding/thrift/macros/TestHelper.scala
Scala
apache-2.0
2,645
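A short sketch of how the helpers above are typically exercised; it assumes an OrderedSerialization[Int] is available implicitly (scalding's macro-generated serializers would normally provide one), and the demo method is purely illustrative.

import com.twitter.scalding.serialization.OrderedSerialization
import com.twitter.scalding.thrift.macros.TestHelper

object TestHelperExample {
  // `ord` is an assumed macro-generated serializer for Int.
  def demo(implicit ord: OrderedSerialization[Int]): Unit = {
    assert(TestHelper.rt(42) == 42)          // value survives a serialize/deserialize round trip
    assert(TestHelper.rawCompare(1, 2) < 0)  // binary comparison agrees with natural ordering
    TestHelper.checkManyExplicit(List(1, 2, 3))
  }
}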
package ch.randm.uoi import Unit._ /** Storage size is the amount of Bits that a certain entity (i.e. a file) takes up in memory or on a disk. * * This class provides a way for developers to abstract over * [[https://en.wikipedia.org/wiki/Units_of_information Units of Information]]. An initial value can be created by * calling the static `apply(double, Unit)` method, which will return a new `UnitsOfInformation` object, where the * size is correctly calculated from the `amount` and `unit` parameters. * * This class is immutable, meaning you can not change the internal state once it's set. You're only able to get the * value back in the unit you want. * * While the `amount` is self explanatory, it may not be so clear what number should be passed as `unit`. Here the * predefined Units come into play. Let's assume you want to represent 2.5 Kilobytes. * * {{{ * // Java * UnitsOfInformation.apply(2.5, Unit.KB) * // Scala (with imported implicits) * 2.5.KB * }}} * * General usage examples are shown below. * * {{{ * // Create the UnitsOfInformation object * val size = UnitsOfInformation(250, Unit.MB) * // Read in Mebibyte * size in MiB * // Will print "0.25 GB" * println(size format(GB, "%.2f")) * // Will print "250 MB" * println(size format "%.0f") * }}} * * One practical feature of this class is that you can read the value out with it's best suited Unit. * * {{{ * // Will return MB * UnitsOfInformation(250000000L) unit * // Will return GB * UnitsOfInformation(2500000000L) unit * }}} * * In Scala you can initialize the `UnitsOfInformation` object with the help of implicit conversions. For this to work, * you have to import the implicit class [[ch.randm.uoi.UnitsOfInformation.Implicits]]. * * {{{ * // Will print "2.5 GB" * (2.5 GB) format "%.1f" * // Returns 2.5 * 2500.MB in GB * }}} * * @param size The internal amount of the UnitsOfInformation object, in Bits * @throws IllegalArgumentException In case the supplied `size` is negative */ class UnitsOfInformation(val size: BigInt) { import UnitsOfInformation._ if(size < 0) throw new IllegalArgumentException("Amount must be greater than or equal to zero.") /** @see [[unit(unit:ch\.randm\.uoi\.Unit):ch\.randm\.uoi\.Unit*]] */ def unit(): Unit = unit(B) /** Find out and return the Unit best suited to represent the internal value by dividing the `size` by the Unit's * value and checking if the result is greater than one, starting with the biggest Unit. * * The number system (decimal or binary) is determined automatically. * * {{{ * // Create the UnitsOfInformation object * val size1 = UnitsOfInformation(250000000000L) * // Return the best Byte Unit (in this case, `Unit.GB`) * size1.unit * // Return the best Bit Unit (in this case, `Unit.GiB`) * size1.unit(b) * }}} * * @param unit The base unit, either `b` (Bit) or `B` (Byte) * @return The Unit best suited to represent this instance's value */ def unit(unit: Unit): Unit = { // Find out if we're using a decimal or binary Unit. This is determined by dividing the Bit count by 1024 and // multiplying that number with 100. If the result is a natural number that means that `size` can be represented // with two decimal places in the binary system, which is good enough. 
Otherwise we use decimal val system = (BigDecimal(size) / Decimal * 100).isWhole match { case true => Decimal case false => (BigDecimal(size) / Binary * 100).isWhole match { case true => Binary case false => Decimal } } val units = UnitsOfInformation.getUnit(system, unit.multiplier).filter(size / _.value >= 1).sortBy(_.exponent).reverse units.headOption match { case Some(x) => x case None => unit } } /** * Returns the internal value in the requested Unit. * * {{{ * // Create the UnitsOfInformation object * val size = UnitsOfInformation(250, Unit.MB) * // Read in Mebibyte * size.in(MiB) * }}} * * @param unit The requested Unit of information * @return A `double` value representing the object's `size` in the requested Unit of information */ def in(unit: Unit): Double = (BigDecimal(size) / BigDecimal(unit.value)).toDouble /** @see [[format(unit:ch\.randm\.uoi\.Unit,formatting:String):String*]] */ def format(formatting: String): String = format(unit(), formatting) /** * This is a convenience method to return a formatted string, which automatically appends the Unit's abbreviation. * Internally the default Scala sting formatter is used, please refer to that documentation if you want to find out * what String to pass to the `format` argument. * * {{{ * // Create the UnitsOfInformation object * UnitsOfInformation size = UnitsOfInformation.apply(250, Unit.MB); * // Print in Gigabyte (will print "0.25 GB") * System.out.println(size.format(Unit.GB, "%.2f")); * // Print in Gigabyte (will print "0.3 GB") * System.out.println(size.format(Unit.GB, "%.1f")); * // Will print "250 MB" * System.out.println(size.format("%f")); * }}} * * @param unit The requested Unit of information * @param formatting The format in which to format the `double` value * @return A String with format `format + " " + unit.name()` */ def format(unit: Unit, formatting: String): String = formatting.format(in(unit)) + " " + unit.name() /** Immutable addition of two `UnitsOfInformation` objects. * * {{{ * // returns "5 MB" * 3.MB + 2.MB format "%f" * }}} * * @param that The amount of units of information to add to this one * @return A new `UnitsOfInformation` object with the combined size of both objects */ def +(that: UnitsOfInformation) = UnitsOfInformation((size + that.size).toDouble) /** Immutable subtraction of two `UnitsOfInformation` objects. * * {{{ * // returns "1 MB" * 3.MB - 2.MB format "%f" * }}} * * @param that The amount of units of information to subtract from this one * @return A new `UnitsOfInformation` object with size `this` minus `that` * @throws IllegalArgumentException If the result is below zero */ def -(that: UnitsOfInformation) = UnitsOfInformation((size - that.size).toDouble) /** Immutable multiplication of a `UnitsOfInformation` object. * * {{{ * // returns "7.5 MB" * 3.MB * 2.5 format "%.1f" * }}} * * @param multiplier The multiplier * @return A new `UnitsOfInformation` object whose size is `multiplier` times bigger than the original * @throws IllegalArgumentException If the result is below zero or a fraction of a bit */ def *(multiplier: Double) = UnitsOfInformation(size.toDouble * multiplier) /** Immutable division of a `UnitsOfInformation` object. 
* * {{{ * // returns "3 MB" * 4.5.MB / 1.5 format "%f" * }}} * * @param divisor The divisor * @return A new `UnitsOfInformation` object whose size is divided by the `divisor` * @throws IllegalArgumentException If the result is below zero or a fraction of a bit * @throws ArithmeticException If the divisor is zero */ def /(divisor: Double) = UnitsOfInformation(size.toDouble / divisor) override def equals(o: Any): Boolean = o match { case that: UnitsOfInformation => size == that.size case _ => false } override def hashCode(): Int = size.toInt override def toString: String = "UnitsOfInformation{" + format(unit(), "%.2f") + "}" } /** Companion object of the [[UnitsOfInformation]] class, holding the static values and methods. */ object UnitsOfInformation { /** Floating point precision, used when dividing [[BigDecimal]] numbers to round the result, if necessary. */ private val Precision: Int = 5 /** The default value for all the parts of a Unit. */ val Default: Int = 1 // There are two ways of calculating units of information: decimal and binary val Decimal: Int = 1000 val Binary: Int = 1024 // We implement the storage size magnitudes up until x^8, x being either `Decimal` or `Binary` val Kilo: Int = 1 val Mega: Int = 2 val Giga: Int = 3 val Tera: Int = 4 val Peta: Int = 5 val Exa: Int = 6 val Zetta: Int = 7 val Yotta: Int = 8 /** A list of all the available Units */ val units: List[Unit] = Unit.values().toList /** @see [[apply(amount:Double,unit:ch\.randm\.uoi\.Unit):ch\.randm\.uoi\.Unit*]] */ def apply(amount: Double): UnitsOfInformation = apply(amount, b) /** Factory method for the UnitsOfInformation class, to have a more sophisticated API. * * {{{ * // Create a UnitsOfInformation object representing 2.5 MB * UnitsOfInformation(2.5, Unit.MB); * // Create a UnitsOfInformation object representing 1024 Bit * UnitsOfInformation(1024); * }}} * * @param amount The amount of Units to represent (must be greater than zero) * @param unit Unit to be used to calculate the real value of the instance * @return An object that can be used to calculate any unit of information from the internal value * @throws IllegalArgumentException If the `amount` is infinite or a fraction of a Bit */ def apply(amount: Double, unit: Unit): UnitsOfInformation = { if(amount.isInfinite || amount.isNaN) throw new IllegalArgumentException("Amount can not be infinite.") // Recursive inner function to map the `amount` and `unit` in case the `amount` is not an integer. This is // required because we want to make sure that `amount` and `unit` combined doesn't result in a fraction of a // Bit. In that case, we would throw an `IllegalArgumentException` def inner(d: Double, u: Unit): UnitsOfInformation = if(d.isWhole) new UnitsOfInformation(u.value * d.longValue()) else // Get the next Unit where the exponent is one lower getUnit(u.system, u.multiplier, u.exponent - 1) match { case Some(x) => inner(d * u.system, x) case None => throw new IllegalArgumentException("Amount cannot be a fraction of a Bit.") } inner(amount, unit) } /** Returns a List of Units, filtered by the conditions passed as arguments. * * @param system The system to filter by * @param multiplier The multiplier to filter by * @return A filtered List of Units */ private def getUnit(system: Int, multiplier: Int): List[Unit] = units .filter(_.system == system) // Get all Units with the defined number system .filter(_.multiplier == multiplier) // Get all Units with the defined Bit multiplier /** Returns an Optional of Unit, filtered by the conditions passed as arguments. 
* * @param system The system to filter by * @param multiplier The multiplier to filter by * @param exponent The exponent to filter by * @return An Option of Unit if the conditions hold, or an empty one */ private def getUnit(system: Int, multiplier: Int, exponent: Int): Option[Unit] = getUnit(system, multiplier).find(_.exponent == exponent) // Get Units that have the defined exponent /** Allows implicit conversions of Double values to `UnitsOfInformation` objects. * * {{{ * 1 bit // Equal to UnitsOfInformation(1) * 3 MB // Equal to UnitsOfInformation(3, MB) * 2.6 gigabytes // Equal to UnitsOfInformation(2.6, GB) * }}} * * @param amount The amount that should be converted to UnitsOfInformation */ implicit class Implicits(amount: Double) { def bits = apply(amount) def b = apply(amount) def bytes = apply(amount, Unit.B) def B = apply(amount, Unit.B) def kilobits = apply(amount, Unit.Kbit) def Kbit = apply(amount, Unit.Kbit) def kilobytes = apply(amount, Unit.KB) def KB = apply(amount, Unit.KB) def megabits = apply(amount, Unit.Mbit) def Mbit = apply(amount, Unit.Mbit) def megabytes = apply(amount, Unit.MB) def MB = apply(amount, Unit.MB) def gigabits = apply(amount, Unit.Gbit) def Gbit = apply(amount, Unit.Gbit) def gigabytes = apply(amount, Unit.GB) def GB = apply(amount, Unit.GB) } }
randm-ch/units-of-information
src/main/scala/ch/randm/uoi/UnitsOfInformation.scala
Scala
mit
12,393
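A compact recap of the scaladoc examples above, written as plain method calls rather than the implicit syntax:

import ch.randm.uoi.{Unit, UnitsOfInformation}

object UnitsOfInformationExample {
  val size = UnitsOfInformation(250, Unit.MB)
  val asGigabytes: Double = size.in(Unit.GB)          // 0.25
  val pretty: String = size.format(Unit.GB, "%.2f")   // "0.25 GB"
  val sum = UnitsOfInformation(3, Unit.MB) + UnitsOfInformation(2, Unit.MB) // 5 MB in total
}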
package org.jetbrains.plugins.scala
package lang
package psi
package stubs

import com.intellij.psi.stubs.StubElement
import com.intellij.util.ArrayUtil.EMPTY_STRING_ARRAY
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScValueOrVariable
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.{ScExpressionOwnerStub, ScTypeElementOwnerStub}

/**
 * @author adkozlov
 */
trait ScPropertyStub[P <: ScValueOrVariable] extends StubElement[P]
  with ScTopLevelElementStub[P]
  with ScTypeElementOwnerStub[P]
  with ScExpressionOwnerStub[P]
  with ScMemberOrLocal
  with ScImplicitStub {

  def isDeclaration: Boolean

  def isImplicit: Boolean

  def names: Array[String]

  /**
   * Non-trivial class names in the property type.
   * They appear in the same form as written in source or in the decompiled class file, so they may carry a prefix.
   */
  def classNames: Array[String]

  override def implicitClassNames: Array[String] =
    if (isImplicit) classNames else EMPTY_STRING_ARRAY
}
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/stubs/ScPropertyStub.scala
Scala
apache-2.0
979
package component

import javafx.fxml.FXML
import javafx.scene.control.Label
import me.mtrupkin.core.Point
import model.space.Entity

/**
 * Created by mtrupkin on 5/22/2015.
 */
class EntityReadoutController {
  @FXML var position: Label = _
  @FXML var distance: Label = _
  @FXML var typeName: Label = _
  @FXML var id: Label = _
  @FXML var name: Label = _

  implicit def toString(v: core.Vector): String = {
    f"[${v.x}%.2f : ${v.y}%.2f]"
  }

  implicit def toString(p: Point): String = {
    f"[${p.x} : ${p.y}]"
  }

  def update(entity: Entity): Unit = {
    val p = entity.position
    position.setText(p)
    typeName.setText(Entity.typeName(entity))
    id.setText(entity.id)
    name.setText(entity.name)
  }
}
mtrupkin/flagship
src/main/scala/component/EntityReadoutController.scala
Scala
mit
764
/* * Copyright 2008-present MongoDB, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.mongodb.scala import org.mongodb.scala.internal._ import org.reactivestreams.{ Publisher, Subscriber } import scala.collection.mutable.ListBuffer import scala.concurrent.{ ExecutionContext, Future, Promise } import scala.util.Try /** * A companion object for [[Observable]] */ object Observable { /** * Creates an Observable from an Iterable. * * Convenient for testing and or debugging. * * @param from the iterable to create the observable from * @tparam A the type of Iterable * @return an Observable that emits each item from the Iterable */ def apply[A](from: Iterable[A]): Observable[A] = IterableObservable[A](from) } /** * A `Observable` represents a MongoDB operation and implements the `Publisher` interface. * * As such it is a provider of a potentially unbounded number of sequenced elements, publishing them according to the demand received * from its [[Observer]](s). * * Extends the `Publisher` interface and adds helpers to make Observables composable and simple to Subscribe to. * * @define forComprehensionExamples * Example: * * {{{ * def f = Observable(1 to 10) * def g = Observable(100 to 100) * val h = for { * x: Int <- f // returns Observable(1 to 10) * y: Int <- g // returns Observable(100 to 100) * } yield x + y * }}} * * is translated to: * * {{{ * f flatMap { (x: Int) => g map { (y: Int) => x + y } } * }}} * * @tparam T the type of element signaled. */ trait Observable[T] extends Publisher[T] { /** * Request `Observable` to start streaming data. * * This is a "factory method" and can be called multiple times, each time starting a new [[Subscription]]. * Each `Subscription` will work for only a single [[Observer]]. * * If the `Observable` rejects the subscription attempt or otherwise fails it will signal the error via [[Observer.onError]]. * * @param observer the `Observer` that will consume signals from this `Observable` */ def subscribe(observer: Observer[_ >: T]): Unit /** * Handles the automatic boxing of a Java `Observable` so it conforms to the interface. * * @note Users should not have to implement this method but rather use the Scala `Observable`. * @param observer the `Observer` that will consume signals from this `Observable` */ override def subscribe(observer: Subscriber[_ >: T]): Unit = subscribe(BoxedSubscriber[T](observer)) /** * Subscribes to the [[Observable]] and requests `Long.MaxValue`. * * @param doOnNext anonymous function to apply to each emitted element. */ def subscribe(doOnNext: T => Any): Unit = subscribe(doOnNext, t => t) /** * Subscribes to the [[Observable]] and requests `Long.MaxValue`. * * @param doOnNext anonymous function to apply to each emitted element. * @param doOnError anonymous function to apply if there is an error. */ def subscribe(doOnNext: T => Any, doOnError: Throwable => Any): Unit = subscribe(doOnNext, doOnError, () => ()) /** * Subscribes to the [[Observable]] and requests `Long.MaxValue`. 
* * @param doOnError anonymous function to apply if there is an error. * @param doOnComplete anonymous function to apply on completion. */ def subscribe(doOnError: Throwable => Any, doOnComplete: () => Any): Unit = subscribe(r => r, doOnError, doOnComplete) /** * Subscribes to the [[Observable]] and requests `Long.MaxValue`. * * Uses the default or overridden `onNext`, `onError`, `onComplete` partial functions. * * @param doOnNext anonymous function to apply to each emitted element. * @param doOnError anonymous function to apply if there is an error. * @param doOnComplete anonymous function to apply on completion. */ def subscribe(doOnNext: T => Any, doOnError: Throwable => Any, doOnComplete: () => Any): Unit = { subscribe(new Observer[T] { override def onSubscribe(subscription: Subscription): Unit = subscription.request(Long.MaxValue) override def onNext(tResult: T): Unit = doOnNext(tResult) override def onError(throwable: Throwable): Unit = doOnError(throwable) override def onComplete(): Unit = doOnComplete() }) } /* Monadic operations */ /** * Applies a function applied to each emitted result. * * Automatically requests all results * * @param doOnEach the anonymous function applied to each emitted item * @tparam U the resulting type after the transformation */ def foreach[U](doOnEach: T => U): Unit = subscribe(doOnEach) /** * Creates a new Observable by applying the `resultFunction` function to each emitted result. * If there is an error and `onError` is called the `errorFunction` function is applied to the failed result. * * @param mapFunction function that transforms a each result of the receiver and passes the result to the returned Observable * @param errorMapFunction function that transforms a failure of the receiver into a failure of the returned observer * @tparam S the resulting type of each item in the Observable * @return an Observable with transformed results and / or error. */ def transform[S](mapFunction: T => S, errorMapFunction: Throwable => Throwable): Observable[S] = MapObservable(this, mapFunction, errorMapFunction) /** * Creates a new Observable by applying a function to each emitted result of the [[Observable]]. * If the Observable calls errors then then the new Observable will also contain this exception. * * $forComprehensionExamples * * @param mapFunction function that transforms a each result of the receiver and passes the result to the returned Observable * @tparam S the resulting type of each item in the Observable * @return an Observable with transformed results and / or error. */ def map[S](mapFunction: T => S): Observable[S] = MapObservable(this, mapFunction) /** * Creates a new Observable by applying a function to each emitted result of the [[Observable]]. * If the Observable calls errors then then the new Observable will also contain this exception. * * As each emitted item passed to `onNext` returns an Observable, we tightly control the requests to the parent Observable. * The requested amount is then passed to the child Observable and only when that is completed does the parent become available for * requesting more data. * * $forComprehensionExamples * * @param mapFunction function that transforms a each result of the receiver into an Observable and passes each result of that * Observable to the returned Observable. * @tparam S the resulting type of each item in the Observable * @return an Observable with transformed results and / or error. 
*/ def flatMap[S](mapFunction: T => Observable[S]): Observable[S] = FlatMapObservable(this, mapFunction) /** * Creates a new [[Observable]] by filtering the value of the current Observable with a predicate. * * If the current Observable fails, then the resulting Observable also fails. * * Example: * {{{ * val oddValues = Observable(1 to 100) filter { _ % 2 == 1 } * }}} * * @param predicate the function that is applied to each result emitted if it matches that result is passes to the returned Observable * @return an Observable only containing items matching that match the predicate */ def filter(predicate: T => Boolean): Observable[T] = FilterObservable(this, predicate) /** * Used by for-comprehensions. */ final def withFilter(p: T => Boolean): Observable[T] = FilterObservable(this, p) /** * Collects all the values of the [[Observable]] into a list and returns a new Observable with that list. * * Example: * {{{ * val listOfNumbers = Observable(1 to 100).collect() * }}} * * @note If the Observable is large then this will consume lots of memory! * If the underlying Observable is infinite this Observable will never complete. * @see Uses [[foldLeft]] underneath * @return an Observable that emits a single item, the result of accumulator. */ def collect[S](): SingleObservable[Seq[T]] = FoldLeftObservable(this, ListBuffer[T](), (l: ListBuffer[T], v: T) => l += v).map(_.toSeq) /** * Builds a new [[Observable]] by applying a partial function to all elements. * * Example: * {{{ * val justStrings = Observable(Iterable("this", 1, 2, "that")).collect{ case s: String => s } * }}} * * @param pf function that transforms each result of the receiver into an Observable and passes each result of that * Observable to the returned Observable. * @tparam S the resulting type of each item in the Observable * @return an Observable with transformed results and / or error. */ def collect[S](pf: PartialFunction[T, S]): Observable[S] = CollectObservable(this, pf) /** * Creates a new [[Observable]] that contains the single result of the applied accumulator function. * * The first item emitted by the Observable is passed to the supplied accumulator function alongside the initial value, then all other * emitted items are passed along with the previous result of the accumulator function. * * Example: * {{{ * val countingObservable = Observable(1 to 100) foldLeft(0)((v, i) => v + 1) * }}} * * @note If this function is used to collect results into a collection then it could use lots of memory! * If the underlying Observable is infinite this Observable will never complete. * @param initialValue the initial (seed) accumulator value * @param accumulator an accumulator function to be invoked on each item emitted by the source Observable, the result of which will be * used in the next accumulator call. * @return an Observable that emits a single item, the result of accumulator. */ def foldLeft[S](initialValue: S)(accumulator: (S, T) => S): SingleObservable[S] = FoldLeftObservable(this, initialValue, accumulator) /** * Creates a new [[Observable]] that will handle any matching throwable that this Observable might contain. * If there is no match, or if this Observable contains a valid result then the new Observable will contain the same. 
* * Example: * * {{{ * mongoExceptionObservable recover { case e: MongoException => 0 } // final result: 0 * mongoExceptionObservable recover { case e: NotFoundException => 0 } // result: exception * }}} * * @param pf the partial function used to pattern match against the `onError` throwable * @tparam U the type of the returned Observable * @return an Observable that will handle any matching throwable and not error. */ def recover[U >: T](pf: PartialFunction[Throwable, U]): Observable[U] = RecoverObservable(this, pf) /** * Creates a new [[Observable]] that will handle any matching throwable that this Observable might contain by assigning it a value * of another Observable. * * If there is no match, or if this Observable contains a valid result then the new Observable will contain the same result. * * Example: * * {{{ * successfulObservable recoverWith { case e: ArithmeticException => observableB } // result: successfulObservable * mongoExceptionObservable recoverWith { case t: Throwable => observableB } // result: observableB * }}} * * == Ensuring results from a Single Observer == * * `recoverWith` can potentially emit results from either Observer. This often isn't desirable, so to ensure only a single Observable * issues results combine with the [[[collect[S]()*]]] method eg: * * {{{ * val results = Observable(1 to 100) * .collect() * .recoverWith({ case t: Throwable => Observable(200 to 300).collect() }) * .subscribe((i: Seq[Int]) => print(results)) * }}} * * @param pf the partial function used to pattern match against the `onError` throwable * @tparam U the type of the returned Observable * @return an Observable that will handle any matching throwable and not error but recover with a new observable */ def recoverWith[U >: T](pf: PartialFunction[Throwable, Observable[U]]): Observable[U] = RecoverWithObservable(this, pf) /** * Zips the values of `this` and `that` [[Observable]], and creates a new Observable holding the tuple of their results. * * If `this` Observable fails, the resulting Observable is failed with the throwable stored in `this`. Otherwise, if `that` * Observable fails, the resulting Observable is failed with the throwable stored in `that`. * * It will only emit as many items as the number of items emitted by the source Observable that emits the fewest items. * * @param that the Observable to zip with * @tparam U the type of the `that` Observable * @return a new zipped Observable */ def zip[U](that: Observable[U]): Observable[(T, U)] = ZipObservable(this, that) /** * Creates a new [[Observable]] which returns the results of this Observable, if there is an error, it will then fallback to returning * the results of the alternative "`that`" Observable. * * If both Observables fail, the resulting Observable holds the throwable object of the first Observable. * * Example: * {{{ * val fallBackObservable = Observable(1 to 100) fallbackTo Observable(200 to 300) * }}} * * == Ensuring results from a Single Observer == * * `fallbackTo` can potentially emit results from either Observer. This often isn't desirable, so to ensure only a single Observable * issues results combine with the [[[collect[S]()*]]] method eg: * * {{{ * val results = Observable(1 to 100).collect() fallbackTo Observable(200 to 300).collect() * }}} * * @param that the Observable to fallback to if `this` Observable fails * @tparam U the type of the returned Observable * @return an Observable that will fallback to the `that` Observable should `this` Observable complete with an `onError`. 
*/ def fallbackTo[U >: T](that: Observable[U]): Observable[U] = RecoverWithObservable(this, { case t: Throwable => that }, true) /** * Applies the side-effecting function to the final result of this [[Observable]] and, returns a new Observable with the result of * this Observable. * * This method allows one to enforce that the callbacks are executed in a specified order. * * Note that if one of the chained `andThen` callbacks throws an exception, that exception is not propagated to the subsequent * `andThen` callbacks. Instead, the subsequent `andThen` callbacks are given the original value of this Observable. * * The following example prints out `10`: * * {{{ * Observable(1 to 10) andThen { * case r => sys.error("runtime exception") * } andThen { * case Success(x) => print(x) * case Failure(t) => print("Failure") * } * }}} * * * @param pf the partial function to pattern match against * @tparam U the result type of the * @return an */ def andThen[U](pf: PartialFunction[Try[T], U]): Observable[T] = AndThenObservable(this, pf) /** * Returns the head of the [[Observable]] in a `scala.concurrent.Future`. * * @return the head result of the [[Observable]]. */ def head(): Future[T] = { headOption().map { case Some(result) => result case None => null.asInstanceOf[T] // scalastyle:ignore null }(Helpers.DirectExecutionContext) } /** * Returns the head option of the [[Observable]] in a `scala.concurrent.Future`. * * @return the head option result of the [[Observable]]. * @since 2.2 */ def headOption(): Future[Option[T]] = { val promise = Promise[Option[T]]() subscribe(new Observer[T]() { @volatile var subscription: Option[Subscription] = None @volatile var terminated: Boolean = false override def onSubscribe(sub: Subscription): Unit = { subscription = Some(sub) sub.request(1) } override def onError(throwable: Throwable): Unit = completeWith("onError", { () => promise.failure(throwable) }) override def onComplete(): Unit = { if (!terminated) completeWith("onComplete", { () => promise.success(None) }) // Completed with no values } override def onNext(tResult: T): Unit = { completeWith("onNext", { () => promise.success(Some(tResult)) }) } private def completeWith(method: String, action: () => Any): Unit = { if (terminated) throw new IllegalStateException(s"$method called after the Observer has already completed or errored.") terminated = true subscription.foreach((sub: Subscription) => sub.unsubscribe()) action() } }) promise.future } /** * Use a specific execution context for future operations * * @param context the execution context * @return an Observable that uses the specified execution context */ def observeOn(context: ExecutionContext): Observable[T] = ExecutionContextObservable(this, context) /** * Convert this observable so that it emits a single Unit to [[Observer.onNext]] before calling [[Observer.onComplete]]. * * If the underlying observable errors then that is propagated to the `Observer`. This method is especially useful for chaining * `Observable[Void]` in for comprehensions. * * @return a single observable which emits Unit before completion. * @since 4.4 */ def completeWithUnit(): SingleObservable[Unit] = UnitObservable(this) }
rozza/mongo-java-driver
driver-scala/src/main/scala/org/mongodb/scala/Observable.scala
Scala
apache-2.0
18,433
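A brief composition sketch based on the scaladoc above (not part of the driver itself): it filters and maps a small range, then gathers everything into a single emitted Seq via collect().

import org.mongodb.scala.Observable

object ObservableExample {
  // Emits a single Seq(4, 8, 12, 16, 20) and then completes.
  val evensDoubled = Observable(1 to 10).filter(_ % 2 == 0).map(_ * 2).collect()

  // head() exposes the result as a scala.concurrent.Future[Seq[Int]].
  val asFuture = evensDoubled.head()
}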
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.consumer import junit.framework.Assert._ import kafka.zk.ZooKeeperTestHarness import kafka.integration.KafkaServerTestHarness import kafka.server.KafkaConfig import scala.collection._ import kafka.utils.{Utils, Logging} import kafka.utils.{TestZKUtils, TestUtils} import org.scalatest.junit.JUnit3Suite import org.apache.log4j.{Level, Logger} import kafka.message._ import kafka.serializer.StringDecoder class ZookeeperConsumerConnectorTest extends JUnit3Suite with KafkaServerTestHarness with ZooKeeperTestHarness with Logging { val zookeeperConnect = TestZKUtils.zookeeperConnect val zkConnect = zookeeperConnect val numNodes = 2 val numParts = 2 val topic = "topic1" val configs = for(props <- TestUtils.createBrokerConfigs(numNodes)) yield new KafkaConfig(props) { override val enableZookeeper = true override val numPartitions = numParts override val zkConnect = zookeeperConnect } val group = "group1" val consumer0 = "consumer0" val consumer1 = "consumer1" val consumer2 = "consumer2" val consumer3 = "consumer3" val nMessages = 2 def testBasic() { val requestHandlerLogger = Logger.getLogger(classOf[kafka.server.KafkaRequestHandlers]) requestHandlerLogger.setLevel(Level.FATAL) var actualMessages: List[Message] = Nil // test consumer timeout logic val consumerConfig0 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer0)) { override val consumerTimeoutMs = 200 } val zkConsumerConnector0 = new ZookeeperConsumerConnector(consumerConfig0, true) val topicMessageStreams0 = zkConsumerConnector0.createMessageStreams(Predef.Map(topic -> numNodes*numParts/2)) // no messages to consume, we should hit timeout; // also the iterator should support re-entrant, so loop it twice for (i <- 0 until 2) { try { getMessages(nMessages*2, topicMessageStreams0) fail("should get an exception") } catch { case e: ConsumerTimeoutException => // this is ok case e => throw e } } zkConsumerConnector0.shutdown // send some messages to each broker val sentMessages1 = sendMessages(nMessages, "batch1") // create a consumer val consumerConfig1 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer1)) val zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig1, true) val topicMessageStreams1 = zkConsumerConnector1.createMessageStreams(Predef.Map(topic -> numNodes*numParts/2)) val receivedMessages1 = getMessages(nMessages*2, topicMessageStreams1) assertEquals(sentMessages1, receivedMessages1) // commit consumed offsets zkConsumerConnector1.commitOffsets // create a consumer val consumerConfig2 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer2)) val zkConsumerConnector2 = new ZookeeperConsumerConnector(consumerConfig2, true) val topicMessageStreams2 
= zkConsumerConnector2.createMessageStreams(Predef.Map(topic -> numNodes*numParts/2)) // send some messages to each broker val sentMessages2 = sendMessages(nMessages, "batch2") Thread.sleep(200) val receivedMessages2_1 = getMessages(nMessages, topicMessageStreams1) val receivedMessages2_2 = getMessages(nMessages, topicMessageStreams2) val receivedMessages2 = (receivedMessages2_1 ::: receivedMessages2_2).sortWith((s,t) => s.checksum < t.checksum) assertEquals(sentMessages2, receivedMessages2) // create a consumer with empty map val consumerConfig3 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer3)) val zkConsumerConnector3 = new ZookeeperConsumerConnector(consumerConfig3, true) val topicMessageStreams3 = zkConsumerConnector3.createMessageStreams(new mutable.HashMap[String, Int]()) // send some messages to each broker Thread.sleep(200) val sentMessages3 = sendMessages(nMessages, "batch3") Thread.sleep(200) val receivedMessages3_1 = getMessages(nMessages, topicMessageStreams1) val receivedMessages3_2 = getMessages(nMessages, topicMessageStreams2) val receivedMessages3 = (receivedMessages3_1 ::: receivedMessages3_2).sortWith((s,t) => s.checksum < t.checksum) assertEquals(sentMessages3, receivedMessages3) zkConsumerConnector1.shutdown zkConsumerConnector2.shutdown zkConsumerConnector3.shutdown info("all consumer connectors stopped") requestHandlerLogger.setLevel(Level.ERROR) } def testCompression() { val requestHandlerLogger = Logger.getLogger(classOf[kafka.server.KafkaRequestHandlers]) requestHandlerLogger.setLevel(Level.FATAL) println("Sending messages for 1st consumer") // send some messages to each broker val sentMessages1 = sendMessages(nMessages, "batch1", DefaultCompressionCodec) // create a consumer val consumerConfig1 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer1)) val zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig1, true) val topicMessageStreams1 = zkConsumerConnector1.createMessageStreams(Predef.Map(topic -> numNodes*numParts/2)) val receivedMessages1 = getMessages(nMessages*2, topicMessageStreams1) assertEquals(sentMessages1, receivedMessages1) // commit consumed offsets zkConsumerConnector1.commitOffsets println("Sending more messages for 2nd consumer") // create a consumer val consumerConfig2 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer2)) val zkConsumerConnector2 = new ZookeeperConsumerConnector(consumerConfig2, true) val topicMessageStreams2 = zkConsumerConnector2.createMessageStreams(Predef.Map(topic -> numNodes*numParts/2)) // send some messages to each broker val sentMessages2 = sendMessages(nMessages, "batch2", DefaultCompressionCodec) Thread.sleep(200) val receivedMessages2_1 = getMessages(nMessages, topicMessageStreams1) val receivedMessages2_2 = getMessages(nMessages, topicMessageStreams2) val receivedMessages2 = (receivedMessages2_1 ::: receivedMessages2_2).sortWith((s,t) => s.checksum < t.checksum) assertEquals(sentMessages2, receivedMessages2) // create a consumer with empty map println("Sending more messages for 3rd consumer") val consumerConfig3 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer3)) val zkConsumerConnector3 = new ZookeeperConsumerConnector(consumerConfig3, true) val topicMessageStreams3 = zkConsumerConnector3.createMessageStreams(new mutable.HashMap[String, Int]()) // send some messages to each broker Thread.sleep(200) val sentMessages3 = sendMessages(nMessages, "batch3", 
DefaultCompressionCodec) Thread.sleep(200) val receivedMessages3_1 = getMessages(nMessages, topicMessageStreams1) val receivedMessages3_2 = getMessages(nMessages, topicMessageStreams2) val receivedMessages3 = (receivedMessages3_1 ::: receivedMessages3_2).sortWith((s,t) => s.checksum < t.checksum) assertEquals(sentMessages3, receivedMessages3) zkConsumerConnector1.shutdown zkConsumerConnector2.shutdown zkConsumerConnector3.shutdown info("all consumer connectors stopped") requestHandlerLogger.setLevel(Level.ERROR) } def testCompressionSetConsumption() { val requestHandlerLogger = Logger.getLogger(classOf[kafka.server.KafkaRequestHandlers]) requestHandlerLogger.setLevel(Level.FATAL) var actualMessages: List[Message] = Nil // shutdown one server servers.last.shutdown Thread.sleep(500) // send some messages to each broker val sentMessages = sendMessages(configs.head, 200, "batch1", DefaultCompressionCodec) // test consumer timeout logic val consumerConfig0 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer0)) { override val consumerTimeoutMs = 5000 } val zkConsumerConnector0 = new ZookeeperConsumerConnector(consumerConfig0, true) val topicMessageStreams0 = zkConsumerConnector0.createMessageStreams(Predef.Map(topic -> 1)) getMessages(100, topicMessageStreams0) zkConsumerConnector0.shutdown // at this point, only some part of the message set was consumed. So consumed offset should still be 0 // also fetched offset should be 0 val zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig0, true) val topicMessageStreams1 = zkConsumerConnector1.createMessageStreams(Predef.Map(topic -> 1)) val receivedMessages = getMessages(400, topicMessageStreams1) val sortedReceivedMessages = receivedMessages.sortWith((s,t) => s.checksum < t.checksum) val sortedSentMessages = sentMessages.sortWith((s,t) => s.checksum < t.checksum) assertEquals(sortedSentMessages, sortedReceivedMessages) zkConsumerConnector1.shutdown requestHandlerLogger.setLevel(Level.ERROR) } def testConsumerDecoder() { val requestHandlerLogger = Logger.getLogger(classOf[kafka.server.KafkaRequestHandlers]) requestHandlerLogger.setLevel(Level.FATAL) val sentMessages = sendMessages(nMessages, "batch1", NoCompressionCodec). map(m => Utils.toString(m.payload, "UTF-8")). 
sortWith((s, t) => s.compare(t) == -1) val consumerConfig = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer1)) val zkConsumerConnector = new ZookeeperConsumerConnector(consumerConfig, true) val topicMessageStreams = zkConsumerConnector.createMessageStreams( Predef.Map(topic -> numNodes*numParts/2), new StringDecoder) var receivedMessages: List[String] = Nil for ((topic, messageStreams) <- topicMessageStreams) { for (messageStream <- messageStreams) { val iterator = messageStream.iterator for (i <- 0 until nMessages * 2) { assertTrue(iterator.hasNext()) val message = iterator.next().message receivedMessages ::= message debug("received message: " + message) } } } receivedMessages = receivedMessages.sortWith((s, t) => s.compare(t) == -1) assertEquals(sentMessages, receivedMessages) zkConsumerConnector.shutdown() requestHandlerLogger.setLevel(Level.ERROR) } def sendMessages(conf: KafkaConfig, messagesPerNode: Int, header: String, compression: CompressionCodec): List[Message]= { var messages: List[Message] = Nil val producer = TestUtils.createProducer("localhost", conf.port) for (partition <- 0 until numParts) { val ms = 0.until(messagesPerNode).map(x => new Message((header + conf.brokerId + "-" + partition + "-" + x).getBytes)).toArray val mSet = new ByteBufferMessageSet(compressionCodec = compression, messages = ms: _*) for (message <- ms) messages ::= message producer.send(topic, partition, mSet) } producer.close() messages } def sendMessages(messagesPerNode: Int, header: String, compression: CompressionCodec = NoCompressionCodec): List[Message]= { var messages: List[Message] = Nil for(conf <- configs) { messages ++= sendMessages(conf, messagesPerNode, header, compression) } messages.sortWith((s,t) => s.checksum < t.checksum) } def getMessages(nMessagesPerThread: Int, topicMessageStreams: Map[String,List[KafkaStream[Message]]]): List[Message]= { var messages: List[Message] = Nil for ((topic, messageStreams) <- topicMessageStreams) { for (messageStream <- messageStreams) { val iterator = messageStream.iterator for (i <- 0 until nMessagesPerThread) { assertTrue(iterator.hasNext) val message = iterator.next.message messages ::= message debug("received message: " + Utils.toString(message.payload, "UTF-8")) } } } messages.sortWith((s,t) => s.checksum < t.checksum) } }
piavlo/operations-debs-kafka
core/src/test/scala/unit/kafka/consumer/ZookeeperConsumerConnectorTest.scala
Scala
apache-2.0
12,794
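The assertions in the test above never depend on delivery order: both the sent and the received message lists are sorted by checksum before being compared. Below is a minimal plain-Scala sketch of that order-insensitive comparison; the Msg type is a hypothetical stand-in for kafka.message.Message, with CRC32 supplying the checksum.

object ChecksumCompareSketch extends App {
  // Hypothetical stand-in for kafka.message.Message; only a checksum is needed here.
  case class Msg(payload: String) {
    def checksum: Long = {
      val crc = new java.util.zip.CRC32
      crc.update(payload.getBytes("UTF-8"))
      crc.getValue
    }
  }

  val sent     = List(Msg("batch1-0-0"), Msg("batch1-0-1"), Msg("batch1-1-0"))
  val received = scala.util.Random.shuffle(sent) // arrives in arbitrary order

  // Same trick as the test: sort both sides by checksum, then compare.
  val sortedSent     = sent.sortWith((s, t) => s.checksum < t.checksum)
  val sortedReceived = received.sortWith((s, t) => s.checksum < t.checksum)

  assert(sortedSent == sortedReceived)
  println("order-insensitive comparison passed")
}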
/* * Copyright 2009-2010 LinkedIn, Inc * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package com.linkedin.norbert package network package partitioned package loadbalancer import java.util import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} import com.linkedin.norbert.cluster.{InvalidClusterException, Node} import com.linkedin.norbert.logging.Logging import com.linkedin.norbert.network.client.loadbalancer.LoadBalancerHelpers import com.linkedin.norbert.network.common.Endpoint /** * A mixin trait that provides functionality to help implement a hash based <code>Router</code>. */ trait DefaultLoadBalancerHelper extends LoadBalancerHelpers with Logging { /** * A mapping from partition id to the <code>Node</code>s which can service that partition. */ protected val partitionToNodeMap: Map[Int, (IndexedSeq[Endpoint], AtomicInteger, Array[AtomicBoolean])] /** * Given the currently available <code>Node</code>s and the total number of partitions in the cluster, this method * generates a <code>Map</code> of partition id to the <code>Node</code>s which service that partition. * * @param nodes the current available nodes * @param numPartitions the total number of partitions in the cluster * * @return a <code>Map</code> of partition id to the <code>Node</code>s which service that partition * @throws InvalidClusterException thrown if every partition doesn't have at least one available <code>Node</code> * assigned to it */ protected def generatePartitionToNodeMap(nodes: Set[Endpoint], numPartitions: Int, serveRequestsIfPartitionMissing: Boolean): Map[Int, (IndexedSeq[Endpoint], AtomicInteger, Array[AtomicBoolean])] = { val partitionToNodeMap = (for (n <- nodes; p <- n.node.partitionIds) yield(p, n)).foldLeft(Map.empty[Int, IndexedSeq[Endpoint]]) { case (map, (partitionId, node)) => map + (partitionId -> (node +: map.get(partitionId).getOrElse(Vector.empty[Endpoint]))) } val possiblePartitions = (0 until numPartitions).toSet val missingPartitions = possiblePartitions diff (partitionToNodeMap.keys.toSet) if(missingPartitions.size == possiblePartitions.size) throw new InvalidClusterException("Every single partition appears to be missing. There are %d partitions".format(numPartitions)) else if(!missingPartitions.isEmpty) { if(serveRequestsIfPartitionMissing) log.warn("Partitions %s are unavailable, attempting to continue serving requests to other partitions.".format(missingPartitions)) else throw new InvalidClusterException("Partitions %s are unavailable, cannot serve requests.".format(missingPartitions)) } partitionToNodeMap.map { case (pId, endPoints) => val states = new Array[AtomicBoolean](endPoints.size) (0 to endPoints.size -1).foreach(states(_) = new AtomicBoolean(true)) pId -> (endPoints, new AtomicInteger(0), states) } } /** * Calculates a <code>Node</code> which can service a request for the specified partition id. 
* * @param partitionId the id of the partition * * @return <code>Some</code> with the <code>Node</code> which can service the partition id, <code>None</code> * if there are no available <code>Node</code>s for the partition requested */ protected def nodeForPartition(partitionId: Int, capability: Option[Long] = None, persistentCapability: Option[Long] = None): Option[Node] = { partitionToNodeMap.get(partitionId) match { case None => return None case Some((endpoints, counter, states)) => val es = endpoints.size counter.compareAndSet(java.lang.Integer.MAX_VALUE, 0) val idx = counter.getAndIncrement var i = idx var loopCount = 0 do { val endpoint = endpoints(i % es) if(endpoint.canServeRequests && endpoint.node.isCapableOf(capability, persistentCapability)) { compensateCounter(idx, loopCount, counter); return Some(endpoint.node) } i = i + 1 if (i < 0) i = 0 loopCount = loopCount + 1 } while (loopCount <= es) compensateCounter(idx, loopCount, counter); return Some(endpoints(idx % es).node) } } protected def nodesForPartition(partitionId: Int, capability: Option[Long] = None, persistentCapability: Option[Long] = None): util.LinkedHashSet[Node] = { partitionToNodeMap.get(partitionId) match { case None => return new util.LinkedHashSet[Node] case Some((endpoints, counter, states)) => val es = endpoints.size counter.compareAndSet(java.lang.Integer.MAX_VALUE, 0) val idx = counter.getAndIncrement var i = idx var loopCount = 0 val result = new util.LinkedHashSet[Node] do { val endpoint = endpoints(i % es) if(endpoint.canServeRequests && endpoint.node.isCapableOf(capability, persistentCapability)) { result.add(endpoint.node) } i = i + 1 if (i < 0) i = 0 loopCount = loopCount + 1 } while (loopCount <= es) result } } def compensateCounter(idx: Int, count:Int, counter:AtomicInteger) { if (idx + 1 + count <= 0) { // Integer overflow counter.set(idx + 1 - java.lang.Integer.MAX_VALUE + count) } counter.set(idx + 1 + count) } }
nickhristov/norbert
network/src/main/scala/com/linkedin/norbert/network/partitioned/loadbalancer/DefaultLoadBalancerHelper.scala
Scala
apache-2.0
5,898
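The heart of nodeForPartition above is a lock-free round-robin walk: a shared AtomicInteger supplies a starting offset into the endpoint ring, and the loop skips endpoints that cannot currently serve. The sketch below isolates that selection idea in plain Scala; Endpoint here is a deliberately simplified stand-in for norbert's Endpoint/Node pair, not the real type.

import java.util.concurrent.atomic.AtomicInteger

object RoundRobinSketch extends App {
  // Hypothetical, simplified stand-in for norbert's Endpoint/Node pair.
  case class Endpoint(nodeId: Int, canServeRequests: Boolean)

  val endpoints = Vector(Endpoint(1, true), Endpoint(2, false), Endpoint(3, true))
  val counter   = new AtomicInteger(0)

  // Same idea as nodeForPartition: advance the shared counter, walk the ring
  // starting at that offset, and return the first endpoint able to serve.
  def pick(): Option[Endpoint] = {
    val size = endpoints.size
    counter.compareAndSet(Int.MaxValue, 0) // guard against counter overflow
    val start = counter.getAndIncrement()
    (0 until size).iterator
      .map(i => endpoints(math.abs(start + i) % size))
      .find(_.canServeRequests)
  }

  (1 to 5).foreach(_ => println(pick()))
}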
/*
 * Copyright 2013 Maurício Linhares
 *
 * Maurício Linhares licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */

package com.github.mauricio.async.db.postgresql.codec

import com.github.mauricio.async.db.postgresql.messages.backend._

trait PostgreSQLConnectionDelegate {

  def onAuthenticationResponse(message: AuthenticationMessage): Unit

  def onCommandComplete(message: CommandCompleteMessage): Unit

  def onDataRow(message: DataRowMessage): Unit

  def onError(message: ErrorMessage): Unit

  def onError(throwable: Throwable): Unit

  def onParameterStatus(message: ParameterStatusMessage): Unit

  def onReadyForQuery(): Unit

  def onRowDescription(message: RowDescriptionMessage): Unit

  def onNotificationResponse(message: NotificationResponse): Unit

}
dripower/postgresql-async
postgresql-async/src/main/scala/com/github/mauricio/async/db/postgresql/codec/PostgreSQLConnectionDelegate.scala
Scala
apache-2.0
1,285
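Since PostgreSQLConnectionDelegate is a pure callback trait, a consumer implements each hook and hands the instance to the codec layer. Below is a minimal sketch of such an implementation, assuming only the driver artifact on the classpath; the logging behaviour and class name are hypothetical.

import com.github.mauricio.async.db.postgresql.codec.PostgreSQLConnectionDelegate
import com.github.mauricio.async.db.postgresql.messages.backend._

// A do-nothing delegate that just prints each backend message it receives.
// This is only a sketch of how the callback trait might be implemented; the
// real connection wires these callbacks into its protocol state machine.
class LoggingConnectionDelegate extends PostgreSQLConnectionDelegate {
  override def onAuthenticationResponse(message: AuthenticationMessage): Unit = log(message)
  override def onCommandComplete(message: CommandCompleteMessage): Unit = log(message)
  override def onDataRow(message: DataRowMessage): Unit = log(message)
  override def onError(message: ErrorMessage): Unit = log(message)
  override def onError(throwable: Throwable): Unit = println(s"failure: $throwable")
  override def onParameterStatus(message: ParameterStatusMessage): Unit = log(message)
  override def onReadyForQuery(): Unit = println("ready for query")
  override def onRowDescription(message: RowDescriptionMessage): Unit = log(message)
  override def onNotificationResponse(message: NotificationResponse): Unit = log(message)

  private def log(message: AnyRef): Unit = println(s"received: $message")
}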
package de.sciss.lucre.swing import de.sciss.lucre.{DoubleObj, IntObj, MapObj} import de.sciss.lucre.expr.CellView import de.sciss.swingplus.GroupPanel import scala.collection.immutable.{IndexedSeq => Vec} import scala.swing.event.ButtonClicked import scala.swing.{Alignment, Button, Component, Label, ToggleButton} object OptionalApp extends DurableAppLike { // de.sciss.lucre.event.showLog = true // private val rows = 1 private lazy val viewsDouble: Vec[View[T]] = system.step { implicit tx => implicit val doubleEx: DoubleObj.type = DoubleObj // implicit val intEx = IntObj def label (text: String): View.T[T, Label] = View.wrap(new Label(s"$text:", null, Alignment.Trailing)) def button(text: String)(action: => Unit): View.T[T, Button] = View.wrap(Button(text)(action)) val exDouble1 = DoubleObj.newVar[T](DoubleObj.newConst(0.0)) val mapDouble = MapObj.Modifiable[T, String, DoubleObj]() val keyDouble = "foo-Double" val mapViewDouble = CellView.exprMap[T, String, Double, DoubleObj](mapDouble, keyDouble) val vDouble1 = DoubleSpinnerView(exDouble1, "d1") val vDouble2 = DoubleSpinnerView.optional[T](mapViewDouble, "d2") val mapDoubleH = tx.newHandle(mapDouble) val exDouble1H = tx.newHandle(exDouble1) val butPutDouble = button("Put" )(system.step { implicit tx => mapDoubleH().put (keyDouble, exDouble1H()) }) val butRemoveDouble = button("Remove")(system.step { implicit tx => mapDoubleH().remove(keyDouble ) }) val togDefaultDouble = new ToggleButton("Default: 0") { listenTo(this) reactions += { case ButtonClicked(_) => vDouble2.default = if (selected) Some(0.0) else None } } Vec( label("Double"), vDouble1, vDouble2, butPutDouble, butRemoveDouble, View.wrap(togDefaultDouble) ) } private lazy val viewsInt: Vec[View[T]] = system.step { implicit tx => // implicit val doubleEx = DoubleObj implicit val intEx: IntObj.type = IntObj def label (text: String): View.T[T, Label] = View.wrap(new Label(s"$text:", null, Alignment.Trailing)) def button(text: String)(action: => Unit): View.T[T, Button] = View.wrap(Button(text)(action)) val exInt1 = IntObj.newVar[T](IntObj.newConst(0)) val mapInt = MapObj.Modifiable[T, String, IntObj]() val keyInt = "foo-Int" val mapViewInt = CellView.exprMap[T, String, Int, IntObj](mapInt, keyInt) val vInt1 = IntSpinnerView(exInt1, "d1") val vInt2 = IntSpinnerView.optional[T](mapViewInt, "d2") val mapIntH = tx.newHandle(mapInt) val exInt1H = tx.newHandle(exInt1) val butPutInt = button("Put" )(system.step { implicit tx => mapIntH().put (keyInt, exInt1H()) }) val butRemoveInt = button("Remove")(system.step { implicit tx => mapIntH().remove(keyInt ) }) val togDefaultInt = new ToggleButton("Default: 0") { listenTo(this) reactions += { case ButtonClicked(_) => vInt2.default = if (selected) Some(0) else None } } Vec( label("Int"), vInt1, vInt2, butPutInt, butRemoveInt, View.wrap(togDefaultInt) ) } def mkView(): Component = new GroupPanel { horizontal = Seq((viewsDouble zip viewsInt).map { case (v1, v2) => Par(v1.component, v2.component) } : _* ) vertical = Seq( Par(Baseline)(viewsDouble.map(v => GroupPanel.Element(v.component)): _*), // can't use implicit conversion in Scala 2.10 Par(Baseline)(viewsInt .map(v => GroupPanel.Element(v.component)): _*)) } }
Sciss/LucreSwing
jvm/src/test/scala/de/sciss/lucre/swing/OptionalApp.scala
Scala
agpl-3.0
3,634
package scala.slick.jdbc.meta

import java.sql._
import scala.slick.jdbc.ResultSetInvoker

/**
 * A wrapper for a row in the ResultSet returned by DatabaseMetaData.getFunctions().
 */
case class MFunction(name: MQName, remarks: String, returnsTable: Option[Boolean], specificName: String) {
  def getFunctionColumns(columnNamePattern: String = "%") =
    MFunctionColumn.getFunctionColumns(name, columnNamePattern)
}

object MFunction {
  def getFunctions(namePattern: MQName) = {
    ResultSetInvoker[MFunction] { s =>
      try s.metaData.getFunctions(namePattern.catalog_?, namePattern.schema_?, namePattern.name)
      catch { case _: AbstractMethodError => null }
    } { r =>
      MFunction(MQName.from(r), r.<<, r.nextShort match {
        case DatabaseMetaData.functionNoTable      => Some(false)
        case DatabaseMetaData.functionReturnsTable => Some(true)
        case _                                     => None
      }, r.<<)
    }
  }
}
boldradius/slick
src/main/scala/scala/slick/jdbc/meta/MFunction.scala
Scala
bsd-2-clause
910
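MFunction is a thin wrapper over DatabaseMetaData.getFunctions, mapping the FUNCTION_TYPE column onto its returnsTable option. The same lookup can be done with plain JDBC, which is roughly what the wrapper reads; the sketch below assumes an in-memory H2 database purely as a stand-in JDBC source.

import java.sql.{DatabaseMetaData, DriverManager}

object GetFunctionsSketch extends App {
  // Hypothetical in-memory H2 URL; any JDBC 4 driver implementing getFunctions works.
  val conn = DriverManager.getConnection("jdbc:h2:mem:demo")
  try {
    val rs = conn.getMetaData.getFunctions(null, null, "%")
    while (rs.next()) {
      val name    = rs.getString("FUNCTION_NAME")
      val remarks = rs.getString("REMARKS")
      // FUNCTION_TYPE is what MFunction maps onto its returnsTable option.
      val returnsTable = rs.getShort("FUNCTION_TYPE").toInt match {
        case DatabaseMetaData.functionNoTable      => Some(false)
        case DatabaseMetaData.functionReturnsTable => Some(true)
        case _                                     => None
      }
      println(s"$name / $remarks / returnsTable=$returnsTable")
    }
  } finally conn.close()
}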
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark

import org.scalatest.concurrent.TimeLimits._
import org.scalatest.time.{Millis, Span}

class UnpersistSuite extends SparkFunSuite with LocalSparkContext {
  test("unpersist RDD") {
    sc = new SparkContext("local", "test")
    val rdd = sc.makeRDD(Array(1, 2, 3, 4), 2).cache()
    rdd.count
    assert(sc.persistentRdds.isEmpty === false)
    rdd.unpersist()
    assert(sc.persistentRdds.isEmpty === true)

    failAfter(Span(3000, Millis)) {
      try {
        while (!sc.getRDDStorageInfo.isEmpty) {
          Thread.sleep(200)
        }
      } catch {
        case _: Throwable => Thread.sleep(10)
          // Do nothing. We might see exceptions because block manager
          // is racing this thread to remove entries from the driver.
      }
    }
    assert(sc.getRDDStorageInfo.isEmpty === true)
  }
}
minixalpha/spark
core/src/test/scala/org/apache/spark/UnpersistSuite.scala
Scala
apache-2.0
1,645
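The test above relies on the persist/unpersist lifecycle: cache() only marks the RDD, an action materializes the cached blocks, and unpersist() releases them. Here is a standalone sketch of that lifecycle using the public getPersistentRDDs accessor (the test itself pokes at the package-private persistentRdds map).

import org.apache.spark.{SparkConf, SparkContext}

object UnpersistSketch extends App {
  val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("unpersist-sketch"))
  try {
    val rdd = sc.parallelize(1 to 4, 2).cache() // mark for caching
    rdd.count()                                 // materialize: blocks get stored
    println(s"cached RDDs: ${sc.getPersistentRDDs.size}")

    rdd.unpersist()                             // release the cached blocks
    println(s"cached RDDs after unpersist: ${sc.getPersistentRDDs.size}")
  } finally sc.stop()
}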
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.server import kafka.server.QuotaType._ import kafka.utils.Logging import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.server.quota.ClientQuotaCallback import org.apache.kafka.common.utils.Time object QuotaType { case object Fetch extends QuotaType case object Produce extends QuotaType case object Request extends QuotaType case object LeaderReplication extends QuotaType case object FollowerReplication extends QuotaType case object AlterLogDirsReplication extends QuotaType } sealed trait QuotaType object QuotaFactory extends Logging { object UnboundedQuota extends ReplicaQuota { override def isThrottled(topicPartition: TopicPartition): Boolean = false override def isQuotaExceeded: Boolean = false def record(value: Long): Unit = () } case class QuotaManagers(fetch: ClientQuotaManager, produce: ClientQuotaManager, request: ClientRequestQuotaManager, leader: ReplicationQuotaManager, follower: ReplicationQuotaManager, alterLogDirs: ReplicationQuotaManager, clientQuotaCallback: Option[ClientQuotaCallback]) { def shutdown(): Unit = { fetch.shutdown produce.shutdown request.shutdown clientQuotaCallback.foreach(_.close()) } } def instantiate(cfg: KafkaConfig, metrics: Metrics, time: Time, threadNamePrefix: String): QuotaManagers = { val clientQuotaCallback = Option(cfg.getConfiguredInstance(KafkaConfig.ClientQuotaCallbackClassProp, classOf[ClientQuotaCallback])) QuotaManagers( new ClientQuotaManager(clientFetchConfig(cfg), metrics, Fetch, time, threadNamePrefix, clientQuotaCallback), new ClientQuotaManager(clientProduceConfig(cfg), metrics, Produce, time, threadNamePrefix, clientQuotaCallback), new ClientRequestQuotaManager(clientRequestConfig(cfg), metrics, time, threadNamePrefix, clientQuotaCallback), new ReplicationQuotaManager(replicationConfig(cfg), metrics, LeaderReplication, time), new ReplicationQuotaManager(replicationConfig(cfg), metrics, FollowerReplication, time), new ReplicationQuotaManager(alterLogDirsReplicationConfig(cfg), metrics, AlterLogDirsReplication, time), clientQuotaCallback ) } def clientProduceConfig(cfg: KafkaConfig): ClientQuotaManagerConfig = { if (cfg.producerQuotaBytesPerSecondDefault != Long.MaxValue) warn(s"${KafkaConfig.ProducerQuotaBytesPerSecondDefaultProp} has been deprecated in 0.11.0.0 and will be removed in a future release. 
Use dynamic quota defaults instead.") ClientQuotaManagerConfig( quotaBytesPerSecondDefault = cfg.producerQuotaBytesPerSecondDefault, numQuotaSamples = cfg.numQuotaSamples, quotaWindowSizeSeconds = cfg.quotaWindowSizeSeconds ) } def clientFetchConfig(cfg: KafkaConfig): ClientQuotaManagerConfig = { if (cfg.consumerQuotaBytesPerSecondDefault != Long.MaxValue) warn(s"${KafkaConfig.ConsumerQuotaBytesPerSecondDefaultProp} has been deprecated in 0.11.0.0 and will be removed in a future release. Use dynamic quota defaults instead.") ClientQuotaManagerConfig( quotaBytesPerSecondDefault = cfg.consumerQuotaBytesPerSecondDefault, numQuotaSamples = cfg.numQuotaSamples, quotaWindowSizeSeconds = cfg.quotaWindowSizeSeconds ) } def clientRequestConfig(cfg: KafkaConfig): ClientQuotaManagerConfig = { ClientQuotaManagerConfig( numQuotaSamples = cfg.numQuotaSamples, quotaWindowSizeSeconds = cfg.quotaWindowSizeSeconds ) } def replicationConfig(cfg: KafkaConfig): ReplicationQuotaManagerConfig = { ReplicationQuotaManagerConfig( numQuotaSamples = cfg.numReplicationQuotaSamples, quotaWindowSizeSeconds = cfg.replicationQuotaWindowSizeSeconds ) } def alterLogDirsReplicationConfig(cfg: KafkaConfig): ReplicationQuotaManagerConfig = { ReplicationQuotaManagerConfig( numQuotaSamples = cfg.numAlterLogDirsReplicationQuotaSamples, quotaWindowSizeSeconds = cfg.alterLogDirsReplicationQuotaWindowSizeSeconds ) } }
sslavic/kafka
core/src/main/scala/kafka/server/QuotaFactory.scala
Scala
apache-2.0
5,042
/* * Copyright 2015 Delft University of Technology * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package science.atlarge.graphalytics.graphx.ffm import java.util import java.lang.{Long => JLong} import science.atlarge.graphalytics.domain.algorithms.ForestFireModelParameters import science.atlarge.graphalytics.graphx.{GraphXJobTest, ValidationGraphUtils} import science.atlarge.graphalytics.validation.GraphStructure import science.atlarge.graphalytics.validation.algorithms.ffm.ForestFireModelValidationTest import science.atlarge.graphalytics.domain.algorithms.ForestFireModelParameters import science.atlarge.graphalytics.graphx.{GraphXJobTest, ValidationGraphUtils} import science.atlarge.graphalytics.validation.GraphStructure import science.atlarge.graphalytics.validation.algorithms.ffm.ForestFireModelValidationTest /** * Integration test for Forest Fire Model job on GraphX. * * @author Tim Hegeman */ class ForestFireModelJobTest extends ForestFireModelValidationTest with GraphXJobTest { override def executeDirectedForestFireModel(graph : GraphStructure, parameters : ForestFireModelParameters) : GraphStructure = { val (vertexData, edgeData) = ValidationGraphUtils.directedValidationGraphToVertexEdgeList(graph) executeForestFireModel(vertexData, edgeData, true, parameters) } override def executeUndirectedForestFireModel(graph : GraphStructure, parameters : ForestFireModelParameters) : GraphStructure = { val (vertexData, edgeData) = ValidationGraphUtils.undirectedValidationGraphToVertexEdgeList(graph) executeForestFireModel(vertexData, edgeData, false, parameters) } private def executeForestFireModel(vertexData : List[String], edgeData : List[String], directed : Boolean, parameters : ForestFireModelParameters) : GraphStructure = { val ffmJob = new ForestFireModelJob("", "", directed, "", parameters) val (vertexOutput, edgeOutput) = executeJob(ffmJob, vertexData, edgeData) val edgeLists = new util.HashMap[JLong, util.Set[JLong]]() vertexOutput.foreach { case (vid, _) => edgeLists.put(vid, new util.HashSet[JLong]())} edgeOutput.foreach { case (from, to) => edgeLists.get(from).add(to) } new GraphStructure(edgeLists) } }
tudelft-atlarge/graphalytics-platforms-graphx
src/main/test/scala/science/atlarge/graphalytics/graphx/ffm/ForestFireModelJobTest.scala
Scala
apache-2.0
2,712
/* * The MIT License (MIT) * * Copyright (c) 2015 Einevea * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package com.einevault.econcepts.model import java.util import java.util.{Locale, UUID} import com.einevault.econcepts.domain.Media import spray.http.MediaType /** * Temporal memory database for testing, using mutable structures for simplicity * Created by einevea on 02/06/15. */ object MemDB { val concepts : util.HashMap[UUID, Concept] = new util.HashMap() val labels : util.HashMap[UUID, Label] = new util.HashMap() val cIDToLabelsID : util.HashMap[UUID, List[UUID]] = new util.HashMap() val descriptions : util.HashMap[UUID, Description] = new util.HashMap() def description(uUID: UUID, locale: Locale): Option[Description] = ??? def descriptions(uUID: UUID, locale: Locale): List[Description] = ??? def mainMedia(uUID: UUID, locale: Locale): Option[Media] = ??? def medias(uUID: UUID, locale: Locale): List[Media] = ??? def mainMedia(uUID: UUID, locale: Locale, mediaType: MediaType): Option[Media] = ??? def medias(uUID: UUID, locale: Locale, mediaType: MediaType): List[Media] = ??? def mainLabel(uUID: UUID, locale: Locale): Option[Label] = ??? def labels(uUID: UUID, locale: Locale): List[Label] = ??? }
einevea/econcepts
src/main/scala/com/einevault/econcepts/model/MemDB.scala
Scala
mit
2,289
package com.ataraxer.apps.chess.scala

import com.ataraxer.apps.chess.scala.Color._
import com.ataraxer.apps.chess.scala.pieces.Piece

case class Cell(val coordinates: Coord, val piece: Option[Piece] = None) {
  def color = piece map { _.color }
  def isEmpty = (piece == None)

  override def toString: String = piece match {
    case Some(p) => p.toString
    case None    => "---"
  }

  def == (that: Cell) = that.coordinates == this.coordinates
}
ataraxer/chess-game-scala
src/main/scala/Cell.scala
Scala
mit
461
object Daemon {
  def main(args: Array[String]) {
    val t = new Thread {
      override def run(): Unit = synchronized { wait() }
    }
    t.setDaemon(true); t.start
  }
}
mdedetrich/sbt
sbt/src/sbt-test/run/daemon/src/main/scala/Daemon.scala
Scala
bsd-3-clause
167
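The whole point of the snippet above is the daemon flag: a daemon thread never keeps the JVM alive, so the parked thread does not hang the surrounding sbt test. Below is a small self-contained sketch that makes the same behaviour visible with a ticking daemon loop.

object DaemonSketch extends App {
  // A daemon thread does not keep the JVM alive: once the main (non-daemon)
  // thread finishes, the process exits even though this loop never returns.
  val t = new Thread {
    override def run(): Unit = {
      while (true) {
        println("daemon tick")
        Thread.sleep(100)
      }
    }
  }
  t.setDaemon(true)
  t.start()

  Thread.sleep(250) // let the daemon tick a couple of times
  println("main is done; the JVM exits despite the still-running daemon thread")
}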
/* Copyright 2012 Twitter, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.twitter.scalding import java.text.SimpleDateFormat import java.util.Calendar import java.util.Date import java.util.TimeZone /** * RichDate adds some nice convenience functions to the Java date/calendar classes * We commonly do Date/Time work in analysis jobs, so having these operations convenient * is very helpful. */ object RichDate { // Implicits to Java types: implicit def toDate(rd: RichDate) = rd.value implicit def toCalendar(rd: RichDate)(implicit tz: TimeZone): Calendar = { val cal = Calendar.getInstance(tz) cal.setTime(rd.value) cal } implicit def apply(d: Date): RichDate = RichDate(d.getTime) implicit def apply(d: Calendar): RichDate = RichDate(d.getTime) /** * Parse the string with one of the value DATE_FORMAT_VALIDATORS in the order listed in DateOps. * We allow either date, date with time in minutes, date with time down to seconds. * The separator between date and time can be a space or "T". */ implicit def apply(str: String)(implicit tz: TimeZone, dp: DateParser): RichDate = dp.parse(str).get /* If the format is one of the truncated DateOps formats, we can do * the upper bound, else go to the end of the day */ def upperBound(s: String)(implicit tz: TimeZone, dp: DateParser) = { val end = apply(s) (DateOps.getFormatObject(s) match { case Some(DateOps.Format.DATE_WITH_DASH) => end + Days(1) case Some(DateOps.Format.DATEHOUR_WITH_DASH) => end + Hours(1) case Some(DateOps.Format.DATETIME_WITH_DASH) => end + Minutes(1) case Some(DateOps.Format.DATETIME_HMS_WITH_DASH) => end + Seconds(1) case Some(DateOps.Format.DATETIME_HMSM_WITH_DASH) => end + Millisecs(2) case None => Days(1).floorOf(end + Days(1)) }) - Millisecs(1) } def now: RichDate = RichDate(System.currentTimeMillis()) } /** * A value class wrapper for milliseconds since the epoch. Its tempting to extend * this with AnyVal but this causes problem with Java code. */ case class RichDate(val timestamp: Long) extends Ordered[RichDate] { // these are mutable, don't keep them around def value: Date = new java.util.Date(timestamp) def +(interval: Duration) = interval.addTo(this) def -(interval: Duration) = interval.subtractFrom(this) //Inverse of the above, d2 + (d1 - d2) == d1 def -(that: RichDate) = AbsoluteDuration.fromMillisecs(timestamp - that.timestamp) override def compare(that: RichDate): Int = java.lang.Long.compare(timestamp, that.timestamp) //True of the other is a RichDate with equal value, or a Date equal to value override def equals(that: Any) = that match { case d: Date => d.getTime == timestamp case RichDate(ts) => ts == timestamp case _ => false } /** * Use String.format to format the date, as opposed to toString with uses SimpleDateFormat */ def format(pattern: String)(implicit tz: TimeZone): String = String.format(pattern, toCalendar(tz)) /** * Make sure the hashCode is the same as Date for the (questionable) choice * to make them equal. 
this is the same as what java does (and only sane thing): * http://grepcode.com/file/repository.grepcode.com/java/root/jdk/openjdk/6-b14/java/util/Date.java#989 */ override def hashCode = (timestamp.toInt) ^ ((timestamp >> 32).toInt) def toCalendar(implicit tz: TimeZone) = { val cal = Calendar.getInstance(tz) cal.setTime(value) cal } override def toString = value.toString /** * Use SimpleDateFormat to print the string */ def toString(fmt: String)(implicit tz: TimeZone): String = { val cal = toCalendar(tz) val sdfmt = new SimpleDateFormat(fmt) sdfmt.setCalendar(cal) sdfmt.format(cal.getTime) } }
nkhuyu/scalding
scalding-date/src/main/scala/com/twitter/scalding/RichDate.scala
Scala
apache-2.0
4,290
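RichDate is mostly used through its arithmetic and formatting helpers. A short usage sketch follows, assuming the scalding-date module is on the classpath; Days is one of the Duration companions referenced in the file above and, like formatting, needs an implicit TimeZone in scope.

import java.util.TimeZone
import com.twitter.scalding.{Days, RichDate}

object RichDateSketch extends App {
  implicit val tz: TimeZone = TimeZone.getTimeZone("UTC")

  val now      = RichDate.now   // wraps System.currentTimeMillis
  val tomorrow = now + Days(1)  // Duration arithmetic, as used in upperBound above
  val gap      = tomorrow - now // AbsoluteDuration between two RichDates

  println(now.toString("yyyy-MM-dd HH:mm:ss")) // SimpleDateFormat-based printing
  println(tomorrow > now)                      // Ordered[RichDate] comparison
  println(gap)
}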
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.activemq.apollo.amqp import org.apache.activemq.apollo.broker.protocol import protocol.{MessageCodecFactory, MessageCodec} import org.fusesource.hawtbuf.Buffer._ import org.apache.activemq.apollo.broker.Message import org.apache.activemq.apollo.broker.store.MessageRecord import org.fusesource.hawtbuf.Buffer import org.fusesource.hawtbuf.AsciiBuffer import org.fusesource.hawtbuf.UTF8Buffer import org.apache.qpid.proton.amqp.{UnsignedByte, UnsignedShort, UnsignedLong, UnsignedInteger} import org.apache.qpid.proton.amqp.messaging.{Properties, Header} import org.apache.qpid.proton.message.impl.MessageImpl object AmqpMessageCodecFactory extends MessageCodecFactory.Provider { def create = Array[MessageCodec](AmqpMessageCodec) } object AmqpMessageCodec extends MessageCodec { def ascii_id = ascii("amqp-1.0") def id = "amqp-1.0" def encode(message: Message):MessageRecord = { val rc = new MessageRecord rc.codec = ascii_id rc.buffer = message.encoded rc } def decode(message: MessageRecord) = { assert( message.codec == ascii_id ) new AmqpMessage(message.buffer, null) } } object AmqpMessage { val SENDER_CONTAINER_KEY = "sender-container" val prefixVendor = "JMS_AMQP_"; val prefixDeliveryAnnotationsKey = prefixVendor+"DA_"; val prefixMessageAnnotationsKey= prefixVendor+"MA_"; val prefixFooterKey = prefixVendor+"FT_"; val firstAcquirerKey = prefixVendor + "FirstAcquirer"; val subjectKey = prefixVendor +"Subject"; val contentTypeKey = prefixVendor +"ContentType"; val contentEncodingKey = prefixVendor +"ContentEncoding"; val replyToGroupIDKey = prefixVendor +"ReplyToGroupID"; } import AmqpMessage._ class AmqpMessage(private var encoded_buffer:Buffer, private var decoded_message:org.apache.qpid.proton.message.Message=null) extends org.apache.activemq.apollo.broker.Message { /** * The encoder/decoder of the message */ def codec = AmqpMessageCodec def decoded = { if( decoded_message==null ) { val amqp = new MessageImpl(); var offset = encoded_buffer.offset var len = encoded_buffer.length while( len > 0 ) { var decoded = amqp.decode(encoded_buffer.data, offset, len); assert(decoded > 0, "Make progress decoding the message") offset += decoded; len -= decoded; } decoded_message = amqp } decoded_message } override def encoded = { if( encoded_buffer == null ) { var buffer = new Array[Byte](1024); var c = decoded_message.asInstanceOf[MessageImpl].encode2(buffer, 0, buffer.length); if( c > buffer.length) { buffer = new Array[Byte](c); decoded_message.encode(buffer, 0, c); } encoded_buffer = new Buffer(buffer, 0, c) } encoded_buffer } override def message_group = decoded.getGroupId def getBodyAs[T](toType : Class[T]): T = { if (toType == classOf[Buffer]) { encoded } else if( toType == classOf[String] ) { encoded.utf8 } else if 
(toType == classOf[AsciiBuffer]) { encoded.ascii } else if (toType == classOf[UTF8Buffer]) { encoded.utf8 } else { null } }.asInstanceOf[T] def getLocalConnectionId: AnyRef = { if ( decoded.getDeliveryAnnotations!=null ) { decoded.getDeliveryAnnotations.getValue.get(SENDER_CONTAINER_KEY) match { case x:String => x case _ => null } } else { null } } def getApplicationProperty(name:String) = { if( decoded.getApplicationProperties !=null ) { decoded.getApplicationProperties.getValue.get(name).asInstanceOf[AnyRef] } else { null } } def getMessageAnnotationProperty(name:String) = { if( decoded.getMessageAnnotations !=null ) { var ma = decoded.getMessageAnnotations var rc = ma.getValue.get(name) if( rc == null ) { rc = ma.getValue.get(org.apache.qpid.proton.amqp.Symbol.valueOf(name)) } rc.asInstanceOf[AnyRef] } else { null } } def getDeliveryAnnotationProperty(name:String) = { if( decoded.getDeliveryAnnotations !=null ) { decoded.getDeliveryAnnotations.getValue.get(name).asInstanceOf[AnyRef] } else { null } } def getFooterProperty(name:AnyRef) = { if( decoded.getFooter !=null ) { decoded.getFooter.getValue.get(name).asInstanceOf[AnyRef] } else { null } } def getHeader[T](default:T)(func: (Header)=>T) = { if( decoded.getHeader == null ) { default } else { func(decoded.getHeader) } } def getProperties[T](default:T)(func: (Properties)=>T) = { if( decoded.getProperties == null ) { default } else { func(decoded.getProperties) } } // object JMSFilterable extends Filterable { // def getBodyAs[T](kind: Class[T]): T = AmqpMessage.this.getBodyAs(kind) // def getLocalConnectionId: AnyRef = AmqpMessage.this.getLocalConnectionId // def getProperty(name: String) = { // } // } def getProperty(name: String) = { val rc:AnyRef = (name match { case "JMSDeliveryMode" => getHeader[AnyRef](null)(header=> if(header.getDurable) "PERSISTENT" else "NON_PERSISTENT" ) case "JMSPriority" => new java.lang.Integer(decoded.getPriority) case "JMSType" => getMessageAnnotationProperty("x-opt-jms-type") case "JMSMessageID" => getProperties[AnyRef](null)(_.getMessageId) case "JMSDestination" => getProperties[String](null)(_.getTo) case "JMSReplyTo" => getProperties[String](null)(_.getReplyTo) case "JMSCorrelationID" => getProperties[AnyRef](null)(_.getCorrelationId) // case "JMSExpiration" => // new java.lang.Long(decoded.getTtl) case "JMSExpiration" => getProperties[AnyRef](null)(x=> Option(x.getAbsoluteExpiryTime()).map(y=> new java.lang.Long(y.getTime)).getOrElse(null)) case "JMSXDeliveryCount" => getHeader[AnyRef](null)(_.getDeliveryCount) case "JMSXUserID" => getProperties[AnyRef](null)(_.getUserId) case "JMSXGroupID" => getProperties[AnyRef](null)(_.getGroupId) case "JMSXGroupSeq" => getProperties[AnyRef](null)(_.getGroupSequence) case x if x == firstAcquirerKey => getHeader[AnyRef](null)(_.getFirstAcquirer) case x if x == subjectKey => getProperties[AnyRef](null)(_.getSubject) case x if x == contentTypeKey => getProperties[AnyRef](null)(_.getContentType) case x if x == contentEncodingKey => getProperties[AnyRef](null)(_.getContentEncoding) case x if x == replyToGroupIDKey => getProperties[AnyRef](null)(_.getReplyToGroupId) case x if x.startsWith(prefixDeliveryAnnotationsKey) => getDeliveryAnnotationProperty(x) case x if x.startsWith(prefixMessageAnnotationsKey) => getMessageAnnotationProperty(x) case x if x.startsWith(prefixFooterKey) => getFooterProperty(x) case x => getApplicationProperty(x) }) match { case x:UnsignedInteger => new java.lang.Long(x.longValue()); case x:UnsignedLong => new java.lang.Long(x.longValue()); 
case x => x } rc } override def headers_as_json: java.util.HashMap[String, Object] = { val rc = new java.util.HashMap[String, Object]() import collection.JavaConversions._ def convert(v: AnyRef) = (v match { case v: UnsignedByte => new java.lang.Integer(v.shortValue()) case v: UnsignedShort => new java.lang.Integer(v.intValue()) case v: UnsignedInteger => new java.lang.Long(v.longValue()) case v: UnsignedLong => new java.lang.Long(v.longValue()) case _ => v }) if ( decoded.getHeader!=null ) { val header = decoded.getHeader if ( header.getDeliveryCount !=null ) { rc.put("header.delivery_count", new java.lang.Long(header.getDeliveryCount.longValue())) } if ( header.getDurable !=null ) { rc.put("header.durable", new java.lang.Boolean(header.getDurable.booleanValue())) } if ( header.getFirstAcquirer !=null ) { rc.put("header.first_acquirer", new java.lang.Boolean(header.getFirstAcquirer.booleanValue())) } if ( header.getPriority !=null ) { rc.put("header.priority", new java.lang.Integer(header.getPriority.intValue())) } if ( header.getTtl !=null ) { rc.put("header.ttl", new java.lang.Long(header.getTtl.longValue())) } } if( decoded.getProperties != null ) { val properties = decoded.getProperties if ( properties.getAbsoluteExpiryTime !=null ) { rc.put("property.absolute_expiry_time", new java.lang.Long(properties.getAbsoluteExpiryTime.getTime())) } if ( properties.getContentEncoding !=null ) { rc.put("property.content_encoding", properties.getContentEncoding.toString) } if ( properties.getContentType !=null ) { rc.put("property.content_type", properties.getContentType.toString) } if ( properties.getCorrelationId !=null ) { rc.put("property.correlation_id", properties.getCorrelationId.toString) } if ( properties.getCreationTime !=null ) { rc.put("property.creation_time", new java.lang.Long(properties.getCreationTime.getTime)) } if ( properties.getGroupId !=null ) { rc.put("property.group_id", properties.getGroupId) } if ( properties.getGroupSequence !=null ) { rc.put("property.group_sequence", new java.lang.Long(properties.getGroupSequence.longValue())) } if ( properties.getMessageId !=null ) { rc.put("property.message_id", properties.getMessageId) } if ( properties.getReplyTo !=null ) { rc.put("property.reply_to", properties.getReplyTo) } if ( properties.getReplyToGroupId !=null ) { rc.put("property.reply_to_group_id", properties.getReplyToGroupId) } if ( properties.getSubject !=null ) { rc.put("property.subject", properties.getSubject) } if ( properties.getTo !=null ) { rc.put("property.to", properties.getTo) } if ( properties.getUserId !=null ) { rc.put("property.user_id", properties.getUserId.toString) } } if( decoded.getDeliveryAnnotations !=null ) { val annotations = decoded.getDeliveryAnnotations for( (k,v:AnyRef) <- annotations.getValue ) { rc.put("annotation."+k, convert(v)) } } if( decoded.getApplicationProperties !=null ) { val properties = decoded.getApplicationProperties for( (k,v:AnyRef) <- properties.getValue ) { rc.put("app."+k, convert(v)) } } if( decoded.getFooter !=null ) { val footer = decoded.getFooter for( (k,v:AnyRef) <- footer.getValue ) { rc.put("footer."+k, convert(v)) } } rc } def release() {} def retain() {} def retained(): Int = 0 }
chirino/activemq-apollo
apollo-amqp/src/main/scala/org/apache/activemq/apollo/amqp/AmqpMessage.scala
Scala
apache-2.0
11,814
package at.logic.gapt.formats.latex import at.logic.gapt.proofs.HOLSequent import at.logic.gapt.expr._ import at.logic.gapt.expr.hol._ trait SequentsListLatexExporter { def getOutput: java.io.Writer def close = getOutput.close private val nLine = sys.props( "line.separator" ) val smskip = nLine + nLine val mdskip = smskip + """\\rule[-0.1cm]{5cm}{0.01cm} \\\\""" + smskip private def exportSequent( seq: HOLSequent ) = { if ( seq.antecedent.size > 0 ) exportTerm1( seq.antecedent.head ) if ( seq.antecedent.size > 1 ) seq.antecedent.tail.foreach( x => { getOutput.write( smskip ); /*getOutput.write(",");*/ exportTerm1( x ) } ) getOutput.write( smskip ); getOutput.write( """ $\\vdash$ """ ); getOutput.write( smskip ) if ( seq.succedent.size > 0 ) exportTerm1( seq.succedent.head ) if ( seq.succedent.size > 1 ) seq.succedent.tail.foreach( x => { getOutput.write( smskip ); /*getOutput.write(",");*/ exportTerm1( x ) } ) } def exportSequentList( ls: List[HOLSequent], sections: List[Tuple2[String, List[Tuple2[Any, Any]]]] ): SequentsListLatexExporter = { // first obtain information about the clauses, replace lambda expressions of constant type by constants (and describe it at the top of the page) // Also describe the types of all constants getOutput.write( """\\documentclass[10pt, a4paper]{article}""" ) getOutput.write( nLine ) getOutput.write( """\\""" ) getOutput.write( """usepackage{color}""" ) getOutput.write( nLine ) getOutput.write( """\\setlength{\\topmargin}{-1.5cm}""" ) getOutput.write( nLine ) getOutput.write( """\\setlength{\\headheight}{0cm}""" ) getOutput.write( nLine ) getOutput.write( """\\setlength{\\headsep}{0cm}""" ) getOutput.write( nLine ) getOutput.write( """\\setlength{\\textheight}{1.25\\textheight}""" ) getOutput.write( nLine ) getOutput.write( """\\setlength{\\oddsidemargin}{-1.5cm}""" ) getOutput.write( nLine ) getOutput.write( """\\setlength{\\evensidemargin}{-1.5cm}""" ) getOutput.write( nLine ) getOutput.write( """\\setlength{\\textwidth}{1.4\\textwidth}""" ) getOutput.write( nLine ) getOutput.write( """\\begin{document}""" ) getOutput.write( nLine ) sections.foreach( x => { getOutput.write( """\\section{""" + x._1 + "}" ) getOutput.write( nLine ) getOutput.write( """\\begin{tabular}{ll}""" ) x._2.foreach( y => { printOnMatch( y._1 ) getOutput.write( " & " ) printOnMatch( y._2 ) getOutput.write( """ \\\\ """ ) getOutput.write( nLine ) } ) getOutput.write( """\\end{tabular}""" ) getOutput.write( nLine ) } ) getOutput.write( """\\section{Clauses}""" ) getOutput.write( nLine ) ls.foreach( x => { exportSequent( x ); getOutput.write( mdskip ) } ) printTypes( ls ) getOutput.write( """\\end{document}""" ) this } private def getFSVars( fs: HOLSequent ): Set[Var] = fs.formulas.toSet.flatMap( getVars ) private def getVars( l: Expr ): Set[Var] = l match { case v: Var => Set( v ) case c: Const => Set() case Abs( x, t ) => getVars( t ) ++ getVars( x ) case App( s, t ) => getVars( s ) ++ getVars( t ) } private def getFSConsts( fs: HOLSequent ): Set[Const] = fs.formulas.toSet.flatMap( getConsts ) private def getConsts( l: Expr ): Set[Const] = l match { case v: Var => Set() case c: Const => Set( c ) case Abs( x, t ) => getConsts( t ) ++ getConsts( x ) case App( s, t ) => getConsts( s ) ++ getConsts( t ) } def printTypes( l: List[HOLSequent] ) = { val ( vmap, cmap ) = getTypes( l ) getOutput.write( "\\\\subsection{Variable Types}" + nLine ) getOutput.write( "\\\\[\\\\begin{array}{ll}" + nLine ) for ( ( key, set ) <- vmap.toList.sortBy( _._1 )( TAOrdering ) ) { var set_ = set.toList.sorted 
while ( set_.nonEmpty ) { val ( ten, rest ) = set_.splitAt( 10 ) getOutput.write( ten.mkString( "", ", ", " & " ) + typeToString( key ) ) getOutput.write( " \\\\\\\\" + nLine ) set_ = rest } } getOutput.write( """\\end{array}\\]""" ) getOutput.write( "\\\\subsection{Constant Types}" + nLine ) getOutput.write( "\\\\[\\\\begin{array}{ll}" + nLine ) for ( ( key, set ) <- cmap.toList.sortBy( _._1 )( TAOrdering ) ) { var set_ = set.toList.sorted while ( set_.nonEmpty ) { val ( ten, rest ) = set_.splitAt( 10 ) getOutput.write( ten.mkString( "", ", ", " & " ) + typeToString( key ) ) getOutput.write( " \\\\\\\\" + nLine ) set_ = rest } } getOutput.write( """\\end{array}\\]""" ) } def typeToString( t: Ty, outermost: Boolean = true ): String = t match { case TBase( name, _ ) => name case t1 -> t2 => typeToString_( t1 ) + " > " + typeToString_( t2 ) } def typeToString_( t: Ty ): String = t match { case TBase( name, _ ) => name case t1 -> t2 => ( "(" ) + typeToString_( t1 ) + " > " + typeToString_( t2 ) + ")" } private def getTypes( l: List[HOLSequent] ) = { val vars = l.foldLeft( Set[Var]() )( ( acc, fs ) => acc ++ getFSVars( fs ) ) val consts = l.foldLeft( Set[Const]() )( ( acc, fs ) => acc ++ getFSConsts( fs ) ) val svars = vars.map( _.name.toString() ) val cvars = consts.map( _.name.toString() ) if ( cvars.exists( svars.contains( _ ) ) || svars.exists( cvars.contains( _ ) ) ) println( "WARNING: exported const and varset are not disjunct!" ) val varmap = vars.foldLeft( Map[Ty, Set[String]]() )( ( map, v ) => { if ( map contains v.ty ) { val nset = map( v.ty ) + v.name.toString() map + ( ( v.ty, nset ) ) } else { map + ( ( v.ty, Set( v.name.toString() ) ) ) } } ) val constmap = consts.foldLeft( Map[Ty, Set[String]]() )( ( map, v ) => { if ( map contains v.ty ) { val nset = map( v.ty ) + v.name.toString() map + ( ( v.ty, nset ) ) } else { map + ( ( v.ty, Set( v.name.toString() ) ) ) } } ) ( varmap, constmap ) } private def printOnMatch( a: Any ) = a match { case le: Expr => exportTerm1( le ) case ta: Ty => getOutput.write( "$" + LatexExporter( ta ) + "$" ) case _ => getOutput.write( a.toString ) } private def exportTerm1( f: Expr ) = { getOutput.write( "$" ) LatexExporter( f ) getOutput.write( "$" ) } /*private def replaceTerm(f: Expr, defs: Map[Int, Tuple2[Abs,Var]]): Expr = f match { case v: Var => v case App(a,b) => App(replaceTerm(a, defs), replaceTerm(b, defs)) case a @ Abs(x,b) => defs.get(extractAbs(a.asInstanceOf[Abs])) match { case Some(v) => v._2 case _ => Abs(x, replaceTerm(b, defs)) } }*/ }
gebner/gapt
core/src/main/scala/at/logic/gapt/formats/latex/SequentsListLatexExporter.scala
Scala
gpl-3.0
6,797
package kr.ac.kaist.ir.deep.train import kr.ac.kaist.ir.deep.fn._ import kr.ac.kaist.ir.deep.network.{AutoEncoder, Network} import kr.ac.kaist.ir.deep.rec.BinaryTree import org.apache.spark.annotation.Experimental /** * __Input Operation__ : VectorTree as Input & Unfolding Recursive Auto Encoder Training (no output type) * * ::Experimental:: * @note This cannot be applied into non-AutoEncoder tasks * @note This is designed for Unfolding RAE, in * [[http://ai.stanford.edu/~ang/papers/nips11-DynamicPoolingUnfoldingRecursiveAutoencoders.pdf this paper]] * * @param corrupt Corruption that supervises how to corrupt the input matrix. `(Default : [[kr.ac.kaist.ir.deep.train.NoCorruption]])` * @param error An objective function `(Default: [[kr.ac.kaist.ir.deep.fn.SquaredErr]])` * * @example * {{{var make = new URAEType(error = CrossEntropyErr) * var corruptedIn = make corrupted in * var out = make onewayTrip (net, corruptedIn)}}} */ @Experimental class URAEType(override val corrupt: Corruption = NoCorruption, override val error: Objective = SquaredErr) extends TreeType { /** * Apply & Back-prop given single input * * @param net A network that gets input * @param delta Sequence of delta updates */ def roundTrip(net: Network, delta: Seq[ScalarMatrix]) = (in: BinaryTree, real: Null) ⇒ net match { case net: AutoEncoder ⇒ val out = in forward net.encode // Decode phrase of reconstruction var terminals = in.backward(out, net.decode) while (terminals.nonEmpty) { val leaf = terminals.head terminals = terminals.tail leaf.out = error.derivative(leaf.out, leaf.x) } // Error propagation for decoder val err = in forward net.decode_!(delta.take(2).toIterator) // Error propagation for encoder in backward(err, net.encode_!(delta.takeRight(2).toIterator)) } /** * Apply given input and compute the error * * @param net A network that gets input * @param pair (Input, Real output) for error computation. * @return error of this network */ def lossOf(net: Network)(pair: (BinaryTree, Null)): Scalar = net match { case net: AutoEncoder ⇒ var sum = 0.0f val in = pair._1 // Encode phrase of Reconstruction val out = in forward net.apply // Decode phrase of reconstruction var terminals = in.backward(out, net.reconstruct) val size = terminals.size while (terminals.nonEmpty) { val leaf = terminals.head terminals = terminals.tail sum += error(leaf.out, leaf.x) } sum case _ ⇒ 0.0f } /** * Make validation output * * @return input as string */ def stringOf(net: Network, pair: (BinaryTree, Null)): String = net match { case net: AutoEncoder ⇒ val string = StringBuilder.newBuilder val in = pair._1 // Encode phrase of Reconstruction val out = in forward net.apply // Decode phrase of reconstruction var terminals = in.backward(out, net.reconstruct) while (terminals.nonEmpty) { val leaf = terminals.head terminals = terminals.tail string append s"IN: ${leaf.x.mkString} URAE → OUT: ${leaf.out.mkString};" } string.mkString case _ ⇒ "NOT AN AUTOENCODER" } }
nearbydelta/ScalaNetwork
src/main/scala/kr/ac/kaist/ir/deep/train/URAEType.scala
Scala
gpl-2.0
3,470
package com.sksamuel.elastic4s

import org.elasticsearch.action.search.SearchResponse
import org.elasticsearch.client.Client

import scala.concurrent.Future

trait ScrollDsl {

  implicit object ScrollExecutable extends Executable[SearchScrollDefinition, SearchResponse] {
    override def apply(client: Client, s: SearchScrollDefinition): Future[SearchResponse] = {
      val request = client.prepareSearchScroll(s.id)
      s._keepAlive.foreach(request.setScroll)
      injectFuture(request.execute)
    }
  }
}

class SearchScrollDefinition(val id: String) {

  var _keepAlive: Option[String] = None

  def keepAlive(time: String): this.type = {
    _keepAlive = Option(time)
    this
  }
}
l15k4/elastic4s
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/ScrollDsl.scala
Scala
apache-2.0
692
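A scroll continuation is built from the scroll id returned by a previous search plus a keep-alive window, and the implicit ScrollExecutable turns it into a Future[SearchResponse] once a client is available. The sketch below shows that wiring; the scroll id is a made-up placeholder and the object simply mixes in ScrollDsl to bring the executable into scope.

import scala.concurrent.Future
import org.elasticsearch.action.search.SearchResponse
import org.elasticsearch.client.Client
import com.sksamuel.elastic4s._

object ScrollSketch extends ScrollDsl {
  // The id comes from a previous search response; keepAlive tells Elasticsearch
  // how long to hold the scroll cursor open between fetches.
  val scroll = new SearchScrollDefinition("c2Nhbjs1OzQ0...") // hypothetical scroll id
    .keepAlive("1m")

  // With a live client, the implicit ScrollExecutable turns this into a future:
  def continue(client: Client): Future[SearchResponse] =
    ScrollExecutable(client, scroll)
}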
package lila.relay import akka.actor._ import akka.pattern.{ ask, pipe } import scala.concurrent.duration._ import scala.util.{ Try, Success, Failure } import lila.hub.SequentialActor private[relay] final class GameActor( fics: ActorRef, ficsId: Int, relayId: String, getRelayGame: () => Fu[Option[Relay.Game]], setEnd: () => Funit, importer: Importer) extends SequentialActor { import GameActor._ context setReceiveTimeout 1.hour // override def preStart() { // println(s"[$ficsId] start actor $self") // } def process = { case GetTime => withRelayGame { g => implicit val t = makeTimeout seconds 10 fics ? command.GetTime(g.white) mapTo manifest[command.GetTime.Result] addEffect { case Failure(err) => fufail(err) case Success(data) => importer.setClocks(g.id, data.white, data.black) } addFailureEffect onFailure } case move: GameEvent.Move => withRelayGame { g => if (g.white == move.white && g.black == move.black) importer.move(g.id, move.san, move.ply) >>- { self ! GetTime } else end } case clock: GameEvent.Clock => withRelayGame { g => fuccess { clock.player match { case p if p == g.white => importer.setClock(g.id, chess.White, clock.tenths) case p if p == g.black => importer.setClock(g.id, chess.Black, clock.tenths) case p => onFailure(new Exception(s"Invalid clock event (no such player) $clock")) } } } case [email protected](_) => withRelayGame { g => fuccess { // println(s"[$ficsId] http://en.l.org/${g.id} draw") importer.draw(g.id) >> end } } case [email protected](_, loser) => withRelayGame { g => // println(s"[$ficsId] http://en.l.org/${g.id} $loser resigns") g colorOf loser match { case None => end case Some(color) => importer.resign(g.id, color) >> end } } case Recover => implicit val t = makeTimeout seconds 60 fics ? command.Moves(ficsId) mapTo manifest[command.Moves.Result] flatMap { case Failure(err) => fufail(err) case Success(data) => withRelayGame { g => if (g.white == data.white.ficsName && g.black == data.black.ficsName) importer.full(relayId, g.id, data) addEffect { case true => self ! GetTime // re-observe. If a limit was reached before, // but a slot became available, use it. fics ! FICS.Observe(ficsId) case false => } else fufail(s"Can't import wrong game") } } addFailureEffect (_ => end) case ReceiveTimeout => end } override def onFailure(e: Exception) { println(s"[$ficsId] ERR ${e.getMessage}") } def end = setEnd() >>- { // println(s"[$ficsId] end game $self") fics ! FICS.Unobserve(ficsId) // self ! SequentialActor.Terminate } def withRelayGame[A](f: Relay.Game => Fu[A]): Funit = getRelayGame() flatMap { case None => fufail(s"[$ficsId] No game found!") case Some(g) => f(g).void } } object GameActor { case object Recover case object GetTime }
pavelo65/lila
modules/relay/src/main/GameActor.scala
Scala
mit
3,341
package microtools.patch import com.github.plokhotnyuk.jsoniter_scala.core.{JsonValueCodec, writeToString} import microtools.models.Problems import microtools.patch.JsonPointer._ import microtools._ import play.api.libs.functional.syntax._ import play.api.libs.json.Reads._ import play.api.libs.json._ case class PatchWhitelist(allowed: Seq[JsPath]) extends AnyVal /** * RFC6902 kind of patch operation. */ sealed trait Patch { def path: JsPath @deprecated( "UNCHECKED PATCH APPLICATIONS ARE HUGE SECURITY LIABILITY, please use `patch.apply(JsValue, PatchWhitelist)` instead to specify which paths are ok to patch (hint: not all, usually not the id or the owner etc..)", "20170531v0114" ) def apply(json: JsValue): DecidedBusinessTry[JsValue] = BusinessTry.transformJson(json, transformation) def apply(json: JsValue, ev: PatchWhitelist): DecidedBusinessTry[JsValue] = { if (ev.allowed.exists(allowed => path.path.startsWith(allowed.path))) BusinessTry.transformJson(json, transformation) else BusinessFailure(Problems.FORBIDDEN.withDetails(s"patch operation not allowed on $path")) } def transformation: Reads[_ <: JsValue] } case class Remove(path: JsPath) extends Patch { override def transformation: Reads[JsValue] = Reads[JsValue] { json => if (path(json).isEmpty) { JsSuccess(json) } else { json.validate((path.json.prune)) } } } case class Add(path: JsPath, value: JsValue) extends Patch { override def transformation: Reads[_ <: JsValue] = { Reads[JsValue] { json => if (path(json).isEmpty) { json.validate(__.json.update(path.json.put(value))) } else { json.validate(path.json.update(Reads { case arr: JsArray => JsSuccess(arr :+ value) case JsNull => JsSuccess(value) case _ => JsError("error.patch.add.value.exists") })) } } } } case class Replace(path: JsPath, value: JsValue) extends Patch { override def transformation: Reads[_ <: JsValue] = Reads[JsValue] { json => if (path(json).isEmpty) { json.validate(__.json.update(path.json.put(value))) } else { json.validate((path.json.prune and path.json.put(value)).reduce) } } } object Patch extends JsonFormats { val patchRead: Reads[Patch] = (__ \\ "op").read[String].flatMap { case "remove" => path.map(Remove) case "add" => (path and value)(Add) case "replace" => (path and value)(Replace) case unsupported => Reads(_ => JsError(s"Unsupported patch operation: $unsupported")) } private val patchWrite: OWrites[Patch] = OWrites { case Remove(path) => Json.obj("op" -> "remove", "path" -> path) case Add(path, value) => Json.obj("op" -> "add", "path" -> path, "value" -> value) case Replace(path, value) => Json.obj("op" -> "replace", "path" -> path, "value" -> value) } private lazy val path = __.\\("path").read[JsPath] private lazy val value = __.\\("value").read[JsValue] implicit val patchFormat: OFormat[Patch] = OFormat(patchRead, patchWrite) def applyPatches[T: Format](patches: IterableOnce[Patch], whiteList: PatchWhitelist)( entity: T ): DecidedBusinessTry[T] = { patches.iterator.foldLeft[DecidedBusinessTry[JsValue]](BusinessSuccess(Json.toJson(entity))) { case (fail: BusinessFailure, _) => fail case (BusinessSuccess(json), patch) => patch(json, whiteList) } match { case fail: BusinessFailure => fail case BusinessSuccess(json) => BusinessTry.validateJson[T](json) } } def applyPatchesJsoniter[T: JsonValueCodec]( patches: IterableOnce[Patch], whiteList: PatchWhitelist )( entity: T ): DecidedBusinessTry[T] = { val encodedString = writeToString(entity) val decodedJson = Json.parse(encodedString) 
patches.iterator.foldLeft[DecidedBusinessTry[JsValue]](BusinessSuccess(decodedJson)) { case (fail: BusinessFailure, _) => fail case (BusinessSuccess(json), patch) => patch(json, whiteList) } match { case fail: BusinessFailure => fail case BusinessSuccess(json) => BusinessTry.validateJsoniter[T](Json.stringify(json)) } } }
21re/play-micro-tools
src/main/scala/microtools/patch/Patch.scala
Scala
mit
4,262
/* * (c) Copyright 2016 Hewlett Packard Enterprise Development LP * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cogx.compiler.codegenerator.opencl.cpukernels import cogx.platform.types.KernelTypes.{KernelType, CPUOuterProductKernelType} import cogx.platform.types._ import cogx.compiler.parser.op.OuterProductOp import cogx.platform.opencl.{OpenCLCpuSingleOutputKernel, OpenCLFieldRegister} import cogx.platform.cpumemory.ScalarFieldMemory /** * Computes the outer product of the two field inputs. Code ported from the * Cog 3.5 ScalarField class. * * @author Dick Carter */ private[cogx] class CPUOuterProductKernel private (in: Array[VirtualFieldRegister], op: Opcode, resultType: FieldType) extends OpenCLCpuSingleOutputKernel(op, in, resultType) { /** The type of the kernel, either DeviceKernel, or one of a number of CPU kernel types. */ override val kernelType: KernelType = CPUOuterProductKernelType /** Outer product of "in1" and "in2". * * If either field is 0 dimensional, it's * treated as a scalar multiplication of the other field. */ def outerProduct(in1: ScalarFieldMemory, in2: ScalarFieldMemory, out: ScalarFieldMemory) { def inRead(mem: ScalarFieldMemory)(indices: Array[Int]) = mem.dimensions match { case 0 => mem.read() case 1 => mem.read(indices(0)) case 2 => mem.read(indices(0), indices(1)) case 3 => mem.read(indices(0), indices(1), indices(2)) case _ => throw new RuntimeException("Input dimension too big.") } val in1Reader = inRead(in1)(_) val in2Reader = inRead(in2)(_) def outWrite(mem: ScalarFieldMemory)(indices: Array[Int], value: Float) = mem.dimensions match { case 0 => mem.write(value) case 1 => mem.write(indices(0), value) case 2 => mem.write(indices(0), indices(1), value) case 3 => mem.write(indices(0), indices(1), indices(2), value) case _ => throw new RuntimeException("Output dimension too big.") } val outWriter = outWrite(out)(_, _) for (i <- in1.fieldShape.indices) { val iValue = in1Reader(i) for (j <- in2.fieldShape.indices) { val jValue = in2Reader(j) val resultIndex = Array.concat(i, j) outWriter(resultIndex, iValue * jValue) } } } def compute(in: Array[OpenCLFieldRegister], out: OpenCLFieldRegister) { val inputField1 = in(0).slave.read.asInstanceOf[ScalarFieldMemory] val inputField2 = in(1).slave.read.asInstanceOf[ScalarFieldMemory] val outputField = out.master.cpuMemory.asInstanceOf[ScalarFieldMemory] outerProduct(inputField1, inputField2, outputField) // Copy the CPU memory to the GPU. out.master.write } /** Create a clone of this kernel that uses a new set of virtual field registers * as inputs. Useful for breaking a large circuit apart into smaller subcircuits. */ def copyWithNewInputs(inputs: Array[VirtualFieldRegister]): AbstractKernel = new CPUOuterProductKernel(inputs, op, resultType) } private[cogx] object CPUOuterProductKernel { def apply(in: Array[VirtualFieldRegister], operation: Opcode, resultType: FieldType) = { require(operation == OuterProductOp) new CPUOuterProductKernel(in, operation, resultType) } }
hpe-cct/cct-core
src/main/scala/cogx/compiler/codegenerator/opencl/cpukernels/CPUOuterProductKernel.scala
Scala
apache-2.0
3,765
/*
 * Scala.js (https://www.scala-js.org/)
 *
 * Copyright EPFL.
 *
 * Licensed under Apache License 2.0
 * (https://www.apache.org/licenses/LICENSE-2.0).
 *
 * See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 */

package org.scalajs.testing.bridge

import scala.scalajs.js
import scala.scalajs.js.annotation._

/* Use the queue execution context (based on JS promises) explicitly:
 * We do not have anything better at our disposal and it is acceptable in
 * terms of fairness: JSRPC only handles in-between test communication, so any
 * future chain will "yield" to I/O (waiting for a message) or an RPC handler in
 * a finite number of steps.
 */
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue

import scala.concurrent.duration._

import org.scalajs.testing.common.RPCCore

/** JS RPC Core. Uses `scalajsCom`. */
private[bridge] final object JSRPC extends RPCCore {
  Com.init(handleMessage _)

  override protected def send(msg: String): Unit = Com.send(msg)

  @js.native
  @JSGlobal("scalajsCom")
  private object Com extends js.Object {
    def init(onReceive: js.Function1[String, Unit]): Unit = js.native
    def send(msg: String): Unit = js.native

    // We support close, but do not use it. The JS side just terminates.
    // def close(): Unit = js.native
  }
}
scala-js/scala-js
test-bridge/src/main/scala/org/scalajs/testing/bridge/JSRPC.scala
Scala
apache-2.0
1,353
package omnibus.configuration

import akka.actor.ActorContext
import spray.routing.authentication._

import scala.concurrent.Future

object Security {

  def adminPassAuthenticator(userPass: Option[UserPass])(implicit context: ActorContext) = {
    implicit val system = context.system
    implicit val executionContext = context.dispatcher

    Future {
      if (userPass.exists(up ⇒ up.user == Settings(system).Admin.Name && up.pass == Settings(system).Admin.Password))
        Some(Settings(system).Admin.Name)
      else None
    }
  }
}
agourlay/omnibus
src/main/scala/omnibus/configuration/Security.scala
Scala
apache-2.0
542
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.spark.sql.execution.streaming.sources

import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.datasources.v2.WriteToDataSourceV2
import org.apache.spark.sql.sources.v2.writer.streaming.StreamingWrite

/**
 * The logical plan for writing data to a micro-batch stream.
 *
 * Note that this logical plan does not have a corresponding physical plan, as it will be converted
 * to [[WriteToDataSourceV2]] with [[MicroBatchWrite]] before execution.
 */
case class WriteToMicroBatchDataSource(write: StreamingWrite, query: LogicalPlan)
  extends LogicalPlan {
  override def children: Seq[LogicalPlan] = Seq(query)
  override def output: Seq[Attribute] = Nil

  def createPlan(batchId: Long): WriteToDataSourceV2 = {
    WriteToDataSourceV2(new MicroBatchWrite(batchId, write), query)
  }
}
aosagie/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/sources/WriteToMicroBatchDataSource.scala
Scala
apache-2.0
1,710
package dk.itu.wsq.queue

class DuplicatingQueue[E: Manifest](val size: Int) extends WorkStealingQueue[E] {
  import scala.annotation.tailrec

  private val tasks: Array[Option[E]] = new Array[Option[E]](size)

  @volatile private var head: Int = 0
  @volatile private var tail: Int = 0

  private var tailMin = Integer.MAX_VALUE

  @tailrec
  final def push(e: E): Unit = {
    if(tail < (Math.min(tailMin, head) + size) && tail < Integer.MAX_VALUE/2) {
      tasks(tail % size) = Some(e)
      tail += 1
    } else {
      this.synchronized {
        if(head > tailMin) {
          head = tailMin
        }
        tailMin = Integer.MAX_VALUE
        val count = Math.max(0, tail - head)
        head = head % size
        tail = tail + count
      }
      push(e) // In the paper they run the task here.
    }
  }

  final def take(): Option[E] = {
    tail -= 1
    if(head <= Math.min(tailMin, tail)) {
      if(tailMin > tail) {
        tailMin = tail
      }
      val task = tasks(tail % size)
      tasks(tail % size) = None
      task
    } else {
      this.synchronized {
        if(head > tailMin) {
          head = tailMin
        }
        tailMin = Integer.MAX_VALUE
        if(head <= tail) {
          val task = tasks(tail % size)
          tasks(tail % size) = None
          task
        } else {
          tail += 1
          None
        }
      }
    }
  }

  final def steal(): Option[E] = {
    this.synchronized {
      if(head < tail) {
        val task = tasks(head % size)
        head += 1
        task
      } else {
        None
      }
    }
  }

  final def length: Int = tail - head
}
christianharrington/WorkStealingQueues
Scala/src/main/scala/dk/itu/wsq/queue/DuplicatingQueue.scala
Scala
unlicense
1,629
package org.scalajs.openui5.sap.ui.layout

import org.scalajs.openui5.sap.ui.core.{ControlSetters, ControlSettings, Control}
import org.scalajs.openui5.util.{Settings, SettingsMap, noSettings}

import scala.scalajs.js
import scala.scalajs.js.annotation.{ScalaJSDefined, JSName}

@ScalaJSDefined
trait BlockLayoutRowSettings extends ControlSettings
object BlockLayoutRowSettings extends BlockLayoutRowSettingsBuilder(noSettings)

class BlockLayoutRowSettingsBuilder(val dict: SettingsMap)
  extends Settings[BlockLayoutRowSettings, BlockLayoutRowSettingsBuilder](new BlockLayoutRowSettingsBuilder(_))
    with BlockLayoutRowSetters[BlockLayoutRowSettings, BlockLayoutRowSettingsBuilder]

trait BlockLayoutRowSetters[T <: js.Object, B <: Settings[T,_]] extends ControlSetters[T, B] {
  def scrollable(v: Boolean) = setting("scrollable", v)
  def content(v: js.Array[BlockLayoutCell]) = setting("content", v)
}

/** The BlockLayoutRow is used as an aggregation to the BlockLayout. It
  * aggregates Block Layout cells. The BlockLayoutRow has 2 rendering modes -
  * scrollable and non scrollable.
  *
  * @since 1.34
  */
@JSName("sap.ui.layout.BlockLayoutRow")
@js.native
class BlockLayoutRow(id: js.UndefOr[String] = js.native,
                     settings: js.UndefOr[BlockLayoutRowSettings]) extends Control {
  def this(id: String) = this(id, js.undefined)
  def this(settings: BlockLayoutRowSettings) = this(js.undefined, settings)
}
lastsys/scalajs-openui5
src/main/scala/org/scalajs/openui5/sap/ui/layout/BlockLayoutRow.scala
Scala
mit
1,442
/** * Licensed to Big Data Genomics (BDG) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The BDG licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.bdgenomics.adam.rdd.fragment import org.apache.spark.rdd.RDD import org.bdgenomics.adam.converters.AlignmentRecordConverter import org.bdgenomics.adam.instrumentation.Timers._ import org.bdgenomics.adam.models.{ RecordGroupDictionary, ReferenceRegion, ReferenceRegionSerializer, SequenceDictionary } import org.bdgenomics.adam.rdd.{ AvroReadGroupGenomicRDD, JavaSaveArgs } import org.bdgenomics.adam.rdd.read.{ AlignmentRecordRDD, MarkDuplicates } import org.bdgenomics.adam.serialization.AvroSerializer import org.bdgenomics.formats.avro._ import org.bdgenomics.utils.interval.array.{ IntervalArray, IntervalArraySerializer } import org.bdgenomics.utils.misc.Logging import scala.collection.JavaConversions._ import scala.reflect.ClassTag private[adam] case class FragmentArray( array: Array[(ReferenceRegion, Fragment)], maxIntervalWidth: Long) extends IntervalArray[ReferenceRegion, Fragment] { def duplicate(): IntervalArray[ReferenceRegion, Fragment] = { copy() } protected def replace(arr: Array[(ReferenceRegion, Fragment)], maxWidth: Long): IntervalArray[ReferenceRegion, Fragment] = { FragmentArray(arr, maxWidth) } } private[adam] class FragmentArraySerializer extends IntervalArraySerializer[ReferenceRegion, Fragment, FragmentArray] { protected val kSerializer = new ReferenceRegionSerializer protected val tSerializer = new AvroSerializer[Fragment] protected def builder(arr: Array[(ReferenceRegion, Fragment)], maxIntervalWidth: Long): FragmentArray = { FragmentArray(arr, maxIntervalWidth) } } /** * Helper singleton object for building FragmentRDDs. */ object FragmentRDD { /** * Creates a FragmentRDD where no record groups or sequence info are attached. * * @param rdd RDD of fragments. * @return Returns a FragmentRDD with an empty record group dictionary and sequence dictionary. */ private[rdd] def fromRdd(rdd: RDD[Fragment]): FragmentRDD = { FragmentRDD(rdd, SequenceDictionary.empty, RecordGroupDictionary.empty) } } /** * A genomic RDD that supports RDDs of Fragments. * * @param rdd The underlying RDD of Fragment data. * @param sequences The genomic sequences this data was aligned to, if any. * @param recordGroups The record groups these Fragments came from. */ case class FragmentRDD(rdd: RDD[Fragment], sequences: SequenceDictionary, recordGroups: RecordGroupDictionary) extends AvroReadGroupGenomicRDD[Fragment, FragmentRDD] { protected def buildTree(rdd: RDD[(ReferenceRegion, Fragment)])( implicit tTag: ClassTag[Fragment]): IntervalArray[ReferenceRegion, Fragment] = { IntervalArray(rdd, FragmentArray.apply(_, _)) } /** * Replaces the underlying RDD with a new RDD. * * @param newRdd The RDD to replace our underlying RDD with. * @return Returns a new FragmentRDD where the underlying RDD has been * swapped out. 
*/ protected def replaceRdd(newRdd: RDD[Fragment]): FragmentRDD = { copy(rdd = newRdd) } /** * Essentially, splits up the reads in a Fragment. * * @return Returns this RDD converted back to reads. */ def toReads(): AlignmentRecordRDD = { val converter = new AlignmentRecordConverter // convert the fragments to reads val newRdd = rdd.flatMap(converter.convertFragment) // are we aligned? AlignmentRecordRDD(newRdd, sequences, recordGroups) } /** * Marks reads as possible fragment duplicates. * * @return A new RDD where reads have the duplicate read flag set. Duplicate * reads are NOT filtered out. */ def markDuplicates(): FragmentRDD = MarkDuplicatesInDriver.time { replaceRdd(MarkDuplicates(this)) } /** * Saves Fragments to Parquet. * * @param filePath Path to save fragments at. */ def save(filePath: java.lang.String) { saveAsParquet(new JavaSaveArgs(filePath)) } /** * Returns the regions that this fragment covers. * * Since a fragment may be chimeric or multi-mapped, we do not try to compute * the hull of the underlying element. * * @param elem The Fragment to get the region from. * @return Returns all regions covered by this fragment. */ protected def getReferenceRegions(elem: Fragment): Seq[ReferenceRegion] = { elem.getAlignments .flatMap(r => ReferenceRegion.opt(r)) .toSeq } }
massie/adam
adam-core/src/main/scala/org/bdgenomics/adam/rdd/fragment/FragmentRDD.scala
Scala
apache-2.0
5,225
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.consumer import java.util.Properties import org.junit.Assert._ import kafka.common.MessageStreamsExistException import kafka.integration.KafkaServerTestHarness import kafka.javaapi.consumer.ConsumerRebalanceListener import kafka.message._ import kafka.serializer._ import kafka.server._ import kafka.utils.TestUtils._ import kafka.utils._ import org.apache.log4j.{Level, Logger} import org.junit.{Test, After, Before} import scala.collection._ @deprecated("This test has been deprecated and it will be removed in a future release", "0.10.0.0") class ZookeeperConsumerConnectorTest extends KafkaServerTestHarness with Logging { val RebalanceBackoffMs = 5000 var dirs : ZKGroupTopicDirs = null val numNodes = 2 val numParts = 2 val topic = "topic1" val overridingProps = new Properties() overridingProps.put(KafkaConfig.NumPartitionsProp, numParts.toString) override def generateConfigs = TestUtils.createBrokerConfigs(numNodes, zkConnect).map(KafkaConfig.fromProps(_, overridingProps)) val group = "group1" val consumer0 = "consumer0" val consumer1 = "consumer1" val consumer2 = "consumer2" val consumer3 = "consumer3" val nMessages = 2 @Before override def setUp() { super.setUp() dirs = new ZKGroupTopicDirs(group, topic) } @After override def tearDown() { super.tearDown() } @Test def testBasic() { val requestHandlerLogger = Logger.getLogger(classOf[KafkaRequestHandler]) requestHandlerLogger.setLevel(Level.FATAL) // test consumer timeout logic val consumerConfig0 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer0)) { override val consumerTimeoutMs = 200 } val zkConsumerConnector0 = new ZookeeperConsumerConnector(consumerConfig0, true) val topicMessageStreams0 = zkConsumerConnector0.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder()) // no messages to consume, we should hit timeout; // also the iterator should support re-entrant, so loop it twice for (_ <- 0 until 2) { try { getMessages(topicMessageStreams0, nMessages * 2) fail("should get an exception") } catch { case _: ConsumerTimeoutException => // this is ok } } zkConsumerConnector0.shutdown // send some messages to each broker val sentMessages1 = sendMessages(servers, topic, nMessages, 0) ++ sendMessages(servers, topic, nMessages, 1) // wait to make sure the topic and partition have a leader for the successful case waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 0) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 1) TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0) TestUtils.waitUntilMetadataIsPropagated(servers, topic, 1) // create a consumer val consumerConfig1 = new ConsumerConfig(TestUtils.createConsumerProperties(zkConnect, group, consumer1)) val zkConsumerConnector1 = new 
ZookeeperConsumerConnector(consumerConfig1, true) val topicMessageStreams1 = zkConsumerConnector1.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder()) val receivedMessages1 = getMessages(topicMessageStreams1, nMessages * 2) assertEquals(sentMessages1.sorted, receivedMessages1.sorted) // also check partition ownership val actual_1 = getZKChildrenValues(dirs.consumerOwnerDir) val expected_1 = List( ("0", "group1_consumer1-0"), ("1", "group1_consumer1-0")) assertEquals(expected_1, actual_1) // commit consumed offsets zkConsumerConnector1.commitOffsets(true) // create a consumer val consumerConfig2 = new ConsumerConfig(TestUtils.createConsumerProperties(zkConnect, group, consumer2)) { override val rebalanceBackoffMs = RebalanceBackoffMs } val zkConsumerConnector2 = new ZookeeperConsumerConnector(consumerConfig2, true) val topicMessageStreams2 = zkConsumerConnector2.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder()) // send some messages to each broker val sentMessages2 = sendMessages(servers, topic, nMessages, 0) ++ sendMessages(servers, topic, nMessages, 1) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 0) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 1) val receivedMessages2 = getMessages(topicMessageStreams1, nMessages) ++ getMessages(topicMessageStreams2, nMessages) assertEquals(sentMessages2.sorted, receivedMessages2.sorted) // also check partition ownership val actual_2 = getZKChildrenValues(dirs.consumerOwnerDir) val expected_2 = List( ("0", "group1_consumer1-0"), ("1", "group1_consumer2-0")) assertEquals(expected_2, actual_2) // create a consumer with empty map val consumerConfig3 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer3)) val zkConsumerConnector3 = new ZookeeperConsumerConnector(consumerConfig3, true) zkConsumerConnector3.createMessageStreams(new mutable.HashMap[String, Int]()) // send some messages to each broker val sentMessages3 = sendMessages(servers, topic, nMessages, 0) ++ sendMessages(servers, topic, nMessages, 1) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 0) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 1) val receivedMessages3 = getMessages(topicMessageStreams1, nMessages) ++ getMessages(topicMessageStreams2, nMessages) assertEquals(sentMessages3.sorted, receivedMessages3.sorted) // also check partition ownership val actual_3 = getZKChildrenValues(dirs.consumerOwnerDir) assertEquals(expected_2, actual_3) // call createMesssageStreams twice should throw MessageStreamsExistException try { zkConsumerConnector3.createMessageStreams(new mutable.HashMap[String, Int]()) fail("Should fail with MessageStreamsExistException") } catch { case _: MessageStreamsExistException => // expected } zkConsumerConnector1.shutdown zkConsumerConnector2.shutdown zkConsumerConnector3.shutdown info("all consumer connectors stopped") requestHandlerLogger.setLevel(Level.ERROR) } @Test def testCompression() { val requestHandlerLogger = Logger.getLogger(classOf[kafka.server.KafkaRequestHandler]) requestHandlerLogger.setLevel(Level.FATAL) // send some messages to each broker val sentMessages1 = sendMessages(servers, topic, nMessages, 0, GZIPCompressionCodec) ++ sendMessages(servers, topic, nMessages, 1, GZIPCompressionCodec) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 0) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 1) TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0) TestUtils.waitUntilMetadataIsPropagated(servers, topic, 1) // create a consumer val consumerConfig1 = new 
ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer1)) val zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig1, true) val topicMessageStreams1 = zkConsumerConnector1.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder()) val receivedMessages1 = getMessages(topicMessageStreams1, nMessages * 2) assertEquals(sentMessages1.sorted, receivedMessages1.sorted) // also check partition ownership val actual_1 = getZKChildrenValues(dirs.consumerOwnerDir) val expected_1 = List( ("0", "group1_consumer1-0"), ("1", "group1_consumer1-0")) assertEquals(expected_1, actual_1) // commit consumed offsets zkConsumerConnector1.commitOffsets(true) // create a consumer val consumerConfig2 = new ConsumerConfig(TestUtils.createConsumerProperties(zkConnect, group, consumer2)) { override val rebalanceBackoffMs = RebalanceBackoffMs } val zkConsumerConnector2 = new ZookeeperConsumerConnector(consumerConfig2, true) val topicMessageStreams2 = zkConsumerConnector2.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder()) // send some messages to each broker val sentMessages2 = sendMessages(servers, topic, nMessages, 0, GZIPCompressionCodec) ++ sendMessages(servers, topic, nMessages, 1, GZIPCompressionCodec) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 0) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 1) val receivedMessages2 = getMessages(topicMessageStreams1, nMessages) ++ getMessages(topicMessageStreams2, nMessages) assertEquals(sentMessages2.sorted, receivedMessages2.sorted) // also check partition ownership val actual_2 = getZKChildrenValues(dirs.consumerOwnerDir) val expected_2 = List( ("0", "group1_consumer1-0"), ("1", "group1_consumer2-0")) assertEquals(expected_2, actual_2) // create a consumer with empty map val consumerConfig3 = new ConsumerConfig( TestUtils.createConsumerProperties(zkConnect, group, consumer3)) val zkConsumerConnector3 = new ZookeeperConsumerConnector(consumerConfig3, true) zkConsumerConnector3.createMessageStreams(new mutable.HashMap[String, Int](), new StringDecoder(), new StringDecoder()) // send some messages to each broker val sentMessages3 = sendMessages(servers, topic, nMessages, 0, GZIPCompressionCodec) ++ sendMessages(servers, topic, nMessages, 1, GZIPCompressionCodec) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 0) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 1) val receivedMessages3 = getMessages(topicMessageStreams1, nMessages) ++ getMessages(topicMessageStreams2, nMessages) assertEquals(sentMessages3.sorted, receivedMessages3.sorted) // also check partition ownership val actual_3 = getZKChildrenValues(dirs.consumerOwnerDir) assertEquals(expected_2, actual_3) zkConsumerConnector1.shutdown zkConsumerConnector2.shutdown zkConsumerConnector3.shutdown info("all consumer connectors stopped") requestHandlerLogger.setLevel(Level.ERROR) } @Test def testCompressionSetConsumption() { // send some messages to each broker val sentMessages = sendMessages(servers, topic, 200, 0, DefaultCompressionCodec) ++ sendMessages(servers, topic, 200, 1, DefaultCompressionCodec) TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0) TestUtils.waitUntilMetadataIsPropagated(servers, topic, 1) val consumerConfig1 = new ConsumerConfig(TestUtils.createConsumerProperties(zkConnect, group, consumer0)) val zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig1, true) val topicMessageStreams1 = zkConsumerConnector1.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder()) 
val receivedMessages = getMessages(topicMessageStreams1, 400) assertEquals(sentMessages.sorted, receivedMessages.sorted) // also check partition ownership val actual_2 = getZKChildrenValues(dirs.consumerOwnerDir) val expected_2 = List( ("0", "group1_consumer0-0"), ("1", "group1_consumer0-0")) assertEquals(expected_2, actual_2) zkConsumerConnector1.shutdown } @Test def testConsumerDecoder() { val requestHandlerLogger = Logger.getLogger(classOf[kafka.server.KafkaRequestHandler]) requestHandlerLogger.setLevel(Level.FATAL) // send some messages to each broker val sentMessages = sendMessages(servers, topic, nMessages, 0, NoCompressionCodec) ++ sendMessages(servers, topic, nMessages, 1, NoCompressionCodec) TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0) TestUtils.waitUntilMetadataIsPropagated(servers, topic, 1) val consumerConfig = new ConsumerConfig(TestUtils.createConsumerProperties(zkConnect, group, consumer1)) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 0) waitUntilLeaderIsElectedOrChanged(zkUtils, topic, 1) val zkConsumerConnector = new ZookeeperConsumerConnector(consumerConfig, true) val topicMessageStreams = zkConsumerConnector.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder()) var receivedMessages: List[String] = Nil for (messageStreams <- topicMessageStreams.values) { for (messageStream <- messageStreams) { val iterator = messageStream.iterator for (_ <- 0 until nMessages * 2) { assertTrue(iterator.hasNext()) val message = iterator.next().message receivedMessages ::= message debug("received message: " + message) } } } assertEquals(sentMessages.sorted, receivedMessages.sorted) zkConsumerConnector.shutdown() requestHandlerLogger.setLevel(Level.ERROR) } @Test def testLeaderSelectionForPartition() { val zkUtils = ZkUtils(zkConnect, 6000, 30000, false) // create topic topic1 with 1 partition on broker 0 createTopic(zkUtils, topic, numPartitions = 1, replicationFactor = 1, servers = servers) // send some messages to each broker val sentMessages1 = sendMessages(servers, topic, nMessages) // create a consumer val consumerConfig1 = new ConsumerConfig(TestUtils.createConsumerProperties(zkConnect, group, consumer1)) val zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig1, true) val topicMessageStreams1 = zkConsumerConnector1.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder()) val topicRegistry = zkConsumerConnector1.getTopicRegistry assertEquals(1, topicRegistry.map(r => r._1).size) assertEquals(topic, topicRegistry.map(r => r._1).head) val topicsAndPartitionsInRegistry = topicRegistry.map(r => (r._1, r._2.map(p => p._2))) val brokerPartition = topicsAndPartitionsInRegistry.head._2.head assertEquals(0, brokerPartition.partitionId) // also check partition ownership val actual_1 = getZKChildrenValues(dirs.consumerOwnerDir) val expected_1 = List( ("0", "group1_consumer1-0")) assertEquals(expected_1, actual_1) val receivedMessages1 = getMessages(topicMessageStreams1, nMessages) assertEquals(sentMessages1, receivedMessages1) zkConsumerConnector1.shutdown() zkUtils.close() } @Test def testConsumerRebalanceListener() { // Send messages to create topic sendMessages(servers, topic, nMessages, 0) sendMessages(servers, topic, nMessages, 1) val consumerConfig1 = new ConsumerConfig(TestUtils.createConsumerProperties(zkConnect, group, consumer1)) val zkConsumerConnector1 = new ZookeeperConsumerConnector(consumerConfig1, true) // Register consumer rebalance listener val rebalanceListener1 = new 
TestConsumerRebalanceListener() zkConsumerConnector1.setConsumerRebalanceListener(rebalanceListener1) val topicMessageStreams1 = zkConsumerConnector1.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder()) // Check if rebalance listener is fired assertEquals(true, rebalanceListener1.beforeReleasingPartitionsCalled) assertEquals(true, rebalanceListener1.beforeStartingFetchersCalled) assertEquals(null, rebalanceListener1.partitionOwnership.get(topic)) // Check if partition assignment in rebalance listener is correct assertEquals("group1_consumer1", rebalanceListener1.globalPartitionOwnership.get(topic).get(0).consumer) assertEquals("group1_consumer1", rebalanceListener1.globalPartitionOwnership.get(topic).get(1).consumer) assertEquals(0, rebalanceListener1.globalPartitionOwnership.get(topic).get(0).threadId) assertEquals(0, rebalanceListener1.globalPartitionOwnership.get(topic).get(1).threadId) assertEquals("group1_consumer1", rebalanceListener1.consumerId) // reset the flag rebalanceListener1.beforeReleasingPartitionsCalled = false rebalanceListener1.beforeStartingFetchersCalled = false val actual_1 = getZKChildrenValues(dirs.consumerOwnerDir) val expected_1 = List(("0", "group1_consumer1-0"), ("1", "group1_consumer1-0")) assertEquals(expected_1, actual_1) val consumerConfig2 = new ConsumerConfig(TestUtils.createConsumerProperties(zkConnect, group, consumer2)) val zkConsumerConnector2 = new ZookeeperConsumerConnector(consumerConfig2, true) // Register consumer rebalance listener val rebalanceListener2 = new TestConsumerRebalanceListener() zkConsumerConnector2.setConsumerRebalanceListener(rebalanceListener2) zkConsumerConnector2.createMessageStreams(Map(topic -> 1), new StringDecoder(), new StringDecoder()) // Consume messages from consumer 1 to make sure it has finished rebalance getMessages(topicMessageStreams1, nMessages) val actual_2 = getZKChildrenValues(dirs.consumerOwnerDir) val expected_2 = List(("0", "group1_consumer1-0"), ("1", "group1_consumer2-0")) assertEquals(expected_2, actual_2) // Check if rebalance listener is fired assertEquals(true, rebalanceListener1.beforeReleasingPartitionsCalled) assertEquals(true, rebalanceListener1.beforeStartingFetchersCalled) assertEquals(Set[Int](0, 1), rebalanceListener1.partitionOwnership.get(topic)) // Check if global partition ownership in rebalance listener is correct assertEquals("group1_consumer1", rebalanceListener1.globalPartitionOwnership.get(topic).get(0).consumer) assertEquals("group1_consumer2", rebalanceListener1.globalPartitionOwnership.get(topic).get(1).consumer) assertEquals(0, rebalanceListener1.globalPartitionOwnership.get(topic).get(0).threadId) assertEquals(0, rebalanceListener1.globalPartitionOwnership.get(topic).get(1).threadId) assertEquals("group1_consumer1", rebalanceListener1.consumerId) assertEquals("group1_consumer2", rebalanceListener2.consumerId) assertEquals(rebalanceListener1.globalPartitionOwnership, rebalanceListener2.globalPartitionOwnership) zkConsumerConnector1.shutdown() zkConsumerConnector2.shutdown() } def getZKChildrenValues(path : String) : Seq[Tuple2[String,String]] = { val children = zkUtils.getChildren(path).sorted children.map(partition => (partition, zkUtils.zkClient.readData(path + "/" + partition).asInstanceOf[String])) } private class TestConsumerRebalanceListener extends ConsumerRebalanceListener { var beforeReleasingPartitionsCalled: Boolean = false var beforeStartingFetchersCalled: Boolean = false var consumerId: String = "" var partitionOwnership: java.util.Map[String, 
java.util.Set[java.lang.Integer]] = null var globalPartitionOwnership: java.util.Map[String, java.util.Map[java.lang.Integer, ConsumerThreadId]] = null override def beforeReleasingPartitions(partitionOwnership: java.util.Map[String, java.util.Set[java.lang.Integer]]) { beforeReleasingPartitionsCalled = true this.partitionOwnership = partitionOwnership } override def beforeStartingFetchers(consumerId: String, globalPartitionOwnership: java.util.Map[String, java.util.Map[java.lang.Integer, ConsumerThreadId]]) { beforeStartingFetchersCalled = true this.consumerId = consumerId this.globalPartitionOwnership = globalPartitionOwnership } } }
themarkypantz/kafka
core/src/test/scala/unit/kafka/consumer/ZookeeperConsumerConnectorTest.scala
Scala
apache-2.0
20,095
package controllers import models.Notification import play.api.mvc.{ Action, Controller, Request } import play.api.data._ import play.api.data.Forms._ import play.api.Logger import play.api.i18n.{ I18nSupport, MessagesApi } import be.objectify.deadbolt.scala.ActionBuilders import javax.inject.Inject import scala.concurrent.Future import scala.concurrent.ExecutionContext.Implicits.global import daos.UserDao import audits.EventDao import actions.Actions import crypto.HashService import resources.ImageBlockLoader /** * User: Eduardo Barrientos * Date: 25/09/16 * Time: 06:42 AM */ class LoginController @Inject() ( val actionBuilder: ActionBuilders, val userDao: UserDao, val eventDao: EventDao, val actions: Actions, val hashService: HashService, val imageLoader: ImageBlockLoader, val messagesApi: MessagesApi ) extends Controller with I18nSupport { import LoginController.loginForm // Go to login if there's no session, go to home if there is def loginPage = Action.async { implicit req ⇒ isLoggedIn(req).map(_ match { case true ⇒ Redirect(routes.HomeController.index) case false ⇒ Ok(views.html.security.login(loginForm, imageLoader.load())) }) } /** Check whether there's a user logged in */ private[this] def isLoggedIn(req: Request[_]): Future[Boolean] = { req.session.get("login").fold(Future.successful(false)) { login ⇒ Future.successful( userDao.byLogin(login).fold(false)(u ⇒ actions.isStillIn(u)) ) } } /** Handle the user logging in */ def login = Action.async { implicit request ⇒ Logger.info(messagesApi("LoginController.login.info")) loginForm.bindFromRequest.fold( formWithErrors ⇒ Future(BadRequest(views.html.security.login(formWithErrors, imageLoader.load()))), userData ⇒ { val (login, pwd) = userData authenticate(login, pwd) flatMap (valid ⇒ if (valid) { eventDao.write(messagesApi("LoginController.login.aud.success", login)) userDao.updateConnected(login) Future.successful(Redirect(routes.HomeController.index).withSession("login" → login)) } else { eventDao.write(messagesApi("LoginController.login.aud.error", login)) implicit val errors = Notification.error(messagesApi("LoginController.login.authError", login)) Future.successful(BadRequest(views.html.security.login(loginForm, imageLoader.load()))) }) } ) } /** Terminar la sesion del usuario */ def logout = Action { request ⇒ eventDao.write(messagesApi("LoginController.logout.aud", request.session("login"))) Redirect(routes.LoginController.loginPage()).withNewSession } /** * Authenticate the user * @param login User's login * @param password User's password * @return Whether the user/password combination matches a user in the system. */ private[this] def authenticate(login: String, password: String): Future[Boolean] = { Future.successful(userDao.byLogin(login).map(u ⇒ u.password == hashService.hashString(password, u.salt)).getOrElse(false)) } } object LoginController { val loginForm: Form[(String, String)] = Form( tuple( "login" → nonEmptyText, "password" → nonEmptyText ) ) }
kdoomsday/kaminalapp
app/controllers/LoginController.scala
Scala
mit
3,347
object SCL2806 {
  trait A
  trait B
  class C extends A with B
  var z: A = null
  new C match {
    case x: B => /*start*/x/*end*/
  }
}
//SCL2806.C
ilinum/intellij-scala
testdata/typeInference/bugs5/SCL2806.scala
Scala
apache-2.0
156
/* * This file is part of Kiama. * * Copyright (C) 2011-2015 Anthony M Sloane, Macquarie University. * * Kiama is free software: you can redistribute it and/or modify it under * the terms of the GNU Lesser General Public License as published by the * Free Software Foundation, either version 3 of the License, or (at your * option) any later version. * * Kiama is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for * more details. * * You should have received a copy of the GNU Lesser General Public License * along with Kiama. (See files COPYING and COPYING.LESSER.) If not, see * <http://www.gnu.org/licenses/>. */ package org.kiama package example.oberon0 package L0 /** * Parsers for L0 language. */ trait SyntaxAnalyser extends base.SyntaxAnalyser { import base.source.{Declaration, Expression, Statement} import scala.language.postfixOps import source.{AddExp, AndExp, Assignment, ConstDecl, DivExp, EqExp, GeExp, GtExp, IdnExp, IntExp, LeExp, LtExp, ModExp, MulExp, NamedType, NeExp, NegExp, NotExp, OrExp, SubExp, TypeDecl, TypeDef, VarDecl} override def declarationsDef : PackratParser[List[Declaration]] = (constdeclsection?) ~ (typedeclsection?) ~ (vardeclsection?) ^^ { case oc ~ ot ~ ov => List (oc, ot, ov).flatten.flatten } lazy val constdeclsection = "CONST" ~> rep (constdecl) lazy val constdecl = (idndef <~ "=") ~ (expression <~ ";") ^^ ConstDecl lazy val typedeclsection = "TYPE" ~> rep (typedecl) lazy val typedecl = (idndef <~ "=") ~ (typedef <~ ";") ^^ TypeDecl lazy val vardeclsection = "VAR" ~> rep (vardecl) lazy val vardecl = (idndeflist <~ ":") ~ (typedef <~ ";") ^^ VarDecl lazy val idndeflist = rep1sep (idndef, ",") lazy val typedef = typedefDef def typedefDef : PackratParser[TypeDef] = namedtypedef lazy val namedtypedef = idnuse ^^ NamedType override def statementDef : PackratParser[Statement] = assignment | super.statementDef lazy val assignment = lhs ~ (":=" ~> expression) ^^ Assignment lazy val lhs = lhsDef def lhsDef : PackratParser[Expression] = idnuse ^^ IdnExp lazy val expression = simpexp ~ ("=" ~> simpexp) ^^ EqExp | simpexp ~ ("#" ~> simpexp) ^^ NeExp | simpexp ~ ("<" ~> simpexp) ^^ LtExp | simpexp ~ ("<=" ~> simpexp) ^^ LeExp | simpexp ~ (">" ~> simpexp) ^^ GtExp | simpexp ~ (">=" ~> simpexp) ^^ GeExp | simpexp lazy val simpexp : PackratParser[Expression] = simpexp ~ ("+" ~> term) ^^ AddExp | simpexp ~ ("-" ~> term) ^^ SubExp | simpexp ~ ("OR" ~> term) ^^ OrExp | term lazy val term : PackratParser[Expression] = term ~ ("*" ~> factor) ^^ MulExp | term ~ ("DIV" ~> factor) ^^ DivExp | term ~ ("MOD" ~> factor) ^^ ModExp | term ~ ("&" ~> factor) ^^ AndExp | factor lazy val factor : PackratParser[Expression] = intexp | lhs | "+" ~> factor | "-" ~> factor ^^ NegExp | "~" ~> factor ^^ NotExp | "(" ~> expression <~ ")" lazy val intexp = constrainedInt ^^ IntExp override def keywordStrings : List[String] = "CONST" +: "DIV" +: "MOD" +: "OR" +: "TYPE" +: "VAR" +: super.keywordStrings }
joaoraf/kiama
library/src/org/kiama/example/oberon0/L0/SyntaxAnalyser.scala
Scala
gpl-3.0
3,629
// BMI.scala

val kg = 72.57  // 160 lbs
val heightM = 1.727  // 68 inches
val bmi = kg/(heightM * heightM)

if(bmi < 18.5) println("Underweight")
else if(bmi < 25) println("Normal weight")
else println("Overweight")
P7h/ScalaPlayground
Atomic Scala/atomic-scala-examples/examples/06_EvaluationOrder/BMI.scala
Scala
apache-2.0
215
package ch.ltouroumov.modularmachines.common.init

import ch.ltouroumov.modularmachines.common.tileentity.controller.MachineControllerEntity
import ch.ltouroumov.modularmachines.common.tileentity.modules.{MachineModuleCoil, MachineModuleSmelter}
import ch.ltouroumov.modularmachines.common.tileentity.ports.{MachinePortItems, MachinePortFluid, MachinePortPower}
import ch.ltouroumov.modularmachines.common.tileentity.{ModuleType, PortType}
import ch.ltouroumov.modularmachines.common.blocks
import cpw.mods.fml.common.registry.GameRegistry

object Blocks extends ObjectRegistry {
  val machineFrame = new blocks.MachineFrame
  val machineController = new blocks.MachineController
  val machineGlass = new blocks.MachineGlass

  val machinePort_Items = new blocks.MachinePort(PortType.Items)
  val machinePort_Power = new blocks.MachinePort(PortType.Power)
  val machinePort_Fluid = new blocks.MachinePort(PortType.Fluid)

  val machineModule_Coil = new blocks.MachineModule(ModuleType.Coil)
  val machineModule_Smelter = new blocks.MachineModule(ModuleType.Smelter)

  def registerBlocks() {
    GameRegistry.registerBlock(machineFrame, "blockMachineFrame")
    GameRegistry.registerBlock(machineController, "blockMachineController")
    GameRegistry.registerBlock(machineGlass, "blockMachineGlass")

    GameRegistry.registerBlock(machinePort_Items, "blockMachinePort_Items")
    GameRegistry.registerBlock(machinePort_Power, "blockMachinePort_Power")
    GameRegistry.registerBlock(machinePort_Fluid, "blockMachinePort_Fluid")

    GameRegistry.registerBlock(machineModule_Coil, "blockMachinePort_Coil")
    GameRegistry.registerBlock(machineModule_Smelter, "blockMachinePort_Smelter")
  }

  def registerTileEntities() {
    GameRegistry.registerTileEntity(classOf[MachineControllerEntity], "machineController")

    GameRegistry.registerTileEntity(classOf[MachinePortItems], "machinePortItems")
    GameRegistry.registerTileEntity(classOf[MachinePortPower], "machinePortPower")
    GameRegistry.registerTileEntity(classOf[MachinePortFluid], "machinePortFluid")

    GameRegistry.registerTileEntity(classOf[MachineModuleSmelter], "machineModuleSmelter")
    GameRegistry.registerTileEntity(classOf[MachineModuleCoil], "machineModuleCoil")
  }

  registerCallback(1, registerBlocks)
  registerCallback(2, registerTileEntities)
}
ltouroumov/modular-machines
src/main/scala/ch/ltouroumov/modularmachines/common/init/Blocks.scala
Scala
gpl-2.0
2,325
package inloopio.math.indicator

import java.awt.Color

/**
 *
 * @author Caoyuan Deng
 */
trait Plottable {
  def plot: Plot

  def getColor(idx: Int): Color
  def setColor(idx: Int, color: Color)

  def layer: Int
  def layer_=(order: Int)
}
dcaoyuan/inloopio-libs
inloopio-math/src/main/scala/inloopio/math/indicator/Plottable.scala
Scala
bsd-3-clause
246
package ulang.syntax

import ulang.source._

case class Defs(data: Map[Con, Set[Op]], syn: Map[Con, (List[TypeParam], Type)], axioms: List[Expr]) {
  def +(con: Con, params: List[TypeParam], rhs: Type) = copy(syn = syn + (con -> (params, rhs)))
  def +(con: Con, constrs: Set[Op]) = copy(data = data + (con -> constrs))
  // def +(op: Op, rhs: Expr) = copy(ops = ops + (op -> rhs))
  def +(ax: Expr) = copy(axioms = axioms :+ ax)
  // def ++(axs: List[Expr]) = copy(axioms = axioms ++ axs)

  def ++(that: Defs) = Defs(this.data ++ that.data, this.syn ++ that.syn, this.axioms ++ that.axioms)

  lazy val constrs = data.flatMap(_._2)

  lazy val ops: Map[Op, Expr] = {
    import ulang.syntax.predefined.pred

    val dfs = axioms map {
      case pred.Eq(FlatApp(op: Op, args), rhs) =>
        (op, args, rhs)
    }
    val grouped = dfs.groupBy(_._1)
    val folded = grouped.map {
      case (op, axs) =>
        val cases = axs map { case (_, args, rhs) => Case(args, rhs) }
        (op, Match(cases))
    }
    folded.toMap
  }

  private def _synonym(typ: TypeApp): Type = syn get typ.con match {
    case Some((params, rhs)) =>
      val theta = (params zip typ.args).toMap
      rhs subst theta
    case None =>
      typ
  }

  def synonym(typ: Type): Type = typ match {
    case TypeApp(name, args) =>
      val res = _synonym(TypeApp(name, args map synonym))
      if (res != typ) synonym(res) /* until fixpoint */
      else typ
    /*case TypeInst(orig, gen) =>
      TypeInst(orig, synonym(gen)) */
    case _ =>
      typ
  }

  override def toString = {
    val ss = syn map { case (con, (params, rhs)) => "type " + con + params.mkString(" ", " ", " = ") + rhs }
    val ds = data map { case (con, ops) => "data " + con + ops.mkString(" = ", " | ", "") }
    val os = axioms map { _.toString }
    (ss ++ ds ++ os).mkString("\n")
  }
}

object Defs {
  val empty = Defs(Map.empty[Con, Set[Op]], Map.empty[Con, (List[TypeParam], Type)], Nil)
  val default = empty

  def apply(data: List[(Con, Set[Op])], syn: List[(Con, (List[TypeParam], Type))], axioms: List[Expr]): Defs = {
    Defs(data.toMap, syn.toMap, axioms)
  }
}
gernst/ulang
src/ulang/syntax/Defs.scala
Scala
mit
2,159
package controllers import anorm._ import views._ import models._ import utils.DoubleFormat._ import play.api._ import play.api.mvc._ import play.api.data._ import play.api.data.Forms._ /** * Manage and control container, monitors, users. */ object Application extends Controller with Secured { val loginForm = Form( tuple( "email" -> text, "password" -> nonEmptyText ) verifying ("Invalid email or password", result => result match { case (email, password) => User.authenticate(email, password).isDefined }) ) /* * serve user with login form */ def login = Action { implicit request => Ok(html.login(loginForm)) } /* * Verify user's login credential submission is valid. If not, redirect * to login page indicating as much */ def authenticate = Action { implicit request => loginForm.bindFromRequest.fold( formWithErrors => BadRequest(html.login(formWithErrors)), user => Redirect(routes.ContainerController.index).withSession( "email" -> user._1 ) ) } /* * Log the user out and return them to the login page */ def logout = Action { Redirect(routes.Application.login).withNewSession.flashing( "success" -> "You've been logged out" ) } /* * Effectively strips the trailing '/' from a URL, for example if a user * tries to go to /containers/ since no such route exists redirect them * to /containers */ def untrail(path: String) = Action { MovedPermanently("/" +path) } } /* * Provide security features */ trait Secured { /* * Retrieve the connected user email. */ private def username(request: RequestHeader) = request.session.get("email") /* * Redirect to login if the user in not authorized. */ private def onUnauthorized(request: RequestHeader) = Results.Redirect(routes.Application.login) /* * Redirect user to requested action with certified authentication credentials */ def withAuth(f: => String => Request[AnyContent] => Result) = { Security.Authenticated(username, onUnauthorized) { user => Action(request => f(user)(request)) } } /* * Perform a given request verifying the user is logged in */ def IsAuthenticated(f: => String => Request[AnyContent] => Result) = { Security.Authenticated(username, onUnauthorized) { user => Action { request => f(user)(request) } } } }
cpeck1/tempmonServer
app/controllers/Application.scala
Scala
bsd-2-clause
2,442
package charactor.core.model.objects.charactor.attributes

import charactor.core.model.objects.charactor.parts.MoveDecider
import charactor.core.model.objects.charactor.Charactor
import actors.OutputChannel
import charactor.core.model.objects.Positionable
import charactor.core.messages.{MoveMessage, MoveAnywhereMessage}

class BlindMoveDecider extends MoveDecider {
  override def decide(owner: Charactor, sender: OutputChannel[Any], nearest: List[(Positionable, Double)], timeScale: Double) = {
    if (nearest.isEmpty)
      sender ! new MoveAnywhereMessage(owner.speed.Value*timeScale);
    else {
      val target = nearest.minBy(_._2)._1;
      sender ! new MoveMessage(target, owner.speed.Value*timeScale);
    }
  }
}

class BlindAttribute extends Attribute(List(new BlindMoveDecider)) {
}
PiotrTrzpil/charactor
src/charactor/core/model/objects/charactor/attributes/BlindAttribute.scala
Scala
apache-2.0
786
package no.netcompany.testdatagen.recordgen

// Copyright (C) 2014 Lars Reed -- GNU GPL 2.0 -- see LICENSE.txt

import java.io.{BufferedWriter, FileOutputStream, IOException, OutputStreamWriter}
import java.nio.charset.Charset

import no.netcompany.testdatagen.BareGenerator

/**
 * This generator is typically the end of a chain, and called implicitly
 * by either toFile or appendToFile from a record generator,
 * but may also be called through the apply method.
 */
class ToFile[T](fileName: String,
                generator: BareGenerator[T],
                append: Boolean,
                charSet: String) extends BareGenerator[T] {

  protected var prefix: List[String] = List[String]()
  protected var suffix: List[String] = List[String]()

  def prepend(s: String): this.type = {
    prefix ::= s
    this
  }

  def append(s: String): this.type = {
    suffix ::= s
    this
  }

  /** Writes a list of strings to a named file. */
  protected def writeToFile(list: List[String]) {
    val writer = new OutputStreamWriter(
      new FileOutputStream(fileName, append),
      Charset.forName(charSet).newEncoder())
    val bufWriter = new BufferedWriter(writer)
    try {
      def out(s: String) {
        bufWriter.append(s)
        bufWriter.newLine
      }
      prefix.reverse.foreach { s => out(s) }
      list.foreach { s => out(s) }
      suffix.reverse.foreach { s => out(s) }
    } catch {
      case e: IOException => println("Error: " + e)
    } finally bufWriter.close
  }

  override def get(n: Int): List[T] = {
    val res = generator.get(n)
    writeToFile(res map { _.toString })
    res
  }

  override def getStrings(n: Int): List[String] = {
    val res = generator.getStrings(n)
    writeToFile(res)
    res
  }

  // Nicer name...
  def write(n: Int, strings: Boolean = true): List[Any] =
    if (strings) getStrings(n) else get(n)
}

object ToFile {
  val defaultCharSet = "ISO-8859-1"

  def apply[T](fileName: String,
               generator: BareGenerator[T],
               append: Boolean = false,
               charSet: String = defaultCharSet): ToFile[T] =
    new ToFile(fileName, generator, append, charSet)
}
lre-mesan/testdata
src/main/scala/no/netcompany/testdatagen/recordgen/ToFile.scala
Scala
gpl-2.0
2,151
/* Copyright 2014 White Label Personal Clouds Pty Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package me.welcomer.framework.picocontainer.repository

import scala.concurrent.ExecutionContext
import scala.concurrent.Future

import me.welcomer.framework.models.Pico
import play.api.libs.iteratee.Iteratee
import reactivemongo.bson.BSONDocument

trait PicoContainerRepositoryComponentMockImpl extends PicoContainerRepositoryComponent {
  override protected def _picoContainerRepository: PicoContainerRepository = new PicoContainerRepositoryMockImpl

  class PicoContainerRepositoryMockImpl extends PicoContainerRepository {

    def save(model: Pico)(implicit ec: ExecutionContext): Future[Unit] = {
      ???
    }

    def findOne(keyValue: (String, String))(implicit ec: ExecutionContext): Future[Option[Pico]] = {
      ???
    }

    def enumerate[B](query: BSONDocument, processor: Iteratee[Pico, B])(implicit ec: ExecutionContext): Future[Iteratee[Pico, B]] = {
      ???
    }
  }
}
welcomer/framework
src/test/scala/me/welcomer/framework/picocontainer/repository/PicoContainerRepositoryComponentMockImpl.scala
Scala
apache-2.0
1,520
/**
 * Copyright (C) 2015 DANS - Data Archiving and Networked Services ([email protected])
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package nl.knaw.dans.easy.pid.fixture

import better.files.File
import org.scalatest.matchers.{ MatchResult, Matcher }

/** Does not dump the full file but just the searched content if it is not found.
 *
 * See also <a href="http://www.scalatest.org/user_guide/using_matchers#usingCustomMatchers">CustomMatchers</a>
 */
trait CustomMatchers {
  class ContentMatcher(content: String) extends Matcher[File] {
    override def apply(left: File): MatchResult = {
      def trimLines(s: String): String = s.split("\n").map(_.trim).mkString("\n")
      MatchResult(
        trimLines(left.contentAsString).contains(trimLines(content)),
        s"$left did not contain: $content",
        s"$left contains $content"
      )
    }
  }
  def containTrimmed(content: String) = new ContentMatcher(content)
}
DANS-KNAW/easy-pid-generator
src/test/scala/nl/knaw/dans/easy/pid/fixture/CustomMatchers.scala
Scala
apache-2.0
1,447
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.integration.torch import com.intel.analytics.bigdl.nn.MarginRankingCriterion import com.intel.analytics.bigdl.tensor.{Storage, Tensor} import com.intel.analytics.bigdl.utils.Table import scala.collection.mutable.HashMap import scala.util.Random @com.intel.analytics.bigdl.tags.Serial class MarginRankingCriterionSpec extends TorchSpec { "A MarginRankingCriterion " should "generate correct output and grad with only value" in { torchCheck() val mse = new MarginRankingCriterion[Double]() val input1 = Tensor[Double](5).apply1(e => Random.nextDouble()) val input2 = Tensor[Double](5).apply1(e => Random.nextDouble()) val input = new Table() input(1.toDouble) = input1 input(2.toDouble) = input2 val target = new Table() val target1 = Tensor[Double](Storage(Array(-1.0))) target(1.toDouble) = target1 val start = System.nanoTime() val output = mse.forward(input, target) val gradInput = mse.backward(input, target) val end = System.nanoTime() val scalaTime = end - start val code = "mse = nn.MarginRankingCriterion()\\n" + "output = mse:forward(input,-1)\\n" + "gradInput = mse:backward(input,-1)" val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "target" -> target), Array("output", "gradInput")) val luaOutput1 = torchResult("output").asInstanceOf[Double] val luaOutput2 = torchResult("gradInput").asInstanceOf[Table] luaOutput1 should be (output) gradInput should equal (luaOutput2) println("Test case : MarginRankingCriterion, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s") } "A MarginRankingCriterion " should "generate correct output and grad with Tensor target" in { torchCheck() val mse = new MarginRankingCriterion[Double]() val input1 = Tensor[Double](5).apply1(e => Random.nextDouble()) val input2 = Tensor[Double](5).apply1(e => Random.nextDouble()) val input = new Table() input(1.toDouble) = input1 input(2.toDouble) = input2 val target = new Table() val target1 = Tensor[Double](5).apply1(e => Random.nextDouble()) target(1.toDouble) = target1 val start = System.nanoTime() val output = mse.forward(input, target) val gradInput = mse.backward(input, target) val end = System.nanoTime() val scalaTime = end - start val code = "mse = nn.MarginRankingCriterion()\\n" + "output = mse:forward(input, target)\\n" + "gradInput = mse:backward(input, target)" val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "target" -> target1), Array("output", "gradInput")) val luaOutput1 = torchResult("output").asInstanceOf[Double] val luaOutput2 = torchResult("gradInput").asInstanceOf[Table] luaOutput1 should be (output) gradInput should equal (luaOutput2) println("Test case : MarginRankingCriterion, Torch : " + luaTime + " s, Scala : " + scalaTime / 1e9 + " s") } }
zhangxiaoli73/BigDL
spark/dl/src/test/scala/com/intel/analytics/bigdl/integration/torch/MarginRankingCriterionSpec.scala
Scala
apache-2.0
3,596
package com.recursivity.specs2

import org.specs2._
import runner.SpecificationsFinder._

class index extends Specification{
  def is = examplesLinks("Specs2 Example specifications")

  // see the SpecificationsFinder trait for the parameters of the 'specifications' method
  def examplesLinks(t: String) = t.title ^ specifications(pattern = ".*Specification").map(see)
}
wfaler/specs2-demo
src/test/scala/com/recursivity/specs2/Index.scala
Scala
mit
374
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import org.apache.spark.{SharedSparkContext, SparkFunSuite} import org.apache.spark.sql.catalyst.TableIdentifier import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.rules.Rule import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types.{BooleanType, StringType, StructField, StructType} @deprecated("This suite is deprecated to silent compiler deprecation warnings", "2.0.0") class SQLContextSuite extends SparkFunSuite with SharedSparkContext { object DummyRule extends Rule[LogicalPlan] { def apply(p: LogicalPlan): LogicalPlan = p } test("getOrCreate instantiates SQLContext") { val sqlContext = SQLContext.getOrCreate(sc) assert(sqlContext != null, "SQLContext.getOrCreate returned null") assert(SQLContext.getOrCreate(sc).eq(sqlContext), "SQLContext created by SQLContext.getOrCreate not returned by SQLContext.getOrCreate") } test("getOrCreate return the original SQLContext") { val sqlContext = SQLContext.getOrCreate(sc) val newSession = sqlContext.newSession() assert(SQLContext.getOrCreate(sc).eq(sqlContext), "SQLContext.getOrCreate after explicitly created SQLContext did not return the context") SparkSession.setActiveSession(newSession.sparkSession) assert(SQLContext.getOrCreate(sc).eq(newSession), "SQLContext.getOrCreate after explicitly setActive() did not return the active context") } test("Sessions of SQLContext") { val sqlContext = SQLContext.getOrCreate(sc) val session1 = sqlContext.newSession() val session2 = sqlContext.newSession() // all have the default configurations val key = SQLConf.SHUFFLE_PARTITIONS.key assert(session1.getConf(key) === session2.getConf(key)) session1.setConf(key, "1") session2.setConf(key, "2") assert(session1.getConf(key) === "1") assert(session2.getConf(key) === "2") // temporary table should not be shared val df = session1.range(10) df.createOrReplaceTempView("test1") assert(session1.tableNames().contains("test1")) assert(!session2.tableNames().contains("test1")) // UDF should not be shared def myadd(a: Int, b: Int): Int = a + b session1.udf.register[Int, Int, Int]("myadd", myadd) session1.sql("select myadd(1, 2)").explain() intercept[AnalysisException] { session2.sql("select myadd(1, 2)").explain() } } test("Catalyst optimization passes are modifiable at runtime") { val sqlContext = SQLContext.getOrCreate(sc) sqlContext.experimental.extraOptimizations = Seq(DummyRule) assert(sqlContext.sessionState.optimizer.batches.flatMap(_.rules).contains(DummyRule)) } test("get all tables") { val sqlContext = SQLContext.getOrCreate(sc) val df = sqlContext.range(10) df.createOrReplaceTempView("listtablessuitetable") assert( sqlContext.tables().filter("tableName = 'listtablessuitetable'").collect().toSeq == 
Row("", "listtablessuitetable", true) :: Nil) assert( sqlContext.sql("SHOW tables").filter("tableName = 'listtablessuitetable'").collect().toSeq == Row("", "listtablessuitetable", true) :: Nil) sqlContext.sessionState.catalog.dropTable( TableIdentifier("listtablessuitetable"), ignoreIfNotExists = true, purge = false) assert(sqlContext.tables().filter("tableName = 'listtablessuitetable'").count() === 0) } test("getting all tables with a database name has no impact on returned table names") { val sqlContext = SQLContext.getOrCreate(sc) val df = sqlContext.range(10) df.createOrReplaceTempView("listtablessuitetable") assert( sqlContext.tables("default").filter("tableName = 'listtablessuitetable'").collect().toSeq == Row("", "listtablessuitetable", true) :: Nil) assert( sqlContext.sql("show TABLES in default").filter("tableName = 'listtablessuitetable'") .collect().toSeq == Row("", "listtablessuitetable", true) :: Nil) sqlContext.sessionState.catalog.dropTable( TableIdentifier("listtablessuitetable"), ignoreIfNotExists = true, purge = false) assert(sqlContext.tables().filter("tableName = 'listtablessuitetable'").count() === 0) } test("query the returned DataFrame of tables") { val sqlContext = SQLContext.getOrCreate(sc) val df = sqlContext.range(10) df.createOrReplaceTempView("listtablessuitetable") val expectedSchema = StructType( StructField("database", StringType, false) :: StructField("tableName", StringType, false) :: StructField("isTemporary", BooleanType, false) :: Nil) Seq(sqlContext.tables(), sqlContext.sql("SHOW TABLes")).foreach { case tableDF => assert(expectedSchema === tableDF.schema) tableDF.createOrReplaceTempView("tables") assert( sqlContext.sql( "SELECT isTemporary, tableName from tables WHERE tableName = 'listtablessuitetable'") .collect().toSeq == Row(true, "listtablessuitetable") :: Nil) assert( sqlContext.tables().filter("tableName = 'tables'").select("tableName", "isTemporary") .collect().toSeq == Row("tables", true) :: Nil) sqlContext.dropTempTable("tables") } } }
akopich/spark
sql/core/src/test/scala/org/apache/spark/sql/SQLContextSuite.scala
Scala
apache-2.0
6,063
package structures
package laws

case class IsEqual[A](lhs: A, rhs: A) {
  def isEqual(implicit eq: Equal[A]): Boolean = eq.equal(lhs, rhs)
}
mpilquist/Structures
laws/src/main/scala/structures/laws/IsEqual.scala
Scala
bsd-3-clause
142
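A small sketch of how such a law value might be produced and then discharged. The Equal[Int] instance below is defined ad hoc for the example and assumes Equal[A] declares a two-argument equal method, as the call in isEqual above implies.

import structures.Equal
import structures.laws.IsEqual

object IsEqualSketch {
  // ad-hoc instance; assumes `def equal(x: A, y: A): Boolean` is the abstract member of Equal
  implicit val intEqual: Equal[Int] = new Equal[Int] {
    def equal(x: Int, y: Int): Boolean = x == y
  }

  val additionAssociates: IsEqual[Int] = IsEqual((1 + 2) + 3, 1 + (2 + 3))
  val holds: Boolean = additionAssociates.isEqual // true under the instance above
}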
// Copyright (C) 2019 MapRoulette contributors (see CONTRIBUTORS.md). // Licensed under the Apache License, Version 2.0 (see LICENSE). package org.maproulette.models.utils import java.sql.SQLException import anorm._ import org.apache.commons.lang3.StringUtils import org.joda.time.DateTime import org.joda.time.format.DateTimeFormat import org.maproulette.session.SearchParameters import scala.collection.mutable.ListBuffer sealed trait SQLKey { def getSQLKey(): String } case class AND() extends SQLKey { override def getSQLKey(): String = "AND" } case class OR() extends SQLKey { override def getSQLKey(): String = "OR" } case class WHERE() extends SQLKey { override def getSQLKey(): String = "WHERE" } /** * Helper functions for any Data Access Layer classes * * @author cuthbertm */ trait DALHelper { private val dateFormat = DateTimeFormat.forPattern("yyyy-MM-dd") // The set of characters that are allowed for column names, so that we can sanitize in unknown input // for protection against SQL injection private val ordinary = (('a' to 'z') ++ ('A' to 'Z') ++ ('0' to '9') ++ Seq('_') ++ Seq('.')).toSet /** * Function will return "ALL" if value is 0 otherwise the value itself. Postgres does not allow * using 0 for ALL * * @param value The limit used in the query * @return ALL if 0 otherwise the value */ def sqlLimit(value: Int): String = if (value <= 0) "ALL" else value + "" /** * Corrects the search string by adding % before and after string, so that it doesn't rely * on simply an exact match. If value not supplied, then will simply return % * * @param value The search string that you are using to match with * @return */ def search(value: String): String = if (value.nonEmpty) s"%$value%" else "%" /** * Creates the ORDER functionality, with the column and direction * * @param orderColumn The column that you are ordering with (or multiple comma separated columns) * @param orderDirection Direction of ordering ASC or DESC * @param tablePrefix table alias if required * @param nameFix The namefix really is just a way to force certain queries specific to MapRoulette * to use a much more efficient query plan. The difference in performance can be quite * large. We don't do it by default because it relies on the "name" column which is * not guaranteed. * @return */ def order(orderColumn: Option[String] = None, orderDirection: String = "ASC", tablePrefix: String = "", nameFix: Boolean = false, ignoreCase: Boolean = false): String = orderColumn match { case Some(column) => this.testColumnName(column) val direction = orderDirection match { case "DESC" => "DESC" case _ => "ASC" } // sanitize the column name to prevent sql injection. 
Only allow underscores and A-Za-z if (column.forall(this.ordinary.contains)) { val casedColumn = new StringBuilder() if (ignoreCase) { casedColumn ++= "LOWER(" } casedColumn ++= this.getPrefix(tablePrefix) + column if (ignoreCase) { casedColumn ++= ")" } s"ORDER BY $casedColumn $direction ${ if (nameFix) { "," + this.getPrefix(tablePrefix) + "name"; } else { ""; } }" } else { "" } case None => "" } def sqlWithParameters(query: String, parameters: ListBuffer[NamedParameter]): SimpleSql[Row] = { if (parameters.nonEmpty) { SQL(query).on(parameters: _*) } else { SQL(query).asSimple[Row]() } } def parentFilter(parentId: Long) (implicit conjunction: Option[SQLKey] = Some(AND())): String = if (parentId != -1) { s"${this.getSqlKey} parent_id = $parentId" } else { "" } def getLongListFilter(list: Option[List[Long]], columnName: String) (implicit conjunction: Option[SQLKey] = Some(AND())): String = { this.testColumnName(columnName) list match { case Some(idList) if idList.nonEmpty => s"${this.getSqlKey} $columnName IN (${idList.mkString(",")})" case _ => "" } } def getOptionalFilter(filterValue:Option[Any], columnName:String, key:String) = { filterValue match { case Some(value) => s"$columnName = {$key}" case None => "" } } def getOptionalMatchFilter(filterValue:Option[Any], columnName:String, key:String) = { filterValue match { case Some(value) => s"LOWER($columnName) LIKE LOWER({$key})" case None => "" } } def getIntListFilter(list:Option[List[Int]], columnName:String) (implicit conjunction:Option[SQLKey]=Some(AND())) : String = { this.testColumnName(columnName) list match { case Some(idList) if idList.nonEmpty => s"${this.getSqlKey} $columnName IN (${idList.mkString(",")})" case _ => "" } } private def testColumnName(columnName: String): Unit = { if (!columnName.forall(this.ordinary.contains)) { throw new SQLException(s"Invalid column name provided `$columnName`") } } private def getSqlKey(implicit conjunction: Option[SQLKey]): String = { conjunction match { case Some(c) => c.getSQLKey() case None => "" } } def getDateClause(column: String, start: Option[DateTime] = None, end: Option[DateTime] = None) (implicit sqlKey: Option[SQLKey] = None): String = { this.testColumnName(column) val dates = getDates(start, end) s"${this.getSqlKey} $column::date BETWEEN '${dates._1}' AND '${dates._2}'" } def getDates(start: Option[DateTime] = None, end: Option[DateTime] = None): (String, String) = { val startDate = start match { case Some(s) => dateFormat.print(s) case None => dateFormat.print(DateTime.now().minusWeeks(1)) } val endDate = end match { case Some(e) => dateFormat.print(e) case None => dateFormat.print(DateTime.now()) } (startDate, endDate) } def addSearchToQuery(params: SearchParameters, whereClause: StringBuilder, projectPrefix: String = "p", challengePrefix: String = "c") (implicit projectSearch: Boolean = true): ListBuffer[NamedParameter] = { val parameters = new ListBuffer[NamedParameter]() if (!projectSearch) { params.getProjectIds match { case Some(p) if p.nonEmpty => whereClause ++= s"$challengePrefix.parent_id IN (${p.mkString(",")})" case _ => params.projectSearch match { case Some(ps) if ps.nonEmpty => params.fuzzySearch match { case Some(x) => whereClause ++= this.fuzzySearch(s"$projectPrefix.display_name", "ps", x)(if (whereClause.isEmpty) None else Some(AND())) parameters += ('ps -> ps) case None => whereClause ++= (if (whereClause.isEmpty) "" else " AND ") whereClause ++= " (" + this.searchField(s"$projectPrefix.display_name", "ps")(None) whereClause ++= s" OR $challengePrefix.id IN 
(SELECT vp2.challenge_id FROM virtual_project_challenges vp2 INNER JOIN projects p2 ON p2.id = vp2.project_id WHERE " + this.searchField(s"p2.display_name", "ps")(None) + " AND p2.enabled=true)) " parameters += ('ps -> s"%$ps%") } case _ => // we can ignore this } this.appendInWhereClause(whereClause, this.enabled(params.projectEnabled.getOrElse(false), projectPrefix)(None)) } } params.getChallengeIds match { case Some(c) if c.nonEmpty => this.appendInWhereClause(whereClause, s"$challengePrefix.id IN (${c.mkString(",")})") case _ => params.challengeParams.challengeSearch match { case Some(cs) if cs.nonEmpty => params.fuzzySearch match { case Some(x) => this.appendInWhereClause(whereClause, this.fuzzySearch(s"$challengePrefix.name", "cs", x)(None)) parameters += ('cs -> cs) case None => this.appendInWhereClause(whereClause, this.searchField(s"$challengePrefix.name", "cs")(None)) parameters += ('cs -> s"%$cs%") } case _ => // ignore } this.appendInWhereClause(whereClause, this.enabled(params.challengeParams.challengeEnabled.getOrElse(false), challengePrefix)(None)) } parameters } /** * All MapRoulette objects contain the enabled column that define whether it is enabled in the * system or not. This will create the WHERE part of the clause checking for enabled values in the * query * * @param value If looking only for enabled elements this needs to be set to true * @param tablePrefix If used as part of a join or simply the table alias if required * @param key Defaulted to "AND" * @return */ def enabled(value: Boolean, tablePrefix: String = "") (implicit key: Option[SQLKey] = Some(AND())): String = { if (value) { s"${this.getSqlKey} ${this.getPrefix(tablePrefix)}enabled = TRUE" } else { "" } } /** * Just appends the period at the end of the table prefix if the provided string is not empty * * @param prefix The table prefix that is being used in the query * @return */ private def getPrefix(prefix: String): String = if (StringUtils.isEmpty(prefix) || !prefix.forall(this.ordinary.contains)) "" else s"$prefix." /** * This function will handle the conjunction in a where clause. So if you are you creating * a dynamic where clause this will handle adding the conjunction clause if required * * @param whereClause The StringBuilder where clause * @param value The value that is being appended * @param conjunction The conjunction, by default AND */ def appendInWhereClause(whereClause: StringBuilder, value: String) (implicit conjunction: Option[SQLKey] = Some(AND())): Unit = { if (whereClause.nonEmpty && value.nonEmpty) { whereClause ++= s" ${this.getSqlKey} $value" } else { whereClause ++= value } } /** * Set the search field in the where clause correctly, it will also surround the values * with LOWER to make sure that match is case insensitive * * @param column The column that you are searching against * @param conjunction Default is AND, but can use AND or OR * @param key The search string that you are testing against * @return */ def searchField(column: String, key: String = "ss") (implicit conjunction: Option[SQLKey] = Some(AND())): String = s" ${this.getSqlKey} LOWER($column) LIKE LOWER({$key})" /** * Adds fuzzy search to any query. This will include the Levenshtein, Metaphone and Soundex functions * that will search the string. 
On large datasets this could potentially decrease performance * * @param column The column that we are comparing * @param key The key used in anorm for the value to compare with * @param levenshsteinScore The levenshstein score, which is the difference between the strings * @param metaphoneSize The maximum size of the metaphone code * @param conjunction Default AND * @return A string with all the fuzzy search functions */ def fuzzySearch(column: String, key: String = "ss", levenshsteinScore: Int = DALHelper.DEFAULT_LEVENSHSTEIN_SCORE, metaphoneSize: Int = DALHelper.DEFAULT_METAPHONE_SIZE) (implicit conjunction: Option[SQLKey] = Some(AND())): String = { val score = if (levenshsteinScore > 0) { levenshsteinScore } else { 3 } s""" ${this.getSqlKey} ($column <> '' AND (LEVENSHTEIN(LOWER($column), LOWER({$key})) < $score OR METAPHONE(LOWER($column), 4) = METAPHONE(LOWER({$key}), $metaphoneSize) OR SOUNDEX(LOWER($column)) = SOUNDEX(LOWER({$key}))) )""" } def addChallengeTagMatchingToQuery(params: SearchParameters, whereClause: StringBuilder, joinClause: StringBuilder, challengePrefix: String = "c"): ListBuffer[NamedParameter] = { val parameters = new ListBuffer[NamedParameter]() params.challengeParams.challengeTags match { case Some(ct) if ct.nonEmpty => joinClause ++= s""" INNER JOIN tags_on_challenges toc ON toc.challenge_id = $challengePrefix.id INNER JOIN tags tgs ON tgs.id = toc.tag_id """ val tags = ListBuffer[String]() ct.zipWithIndex.foreach(tagWithIndex => { parameters += new NamedParameter(s"tag${tagWithIndex._2}", tagWithIndex._1) tags += s"{tag${tagWithIndex._2}}" }) this.appendInWhereClause(whereClause, s"tgs.name IN (${tags.mkString(",")})") case _ => // ignore } parameters } } object DALHelper { private val DEFAULT_LEVENSHSTEIN_SCORE = 3 private val DEFAULT_METAPHONE_SIZE = 4 }
mvexel/maproulette2
app/org/maproulette/models/utils/DALHelper.scala
Scala
apache-2.0
13,329
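A rough sketch of how a data-access class might stitch a few of the helpers above into a query string. The table alias, column name and limit are invented, and the object is placed in the same package so that the trait and the SQLKey case classes are in scope.

package org.maproulette.models.utils

object DALHelperSketch extends DALHelper {

  // roughly: SELECT * FROM challenges c WHERE c.enabled = TRUE ORDER BY c.created DESC LIMIT 25
  val listEnabledChallenges: String =
    s"""SELECT * FROM challenges c
       |${enabled(true, "c")(Some(WHERE()))}
       |${order(orderColumn = Some("created"), orderDirection = "DESC", tablePrefix = "c")}
       |LIMIT ${sqlLimit(25)}""".stripMargin
}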
trait Monad[M[_]]

object Test {
  def x: Monad[M forSome { type M[_] }] = ???
}
loskutov/intellij-scala
testdata/scalacTests/failed/t9008.scala
Scala
apache-2.0
81
package utl.net.http

import com.twitter.ostrich.admin.Service
import io.netty.bootstrap.ServerBootstrap
import io.netty.buffer.PooledByteBufAllocator
import io.netty.channel._
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.handler.codec.http._
import org.slf4j.LoggerFactory

/**
 *
 * User: light
 * Date: 17/03/14
 * Time: 13:00
 */
class HttpServer(val port: Int, val dispatcher : () => ChannelHandler) extends Service {

  val log = LoggerFactory.getLogger(getClass)

  val bossGroup = new NioEventLoopGroup(1)
  val workerGroup = new NioEventLoopGroup(Runtime.getRuntime.availableProcessors() * 2 + 1)

  var ch: Option[Channel] = None

  override def quiesce() = {
    stop()
  }

  def stop() {
    log.info("--> Stopping HttpServer")

    bossGroup.shutdownGracefully()
    workerGroup.shutdownGracefully()

    log.info("-- HttpServer Stopping (await)")

    bossGroup.terminationFuture().sync()
    workerGroup.terminationFuture().sync()

    log.info("<-- HttpServer Stopped")
  }

  override def shutdown() = {
  }

  override def start() = {
    val boot = new ServerBootstrap()

    boot.group(bossGroup, workerGroup)
      .channel(classOf[NioServerSocketChannel])
      .childHandler(new HttpServerChannelInitializer(dispatcher))
      .option(ChannelOption.SO_BACKLOG, new Integer(16000))
    //.option(ChannelOption.ALLOCATOR, )
    //.option(ChannelOption.RCVBUF_ALLOCATOR, AdaptiveRecvByteBufAllocator.DEFAULT)

    ch = Some(boot.bind(port).sync().channel())

    log.info("Server started, port: " + port)
  }
}

class HttpServerChannelInitializer(x : () => ChannelHandler) extends ChannelInitializer[SocketChannel] {

  val allocator = new PooledByteBufAllocator(true)

  override def initChannel(ch: SocketChannel) = {
    ch.config().setAllocator( allocator )

    val p = ch.pipeline()
    p.addLast("decoder", new HttpRequestDecoder())
    p.addLast("combiner", new HttpCombiner() )
    p.addLast("encoder", new HttpResponseEncoder())
    p.addLast("handler", x())
  }
}
onerinvestments/strd
strd-commons/src/main/scala/utl/net/http/HttpServer.scala
Scala
apache-2.0
2,122
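A rough wiring sketch for the classes above. The port and the no-op placeholder handler are invented; a real dispatcher would inspect the decoded request and write a response.

package utl.net.http

import io.netty.channel.{ChannelHandler, ChannelInboundHandlerAdapter}

object HttpServerSketch {
  // placeholder: a fresh no-op handler per connection, standing in for a real request dispatcher
  private def dispatcher(): ChannelHandler = new ChannelInboundHandlerAdapter

  def main(args: Array[String]): Unit = {
    val server = new HttpServer(8080, () => dispatcher())
    server.start()
    sys.addShutdownHook(server.stop())
  }
}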
import sbt._ import Keys._ object Tools { type Sett = Def.Setting[_] def onVersion[A](all: Seq[A], on292: => Seq[A] = Seq(), on210: => Seq[A] = Seq(), on211: => Seq[A] = Seq()) = scalaVersion(v => all ++ (if (v.contains("2.11")) on211 else if (v.contains("2.10")) on210 else on292)) def onVersionTask[A](all: Seq[A], on292: => Seq[A] = Seq(), on210: => Seq[A] = Seq(), on211: => Seq[A] = Seq()) = scalaVersion.map(v => all ++ (if (v.contains("2.11")) on211 else if (v.contains("2.10")) on210 else on292)) }
etorreborre/argonaut
project/Tools.scala
Scala
bsd-3-clause
525
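A usage sketch, assuming an sbt 0.13-era build (onVersion builds on the tuple-style scalaVersion(...) initializer); the artifact coordinates below are placeholders.

import sbt._
import Keys._

object BuildSketch {
  // extra dependencies only for Scala 2.10, on top of the ones shared by all versions
  lazy val versionSpecific: Seq[Def.Setting[_]] = Seq(
    libraryDependencies ++= Tools.onVersion(
      all   = Seq("org.scalaz" %% "scalaz-core" % "7.1.0"),
      on210 = Seq("org.scala-lang" % "scala-reflect" % "2.10.4")
    ).value
  )
}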
/* * DoubleSpinnerView.scala * (LucreSwing) * * Copyright (c) 2014-2021 Hanns Holger Rutz. All rights reserved. * * This software is published under the GNU Affero General Public License v3+ * * * For further information, please contact Hanns Holger Rutz at * [email protected] */ package de.sciss.lucre.swing import de.sciss.lucre.edit.UndoManager import de.sciss.lucre.expr.CellView import de.sciss.lucre.swing.impl.{DoubleSpinnerViewImpl => Impl} import de.sciss.lucre.{Cursor, DoubleObj, Txn} import de.sciss.swingplus.Spinner object DoubleSpinnerView { def cell[T <: Txn[T]](cell: CellView[T, Double], name: String, width: Int = 160) (implicit tx: T, cursor: Cursor[T], undoManager: UndoManager[T]): DoubleSpinnerView[T] = Impl(cell, name = name, width = width) def apply[T <: Txn[T]](expr: DoubleObj[T], name: String, width: Int = 160) (implicit tx: T, cursor: Cursor[T], undoManager: UndoManager[T]): DoubleSpinnerView[T] = { implicit val tpe: DoubleObj.type = DoubleObj Impl(CellView.expr[T, Double, DoubleObj](expr), name = name, width = width) } def optional[T <: Txn[T]](cell: CellView[T, Option[Double]], name: String, width: Int = 160, default: Option[Double] = None) (implicit tx: T, cursor: Cursor[T], undoManager: UndoManager[T]): Optional[T] = Impl.optional(cell, name = name, width = width, default0 = default) trait Optional[T <: Txn[T]] extends DoubleSpinnerView[T] { /** Sets a default value to be displayed when the model value is absent. * This must be called on the EDT. */ var default: Option[Double] } } trait DoubleSpinnerView[T <: Txn[T]] extends View[T] { type C = Spinner }
Sciss/LucreSwing
jvm/src/main/scala/de/sciss/lucre/swing/DoubleSpinnerView.scala
Scala
agpl-3.0
1,773
package com.gx.flyweight

/**
 * Copyright 2017 josephguan
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

// Flyweight Interface
trait Tea {
  // name is intrinsic state, shared state
  val name: String

  // table is extrinsic state, unshared state
  def serve(table: Int): Unit = {
    println(s"Serving $name to table# $table. hashCode: $hashCode")
  }
}

// concrete flyweight
class GreenTea extends Tea {
  override val name: String = "Green Tea"
}

// unshared concrete flyweight
class UnsharedTea extends Tea {
  override val name: String = "Unshared Tea"

  // unshared state
  val price = 10

  override def serve(table: Int): Unit = {
    println(s"Serving $name to table# $table. Price is $price. hashCode: $hashCode")
  }
}

// Tea type
object Tea {
  trait Type
  case object GreenTea extends Type
  case object UnsharedTea extends Type
}

// This is what Tea will look like if it is not a Flyweight
//trait Tea {
//  val name: String
//  val table: Int  // create a object for each table
//  def serve(): Unit
//}
josephguan/scala-design-patterns
structural/flyweight/src/main/scala/com/gx/flyweight/Tea.scala
Scala
apache-2.0
1,561
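A sketch of the factory side of the pattern, which the Tea.Type objects above hint at: shared flyweights are cached per type so every green-tea order reuses one instance, while unshared ones are created fresh. The class and object names here are invented.

package com.gx.flyweight

import scala.collection.mutable

class TeaFactorySketch {
  // shared flyweights, cached by their intrinsic type
  private val cache = mutable.Map.empty[Tea.Type, Tea]

  def make(kind: Tea.Type): Tea = kind match {
    case Tea.GreenTea    => cache.getOrElseUpdate(kind, new GreenTea)
    case Tea.UnsharedTea => new UnsharedTea // unshared flyweights are never cached
    case _               => throw new IllegalArgumentException(s"unknown tea type: $kind")
  }
}

object TeaShopSketch extends App {
  private val factory = new TeaFactorySketch
  factory.make(Tea.GreenTea).serve(table = 1)
  factory.make(Tea.GreenTea).serve(table = 2) // prints the same hashCode: the instance is shared
  factory.make(Tea.UnsharedTea).serve(table = 3)
}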
///////////////////////////////////////////////////////////////
// © ООО «Праймтолк», 2011-2013                              //
// Все права принадлежат компании ООО «Праймтолк».           //
///////////////////////////////////////////////////////////////
/**
 * SynapseGrid
 * © Primetalk Ltd., 2013.
 * All rights reserved.
 * Authors: A.Zhizhelev, A.Nehaev, P. Popov
 *
 * Created: 17.07.13, zhizhelev
 */
package ru.primetalk.synapse.examples

import ru.primetalk.synapse.core._

object Examples2 {

  object StringSplitterBuilder extends SystemBuilder {
    implicit val sb1 = this
    val a = contact[String]("a")
    val b = contact[String]("b")
    val c = contact[Char]("c")
    inputs(a)
    outputs(b,c)
    a -> b flatMap (_.split("\\s+"))
    a -> c flatMap (_.toCharArray.toSeq)
  }
}
ppopoff/SynapseGrid
synapse-grid-examples/src/main/scala/ru/primetalk/synapse/examples/Examples2.scala
Scala
bsd-2-clause
857
/* * Copyright (c) 2015-2022 "Neo Technology," * Network Engine for Objects in Lund AB [http://neotechnology.com] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Attribution Notice under the terms of the Apache License 2.0 * * This work was created by the collective efforts of the openCypher community. * Without limiting the terms of Section 6, any Derivative Work that is not * approved by the public consensus process of the openCypher Implementers Group * should not be described as “Cypher” (and Cypher® is a registered trademark of * Neo4j Inc.) or as "openCypher". Extensions by implementers or prototypes or * proposals for change that have been documented or implemented should only be * described as "implementation extensions to Cypher" or as "proposed changes to * Cypher that are not yet approved by the openCypher community". */ package org.opencypher.tools.tck.api.groups import org.opencypher.tools.tck.api.Scenario trait Group { def name: String def description: String = name def indent: Int def parent: Option[Group] def children(implicit tck: TckTree): Set[Group] = tck.groupChildren.getOrElse(this, Set[Group]()) override def toString: String = name } object Group { implicit def canonicalOrdering[A <: Group]: Ordering[A] = (x: A, y: A) => (x, y) match { case (x: ScenarioCategory, y: ScenarioCategory) => ScenarioCategory.canonicalOrdering.compare(x, y) case (x: Feature, y: Feature) => Feature.canonicalOrdering.compare(x, y) case (x: Numbered, y: Numbered) => Numbered.canonicalOrdering.compare(x, y) case (x: ExampleItem, y: ExampleItem) => ExampleItem.canonicalOrdering.compare(x, y) case (x: Tag, y: Tag) => Tag.canonicalOrdering.compare(x, y) case (Total, _) => -1 case (_, Total) => 1 case (_: ScenarioCategory, _) => -1 case (_, _: ScenarioCategory) => 1 case (_: Feature, _) => -1 case (_, _: Feature) => 1 case (_: Numbered, _) => -1 case (_, _: Numbered) => 1 case (_: ExampleItem, _) => -1 case (_, _: ExampleItem) => 1 case (_: Tag, _) => -1 case (_, _: Tag) => 1 case (_, _) => 0 } } trait ContainerGroup extends Group trait ContainedGroup extends Group { def parentGroup: ContainerGroup override lazy val indent: Int = parentGroup.indent + 1 override lazy val parent: Option[Group] = Some(parentGroup) override def toString: String = parentGroup.toString + name + "/" } trait ScenarioContainer extends ContainerGroup case object Total extends ContainerGroup { override val name = "Total" override val indent = 0 override val parent: Option[Group] = None override def toString: String = "/" } case class ScenarioCategory(override val name: String, override val parentGroup: ContainerGroup) extends ContainedGroup with ContainerGroup { override def toString: String = parentGroup.toString + name + "/" } object ScenarioCategory { implicit def canonicalOrdering[A <: ScenarioCategory]: Ordering[A] = Ordering.by(_.name) } case class Feature(override val name: String, override val parentGroup: ContainerGroup) extends ContainedGroup with ScenarioContainer object Feature { private val namePatternWithDescription = 
"^([^0-9]+)([0-9]+)( - .+)$".r private val namePatternWithoutDescription = "^([^0-9]+)([0-9]+)$".r implicit def canonicalOrdering[A <: Feature]: Ordering[A] = Ordering.by(f => { f.name match { case namePatternWithDescription(name, number, description) => (name, number.toInt, description) case namePatternWithoutDescription(name, number) => (name, number.toInt, "") case _ => (f.name, -1, "") } }) } trait Numbered extends ContainedGroup { def number: Option[Int] override def description = s"${number.map(n => "["+n+"] ").getOrElse("")}$name" override def toString: String = parentGroup.toString + description + "/" } object Numbered { implicit def canonicalOrdering[A <: Numbered]: Ordering[A] = Ordering.by(n => (n.number.getOrElse(Int.MaxValue), n.name)) } case class ScenarioOutline(override val number: Option[Int], override val name: String, override val parentGroup: ScenarioContainer) extends Numbered with ContainedGroup with ContainerGroup trait Item extends ContainedGroup { def scenario: Scenario } case class ScenarioItem(override val scenario: Scenario, override val parentGroup: ScenarioContainer) extends Numbered with Item { override def number: Option[Int] = scenario.number override def name: String = scenario.name } case class ExampleItem(index: Int, exampleName: Option[String], override val scenario: Scenario, override val parentGroup: ScenarioOutline) extends Item { override def description = s"#$index${exampleName.map(n => " ("+n+")").getOrElse("")}" override def name: String = s"#$index${exampleName.map(n => " ("+n+")").getOrElse("")}" } object ExampleItem { implicit def canonicalOrdering[A <: ExampleItem]: Ordering[A] = Ordering.by(_.index) } case class Tag(override val name: String) extends ContainedGroup with ScenarioContainer { override val parentGroup: ContainerGroup = Total } object Tag { implicit def canonicalOrdering[A <: Tag]: Ordering[A] = Ordering.by(_.name) }
opencypher/openCypher
tools/tck-api/src/main/scala/org/opencypher/tools/tck/api/groups/Group.scala
Scala
apache-2.0
5,705
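A small sketch of the canonical ordering above in action. The category, feature and tag names are made up; the expected order follows from the cases: Total first, then categories, then features sorted by their embedded number, with tags last.

package org.opencypher.tools.tck.api.groups

object GroupOrderingSketch {
  val clauses: ScenarioCategory = ScenarioCategory("clauses", Total)
  val match1: Feature  = Feature("Match1 - basic node patterns", clauses)
  val match10: Feature = Feature("Match10", clauses)

  val groups: List[Group] = List(Tag("@skip"), match10, Total, match1, clauses)

  // Group.canonicalOrdering is implicit in the companion object, so plain `sorted` picks it up
  val inCanonicalOrder: List[Group] = groups.sorted
  // => List(Total, clauses, match1, match10, Tag("@skip"))
}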
package com.github.spirom.sparkflights.etl

import com.github.spirom.sparkflights.fw.RDDLogger
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

object ParquetSubsetLocal {

  val logger = Logger.getLogger(getClass.getName)
  logger.setLevel(Level.ALL)

  def main(args: Array[String]): Unit = {

    val from = args(0)
    val to = args(1)

    logger.info("running locally")

    val conf = new SparkConf().setAppName("Flights Example").setMaster("local[4]")

    logger.info(s"SparkFlights: Reading data from $from")
    logger.info(s"SparkFlights: Writing subset data to $to")

    val sc = new SparkContext(conf)

    val rddLogger = new RDDLogger("/logger", sc)
    //rddLogger.enabled = true

    rddLogger.log(from)
    rddLogger.log(to)

    val sqlContext = new SQLContext(sc)

    val data = sqlContext.read.parquet(from)
    data.registerTempTable("flights")

    val subset = sqlContext.sql("SELECT * FROM flights WHERE origin = 'ORD' AND year = '2013' AND dayofmonth = '2' AND month = '1' ")

    subset.write.mode("append").parquet(to)
  }
}
spirom/SparkFlightExamples
src/main/scala/com/github/spirom/sparkflights/etl/ParquetSubsetLocal.scala
Scala
mit
1,141
package jigg.nlp.ccg.tagger /* Copyright 2013-2015 Hiroshi Noji Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ import jigg.nlp.ccg.lexicon._ //import jigg.nlp.ccg.util.Indexer import jigg.ml.{LogLinearClassifier, OnlineLogLinearTrainer, Example, FeatureIndexer} import scala.collection.mutable.{ArrayBuffer, HashMap} import scala.util.Random class MaxEntMultiTagger( val indexer: FeatureIndexer[LF], val extractors: FeatureExtractors, val classifier: LogLinearClassifier[Int], val dict: Dictionary) { val reusableFeatureIdxs = new ArrayBuffer[Int] trait Instance { def items:Array[Example[Int]] def goldLabel:Int = 0 } case class TestInstance(override val items:Array[Example[Int]]) extends Instance def getTestInstance(sentence:TaggedSentence, i:Int): TestInstance = { val candidateLabels = dict.getCategoryCandidates(sentence.base(i), sentence.pos(i)) map { _.id } val unlabeled = extractors.extractUnlabeledFeatures(sentence, i).toArray unlabeledToTestInstance(unlabeled, candidateLabels) } def unlabeledToTestInstance(features:Array[UF], candidateLabels:Array[Int]) = TestInstance(getItems(features, candidateLabels, { f => indexer.get(f) })) def getItems(features:Array[UF], candidateLabels:Array[Int], f2index:(LF => Int)): Array[Example[Int]] = candidateLabels map { label => //val indexes = new Array[Int](features.size) reusableFeatureIdxs.clear var i = 0 while (i < features.size) { val f = f2index(features(i).assignLabel(label)) if (f >= 0) reusableFeatureIdxs += f // discard -1 = unknown features i += 1 } Example(reusableFeatureIdxs.toArray, label) } def candSeq(sentence:TaggedSentence, beta:Double, maxK: Int): Array[Seq[Category]] = (0 until sentence.size).map { i => val instance = getTestInstance(sentence, i) val dist = classifier.labelProbs(instance.items) val (max, argmax) = dist.zipWithIndex.foldLeft((0.0, 0)) { case ((max, argmax), (p,i)) => if (p > max) (p, i) else (max, argmax) } val threshold = max * beta val numTake = if (maxK == -1) dist.size else maxK instance.items.zip(dist).filter { case (e, p) => p >= threshold }.take(numTake).map { case (e, _) => dict.getCategory(e.label) }.toSeq }.toArray } class MaxEntMultiTaggerTrainer( indexer: FeatureIndexer[LF], extractors: FeatureExtractors, override val classifier: OnlineLogLinearTrainer[Int], dict: Dictionary) extends MaxEntMultiTagger(indexer, extractors, classifier, dict) { case class TrainingInstance(override val items:Array[Example[Int]], override val goldLabel:Int) extends Instance def trainWithCache(sentences:Seq[GoldSuperTaggedSentence], numIters:Int) = { println("feature extraction start...") val cachedInstances:Seq[Option[Instance]] = sentences.zipWithIndex.flatMap { case (sentence, j) => if (j % 100 == 0) print(j + "\\t/" + sentences.size + " done \\r") (0 until sentence.size) map { i => getTrainingInstance(sentence, i, sentence.cat(i).id) } } println("\\ndone.") val numEffectiveInstances = cachedInstances.filter(_ != None).size println("# all training instances: " + numEffectiveInstances + "; " + (cachedInstances.size - numEffectiveInstances) + " instances were 
discarded by look-up errors of candidate categories.") println("# features: " + indexer.size) println("# average of candidate labels: " + (cachedInstances.foldLeft(0) { case (sum, o) => sum + o.map { _.items.size }.getOrElse(0) } ).toDouble / numEffectiveInstances.toDouble ) // import scala.collection.immutable.TreeMap // var labelNum2Count = new TreeMap[Int,Int] // cachedInstances.foreach { _.foreach { _.items.size match { case k => labelNum2Count += k -> (labelNum2Count.getOrElse(k, 0) + 1) } } } // println(labelNum2Count) (0 until numIters).foreach { j => val shuffledInstances = Random.shuffle(cachedInstances) var correct = 0 shuffledInstances.foreach { _ foreach { e => if (trainInstance(e)) correct += 1 } } println("accuracy (" + j + "): " + (correct.toDouble / numEffectiveInstances.toDouble)) } } def trainInstance(instance:Instance):Boolean = { val pred = classifier.predict(instance.items)._1 classifier.update(instance.items, instance.goldLabel) pred == instance.goldLabel } def getTrainingInstance(sentence:TaggedSentence, i:Int, goldLabel:Int): Option[TrainingInstance] = { val candidateLabels = dict.getCategoryCandidates(sentence.base(i), sentence.pos(i)) map { _.id } if (candidateLabels.isEmpty || !candidateLabels.contains(goldLabel)) None else { val unlabeled = extractors.extractUnlabeledFeatures(sentence, i).toArray Some(unlabeledToTrainingInstance(unlabeled, candidateLabels, goldLabel)) } } def unlabeledToTrainingInstance(features:Array[UF], candidateLabels:Array[Int], goldLabel:Int) = TrainingInstance(getItems(features, candidateLabels, { f => indexer.getIndex(f) }), goldLabel) }
sakabar/jigg
src/main/scala/jigg/nlp/ccg/tagger/MaxentMultiTagger.scala
Scala
apache-2.0
5,555
/*
 * Copyright 2013 WorldWide Conferencing, LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

object Developers {
  lazy val members = Map(
    "andreak" -> "Andreas Joseph Krogh",
    "bwmcadams" -> "Brendan W. McAdams",
    "davewhittaker" -> "Dave Whittaker",
    "davidB" -> "David Bernard",
    "dcbriccetti" -> "Dave Briccetti",
    "dchenbecker" -> "Derek Chen-Becker",
    "fmpwizard" -> "Diego Medina",
    "dpp" -> "David Pollak",
    "Dridus" -> "Ross Mellgren",
    "dlouwers" -> "Dirk Louwers",
    "eltimn" -> "Tim Nelson",
    "fbettag" -> "Franz Bettag",
    "harryh" -> "Harry Heymann",
    "hoffrocket" -> "Jon Hoffman",
    "indrajitr" -> "Indrajit Raychaudhuri",
    "jeppenejsum" -> "Jeppe Nejsum Madsen",
    "jgoday" -> "Javier Goday",
    "jonifreeman" -> "Joni Freeman",
    "jorgeortiz85" -> "Jorge Ortiz",
    "lkuczera" -> "Łukasz Kuczera",
    "mads379" -> "Mads Hartmann Jensen",
    "mariusdanciu" -> "Marius Danciu",
    "max-l" -> "Maxime Lévesque",
    "nafg" -> "Naftoli Gugenheim",
    "pr1001" -> "Peter Robinett",
    "rusho" -> "Ján Raška",
    "timperrett" -> "Timothy Perrett",
    "tjweir" -> "Tyler Weir",
    "tuhlmann" -> "Torsten Uhlmann",
    "vdichev" -> "Vassil Dichev",
    "chenkelmann" -> "Christoph Henkelmann"
  )

  def toXml =
    <developers>
      {members map { m =>
        <developer>
          <id>{m._1}</id>
          <name>{m._2}</name>
          <url>http://github.com/{m._1}</url>
        </developer>
      }}
    </developers>
}
lzpfmh/framework-2
project/Developers.scala
Scala
apache-2.0
2,021
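A sketch of how a build might splice the generated block into the published POM; the pomExtra setting and the project URL here are illustrative only.

import sbt._
import Keys._

object PublishSketch {
  lazy val pomSettings: Seq[Def.Setting[_]] = Seq(
    pomExtra := <url>https://www.liftweb.net</url> ++ Developers.toXml
  )
}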
package controllers.admin import jp.t2v.lab.play2.auth.test.Helpers.AuthFakeRequest import model.form.data._ import model.form.{BlockPageForm, ComputerForm, SSHOrderForm, SelectComputersForm} import model.json.{ComputerJson, LoginJson} import play.api.libs.json.Json import play.api.test.FakeRequest import services.state /** * Computer specifications on successful operations * @see controllers.admin.ComputerControllerSpec for mocked dependencies and other methods used here * @author Camilo Sampedro <[email protected]> */ class ComputerControllerFailedSpec extends ComputerControllerSpec { val computerService = mockComputerService(state.Failed) /** * Controller to be tested, with the dependencies */ lazy val controller = new ComputerController(computerService, roomService, messagesApi)(userService, executionContext, environment) "Computer Controller on failed operations" should { "return Failed <400> status on receiving an edited computer" in { import computer._ val result = controller.edit.apply { FakeRequest() .withJsonBody(Json.parse( s""" |{ | "ip":"$ip", | "description":"${description.getOrElse("")}", | "SSHUser":"$SSHUser", | "name":"${name.getOrElse("")}", | "SSHPassword":"$SSHPassword", | "roomID":${roomID.getOrElse(0)} |} """.stripMargin)) .withLoggedIn(controller)(loggedInUser) } assertFutureResultStatus(result, 400) } "return \\"Could not add that computer\\" on receiving a new computer" in { import computer._ val computerJson = ComputerJson(ip, name, SSHUser, SSHPassword, description, roomID) val result = controller.add.apply { FakeRequest() .withJsonBody(Json.toJson(computerJson)) .withLoggedIn(controller)(loggedInUser) } assertBodyJsonMessage(result, "Could not add that computer") } "return Failed <400> status on deleting a computer" in { val result = controller.delete(computer.ip).apply { FakeRequest() .withJsonBody(ipJson) .withLoggedIn(controller)(LoginJson("admin", "adminaton")) } assertFutureResultStatus(result, 400) } "return \\"Could not deleteLaboratory that computer\\" on receiving an deleting computer" in { val result = controller.delete(computer.ip).apply { FakeRequest() .withJsonBody(ipJson) .withLoggedIn(controller)(LoginJson("admin", "adminaton")) } assertBodyJsonMessage(result, "Could not deleteLaboratory that computer") } "return Failed <400> status on blocking a page on a single computer" in { val result = controller.blockPage.apply { FakeRequest() .withJsonBody(blockPageJson) .withLoggedIn(controller)(loggedInUser) } assertFutureResultStatus(result, 400) } "return \\"Could not block that page\\" on blocking a page on a single computer" in { val result = controller.blockPage.apply { FakeRequest() .withJsonBody(blockPageJson) .withLoggedIn(controller)(loggedInUser) } assertBodyJsonMessage(result, "Could not block that page", emptyExtras = false) } "return Failed <400> status on shutting down a computer" in { val result = controller.shutdown.apply { FakeRequest() .withJsonBody(ipJson) .withLoggedIn(controller)(loggedInUser) } assertFutureResultStatus(result, 400) } "return \\"Could not shutdown that computer\\" on shutting down a computer" in { val result = controller.shutdown.apply { FakeRequest() .withJsonBody(ipJson) .withLoggedIn(controller)(loggedInUser) } assertBodyJsonMessage(result, "Could not shutdown that computer") } "return Failed <400> status on upgrading a computer" in { val result = controller.upgrade.apply { FakeRequest() .withJsonBody(ipJson) .withLoggedIn(controller)(loggedInUser) } assertFutureResultStatus(result, 400) } "return \\"Could not 
upgrade that computer\\" on upgrading a computer" in { val result = controller.upgrade.apply { FakeRequest() .withJsonBody(ipJson) .withLoggedIn(controller)(loggedInUser) } assertBodyJsonMessage(result, "Could not upgrade that computer", emptyExtras = true) } "return Failed <400> status on unfreezing a computer" in { val result = controller.unfreeze.apply { FakeRequest() .withJsonBody(ipJson) .withLoggedIn(controller)(loggedInUser) } assertFutureResultStatus(result, 400) } "return \\"Could not unfreeze that computer\\" on unfreezing a computer" in { val result = controller.unfreeze.apply { FakeRequest() .withJsonBody(ipJson) .withLoggedIn(controller)(loggedInUser) } assertBodyJsonMessage(result, "Could not unfreeze that computer", emptyExtras = true) } "return Failed <400> status on sending a command to a computer" in { val sshOrderData = SSHOrderFormData(superUser = false, command) val sshOrderForm = SSHOrderForm.form.fill(sshOrderData) val result = controller.sendOrder.apply { FakeRequest() .withJsonBody(ipJson) .withLoggedIn(controller)(loggedInUser) .withFormUrlEncodedBody(sshOrderForm.data.toSeq: _*) } assertFutureResultStatus(result, 400) } "return \\"Could not send that command to that computer\\" on sending a command to a computer" in { val sshOrderData = SSHOrderFormData(superUser = false, command) val sshOrderForm = SSHOrderForm.form.fill(sshOrderData) val result = controller.sendOrder.apply { FakeRequest() .withJsonBody(Json.parse( s""" |{ | "ip": "${computer.ip}", | "ssh-order": { | "superUser": false, | "interrupt": false, | "command": ${Json.toJson(command)} | } |} """.stripMargin)) .withLoggedIn(controller)(loggedInUser) } assertBodyJsonMessage(result, "Could not send that command to that computer", emptyExtras = false) } } }
ProjectAton/AtonLab
test/controllers/admin/ComputerControllerFailedSpec.scala
Scala
gpl-3.0
6,470
/**
 * The MIT License (MIT)
 *
 * Copyright (c) 2018 Israel Freitas([email protected])
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
package ifreitas.scalaaiml.elements

case class Srai(expression: TemplateExpression*) extends TemplateExpression {
  def toXml = <srai>{ expression.toXml }</srai>
}
ifreitas/AimlToXml
src/main/scala/ifreitas/scalaaiml/elements/Srai.scala
Scala
mit
1,348
package org.jetbrains.plugins.scala.lang.resolve2

/**
 * Pavel.Fatin, 02.02.2010
 */

class PredefLiteralTest extends ResolveTestBase {
  override def folderPath: String = {
    super.folderPath + "predef/literal/"
  }

  //TODO answer?
//  def testBoolean = doTest
  //TODO answer?
//  def testByte = doTest
  //TODO answer?
//  def testChar = doTest
  //TODO answer?
//  def testDouble = doTest
  //TODO answer?
//  def testFloat = doTest
  //TODO answer?
//  def testInt = doTest
  //TODO answer?
//  def testLong = doTest
  //TODO answer?
  def testPrimitive = doTest
  //TODO answer?
//  def testTheNull = doTest
  //TODO answer?
//  def testShort = doTest
  //TODO answer?
//  def testString = doTest
  //TODO answer?
//  def testSymbol = doTest
}
triggerNZ/intellij-scala
test/org/jetbrains/plugins/scala/lang/resolve2/PredefLiteralTest.scala
Scala
apache-2.0
754
package regolic.asts.theories.int import regolic.asts.core.Trees._ import regolic.asts.fol.Trees.Constant import regolic.asts.fol.{Trees => FolT} object Trees { object IntSort { def apply() = Sort("Int", List()) def unapply(sort: Sort): Boolean = sort match { case Sort("Int", List()) => true case _ => false } } object EqualsSymbol { def apply(): PredicateSymbol = FolT.EqualsSymbol(IntSort()) def unapply(s: PredicateSymbol): Boolean = s match { case FolT.EqualsSymbol(IntSort()) => true case _ => false } } object Equals { def apply(t1: Term, t2: Term): PredicateApplication = PredicateApplication(EqualsSymbol(), List(t1, t2)) def unapply(pApply: PredicateApplication): Option[(Term, Term)] = pApply match { case PredicateApplication(EqualsSymbol(), List(t1, t2)) => Some((t1, t2)) case _ => None } } object LessThanSymbol { def apply() = PredicateSymbol("<", List(IntSort(), IntSort())) def unapply(symb: PredicateSymbol): Boolean = symb match { case PredicateSymbol("<", List(IntSort(), IntSort())) => true case _ => false } } object LessThan { def apply(t1: Term, t2: Term) = PredicateApplication(LessThanSymbol(), List(t1, t2)) def unapply(appli: PredicateApplication): Option[(Term, Term)] = appli match { case PredicateApplication(LessThanSymbol(), List(t1, t2)) => Some((t1, t2)) case _ => None } } object LessEqualSymbol { def apply() = PredicateSymbol("<=", List(IntSort(), IntSort())) def unapply(symb: PredicateSymbol): Boolean = symb match { case PredicateSymbol("<=", List(IntSort(), IntSort())) => true case _ => false } } object LessEqual { def apply(t1: Term, t2: Term) = PredicateApplication(LessEqualSymbol(), List(t1, t2)) def unapply(appli: PredicateApplication): Option[(Term, Term)] = appli match { case PredicateApplication(LessEqualSymbol(), List(t1, t2)) => Some((t1, t2)) case _ => None } } object GreaterThanSymbol { def apply() = PredicateSymbol(">", List(IntSort(), IntSort())) def unapply(symb: PredicateSymbol): Boolean = symb match { case PredicateSymbol(">", List(IntSort(), IntSort())) => true case _ => false } } object GreaterThan { def apply(t1: Term, t2: Term) = PredicateApplication(GreaterThanSymbol(), List(t1, t2)) def unapply(appli: PredicateApplication): Option[(Term, Term)] = appli match { case PredicateApplication(GreaterThanSymbol(), List(t1, t2)) => Some((t1, t2)) case _ => None } } object GreaterEqualSymbol { def apply() = PredicateSymbol(">=", List(IntSort(), IntSort())) def unapply(symb: PredicateSymbol): Boolean = symb match { case PredicateSymbol(">=", List(IntSort(), IntSort())) => true case _ => false } } object GreaterEqual { def apply(t1: Term, t2: Term) = PredicateApplication(GreaterEqualSymbol(), List(t1, t2)) def unapply(appli: PredicateApplication): Option[(Term, Term)] = appli match { case PredicateApplication(GreaterEqualSymbol(), List(t1, t2)) => Some((t1, t2)) case _ => None } } object Var { def apply(name: String): Variable = Variable(name, IntSort()) def unapply(v: Variable): Option[String] = v match { case Variable(name, IntSort()) => Some(name) case _ => None } } object Num { def apply(n: BigInt) = Constant(n.toString, IntSort()) def unapply(appli: FunctionApplication): Option[BigInt] = appli match { case Constant(n, IntSort()) => try { Some(BigInt(n)) } catch { case (_: Throwable) => None } case _ => None } } object AddSymbol { def apply(n: Int) = FunctionSymbol("+", (1 to n).map(_ => IntSort()).toList, IntSort()) def unapply(symb: FunctionSymbol): Option[Int] = symb match { case FunctionSymbol("+", argsSort, IntSort()) if argsSort.forall(s => s == 
IntSort()) => Some(argsSort.size) case _ => None } } object Add { def apply(ts: List[Term]) = FunctionApplication(AddSymbol(ts.size), ts) def unapply(appli: FunctionApplication): Option[List[Term]] = appli match { case FunctionApplication(AddSymbol(_), ts) => Some(ts) case _ => None } } object SubSymbol { def apply() = FunctionSymbol("-", List(IntSort(), IntSort()), IntSort()) def unapply(symb: FunctionSymbol): Boolean = symb match { case FunctionSymbol("-", List(IntSort(), IntSort()), IntSort()) => true case _ => false } } object Sub { def apply(t1: Term, t2: Term) = FunctionApplication(SubSymbol(), List(t1, t2)) def unapply(appli: FunctionApplication): Option[(Term, Term)] = appli match { case FunctionApplication(SubSymbol(), List(t1, t2)) => Some((t1, t2)) case _ => None } } object MulSymbol { def apply(n: Int) = FunctionSymbol("*", (1 to n).map(_ => IntSort()).toList, IntSort()) def unapply(symb: FunctionSymbol): Option[Int] = symb match { case FunctionSymbol("*", argsSort, IntSort()) if argsSort.forall(s => s == IntSort()) => Some(argsSort.size) case _ => None } } object Mul { def apply(ts: List[Term]) = FunctionApplication(MulSymbol(ts.size), ts) def unapply(appli: FunctionApplication): Option[List[Term]] = appli match { case FunctionApplication(MulSymbol(_), ts) => Some(ts) case _ => None } } object MulConstSymbol { def apply(n: BigInt) = FunctionSymbol("*" + n.toString, List(IntSort()), IntSort()) def unapply(symb: FunctionSymbol): Option[BigInt] = symb match { case FunctionSymbol(name, List(IntSort()), IntSort()) => { val str: Seq[Char] = name str match { case Seq('*', n @ _*) => Some(BigInt(n.toString)) case _ => None } } case _ => None } } object MulConst { def apply(n: BigInt, t: Term) = FunctionApplication(MulConstSymbol(n), List(t)) def unapply(appli: FunctionApplication): Option[(BigInt, Term)] = appli match { case FunctionApplication(MulConstSymbol(n), List(t)) => Some((n, t)) case _ => None } } object DivSymbol { def apply() = FunctionSymbol("/", List(IntSort(), IntSort()), IntSort()) def unapply(symb: FunctionSymbol): Boolean = symb match { case FunctionSymbol("/", List(IntSort(), IntSort()), IntSort()) => true case _ => false } } object Div { def apply(t1: Term, t2: Term) = FunctionApplication(DivSymbol(), List(t1, t2)) def unapply(appli: FunctionApplication): Option[(Term, Term)] = appli match { case FunctionApplication(DivSymbol(), List(t1, t2)) => Some((t1, t2)) case _ => None } } object ModSymbol { def apply() = FunctionSymbol("%", List(IntSort(), IntSort()), IntSort()) def unapply(symb: FunctionSymbol): Boolean = symb match { case FunctionSymbol("%", List(IntSort(), IntSort()), IntSort()) => true case _ => false } } object Mod { def apply(t1: Term, t2: Term) = FunctionApplication(ModSymbol(), List(t1, t2)) def unapply(appli: FunctionApplication): Option[(Term, Term)] = appli match { case FunctionApplication(ModSymbol(), List(t1, t2)) => Some((t1, t2)) case _ => None } } object NegSymbol { def apply() = FunctionSymbol("-", List(IntSort()), IntSort()) def unapply(symb: FunctionSymbol): Boolean = symb match { case FunctionSymbol("-", List(IntSort()), IntSort()) => true case _ => false } } object Neg { def apply(t: Term) = FunctionApplication(NegSymbol(), List(t)) def unapply(appli: FunctionApplication): Option[Term] = appli match { case FunctionApplication(NegSymbol(), List(t)) => Some(t) case _ => None } } object PowSymbol { def apply() = FunctionSymbol("^", List(IntSort(), IntSort()), IntSort()) def unapply(symb: FunctionSymbol): Boolean = symb match { case 
FunctionSymbol("^", List(IntSort(), IntSort()), IntSort()) => true case _ => false } } object Pow { def apply(t1: Term, t2: Term) = FunctionApplication(PowSymbol(), List(t1, t2)) def unapply(appli: FunctionApplication): Option[(Term, Term)] = appli match { case FunctionApplication(PowSymbol(), List(t1, t2)) => Some((t1, t2)) case _ => None } } object AbsSymbol { def apply() = FunctionSymbol("abs", List(IntSort()), IntSort()) def unapply(symb: FunctionSymbol): Boolean = symb match { case FunctionSymbol("abs", List(IntSort()), IntSort()) => true case _ => false } } object Abs { def apply(t: Term) = FunctionApplication(AbsSymbol(), List(t)) def unapply(appli: FunctionApplication): Option[Term] = appli match { case FunctionApplication(AbsSymbol(), List(t)) => Some(t) case _ => None } } object MaxSymbol { def apply(n: Int) = FunctionSymbol("max", (1 to n).map(_ => IntSort()).toList, IntSort()) def unapply(symb: FunctionSymbol): Option[Int] = symb match { case FunctionSymbol("max", argsSort, IntSort()) if argsSort.forall(s => s == IntSort()) => Some(argsSort.size) case _ => None } } object Max { def apply(ts: List[Term]) = FunctionApplication(MaxSymbol(ts.size), ts) def unapply(appli: FunctionApplication): Option[List[Term]] = appli match { case FunctionApplication(MaxSymbol(_), ts) => Some(ts) case _ => None } } object MinSymbol { def apply(n: Int) = FunctionSymbol("min", (1 to n).map(_ => IntSort()).toList, IntSort()) def unapply(symb: FunctionSymbol): Option[Int] = symb match { case FunctionSymbol("min", argsSort, IntSort()) if argsSort.forall(s => s == IntSort()) => Some(argsSort.size) case _ => None } } object Min { def apply(ts: List[Term]) = FunctionApplication(MinSymbol(ts.size), ts) def unapply(appli: FunctionApplication): Option[List[Term]] = appli match { case FunctionApplication(MinSymbol(_), ts) => Some(ts) case _ => None } } }
regb/scabolic
src/main/scala/regolic/asts/theories/int/Trees.scala
Scala
mit
10,049
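A small sketch of building a term with the smart constructors above and taking it apart again with the matching extractors; the variable name and constants are arbitrary.

import regolic.asts.core.Trees.{FunctionApplication, Variable}
import regolic.asts.theories.int.Trees._

object IntTreesSketch {
  val x: Variable = Var("x")

  // 2 * (x + 3)
  val term: FunctionApplication = MulConst(2, Add(List(x, Num(3))))

  val rendered: String = term match {
    case MulConst(k, Add(ts)) => s"$k * (sum of ${ts.size} operands)"
    case _                    => "unexpected shape"
  }
}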
package pieces

import chessagents.Translation

/**
 * Created by Aleksandra on 19.05.2016.
 */
class Pawn(val color : String) extends Piece{

  var possibleMovements = Array.ofDim[Translation](3,2)

  if (color.equals(WHITE)) {
    possibleMovements = Array(
      Array(
        new Translation(-1, 0, 2), //first move
        new Translation(-2, 0, 2)
      ),
      Array(
        new Translation(-1, 1, 1) //capture
        // new Translation(-1, 1, 1)
      ),
      Array(new Translation(-1, -1, 1) //capture
        //new Translation(-1, -1, 1)
      )
    )
  } else {
    possibleMovements = Array(
      Array(
        new Translation(1, 0, 2), //first move
        new Translation(2, 0, 2)
      ),
      Array(
        new Translation(1, -1, 1) //capture
        // new Translation(1, -1, 1)
      ),
      Array(new Translation(1, 1, 1) //capture
        //new Translation(1, 1, 1)
      )
    )
  }

  var gameState = Array.ofDim[Char](8,8)

  override def getPossibleMovements : Array[Array[Translation]] = {
    possibleMovements //return
  }

  override def toString():String={
    if(color.equals(WHITE)) {
      return "P";
    }else {
      return "p";
    }
  }

  override def getName():String={
    "Pawn"
  }

  override def copy(): Piece = {
    var p: Piece = new Pawn(color)
    p.positionX=this.positionX
    p.positionY=this.positionY
    return p;
  }
}
m-kostrzewa/ChessAgents
src/pieces/Pawn.scala
Scala
mit
1,435
/*
 * Copyright 2016 The BigDL Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.intel.analytics.bigdl.dllib.nn.ops

import com.intel.analytics.bigdl.dllib.nn.abstractnn.Activity
import com.intel.analytics.bigdl.dllib.tensor.{BooleanType, Tensor}
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.Table

import scala.reflect.ClassTag

class LogicalAnd[T: ClassTag]()
  (implicit ev: TensorNumeric[T]) extends Operation[Table, Tensor[Boolean], T] {

  output = Activity.allocate[Tensor[Boolean], Boolean]()

  override def updateOutput(input: Table): Tensor[Boolean] = {
    input[Tensor[_]](1).getType() match {
      case BooleanType =>
        output.resizeAs(input(1)).copy(input(1))
        output
          .toTensor[Boolean]
          .map(input(2).asInstanceOf[Tensor[Boolean]], (a, b) => a && b)
      case _ =>
        throw new RuntimeException("LogicalAnd only support boolean tensor")
    }

    output
  }
}

object LogicalAnd {
  def apply[T: ClassTag]()(implicit ev: TensorNumeric[T]): Operation[Activity, Activity, T] =
    ModuleToOperation[T](new LogicalAnd())
}
intel-analytics/BigDL
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/ops/LogicalAnd.scala
Scala
apache-2.0
1,677
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.plan.util import org.apache.flink.table.plan.`trait`.TraitUtil import org.apache.flink.table.plan.metadata.FlinkRelMetadataQuery import org.apache.calcite.rel.RelFieldCollation.Direction import org.apache.calcite.rel.metadata.RelMetadataQuery import org.apache.calcite.rel.{RelCollation, RelNode} import org.apache.calcite.sql.validate.SqlMonotonicity import org.apache.calcite.util.ImmutableBitSet import scala.collection.JavaConversions._ /** * Base class of Strategy to choose different rank process function. */ sealed trait RankProcessStrategy case object AppendFastStrategy extends RankProcessStrategy case object RetractStrategy extends RankProcessStrategy case class UpdateFastStrategy(primaryKeys: Array[Int]) extends RankProcessStrategy { override def toString: String = "UpdateFastStrategy" + primaryKeys.mkString("[", ",", "]") } object RankProcessStrategy { /** * Gets [[RankProcessStrategy]] based on input, partitionKey and orderKey. */ def analyzeRankProcessStrategy( input: RelNode, partitionKey: ImmutableBitSet, orderKey: RelCollation, mq: RelMetadataQuery): RankProcessStrategy = { val fieldCollations = orderKey.getFieldCollations val isUpdateStream = !UpdatingPlanChecker.isAppendOnly(input) if (isUpdateStream) { val inputIsAccRetract = TraitUtil.isAccRetract(input) val uniqueKeys = mq.getUniqueKeys(input) if (inputIsAccRetract || uniqueKeys == null || uniqueKeys.isEmpty // unique key should contains partition key || !uniqueKeys.exists(k => k.contains(partitionKey))) { // input is AccRetract or extract the unique keys failed, // and we fall back to using retract rank RetractStrategy } else { val fmq = FlinkRelMetadataQuery.reuseOrCreate(mq) val monotonicity = fmq.getRelModifiedMonotonicity(input) val isMonotonic = if (monotonicity == null) { false } else { if (fieldCollations.isEmpty) { false } else { fieldCollations.forall { collation => val fieldMonotonicity = monotonicity.fieldMonotonicities(collation.getFieldIndex) val direction = collation.direction if ((fieldMonotonicity == SqlMonotonicity.DECREASING || fieldMonotonicity == SqlMonotonicity.STRICTLY_DECREASING) && direction == Direction.ASCENDING) { // sort field is ascending and its monotonicity is decreasing true } else if ((fieldMonotonicity == SqlMonotonicity.INCREASING || fieldMonotonicity == SqlMonotonicity.STRICTLY_INCREASING) && direction == Direction.DESCENDING) { // sort field is descending and its monotonicity is increasing true } else if (fieldMonotonicity == SqlMonotonicity.CONSTANT) { // sort key is a grouping key of upstream agg, it is monotonic true } else { false } } } } if (isMonotonic) { //FIXME choose a set of primary key UpdateFastStrategy(uniqueKeys.iterator().next().toArray) } else { RetractStrategy } } } else 
{ AppendFastStrategy } } }
shaoxuan-wang/flink
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/util/RankProcessStrategy.scala
Scala
apache-2.0
4,200
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive import java.io.File import java.net.{URL, URLClassLoader} import java.nio.charset.StandardCharsets import java.sql.Timestamp import java.util.concurrent.TimeUnit import scala.collection.mutable.HashMap import scala.language.implicitConversions import org.apache.hadoop.conf.Configuration import org.apache.hadoop.hive.common.`type`.HiveDecimal import org.apache.hadoop.hive.conf.HiveConf import org.apache.hadoop.hive.conf.HiveConf.ConfVars import org.apache.hadoop.hive.serde2.io.{DateWritable, TimestampWritable} import org.apache.hadoop.util.VersionInfo import org.apache.spark.{SparkConf, SparkContext} import org.apache.spark.internal.Logging import org.apache.spark.sql._ import org.apache.spark.sql.hive.client._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.internal.SQLConf._ import org.apache.spark.sql.internal.StaticSQLConf.{CATALOG_IMPLEMENTATION, WAREHOUSE_PATH} import org.apache.spark.sql.types._ import org.apache.spark.util.Utils private[spark] object HiveUtils extends Logging { def withHiveExternalCatalog(sc: SparkContext): SparkContext = { sc.conf.set(CATALOG_IMPLEMENTATION.key, "hive") sc } /** The version of hive used internally by Spark SQL. */ val hiveExecutionVersion: String = "1.2.1" val HIVE_METASTORE_VERSION = SQLConfigBuilder("spark.sql.hive.metastore.version") .doc("Version of the Hive metastore. Available options are " + s"<code>0.12.0</code> through <code>$hiveExecutionVersion</code>.") .stringConf .createWithDefault(hiveExecutionVersion) val HIVE_EXECUTION_VERSION = SQLConfigBuilder("spark.sql.hive.version") .doc("Version of Hive used internally by Spark SQL.") .stringConf .createWithDefault(hiveExecutionVersion) val HIVE_METASTORE_JARS = SQLConfigBuilder("spark.sql.hive.metastore.jars") .doc(s""" | Location of the jars that should be used to instantiate the HiveMetastoreClient. | This property can be one of three options: " | 1. "builtin" | Use Hive ${hiveExecutionVersion}, which is bundled with the Spark assembly when | <code>-Phive</code> is enabled. When this option is chosen, | <code>spark.sql.hive.metastore.version</code> must be either | <code>${hiveExecutionVersion}</code> or not defined. | 2. "maven" | Use Hive jars of specified version downloaded from Maven repositories. | 3. A classpath in the standard format for both Hive and Hadoop. 
""".stripMargin) .stringConf .createWithDefault("builtin") val CONVERT_METASTORE_PARQUET = SQLConfigBuilder("spark.sql.hive.convertMetastoreParquet") .doc("When set to false, Spark SQL will use the Hive SerDe for parquet tables instead of " + "the built in support.") .booleanConf .createWithDefault(true) val CONVERT_METASTORE_PARQUET_WITH_SCHEMA_MERGING = SQLConfigBuilder("spark.sql.hive.convertMetastoreParquet.mergeSchema") .doc("When true, also tries to merge possibly different but compatible Parquet schemas in " + "different Parquet data files. This configuration is only effective " + "when \\"spark.sql.hive.convertMetastoreParquet\\" is true.") .booleanConf .createWithDefault(false) val CONVERT_METASTORE_ORC = SQLConfigBuilder("spark.sql.hive.convertMetastoreOrc") .internal() .doc("When set to false, Spark SQL will use the Hive SerDe for ORC tables instead of " + "the built in support.") .booleanConf .createWithDefault(false) val HIVE_METASTORE_SHARED_PREFIXES = SQLConfigBuilder("spark.sql.hive.metastore.sharedPrefixes") .doc("A comma separated list of class prefixes that should be loaded using the classloader " + "that is shared between Spark SQL and a specific version of Hive. An example of classes " + "that should be shared is JDBC drivers that are needed to talk to the metastore. Other " + "classes that need to be shared are those that interact with classes that are already " + "shared. For example, custom appenders that are used by log4j.") .stringConf .toSequence .createWithDefault(jdbcPrefixes) private def jdbcPrefixes = Seq( "com.mysql.jdbc", "org.postgresql", "com.microsoft.sqlserver", "oracle.jdbc") val HIVE_METASTORE_BARRIER_PREFIXES = SQLConfigBuilder("spark.sql.hive.metastore.barrierPrefixes") .doc("A comma separated list of class prefixes that should explicitly be reloaded for each " + "version of Hive that Spark SQL is communicating with. For example, Hive UDFs that are " + "declared in a prefix that typically would be shared (i.e. <code>org.apache.spark.*</code>).") .stringConf .toSequence .createWithDefault(Nil) val HIVE_THRIFT_SERVER_ASYNC = SQLConfigBuilder("spark.sql.hive.thriftServer.async") .doc("When set to true, Hive Thrift server executes SQL queries in an asynchronous way.") .booleanConf .createWithDefault(true) /** * The version of the hive client that will be used to communicate with the metastore. Note that * this does not necessarily need to be the same version of Hive that is used internally by * Spark SQL for execution. */ private def hiveMetastoreVersion(conf: SQLConf): String = { conf.getConf(HIVE_METASTORE_VERSION) } /** * The location of the jars that should be used to instantiate the HiveMetastoreClient. This * property can be one of three options: * - a classpath in the standard format for both hive and hadoop. * - builtin - attempt to discover the jars that were used to load Spark SQL and use those. This * option is only valid when using the execution version of Hive. * - maven - download the correct version of hive on demand from maven. */ private def hiveMetastoreJars(conf: SQLConf): String = { conf.getConf(HIVE_METASTORE_JARS) } /** * A comma separated list of class prefixes that should be loaded using the classloader that * is shared between Spark SQL and a specific version of Hive. An example of classes that should * be shared is JDBC drivers that are needed to talk to the metastore. Other classes that need * to be shared are those that interact with classes that are already shared. For example, * custom appenders that are used by log4j. 
*/ private def hiveMetastoreSharedPrefixes(conf: SQLConf): Seq[String] = { conf.getConf(HIVE_METASTORE_SHARED_PREFIXES).filterNot(_ == "") } /** * A comma separated list of class prefixes that should explicitly be reloaded for each version * of Hive that Spark SQL is communicating with. For example, Hive UDFs that are declared in a * prefix that typically would be shared (i.e. org.apache.spark.*) */ private def hiveMetastoreBarrierPrefixes(conf: SQLConf): Seq[String] = { conf.getConf(HIVE_METASTORE_BARRIER_PREFIXES).filterNot(_ == "") } /** * Configurations needed to create a [[HiveClient]]. */ private[hive] def hiveClientConfigurations(hadoopConf: Configuration): Map[String, String] = { // Hive 0.14.0 introduces timeout operations in HiveConf, and changes default values of a bunch // of time `ConfVar`s by adding time suffixes (`s`, `ms`, and `d` etc.). This breaks backwards- // compatibility when users are trying to connecting to a Hive metastore of lower version, // because these options are expected to be integral values in lower versions of Hive. // // Here we enumerate all time `ConfVar`s and convert their values to numeric strings according // to their output time units. Seq( ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY -> TimeUnit.SECONDS, ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT -> TimeUnit.SECONDS, ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME -> TimeUnit.SECONDS, ConfVars.HMSHANDLERINTERVAL -> TimeUnit.MILLISECONDS, ConfVars.METASTORE_EVENT_DB_LISTENER_TTL -> TimeUnit.SECONDS, ConfVars.METASTORE_EVENT_CLEAN_FREQ -> TimeUnit.SECONDS, ConfVars.METASTORE_EVENT_EXPIRY_DURATION -> TimeUnit.SECONDS, ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL -> TimeUnit.SECONDS, ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT -> TimeUnit.MILLISECONDS, ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT -> TimeUnit.MILLISECONDS, ConfVars.HIVES_AUTO_PROGRESS_TIMEOUT -> TimeUnit.SECONDS, ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL -> TimeUnit.MILLISECONDS, ConfVars.HIVE_STATS_JDBC_TIMEOUT -> TimeUnit.SECONDS, ConfVars.HIVE_STATS_RETRIES_WAIT -> TimeUnit.MILLISECONDS, ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES -> TimeUnit.SECONDS, ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT -> TimeUnit.MILLISECONDS, ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME -> TimeUnit.MILLISECONDS, ConfVars.HIVE_TXN_TIMEOUT -> TimeUnit.SECONDS, ConfVars.HIVE_COMPACTOR_WORKER_TIMEOUT -> TimeUnit.SECONDS, ConfVars.HIVE_COMPACTOR_CHECK_INTERVAL -> TimeUnit.SECONDS, ConfVars.HIVE_COMPACTOR_CLEANER_RUN_INTERVAL -> TimeUnit.MILLISECONDS, ConfVars.HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME -> TimeUnit.MILLISECONDS, ConfVars.HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME -> TimeUnit.SECONDS, ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE -> TimeUnit.SECONDS, ConfVars.HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH -> TimeUnit.MILLISECONDS, ConfVars.HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT -> TimeUnit.SECONDS, ConfVars.HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME -> TimeUnit.SECONDS, ConfVars.HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT -> TimeUnit.SECONDS, ConfVars.HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME -> TimeUnit.SECONDS, ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT -> TimeUnit.MILLISECONDS, ConfVars.HIVE_SERVER2_SESSION_CHECK_INTERVAL -> TimeUnit.MILLISECONDS, ConfVars.HIVE_SERVER2_IDLE_SESSION_TIMEOUT -> TimeUnit.MILLISECONDS, ConfVars.HIVE_SERVER2_IDLE_OPERATION_TIMEOUT -> TimeUnit.MILLISECONDS, ConfVars.SERVER_READ_SOCKET_TIMEOUT -> TimeUnit.SECONDS, ConfVars.HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL -> TimeUnit.MILLISECONDS, 
ConfVars.SPARK_CLIENT_FUTURE_TIMEOUT -> TimeUnit.SECONDS, ConfVars.SPARK_JOB_MONITOR_TIMEOUT -> TimeUnit.SECONDS, ConfVars.SPARK_RPC_CLIENT_CONNECT_TIMEOUT -> TimeUnit.MILLISECONDS, ConfVars.SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT -> TimeUnit.MILLISECONDS ).map { case (confVar, unit) => confVar.varname -> HiveConf.getTimeVar(hadoopConf, confVar, unit).toString }.toMap } /** * Create a [[HiveClient]] used for execution. * * Currently this must always be Hive 13 as this is the version of Hive that is packaged * with Spark SQL. This copy of the client is used for execution related tasks like * registering temporary functions or ensuring that the ThreadLocal SessionState is * correctly populated. This copy of Hive is *not* used for storing persistent metadata, * and only point to a dummy metastore in a temporary directory. */ protected[hive] def newClientForExecution( conf: SparkConf, hadoopConf: Configuration): HiveClientImpl = { logInfo(s"Initializing execution hive, version $hiveExecutionVersion") val loader = new IsolatedClientLoader( version = IsolatedClientLoader.hiveVersion(hiveExecutionVersion), sparkConf = conf, execJars = Seq(), hadoopConf = hadoopConf, config = newTemporaryConfiguration(useInMemoryDerby = true), isolationOn = false, baseClassLoader = Utils.getContextOrSparkClassLoader) loader.createClient().asInstanceOf[HiveClientImpl] } /** * Create a [[HiveClient]] used to retrieve metadata from the Hive MetaStore. * * The version of the Hive client that is used here must match the metastore that is configured * in the hive-site.xml file. */ protected[hive] def newClientForMetadata( conf: SparkConf, hadoopConf: Configuration): HiveClient = { val configurations = hiveClientConfigurations(hadoopConf) newClientForMetadata(conf, hadoopConf, configurations) } protected[hive] def newClientForMetadata( conf: SparkConf, hadoopConf: Configuration, configurations: Map[String, String]): HiveClient = { val sqlConf = new SQLConf sqlConf.setConf(SQLContext.getSQLProperties(conf)) val hiveMetastoreVersion = HiveUtils.hiveMetastoreVersion(sqlConf) val hiveMetastoreJars = HiveUtils.hiveMetastoreJars(sqlConf) val hiveMetastoreSharedPrefixes = HiveUtils.hiveMetastoreSharedPrefixes(sqlConf) val hiveMetastoreBarrierPrefixes = HiveUtils.hiveMetastoreBarrierPrefixes(sqlConf) val metaVersion = IsolatedClientLoader.hiveVersion(hiveMetastoreVersion) val isolatedLoader = if (hiveMetastoreJars == "builtin") { if (hiveExecutionVersion != hiveMetastoreVersion) { throw new IllegalArgumentException( "Builtin jars can only be used when hive execution version == hive metastore version. " + s"Execution: $hiveExecutionVersion != Metastore: $hiveMetastoreVersion. " + s"Specify a vaild path to the correct hive jars using $HIVE_METASTORE_JARS " + s"or change ${HIVE_METASTORE_VERSION.key} to $hiveExecutionVersion.") } // We recursively find all jars in the class loader chain, // starting from the given classLoader. def allJars(classLoader: ClassLoader): Array[URL] = classLoader match { case null => Array.empty[URL] case urlClassLoader: URLClassLoader => urlClassLoader.getURLs ++ allJars(urlClassLoader.getParent) case other => allJars(other.getParent) } val classLoader = Utils.getContextOrSparkClassLoader val jars = allJars(classLoader) if (jars.length == 0) { throw new IllegalArgumentException( "Unable to locate hive jars to connect to metastore. 
" + "Please set spark.sql.hive.metastore.jars.") } logInfo( s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion using Spark classes.") new IsolatedClientLoader( version = metaVersion, sparkConf = conf, hadoopConf = hadoopConf, execJars = jars.toSeq, config = configurations, isolationOn = true, barrierPrefixes = hiveMetastoreBarrierPrefixes, sharedPrefixes = hiveMetastoreSharedPrefixes) } else if (hiveMetastoreJars == "maven") { // TODO: Support for loading the jars from an already downloaded location. logInfo( s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion using maven.") IsolatedClientLoader.forVersion( hiveMetastoreVersion = hiveMetastoreVersion, hadoopVersion = VersionInfo.getVersion, sparkConf = conf, hadoopConf = hadoopConf, config = configurations, barrierPrefixes = hiveMetastoreBarrierPrefixes, sharedPrefixes = hiveMetastoreSharedPrefixes) } else { // Convert to files and expand any directories. val jars = hiveMetastoreJars .split(File.pathSeparator) .flatMap { case path if new File(path).getName == "*" => val files = new File(path).getParentFile.listFiles() if (files == null) { logWarning(s"Hive jar path '$path' does not exist.") Nil } else { files.filter(_.getName.toLowerCase.endsWith(".jar")) } case path => new File(path) :: Nil } .map(_.toURI.toURL) logInfo( s"Initializing HiveMetastoreConnection version $hiveMetastoreVersion " + s"using ${jars.mkString(":")}") new IsolatedClientLoader( version = metaVersion, sparkConf = conf, hadoopConf = hadoopConf, execJars = jars.toSeq, config = configurations, isolationOn = true, barrierPrefixes = hiveMetastoreBarrierPrefixes, sharedPrefixes = hiveMetastoreSharedPrefixes) } isolatedLoader.createClient() } /** Constructs a configuration for hive, where the metastore is located in a temp directory. */ def newTemporaryConfiguration(useInMemoryDerby: Boolean): Map[String, String] = { val withInMemoryMode = if (useInMemoryDerby) "memory:" else "" val tempDir = Utils.createTempDir() val localMetastore = new File(tempDir, "metastore") val propMap: HashMap[String, String] = HashMap() // We have to mask all properties in hive-site.xml that relates to metastore data source // as we used a local metastore here. HiveConf.ConfVars.values().foreach { confvar => if (confvar.varname.contains("datanucleus") || confvar.varname.contains("jdo") || confvar.varname.contains("hive.metastore.rawstore.impl")) { propMap.put(confvar.varname, confvar.getDefaultExpr()) } } propMap.put(WAREHOUSE_PATH.key, localMetastore.toURI.toString) propMap.put(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, s"jdbc:derby:${withInMemoryMode};databaseName=${localMetastore.getAbsolutePath};create=true") propMap.put("datanucleus.rdbms.datastoreAdapterClassName", "org.datanucleus.store.rdbms.adapter.DerbyAdapter") // SPARK-11783: When "hive.metastore.uris" is set, the metastore connection mode will be // remote (https://cwiki.apache.org/confluence/display/Hive/AdminManual+MetastoreAdmin // mentions that "If hive.metastore.uris is empty local mode is assumed, remote otherwise"). // Remote means that the metastore server is running in its own process. // When the mode is remote, configurations like "javax.jdo.option.ConnectionURL" will not be // used (because they are used by remote metastore server that talks to the database). // Because execution Hive should always connects to an embedded derby metastore. // We have to remove the value of hive.metastore.uris. 
So, the execution Hive client connects // to the actual embedded derby metastore instead of the remote metastore. // You can search HiveConf.ConfVars.METASTOREURIS in the code of HiveConf (in Hive's repo). // Then, you will find that the local metastore mode is only set to true when // hive.metastore.uris is not set. propMap.put(ConfVars.METASTOREURIS.varname, "") // The execution client will generate garbage events, therefore the listeners that are generated // for the execution clients are useless. In order to not output garbage, we don't generate // these listeners. propMap.put(ConfVars.METASTORE_PRE_EVENT_LISTENERS.varname, "") propMap.put(ConfVars.METASTORE_EVENT_LISTENERS.varname, "") propMap.put(ConfVars.METASTORE_END_FUNCTION_LISTENERS.varname, "") propMap.toMap } protected val primitiveTypes = Seq(StringType, IntegerType, LongType, DoubleType, FloatType, BooleanType, ByteType, ShortType, DateType, TimestampType, BinaryType) protected[sql] def toHiveString(a: (Any, DataType)): String = a match { case (struct: Row, StructType(fields)) => struct.toSeq.zip(fields).map { case (v, t) => s""""${t.name}":${toHiveStructString(v, t.dataType)}""" }.mkString("{", ",", "}") case (seq: Seq[_], ArrayType(typ, _)) => seq.map(v => (v, typ)).map(toHiveStructString).mkString("[", ",", "]") case (map: Map[_, _], MapType(kType, vType, _)) => map.map { case (key, value) => toHiveStructString((key, kType)) + ":" + toHiveStructString((value, vType)) }.toSeq.sorted.mkString("{", ",", "}") case (null, _) => "NULL" case (d: Int, DateType) => new DateWritable(d).toString case (t: Timestamp, TimestampType) => new TimestampWritable(t).toString case (bin: Array[Byte], BinaryType) => new String(bin, StandardCharsets.UTF_8) case (decimal: java.math.BigDecimal, DecimalType()) => // Hive strips trailing zeros so use its toString HiveDecimal.create(decimal).toString case (other, tpe) if primitiveTypes contains tpe => other.toString } /** Hive outputs fields of structs slightly differently than top level attributes. */ protected def toHiveStructString(a: (Any, DataType)): String = a match { case (struct: Row, StructType(fields)) => struct.toSeq.zip(fields).map { case (v, t) => s""""${t.name}":${toHiveStructString(v, t.dataType)}""" }.mkString("{", ",", "}") case (seq: Seq[_], ArrayType(typ, _)) => seq.map(v => (v, typ)).map(toHiveStructString).mkString("[", ",", "]") case (map: Map[_, _], MapType(kType, vType, _)) => map.map { case (key, value) => toHiveStructString((key, kType)) + ":" + toHiveStructString((value, vType)) }.toSeq.sorted.mkString("{", ",", "}") case (null, _) => "null" case (s: String, StringType) => "\\"" + s + "\\"" case (decimal, DecimalType()) => decimal.toString case (other, tpe) if primitiveTypes contains tpe => other.toString } }
big-pegasus/spark
sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveUtils.scala
Scala
apache-2.0
21,844
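Because the metastore options defined in HiveUtils above are ordinary configuration keys, a client application can pin the metastore version before a session is built. A hedged sketch (the values shown are illustrative, not taken from the file):

import org.apache.spark.SparkConf

// Point Spark SQL at a Hive 1.2.1 metastore with Maven-resolved client jars.
// The keys are the ones defined in HiveUtils; the values are only an example.
val conf = new SparkConf()
  .set("spark.sql.hive.metastore.version", "1.2.1")
  .set("spark.sql.hive.metastore.jars", "maven")
  .set("spark.sql.hive.metastore.sharedPrefixes", "com.mysql.jdbc,org.postgresql")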
package gettingstarted import fpinscala.errorhandling.{Either, Right, Left} import org.specs2.mutable.Specification class EitherTest extends Specification{ "orElse" should { "return the first Either if its defined otherwise it returns the second Either" in { val x: Int = 5 val success: Either[String, Int] = Right(x) val error: Either[String, Int] = Left("error occurred") val msg = "string value is: " val f: Either[String, String] = Right(msg + x) success.orElse(f) must beEqualTo(success) error.orElse(f) must beEqualTo(f) } } "map" should { "return the Either with f applied to the Right if there is one else Left " in { val x: Int = 5 val success: Either[String, Int] = Right(x) val error: Either[String, Int] = Left("error occurred") val msg = "string value is: " val f: (Int) => String = x => msg + x success.map(f) must beEqualTo(Right(msg + x)) error.map(f) must beEqualTo(error) } } "flatMap" should { "return the Option with f applied to Some if there is one else None " in { val x: Int = 5 val success: Either[String, Int] = Right(x) val error: Either[String, Int] = Left("error occurred") val msg = "string value is: " val f: (Int) => Either[String, String] = x => Right(msg + x) success.flatMap(f) must beEqualTo(Right(msg + x)) error.flatMap(f) must beEqualTo(error) } } "map2" should { "return a new Either[C] of the function f applied to right of Either[A] and right of Either[B]." in { val eitherA: Either[String, Double] = Right(2.0) val eitherB: Either[String, Char] = Right('A') val f: (Double, Char) => String = (a: Double, b: Char) => s"A: $a and B: $b" val errorEither = Left("error occurred.") eitherA.map2(errorEither)(f) must beEqualTo(errorEither) errorEither.map2(eitherB)(f) must beEqualTo(errorEither) eitherA.map2(eitherB)(f) must beEqualTo(Right("A: 2.0 and B: A")) } } "sequence" should { "return a new Either[List[A]] from a List[Either[A]] which is None if any of the list of Eithers is a Left." in { val eitherA: List[Either[String, Int]] = List(Right(1), Right(2)) val error: Left[String] = Left("Error occurred.") val eitherB: List[Either[String, Int]] = List(Right(1), error, Right(2)) Either.sequence(eitherA) must beEqualTo(Right(List(1,2))) Either.sequence(eitherB) must beEqualTo(error) } } "traverse" should { "return a new Either[E, List[B]] from a List[A] which is E if any of the list elements is E after applying f." in { val list: List[String] = List("1", "2") val badList: List[String] = List("z", "2") val emptyList: List[String] = List() Either.traverse[Exception, String, Int](list)(x => Either.Try(() => x.toInt)) must beEqualTo(Right(List(1, 2))) Either.traverse[Exception, String, Int](badList)(x => Either.Try(() => x.toInt)) match { case (Left(e)) => { println("MESSAGE"+e.getMessage) e.getMessage() mustEqual ("""For input string: "z"""") } case _ => failure("Should have thrown an exception.") } Either.traverse[Exception, String, Int](emptyList)(x => Either.Try(() => x.toInt)) must beEqualTo(Right(emptyList)) } } }
coughlac/fpinscala
exercises/src/test/scala/gettingstarted/EitherTest.scala
Scala
mit
3,339
package controllers

import scala.concurrent.Future

import play.api.mvc._
import play.api.data._
import play.api.data.Forms._
import play.api.Play.current
import play.api.i18n.Messages.Implicits._
import play.api.libs.concurrent.Execution.Implicits.defaultContext

import views._
import model.PIMAidDBContext._
import model.PIMAidDBContext.driver.api._

object DrugGroupsController extends Controller {
  val drugGroupForm = Form(
    mapping(
      "id" -> optional(longNumber.transform(
        (id: Long) => DrugGroupID(id),
        (drugGroupId: DrugGroupID) => drugGroupId.value
      )),
      "name" -> nonEmptyText
    )(DrugGroup.apply)(DrugGroup.unapply)
  )

  def list = Action.async { implicit rs =>
    db.run(DrugGroup.all.result).map { drugGroups =>
      Ok(html.drugGroups.list(drugGroups))
    }
  }

  def create = Action {
    Ok(html.drugGroups.create(drugGroupForm))
  }

  def save = Action.async { implicit rs =>
    drugGroupForm.bindFromRequest.fold(
      formWithErrors => Future.successful(BadRequest(html.drugGroups.create(formWithErrors))),
      drugGroup =>
        db.run(DrugGroup.insert(drugGroup)).map { id =>
          Redirect(routes.DrugGroupGenericTypesController.list(id))
            .flashing("success" -> "The drug group was created successfully.")
        }
    )
  }

  def edit(id: DrugGroupID) = Action.async { implicit rs =>
    db.run(DrugGroup.one(id).result).map {
      case Some(drugGroup) => Ok(html.drugGroups.edit(id, drugGroupForm.fill(drugGroup)))
      case _ => NotFound
    }
  }

  def update(id: DrugGroupID) = Action.async { implicit rs =>
    drugGroupForm.bindFromRequest.fold(
      formWithErrors => Future.successful(BadRequest(html.drugGroups.edit(id, formWithErrors))),
      drugGroup =>
        db.run(DrugGroup.update(drugGroup)).map { _ =>
          Redirect(routes.DrugGroupsController.list())
            .flashing("success" -> "The drug group was updated successfully.")
        }
    )
  }

  def remove(id: DrugGroupID) = Action.async { implicit rs =>
    db.run(DrugGroup.one(id).result).map {
      case Some(drugGroup) => Ok(html.drugGroups.remove(drugGroup))
      case _ => NotFound
    }
  }

  def delete(id: DrugGroupID) = Action.async { implicit rs =>
    db.run(DrugGroup.delete(id)).map { _ =>
      Redirect(routes.DrugGroupsController.list())
        .flashing("success" -> "The drug group was deleted successfully.")
    }
  }
}
RSSchermer/pim-aid
app/controllers/DrugGroupsController.scala
Scala
mit
2,475
package nl.dekkr.feedr.route

import akka.actor._
import nl.dekkr.feedr.domain.UserAggregate
import nl.dekkr.feedr.service._
import spray.httpx.Json4sSupport
import spray.routing._
import spray.routing.authentication.BasicAuth

object UserRoute {
  case class ChangePasswordRequest(pass: String)
}

trait UserRoute extends HttpService with Json4sSupport with RequestHandlerCreator with UserAuthenticator {
  import UserAggregateManager._

  val userAggregateManager: ActorRef

  val userRoute =
    pathPrefix("user") {
      pathEndOrSingleSlash {
        post {
          entity(as[RegisterUser]) { cmd =>
            serveRegister(cmd)
          }
        }
      } ~
      path("password") {
        post {
          authenticate(BasicAuth(userAuthenticator _, realm = "secure site")) { user =>
            entity(as[UserRoute.ChangePasswordRequest]) { cmd =>
              serveUpdate(ChangeUserPassword(user.id, cmd.pass))
            }
          }
        }
      }
    }

  private def serveRegister(message : AggregateManager.Command): Route =
    ctx => handleRegister[UserAggregate.User](ctx, userAggregateManager, message)

  private def serveUpdate(message : AggregateManager.Command): Route =
    ctx => handleUpdate[UserAggregate.User](ctx, userAggregateManager, message)
}
plamola/feedR
src/main/scala/nl/dekkr/feedr/route/UserRoute.scala
Scala
mit
1,292
package eu.devtty.peerinfo

import eu.devtty.multiaddr.Multiaddr
import eu.devtty.peerid.PeerId

import scala.scalajs.js
import scala.scalajs.js.annotation.JSImport

@js.native
@JSImport("peer-info", JSImport.Namespace)
object PeerInfo extends js.Object {
  def create(id: PeerId, callBack: js.Function1[PeerInfo, _]): Unit = js.native
  def create(callBack: js.Function1[PeerInfo, _]): Unit = js.native
}

@js.native
@JSImport("peer-info", JSImport.Namespace)
class PeerInfo(id: PeerId) extends js.Object {
  val multiaddrs: MultiaddrSet = js.native

  def connect(ma: Multiaddr): Unit = js.native
  def disconnect(): Unit = js.native

  def isConnected: Boolean = js.native
}

@js.native
trait MultiaddrSet extends js.Object {
  /**
    * Adds a new multiaddress that peer can be reached at.
    * @param addr
    */
  def add(addr: Multiaddr): Unit = js.native

  /**
    * The addSafe call, in comparison to add, will only add the multiaddr to
    * multiaddrs if the same multiaddr tries to be added twice.
    *
    * This is a simple mechanism to prevent multiaddrs from becoming bloated with
    * unusable addresses, which happens when we exchange observed multiaddrs with
    * peers which will not provide a useful multiaddr to be shared to the rest
    * of the network (e.g. a multiaddr referring to a peer inside a LAN being shared to the outside world).
    * @param addr
    */
  def addSafe(addr: Multiaddr): Unit = js.native

  /**
    * Removes a multiaddress instance addr from peer
    * @param addr
    */
  def delete(addr: Multiaddr): Unit = js.native

  /**
    * Removes the array of multiaddresses existing from peer, and adds the array of multiaddresses fresh.
    * @param existing
    * @param fresh
    */
  def replace(existing: Multiaddr, fresh: Multiaddr): Unit = js.native
}
magik6k/scalajs-ipfs-api
peerInfo/src/main/scala/eu/devtty/peerinfo/PeerInfo.scala
Scala
mit
1,808
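A small usage sketch for the facade above; the Multiaddr value is assumed to be obtained elsewhere, since its construction is not part of this file:

import eu.devtty.multiaddr.Multiaddr
import eu.devtty.peerinfo.PeerInfo

// Hypothetical helper: record an observed address defensively, then dial it.
def announceAndConnect(info: PeerInfo, addr: Multiaddr): Unit = {
  info.multiaddrs.addSafe(addr) // addSafe avoids bloating the set with unusable observed addresses
  info.connect(addr)
  println(s"connected? ${info.isConnected}")
}

// Creation is callback-based, mirroring the underlying JS API:
PeerInfo.create { info: PeerInfo => println("peer info ready") }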
package stormlantern.consul.client.dao

trait Indexed[T] {
  def index: Long
  def resource: T
}
derjust/reactive-consul
client/src/main/scala/stormlantern/consul/client/dao/Indexed.scala
Scala
mit
97
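Indexed simply pairs an index (presumably the Consul blocking-query index) with a payload; a concrete use might look like this sketch, where ServiceInstance is an invented payload type, not part of the library:

// Hypothetical payload type for illustration only.
case class ServiceInstance(name: String, address: String)

case class IndexedServices(index: Long, resource: Seq[ServiceInstance])
  extends Indexed[Seq[ServiceInstance]]

// The index tells the next blocking query where to resume from.
val snapshot = IndexedServices(42L, Seq(ServiceInstance("web", "10.0.0.5")))
println(s"poll again with index ${snapshot.index}")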
package com.devlaam.simulate.server /* Simulator Server to connect to multiple Lora Gateways Copyright (C) 2015 Ruud Vlaming This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.import scala.util.Random */ //http://doc.akka.io/docs/akka/2.3.12/scala/io.html //http://doc.akka.io/docs/akka/2.3.12/scala/io-udp.html import java.io._ import java.net.InetSocketAddress import java.util.concurrent.TimeUnit import scala.language.postfixOps import scala.util.{ Random, Try} import scala.collection.immutable._ import scala.concurrent.duration._ import scala.concurrent.ExecutionContext.Implicits.global import akka.io.{ IO, Udp , UdpConnected} import akka.actor.{ Actor, ActorRef, Props, ActorSystem, ActorLogging, Cancellable } import akka.event.Logging import akka.pattern.{ ask, pipe } import akka.util.{ Timeout, ByteString, ByteStringBuilder} import play.api.libs.json._ import com.devlaam.coco.JsonLib._ import com.devlaam.coco.JsStack import com.devlaam.simulate.helpers._ object MessageBird { import scalaj.http.Http val http = Http("https://rest.messagebird.com/messages") def send(phone: String) = { val message = s""" { "recipients":[$phone], "originator":"TTN", "body":"Water in de boot!" } """ val response = http.postData(message) .header("Authorization","AccessKey live_aOxPo5XfOXQhpslmqrfNtwbt0") .header("content-type","application/json") .asString if (response.code >= 300) ("SMS sent, response not 20x, but: "+response) else "SMS sent." 
} } object Relay { import scalaj.http.Http val http = Http("http://ruud.things.nonred.nl:3000") def send(json: String) = { val response = http.postData(json).header("content-type","application/json").asString if (response.code >= 300) ("Post done, response not 20x, but: "+response) else "Message relayed."} } case class ReadPHYPayload(data64: String, nwkSKey: Array[Byte], appSKey: Array[Byte], topFCnt: Int = 0) extends PHYPayload { import base64._ import pimps.BytesOps private def printError = { println("BASE ERROR") println("data64 = "+data64) val data64Ints = data64.toArray.map(_.toInt).toList println("data64 = "+data64Ints) } val dataBare = decode64(data64).getOrElse(Array[Byte]()) val dataMHDR = dataBare.take(1) val dataDevAddr = dataBare.drop(1).take(4) val dataFCtrl = dataBare.drop(5).take(1) val dataFCnt = dataBare.drop(6).take(2) ++ arr8(topFCnt & 0xFF) ++ arr8(topFCnt>>>8 & 0xFF ) val dataFOpts = dataBare.drop(8).take(dataFoptsLen) val dataFPort = dataBare.drop(8+dataFoptsLen).take(1) val dataFRMPayload = dataBare.drop(9+dataFoptsLen).dropRight(4) val dataMIC = dataBare.takeRight(4) val payloadLen = dataFRMPayload.size val blockedPayload = dataFRMPayload.grouped(16).toArray.map(_.padTo(16,0x00.toByte)).zipWithIndex lazy val plainPayload = aesCryption lazy val validMsg = dataMIC sameElements calcMic.take(4) } /* Handle the information from one specific message from the Gateway*/ case class Upstream(socket: ActorRef, remote: InetSocketAddress) { import pimps._ val defaultKey = Array(0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C).map(_.toByte) def make = new Action def decrypt(data: String) = ReadPHYPayload(data,defaultKey,defaultKey).jsonFields class Action extends Actor with ActorLogging { def receive = { case msg: ByteString => val printer = context.actorSelection("/user/Printer") val poster = context.actorSelection("/user/Poster") val header = msg.take(4) val gwID = msg.drop(4).take(8).toArray.toHex val crypted = JsStack.parse(msg.drop(12).decodeString("utf8")) val result = ( `!{}` |+ "Gateway"-> J(gwID) |+ "stat" -> (crypted | "stat") |+ "rxpk" -> (crypted | "rxpk" |* { j => j |+ "PHYPayload"->decrypt(j|"data"|>"") } ) |> ) val response = header.take(3) ++ header.take(1) printer ! result.toPretty poster ! result socket ! Udp.Send(response, remote) context.stop(self) case _ => log.error("Unrecognized message in Conversation") } } } /* Handle all incoming connections. */ object Listener { object stop def props(address: String, port: Int): Props = Props(new Listener(address,port)) } class Listener(address: String, port: Int) extends Actor with ActorLogging { import context.system IO(Udp) ! Udp.Bind(self, new InetSocketAddress(address, port)) def receive = { case Udp.Bound(local) => log.debug("Listening on "+local.getPort) context.become(ready(sender())) case _ => log.error("Unrecognized message in Listener.receive") } def ready(socket: ActorRef): Receive = { case Udp.Received(data, remote) => log.debug("Data connection requested.") system.actorOf(Props(Upstream(socket,remote).make)) ! data case Udp.Unbind => log.debug("GOT: Udp.Unbind"); socket ! Udp.Unbind case Udp.Unbound => log.debug("GOT: Udp.Unbound"); system.shutdown case Listener.stop => log.debug("GOT: Listener.stop"); socket ! Udp.Unbind case _ => log.error("Unrecognized message in Listener.ready") } } class Printer extends Actor with ActorLogging { def receive = { case s: String => println(s); } } /* Actor handling the posting of UDP to the the server of Hans. 
* Use this for relaying the data for testing purposes.*/ object Poster { def props(phone: Option[String]): Props = Props(new Poster(phone)) } class Poster(phone: Option[String]) extends Actor with ActorLogging { import scalaj.http.Http lazy val http = Http("http://dev.ibeaconlivinglab.com:1880/ruud") def post(bericht: JsStack) { val response = http.postData(bericht.toPretty).header("content-type","application/json").asString.code if (response != 200) log.info("Post done, response not 200, but: "+response) } def sms(bericht: JsStack) { val water = bericht | "rxpk" | 0 | "PHYPayload" | "plainJson" | "water" |> false if (water && phone.isDefined) MessageBird.send(phone.head) } def receive = { //case json: JsStack => // val bericht = json.toPretty // val response = http.postData(bericht).header("content-type","application/json").asString.code // if (response != 200) log.info("Post done, response not 200, but: "+response) case json: JsStack => //post(json) sms(json) } } /* Main server object. Used as a UDP listener for packet forwarder. */ object main extends App { import pimps._ println("This is a Lora Server Simulator") (args.lift(0), args.lift(1).flatMap(_.asInt)) match { case (Some(address),Some(port)) => println(" Listening on address "+address) println(" Listening on port "+port) println(" Type exit to stop.") val system = ActorSystem("MainSystem") val gwc = system.actorOf(Listener.props(address,port),"GlobalListener") val printer = system.actorOf(Props[Printer],"Printer") val poster = system.actorOf(Poster.props(args.lift(2)),"Poster") var cnt = true while (cnt) { val text = scala.io.StdIn.readLine("> ") cnt = (text != "exit") if (!cnt) gwc ! Listener.stop } case _ => println("Usuage: simserv <address> <port>") println(" address: address of service where to listen (usually localhost) to for gateway data.") println(" port: port on local host to listen to for gateway data: port > 1024.") println(" phone: phone number to send sms to for 'hoosjebootje' demo.") } } /* Activate this to perfom small tests ... */ object test //extends App { import base64._ import crypto._ import pimps.BytesOps println("Test omgeving") val defaultKey = Array(0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F, 0x3C).map(_.toByte) val RFC4493Key = Array(0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6, 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c).map(_.toByte) val msg1 = Array[Byte]() val msg2 = Array(0x6b, 0xc1, 0xbe, 0xe2, 0x2e, 0x40, 0x9f, 0x96, 0xe9, 0x3d, 0x7e, 0x11, 0x73, 0x93, 0x17, 0x2a).map(_.toByte) val cmac1 = crypto.AES.cmac(msg1,RFC4493Key).getOrElse(Array[Byte]()) val cmac2 = crypto.AES.cmac(msg2,RFC4493Key).getOrElse(Array[Byte]()) println("cmac1 = "+cmac1.toHex) println("cmac2 = "+cmac2.toHex) //MessageBird.send("31651311702") println("done") }
devlaam/lora_simulator
src/main/scala/server.scala
Scala
gpl-3.0
9,389
package pfds.number

sealed trait SparseRepresentation extends Nat[SparseRepresentation] {

  val weights: List[Int]

  private def carry(w: Int, weights: List[Int]): List[Int] = weights match {
    case Nil => List(w)
    case ww::remain =>
      if (w < ww) w::weights
      else if (w == ww) carry(w + ww, remain)
      else throw new RuntimeException(s"Illegal carry value: $w, where current weight is: $ww");
  }

  implicit private def genSparse(n: List[Int]): SparseRepresentation =
    new SparseRepresentation { val weights: List[Int] = n }

  override def inc: SparseRepresentation = carry(1, weights)

  override def dec: SparseRepresentation = {
    def borrow(w: Int, weights: List[Int]): List[Int] = weights match {
      case Nil => throw NegNatException
      case ww::remain =>
        if (w == ww) remain
        else if (w < ww) w::borrow(w * 2, weights)
        else throw new RuntimeException(s"Illegal borrow value: $w, where current weight is: $ww");
    }
    borrow(1, weights)
  }

  override def +(that: SparseRepresentation): SparseRepresentation = {
    def add(ws1: List[Int], ws2: List[Int]): List[Int] = (ws1, ws2) match {
      case (Nil, _) => ws2
      case (_, Nil) => ws1
      case (w1::remain1, w2::remain2) =>
        if (w1 < w2) w1::add(remain1, ws2)
        else if (w1 > w2) w2::add(ws1, remain2)
        else carry(w1 + w2, add(remain1, remain2))
    }
    add(weights, that.weights)
  }

  override def toInt: Int = weights.sum
}

object SparseRepresentation {

  def apply(num: Int): SparseRepresentation = {
    def gen(num: Int, acc: SparseRepresentation): SparseRepresentation = {
      if (num == 0) acc
      else gen(num - 1, acc.inc)
    }
    if (num < 0) throw NegNatException
    else gen(num, new SparseRepresentation { val weights = Nil })
  }
}
ZhiruiLi/PfdsInScala
src/main/scala/pfds/number/SparseRepresentation.scala
Scala
mit
1,806
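A quick arithmetic check of the sparse binary representation above, using only the operations it defines (inc, dec, + and toInt):

import pfds.number.SparseRepresentation

val five  = SparseRepresentation(5)   // internally weights List(1, 4)
val three = SparseRepresentation(3)   // internally weights List(1, 2)

println(five.toInt)            // 5
println((five + three).toInt)  // 8
println(five.inc.toInt)        // 6
println(five.dec.dec.toInt)    // 3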
package ImpatientScala import scala.beans.BeanProperty /** * Classes - Chapter 5 */ object Chapter5Main { println( "1. Improve the Counter class in Section 5.1 , 'Simple Classes and Parameterless Methods,' Improve the " + "Counter class in Section 5.1 , “Simple Classes and Parameterless Methods,”" ) class myCounter(private var value: Int) { def increment(): Unit = { if (value + 1 <= Int.MaxValue) { value += 1 } } def current: Int = value } val a1 = new myCounter(4) println(a1.current.toString) println() println( "2. Write a class BankAccount with methods deposit and withdraw, and a read-only property balance." ) class bankAccount(private var _balance: Double = 0.0) { def deposit(value: Int): Unit = { _balance += value } def deposit(value: Double): Unit = { _balance += value } def withdraw(value: Int): Unit = { if (_balance - value > 0) { _balance -= value } } def withdraw(value: Double): Unit = { if (_balance - value > 0) { _balance -= value } } def balance: Double = _balance } val myBank: bankAccount = new bankAccount myBank.deposit(1000) myBank.withdraw(299.12) myBank.withdraw(41.12) println(s"My Balance: ${myBank.balance}") println() println( "3. Write a class Time with read-only properties hours and minutes and a method before(other: Time):" + "Boolean that checks whether this time comes before the other. " + "A Time object should be constructed as new Time(hrs, min), where hrs is in military time format " + "(between 0 and 23)." ) class time3(private var _hrs: Int, private var _min: Int) { _min = _min match { case i: Int if i % 60 == 0 => _hrs += i / 60; 0 case i: Int if i < 0 => _hrs += (i / 60 - 1); 60 + i % 60 case i: Int if i > 59 => _hrs += i / 60; i % 60 case _ => _min } _hrs = _hrs match { case i: Int if i < 0 => 24 + i % 24 case i: Int if i > 23 => i % 24 case _ => _hrs } def hrs: Int = _hrs def min: Int = _min def before(other: time3): Boolean = { _hrs < other._hrs || (other._hrs == _hrs && _min < other._min) } } val aT1 = new time3(-1, 70) val aT2 = new time3(2, -59) println(s"Time ${aT1.hrs}, ${aT1.min}, ${aT2.before(aT1)}") println() println( "4. Reimplement the Time class from the preceding exercise so that the internal representation is " + "the number of minutes since midnight (between 0 and 24 × 60 – 1). " + "Do not change the public interface. That is, client code should be unaffected by your change." ) class time4(private var _hrs: Int, private var _min: Int) { _min = _min match { case i: Int if i % 60 == 0 => _hrs += i / 60; 0 case i: Int if i < 0 => _hrs += (i / 60 - 1); 60 + i % 60 case i: Int if i > 59 => _hrs += i / 60; i % 60 case _ => _min } _hrs = _hrs match { case i: Int if i < 0 => 24 + i % 24 case i: Int if i > 23 => i % 24 case _ => _hrs } def hrs: Int = _hrs def min: Int = _min def before(other: time4): Boolean = { _hrs < other._hrs || (other._hrs == _hrs && _min < other._min) } } val aT3 = new time4(1, -70) val aT4 = new time4(2, -59) println(s"Time ${aT3.hrs}, ${aT3.min}, ${aT4.before(aT3)}") println() println( "5. Make a class Student with read-write JavaBeans properties name (of type String) and id (of type Long)." + "What methods are generated? (Use javap to check.) Can you call the JavaBeans getters and setters in Scala?" ) class Student(@BeanProperty var name: String, @BeanProperty var id: Long) val john = new Student("John", 2457) println(john.id, john.name, john.getId == john.id) println() println( "6. 
In the Person class of Section 5.1 , “Simple Classes and Parameterless Methods,” on page 49 , " + "provide a primary constructor that turns negative ages to 0." ) class Person(private var _age: Int) { if (_age < 0) _age = 0 def age: Int = _age def age_=(newValue: Int): Unit = { if (newValue > _age) _age = newValue } } val Paul = new Person(-2) println(Paul.age) println() println( "7. Write a class Person with a primary constructor that accepts a string containing a first name, a space, " + "and a last name, such as new Person('Fred Smith') . Supply read-only properties firstName and lastName. " + "Should the primary constructor parameter be a var , a val , or a plain parameter? Why?" ) class Person7(_fullName: String) { val (firstName, lastName) = _fullName.split(" ") match { case Array(x: String, y: String, _*) => (x, y) case _ => (null, null) } } val fredSmith = new Person7("Fred Smith") println(fredSmith.firstName, fredSmith.lastName) println() println( "8. Make a class Car with read-only properties for manufacturer, model name, and model year, and a " + "read-write property for the license plate." + "Supply four constructors. All require the manufacturer and model name. Optionally, model year and license " + "plate can also be specified in the constructor. If not, the model year is set to -1 and the license plate" + " to the empty string. Which constructor? Why?" ) class Car(val manufacturer: String, val modelName: String) { var licencePlate: String = "" private var _modelYear: Int = -1 def this(manufacturer: String, modelName: String, modelYear: Int) { this(manufacturer, modelName) this._modelYear = modelYear } def this(manufacturer: String, modelName: String, licencePlate: String) { this(manufacturer, modelName) this.licencePlate = licencePlate } def this(manufacturer: String, modelName: String, modelYear: Int, licencePlate: String) { this(manufacturer, modelName) this._modelYear = modelYear this.licencePlate = licencePlate } def modelYear: Int = _modelYear } val VWGolf = new Car("Volkswagen", "Golf") val VWGolf2009 = new Car("Volkswagen", "Golf", 2009) val VWGolfPlate = new Car("Volkswagen", "Golf", 2012, "IM NOT TELLING YOU") println(VWGolfPlate.manufacturer, VWGolfPlate.modelName, VWGolfPlate.modelYear, VWGolfPlate.licencePlate) println() println( "9. Reimplement the class of the preceding exercise in Java, C#, or C++ (your choice). How much " + "shorter is the Scala class?" ) println("Python?!") println() println( "10. Consider the class " + "class Employee(val name: String, var salary: Double) {def this() { this('John Q. Public', 0.0) } }" + "Rewrite it to use explicit fields and a default primary constructor. Which form do you prefer? Why?" ) class Employee(val name: String, var salary: Double) { def this() { this("John Q. Public", 0.0) } } // A more syntactic form class EmployeeRewritten(val name: String = "John Q. Public", var salary: Double = 0.0) {} }
dandxy89/LearningScala
src/main/scala/ImpatientScala/Chapter5Main.scala
Scala
mit
7,202
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.utils import org.apache.flink.api.common.JobExecutionResult import org.apache.flink.api.common.typeinfo.TypeInformation import org.apache.flink.table.api.{Table, TableConfig, TableEnvironment} import org.apache.flink.table.catalog.Catalog import org.apache.flink.table.descriptors.{ConnectTableDescriptor, ConnectorDescriptor} import org.apache.flink.table.functions.ScalarFunction import org.apache.flink.table.sinks.TableSink import org.apache.flink.table.sources.TableSource import java.util.Optional class MockTableEnvironment extends TableEnvironment { override def fromTableSource(source: TableSource[_]): Table = ??? override def registerFunction(name: String, function: ScalarFunction): Unit = ??? override def registerTable(name: String, table: Table): Unit = ??? override def registerTableSource(name: String, tableSource: TableSource[_]): Unit = ??? override def registerTableSink( name: String, fieldNames: Array[String], fieldTypes: Array[TypeInformation[_]], tableSink: TableSink[_]): Unit = ??? override def registerTableSink(name: String, configuredSink: TableSink[_]): Unit = ??? override def scan(tablePath: String*): Table = ??? override def connect(connectorDescriptor: ConnectorDescriptor): ConnectTableDescriptor = ??? override def listCatalogs(): Array[String] = ??? override def listDatabases(): Array[String] = ??? override def listTables(): Array[String] = ??? override def listUserDefinedFunctions(): Array[String] = ??? override def listFunctions(): Array[String] = ??? override def explain(table: Table): String = ??? override def explain(table: Table, extended: Boolean): String = ??? override def explain(extended: Boolean): String = ??? override def getCompletionHints(statement: String, position: Int): Array[String] = ??? override def sqlQuery(query: String): Table = ??? override def sqlUpdate(stmt: String): Unit = ??? override def getConfig: TableConfig = ??? override def registerCatalog( name: String, catalog: Catalog): Unit = ??? override def getCatalog(catalogName: String): Optional[Catalog] = ??? override def getCurrentCatalog: String = ??? override def getCurrentDatabase: String = ??? override def useCatalog(catalogName: String): Unit = ??? override def useDatabase(databaseName: String): Unit = ??? override def insertInto( table: Table, sinkPath: String, sinkPathContinued: String*): Unit = ??? override def execute(jobName: String): JobExecutionResult = ??? }
fhueske/flink
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/utils/MockTableEnvironment.scala
Scala
apache-2.0
3,368
package org.camunda.feel.impl.spi

import org.camunda.feel.context.CustomFunctionProvider
import org.camunda.feel.syntaxtree._

// DO NOT DELETE, used in ScriptEngineTest through src/test/resources/META-INF/services
class TestFunctionProvider extends CustomFunctionProvider {

  override def getFunction(name: String): Option[ValFunction] = functions.get(name)

  override def functionNames: Iterable[String] = functions.keys

  val functions: Map[String, ValFunction] = Map(
    "foo" -> ValFunction(
      params = List("x"),
      invoke = { case List(ValNumber(x)) => ValNumber(x + 1) }
    )
  )
}
saig0/feel
feel-engine/src/test/scala/org/camunda/feel/impl/spi/TestFunctionProvider.scala
Scala
apache-2.0
620
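Registering further custom functions follows the same shape as "foo" above; for instance, a two-argument addition could be defined like this (illustrative only, not part of the test provider):

val add: (String, ValFunction) =
  "add" -> ValFunction(
    params = List("x", "y"),
    invoke = { case List(ValNumber(x), ValNumber(y)) => ValNumber(x + y) }
  )

// A provider would expose it by including the pair in its functions map:
//   val functions: Map[String, ValFunction] = Map(add)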
package controllers import java.util.UUID import com.google.inject.AbstractModule import com.mohiva.play.silhouette.api.{ Environment, LoginInfo } import com.mohiva.play.silhouette.impl.authenticators.CookieAuthenticator import com.mohiva.play.silhouette.test._ import models.User import net.codingwell.scalaguice.ScalaModule import org.specs2.mock.Mockito import org.specs2.specification.Scope import play.api.inject.guice.GuiceApplicationBuilder import play.api.libs.concurrent.Execution.Implicits._ import play.api.test.{ FakeRequest, PlaySpecification, WithApplication } /** * Test case for the [[controllers.ApplicationController]] class. * I don't think I like this style of testing as much as the other test style I have in place * check the other tests as they are simpler */ class ApplicationControllerSpec extends PlaySpecification with Mockito { sequential "The `index` action" should { "redirect to login page if user is unauthorized" in new Context { new WithApplication(application) { val Some(redirectResult) = route(FakeRequest(routes.ApplicationController.index()) .withAuthenticator[CookieAuthenticator](LoginInfo("invalid", "invalid")) ) status(redirectResult) must be equalTo SEE_OTHER val redirectURL = redirectLocation(redirectResult).getOrElse("") redirectURL must contain(routes.ApplicationController.signIn().toString()) val Some(unauthorizedResult) = route(FakeRequest(GET, redirectURL)) status(unauthorizedResult) must be equalTo OK contentType(unauthorizedResult) must beSome("text/html") contentAsString(unauthorizedResult) must contain("Silhouette - Sign In") } } "return 200 if user is authorized" in new Context { new WithApplication(application) { val Some(result) = route(FakeRequest(routes.ApplicationController.index()) .withAuthenticator[CookieAuthenticator](identity.loginInfo) ) status(result) must beEqualTo(OK) } } } /** * The context. */ trait Context extends Scope { /** * A fake Guice module. */ class FakeModule extends AbstractModule with ScalaModule { def configure() = { bind[Environment[User, CookieAuthenticator]].toInstance(env) } } /** * An identity. */ val identity = User( userID = UUID.randomUUID(), loginInfo = LoginInfo("facebook", "[email protected]"), firstName = None, lastName = None, fullName = None, email = None, avatarURL = None ) /** * A Silhouette fake environment. */ implicit val env: Environment[User, CookieAuthenticator] = new FakeEnvironment[User, CookieAuthenticator](Seq(identity.loginInfo -> identity)) /** * The application. */ lazy val application = new GuiceApplicationBuilder() .overrides(new FakeModule) .build() } }
phosphene/play-slick-silhouette-scalatest
test/controllers/ApplicationControllerSpec.scala
Scala
apache-2.0
2,947
package mimir.parser import java.io.{Reader,File} import scala.collection.mutable.Buffer import org.jline.terminal.{Terminal,TerminalBuilder} import org.jline.reader.{LineReader,LineReaderBuilder,EndOfFileException,UserInterruptException} import com.typesafe.scalalogging.LazyLogging import fastparse.Parsed import mimir.util.LineReaderInputSource /** * Intermediary between JLine and FastParse */ class LineReaderParser( terminal: Terminal, historyFile: String = LineReaderInputSource.defaultHistoryFile, headPrompt: String = "mimir> ", restPrompt: String = " > " ) extends Iterator[Parsed[MimirCommand]] with LazyLogging { private val input: LineReader = LineReaderBuilder. builder(). terminal(terminal). variable(LineReader.HISTORY_FILE, historyFile). build() private val inputBuffer = Buffer[String]() private var commandBuffer: Option[Parsed[MimirCommand]] = None // private var pos = 0 private var eof = false private def prompt = if(inputBuffer.isEmpty) { headPrompt } else { restPrompt } /** * Read a line from JLine into the input buffer */ private def readLine() { while(!eof){ try { val lineRead = input.readLine(prompt)//.replace("\\\\n", " ") logger.trace(s"Got: $lineRead") if( ! lineRead.trim().equals("") ){ // ignore blank lines inputBuffer += lineRead return } } catch { // if there's anything in the input buffer clear it and reset. Otherwise // pass the exception out. case _ : UserInterruptException if !inputBuffer.isEmpty => inputBuffer.clear() case _ : EndOfFileException => eof = true; } } } /** * Attempt to parse the current input buffer * @return A parse result (on success or failure) or None if more data is needed * * This implementation is a nasty hack built as a result of butting up against * a FastParse limitation. FastParse gives no indication that a failure * is a result of an EOF or a legitimate parsing glitch. As a result, we need * to get Clever about how to detect the EOF. * * inputBufferIterator mimics inputBuffer.iterator, but includes an * additional *two* "sentinel" lines at the end consisting of nothing but * whitespace. If *both* lines are consumed by the parser, we take that * to mean that more data might change the result. If at least one of * the sentinels survives, we treat it as a legitimate failure. */ private def tryParse(): Option[Parsed[MimirCommand]] = { logger.trace(s"Trying to parse (${inputBuffer.size} lines)") inputBufferIterator.reset() fastparse.parse( inputBufferIterator, MimirCommand.command(_), verboseFailures = true ) match { case [email protected](result, index) => Some(r) case f:Parsed.Failure if inputBufferIterator.hasNext => Some(f) case f:Parsed.Failure => logger.trace(s"Need more input ($f)"); None } } /** * Buffer the next parser response from JLine * @return A parsed command, parser error, or None if the stream is over * * If a parser response has already been buffered, this function * returns immediately. Otherwise, lines will be read from JLine * until either a legitimate (i.e., non-EOF) parser failure occurs, or * the parser gets a command. * * A legitimate parser failure will cause the input to be flushed. 
   */
  private def tryReadNext(): Option[Parsed[MimirCommand]] =
  {
    while(!eof && commandBuffer == None){
      readLine()
      if(!inputBuffer.isEmpty) {
        commandBuffer = tryParse()
        commandBuffer match {
          case Some(Parsed.Success(result, index)) => {
            logger.info(s"Parsed(index = $index): $result")
            skipBytes(index)
          }
          case Some(Parsed.Failure(token, index, extra)) => {
            inputBuffer.clear()
          }
          case None => {}
        }
      }
    }
    return commandBuffer
  }

  /**
   * Reset the state of the parser to pristine
   */
  def flush()
  {
    inputBuffer.clear();
    commandBuffer = None
  }

  /**
   * Return true if the iterator has another parser response.
   *
   * This method blocks until a parser response becomes available or
   * the terminal session ends.
   */
  def hasNext(): Boolean =
  {
    tryReadNext() != None
  }

  /**
   * Return the next parser response.
   *
   * This method blocks until a parser response becomes available or
   * the terminal session ends.
   */
  def next(): Parsed[MimirCommand] =
  {
    tryReadNext() match {
      case None => throw new IndexOutOfBoundsException("reading from an empty iterator")
      case Some(command) => {
        commandBuffer = None
        return command
      }
    }
  }

  /**
   * Advance the inputBuffer by a fixed number of bytes (e.g., after a successful parse).
   */
  private def skipBytes(offset: Int)
  {
    var dropped = 0
    while(offset > dropped && !inputBuffer.isEmpty){
      logger.debug(s"Checking for drop: $dropped / $offset: Next unit: ${inputBuffer.head.length}")
      if(inputBuffer.head.length < (offset - dropped)){
        dropped = dropped + inputBuffer.head.length
        logger.debug(s"Dropping '${inputBuffer.head}' ($dropped / $offset dropped so far)")
        inputBuffer.remove(0)
      } else {
        logger.debug(s"Trimming '${inputBuffer.head}' (by ${offset - dropped}) and done")
        var firstLine = inputBuffer.head
        // trim off the remaining characters
        firstLine = firstLine.substring(offset - dropped)
        // trim off any leading whitespace
        firstLine = firstLine.replaceFirst("^\\s+", "")
        // if that polished off all of the remaining characters,
        // just drop the first line.
        if(firstLine.equals("")){ inputBuffer.remove(0) }
        // otherwise replace the string
        else { inputBuffer.update(0, firstLine) }
        return
      }
    }
  }

  /**
   * An iterator for InputBuffer + two extra sentinel lines
   *
   * See tryParse() above.  The short of it is that this
   * iterator produces the same result as inputBuffer.iterator
   * but with two extra trailing "sentinel" lines of whitespace.
   */
  private object inputBufferIterator extends Iterator[String]
  {
    var pos = 0

    def reset() { pos = 0 }

    def hasNext():Boolean = { pos < inputBuffer.size + 2 }
    def next():String =
    {
      if(pos < inputBuffer.size){
        // Normal line
        val ret = inputBuffer(pos)
        pos += 1
        return ret
      } else {
        // Sentinel line
        pos += 1
        // need to make sure that each sentinel line is bigger than
        // the fastparse read buffer (about 10 characters by
        // default), or else both sentinel lines could be read in
        // as part of a normal error.
        return "                    "
      }
    }
  }
}
UBOdin/mimir
src/main/scala/mimir/parser/LineReaderParser.scala
Scala
apache-2.0
6,959
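The doc comment on tryParse above describes the sentinel trick for telling "incomplete input" apart from a genuine syntax error when fastparse streams from an Iterator[String]. The sketch below is an editor's self-contained reduction of that idea, not code from the mimir repository: `stmt` is a toy grammar standing in for MimirCommand.command, and the names `SentinelSketch`, `classify`, `sentinel`, and `padded` are hypothetical. It assumes fastparse 2.x, whose parse(...) call accepts an Iterator[String], exactly as the file above relies on.

object SentinelSketch {
  import fastparse._, NoWhitespace._

  // Toy grammar (hypothetical stand-in for MimirCommand.command):
  // a "statement" is any non-empty run of characters terminated by ';'.
  def stmt[_: P]: P[String] = P( CharsWhile(_ != ';').! ~ ";" )

  // Mirror of tryParse's sentinel trick: pad the buffered lines with two
  // whitespace-only chunks. If the parser drains both before failing, more
  // input might still fix the parse; if a sentinel survives, the failure is real.
  def classify(bufferedLines: Seq[String]): String = {
    val sentinel = " " * 20  // longer than fastparse's ~10-character read-ahead buffer
    val padded   = (bufferedLines ++ Seq(sentinel, sentinel)).iterator
    fastparse.parse(padded, stmt(_), verboseFailures = true) match {
      case Parsed.Success(value, _)            => s"parsed: $value"
      case f: Parsed.Failure if padded.hasNext => s"real error: ${f.msg}"  // a sentinel survived
      case _: Parsed.Failure                   => "need more input"        // both sentinels consumed
    }
  }
}

Under these assumptions, classify(Seq("SELECT * FROM R")) reports "need more input" (the missing ';' drags the parser through both sentinels), while classify(Seq("SELECT * FROM R;")) parses and classify(Seq(";")) is a real error.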
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive.thriftserver import java.security.PrivilegedExceptionAction import java.util.{Arrays, Map => JMap} import java.util.concurrent.RejectedExecutionException import scala.collection.JavaConverters._ import scala.collection.mutable.ArrayBuffer import scala.util.control.NonFatal import org.apache.hadoop.hive.metastore.api.FieldSchema import org.apache.hadoop.hive.shims.Utils import org.apache.hive.service.cli._ import org.apache.hive.service.cli.operation.ExecuteStatementOperation import org.apache.hive.service.cli.session.HiveSession import org.apache.spark.internal.Logging import org.apache.spark.sql.{DataFrame, Row => SparkRow, SQLContext} import org.apache.spark.sql.execution.HiveResult.{getTimeFormatters, toHiveString, TimeFormatters} import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.types._ import org.apache.spark.unsafe.types.CalendarInterval import org.apache.spark.util.{Utils => SparkUtils} private[hive] class SparkExecuteStatementOperation( val sqlContext: SQLContext, parentSession: HiveSession, statement: String, confOverlay: JMap[String, String], runInBackground: Boolean = true) extends ExecuteStatementOperation(parentSession, statement, confOverlay, runInBackground) with SparkOperation with Logging { private var result: DataFrame = _ // We cache the returned rows to get iterators again in case the user wants to use FETCH_FIRST. // This is only used when `spark.sql.thriftServer.incrementalCollect` is set to `false`. // In case of `true`, this will be `None` and FETCH_FIRST will trigger re-execution. 
private var resultList: Option[Array[SparkRow]] = _ private var previousFetchEndOffset: Long = 0 private var previousFetchStartOffset: Long = 0 private var iter: Iterator[SparkRow] = _ private var dataTypes: Array[DataType] = _ private lazy val resultSchema: TableSchema = { if (result == null || result.schema.isEmpty) { new TableSchema(Arrays.asList(new FieldSchema("Result", "string", ""))) } else { logInfo(s"Result Schema: ${result.schema}") SparkExecuteStatementOperation.getTableSchema(result.schema) } } def addNonNullColumnValue( from: SparkRow, to: ArrayBuffer[Any], ordinal: Int, timeFormatters: TimeFormatters): Unit = { dataTypes(ordinal) match { case StringType => to += from.getString(ordinal) case IntegerType => to += from.getInt(ordinal) case BooleanType => to += from.getBoolean(ordinal) case DoubleType => to += from.getDouble(ordinal) case FloatType => to += from.getFloat(ordinal) case DecimalType() => to += from.getDecimal(ordinal) case LongType => to += from.getLong(ordinal) case ByteType => to += from.getByte(ordinal) case ShortType => to += from.getShort(ordinal) case BinaryType => to += from.getAs[Array[Byte]](ordinal) // SPARK-31859, SPARK-31861: Date and Timestamp need to be turned to String here to: // - respect spark.sql.session.timeZone // - work with spark.sql.datetime.java8API.enabled // These types have always been sent over the wire as string, converted later. case _: DateType | _: TimestampType => to += toHiveString((from.get(ordinal), dataTypes(ordinal)), false, timeFormatters) case CalendarIntervalType => to += toHiveString( (from.getAs[CalendarInterval](ordinal), CalendarIntervalType), false, timeFormatters) case _: ArrayType | _: StructType | _: MapType | _: UserDefinedType[_] => to += toHiveString((from.get(ordinal), dataTypes(ordinal)), false, timeFormatters) } } def getNextRowSet(order: FetchOrientation, maxRowsL: Long): RowSet = withLocalProperties { log.info(s"Received getNextRowSet request order=${order} and maxRowsL=${maxRowsL} " + s"with ${statementId}") validateDefaultFetchOrientation(order) assertState(OperationState.FINISHED) setHasResultSet(true) val resultRowSet: RowSet = ThriftserverShimUtils.resultRowSet(getResultSetSchema, getProtocolVersion) // Reset iter when FETCH_FIRST or FETCH_PRIOR if ((order.equals(FetchOrientation.FETCH_FIRST) || order.equals(FetchOrientation.FETCH_PRIOR)) && previousFetchEndOffset != 0) { // Reset the iterator to the beginning of the query. iter = if (sqlContext.getConf(SQLConf.THRIFTSERVER_INCREMENTAL_COLLECT.key).toBoolean) { resultList = None result.toLocalIterator.asScala } else { if (resultList.isEmpty) { resultList = Some(result.collect()) } resultList.get.iterator } } var resultOffset = { if (order.equals(FetchOrientation.FETCH_FIRST)) { logInfo(s"FETCH_FIRST request with $statementId. Resetting to resultOffset=0") 0 } else if (order.equals(FetchOrientation.FETCH_PRIOR)) { // TODO: FETCH_PRIOR should be handled more efficiently than rewinding to beginning and // reiterating. val targetOffset = math.max(previousFetchStartOffset - maxRowsL, 0) logInfo(s"FETCH_PRIOR request with $statementId. 
Resetting to resultOffset=$targetOffset") var off = 0 while (off < targetOffset && iter.hasNext) { iter.next() off += 1 } off } else { // FETCH_NEXT previousFetchEndOffset } } resultRowSet.setStartOffset(resultOffset) previousFetchStartOffset = resultOffset if (!iter.hasNext) { resultRowSet } else { val timeFormatters = getTimeFormatters // maxRowsL here typically maps to java.sql.Statement.getFetchSize, which is an int val maxRows = maxRowsL.toInt var curRow = 0 while (curRow < maxRows && iter.hasNext) { val sparkRow = iter.next() val row = ArrayBuffer[Any]() var curCol = 0 while (curCol < sparkRow.length) { if (sparkRow.isNullAt(curCol)) { row += null } else { addNonNullColumnValue(sparkRow, row, curCol, timeFormatters) } curCol += 1 } resultRowSet.addRow(row.toArray.asInstanceOf[Array[Object]]) curRow += 1 resultOffset += 1 } previousFetchEndOffset = resultOffset log.info(s"Returning result set with ${curRow} rows from offsets " + s"[$previousFetchStartOffset, $previousFetchEndOffset) with $statementId") resultRowSet } } def getResultSetSchema: TableSchema = resultSchema override def runInternal(): Unit = { setState(OperationState.PENDING) logInfo(s"Submitting query '$statement' with $statementId") HiveThriftServer2.eventManager.onStatementStart( statementId, parentSession.getSessionHandle.getSessionId.toString, statement, statementId, parentSession.getUsername) setHasResultSet(true) // avoid no resultset for async run if (!runInBackground) { execute() } else { val sparkServiceUGI = Utils.getUGI() // Runnable impl to call runInternal asynchronously, // from a different thread val backgroundOperation = new Runnable() { override def run(): Unit = { val doAsAction = new PrivilegedExceptionAction[Unit]() { override def run(): Unit = { registerCurrentOperationLog() try { withLocalProperties { execute() } } catch { case e: HiveSQLException => setOperationException(e) log.error("Error running hive query: ", e) } } } try { sparkServiceUGI.doAs(doAsAction) } catch { case e: Exception => setOperationException(new HiveSQLException(e)) logError("Error running hive query as user : " + sparkServiceUGI.getShortUserName(), e) } } } try { // This submit blocks if no background threads are available to run this operation val backgroundHandle = parentSession.getSessionManager().submitBackgroundOperation(backgroundOperation) setBackgroundHandle(backgroundHandle) } catch { case rejected: RejectedExecutionException => logError("Error submitting query in background, query rejected", rejected) setState(OperationState.ERROR) HiveThriftServer2.eventManager.onStatementError( statementId, rejected.getMessage, SparkUtils.exceptionString(rejected)) throw new HiveSQLException("The background threadpool cannot accept" + " new task for execution, please retry the operation", rejected) case NonFatal(e) => logError(s"Error executing query in background", e) setState(OperationState.ERROR) HiveThriftServer2.eventManager.onStatementError( statementId, e.getMessage, SparkUtils.exceptionString(e)) throw new HiveSQLException(e) } } } private def execute(): Unit = { try { synchronized { if (getStatus.getState.isTerminal) { logInfo(s"Query with $statementId in terminal state before it started running") return } else { logInfo(s"Running query with $statementId") setState(OperationState.RUNNING) } } // Always use the latest class loader provided by executionHive's state. 
val executionHiveClassLoader = sqlContext.sharedState.jarClassLoader Thread.currentThread().setContextClassLoader(executionHiveClassLoader) // Always set the session state classloader to `executionHiveClassLoader` even for sync mode if (!runInBackground) { parentSession.getSessionState.getConf.setClassLoader(executionHiveClassLoader) } sqlContext.sparkContext.setJobGroup(statementId, statement) result = sqlContext.sql(statement) logDebug(result.queryExecution.toString()) HiveThriftServer2.eventManager.onStatementParsed(statementId, result.queryExecution.toString()) iter = { if (sqlContext.getConf(SQLConf.THRIFTSERVER_INCREMENTAL_COLLECT.key).toBoolean) { resultList = None result.toLocalIterator.asScala } else { resultList = Some(result.collect()) resultList.get.iterator } } dataTypes = result.schema.fields.map(_.dataType) } catch { // Actually do need to catch Throwable as some failures don't inherit from Exception and // HiveServer will silently swallow them. case e: Throwable => // When cancel() or close() is called very quickly after the query is started, // then they may both call cleanup() before Spark Jobs are started. But before background // task interrupted, it may have start some spark job, so we need to cancel again to // make sure job was cancelled when background thread was interrupted if (statementId != null) { sqlContext.sparkContext.cancelJobGroup(statementId) } val currentState = getStatus().getState() if (currentState.isTerminal) { // This may happen if the execution was cancelled, and then closed from another thread. logWarning(s"Ignore exception in terminal state with $statementId: $e") } else { logError(s"Error executing query with $statementId, currentState $currentState, ", e) setState(OperationState.ERROR) HiveThriftServer2.eventManager.onStatementError( statementId, e.getMessage, SparkUtils.exceptionString(e)) e match { case _: HiveSQLException => throw e case _ => throw new HiveSQLException("Error running query: " + e.toString, e) } } } finally { synchronized { if (!getStatus.getState.isTerminal) { setState(OperationState.FINISHED) HiveThriftServer2.eventManager.onStatementFinish(statementId) } } sqlContext.sparkContext.clearJobGroup() } } override def cancel(): Unit = { synchronized { if (!getStatus.getState.isTerminal) { logInfo(s"Cancel query with $statementId") setState(OperationState.CANCELED) cleanup() HiveThriftServer2.eventManager.onStatementCanceled(statementId) } } } override protected def cleanup(): Unit = { if (runInBackground) { val backgroundHandle = getBackgroundHandle() if (backgroundHandle != null) { backgroundHandle.cancel(true) } } // RDDs will be cleaned automatically upon garbage collection. if (statementId != null) { sqlContext.sparkContext.cancelJobGroup(statementId) } } } object SparkExecuteStatementOperation { def getTableSchema(structType: StructType): TableSchema = { val schema = structType.map { field => val attrTypeString = field.dataType match { case NullType => "void" case CalendarIntervalType => StringType.catalogString case other => other.catalogString } new FieldSchema(field.name, attrTypeString, field.getComment.getOrElse("")) } new TableSchema(schema.asJava) } }
spark-test/spark
sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkExecuteStatementOperation.scala
Scala
apache-2.0
14,238
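The companion object's getTableSchema converts a Spark StructType into the Hive Thrift TableSchema that is reported back to JDBC/ODBC clients, mapping NullType to the Hive "void" type and CalendarIntervalType to "string". Below is a small usage sketch, not taken from the Spark source; it assumes it runs from code inside the org.apache.spark.sql.hive.thriftserver package (the class is private[hive]) with the Spark catalyst and Hive service jars on the classpath.

import org.apache.spark.sql.types._

// A schema exercising the special cases handled in getTableSchema.
val schema = StructType(Seq(
  StructField("id", LongType),
  StructField("name", StringType),
  StructField("gap", CalendarIntervalType),
  StructField("missing", NullType)
))

val tableSchema = SparkExecuteStatementOperation.getTableSchema(schema)
// Column types carried back over Thrift: bigint, string, string, void.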
// Copyright 2012 Foursquare Labs Inc. All Rights Reserved. package io.fsq.spindle.__shaded_for_spindle_bootstrap__.runtime trait SemitypedHasPrimaryKey[F] { def primaryKey: F } trait HasPrimaryKey[F, R <: Record[R]] extends SemitypedHasPrimaryKey[F] trait HasMetaPrimaryKey[F, R <: Record[R]] { def primaryKey: FieldDescriptor[F, R, _ <: MetaRecord[R, _]] }
foursquare/fsqio
src/jvm/io/fsq/spindle/codegen/__shaded_for_spindle_bootstrap__/runtime/HasPrimaryKey.scala
Scala
apache-2.0
367
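The traits above are minimal, so a concrete shape may help. The sketch below is hypothetical (VenueRecord does not exist in the fsqio tree) and shows only the semityped variant; HasPrimaryKey and HasMetaPrimaryKey additionally require the Record, MetaRecord, and FieldDescriptor machinery from the surrounding spindle runtime, which is not shown here.

package io.fsq.spindle.__shaded_for_spindle_bootstrap__.runtime

// Hypothetical record exposing its key through SemitypedHasPrimaryKey[Long].
case class VenueRecord(id: Long, name: String) extends SemitypedHasPrimaryKey[Long] {
  override def primaryKey: Long = id
}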
package org.broadinstitute.dsde.firecloud.service import java.util.UUID import org.broadinstitute.dsde.rawls.model._ import org.joda.time.DateTime import spray.json.DefaultJsonProtocol._ import spray.json.RootJsonFormat object DataUseRestrictionTestFixtures { case class DataUseRestriction( GRU: Boolean = false, HMB: Boolean = false, DS: Seq[Int] = Seq.empty[Int], NCU: Boolean = false, NPU: Boolean = false, NMDS: Boolean = false, NAGR: Boolean = false, NCTRL: Boolean = false, `RS-PD`: Boolean = false, `RS-G`: Boolean = false, `RS-FM`: Boolean = false, `RS-M`: Boolean = false, IRB: Boolean = false ) implicit val impAttributeFormat: AttributeFormat with PlainArrayAttributeListSerializer = new AttributeFormat with PlainArrayAttributeListSerializer implicit val impDataUseRestriction: RootJsonFormat[DataUseRestriction] = jsonFormat13(DataUseRestriction) // Datasets are named by the code for easier identification in tests val booleanCodes: Seq[String] = Seq("GRU", "HMB", "NCU", "NPU", "NMDS", "NCTRL", "RS-PD", "IRB") val booleanDatasets: Seq[WorkspaceDetails] = booleanCodes.map { code => val attributes = Map(AttributeName.withLibraryNS(code) -> AttributeBoolean(true)) mkWorkspace(attributes, code, s"{${code.replace("-","")}}-unique") } val listValues: Seq[String] = Seq("TERM-1", "TERM-2") val diseaseCodes: Seq[String] = Seq("DS_URL") val diseaseURLs: Seq[String] = Seq("http://purl.obolibrary.org/obo/DOID_9220", "http://purl.obolibrary.org/obo/DOID_535") val diseaseValuesLabels: Seq[String] = Seq("central sleep apnea", "sleep disorder") val diseaseValuesInts: Seq[Int] = Seq(9220, 535) val diseaseDatasets: Seq[WorkspaceDetails] = diseaseCodes.map { code => val attributes = Map( AttributeName.withLibraryNS(code) -> AttributeValueList(diseaseURLs.map(AttributeString)), AttributeName.withLibraryNS("DS") -> AttributeValueList(diseaseValuesLabels.map(AttributeString)) ) mkWorkspace(attributes, "DS", s"{${code.replace("-","")}}-unique") } // Gender datasets are named by the gender value for easier identification in tests val genderVals: Seq[(String, String)] = Seq(("Female", "RS-FM"), ("Male", "RS-M"), ("N/A", "N/A")) val genderDatasets: Seq[WorkspaceDetails] = genderVals.flatMap { case (gender: String, code: String) => val attributes = Map(AttributeName.withLibraryNS("RS-G") -> AttributeString(gender)) Seq(mkWorkspace(attributes, gender, code), mkWorkspace(attributes, gender, s"""RSG${gender.replace("/","")}""")) } // Both gender and 'NAGR' codes are saved as string values in workspace attributes val nagrVals: Seq[String] = Seq("Yes", "No", "Unspecified") val nagrDatasets: Seq[WorkspaceDetails] = nagrVals.map { value => val attributes = Map(AttributeName.withLibraryNS("NAGR") -> AttributeString(value)) mkWorkspace(attributes, value, s"NAGR$value") } val everythingDataset = Seq(mkWorkspace( booleanCodes.map(AttributeName.withLibraryNS(_) -> AttributeBoolean(true)).toMap ++ diseaseCodes.map(AttributeName.withLibraryNS(_) -> AttributeValueList(diseaseURLs.map(AttributeString))).toMap ++ Map(AttributeName.withLibraryNS("DS") -> AttributeValueList(diseaseValuesLabels.map(AttributeString))) ++ Map(AttributeName.withLibraryNS("NAGR") -> AttributeString("Yes")) ++ Map(AttributeName.withLibraryNS("RS-G") -> AttributeString("Female")), "EVERYTHING", "EVERYTHING") ) val topThreeDataset = Seq(mkWorkspace( Seq("GRU", "HMB").map(AttributeName.withLibraryNS(_) -> AttributeBoolean(true)).toMap ++ diseaseCodes.map(AttributeName.withLibraryNS(_) -> AttributeValueList(diseaseURLs.map(AttributeString))).toMap ++ 
Map(AttributeName.withLibraryNS("DS") -> AttributeValueList(diseaseValuesLabels.map(AttributeString))), "TOP_THREE", "TOP_THREE") ) val allDatasets: Seq[WorkspaceDetails] = booleanDatasets ++ diseaseDatasets ++ genderDatasets ++ nagrDatasets ++ everythingDataset ++ topThreeDataset val validDisplayDatasets: Seq[WorkspaceDetails] = booleanDatasets ++ everythingDataset ++ topThreeDataset def mkWorkspace(attributes: Map[AttributeName, Attribute], wsName: String, wsDescription: String): WorkspaceDetails = { val testUUID: UUID = UUID.randomUUID() val defaultAttributes = attributes ++ Map( AttributeName.withDefaultNS("description") -> AttributeString(wsDescription), AttributeName.withLibraryNS("description") -> AttributeString(wsDescription), AttributeName.withDefaultNS("userAttributeTwo") -> AttributeString("two"), AttributeName.withLibraryNS("datasetName") -> AttributeString("name"), AttributeName.withLibraryNS("datasetVersion") -> AttributeString("v1.0"), AttributeName.withLibraryNS("datasetDescription") -> AttributeString("desc"), AttributeName.withLibraryNS("datasetCustodian") -> AttributeString("cust"), AttributeName.withLibraryNS("datasetDepositor") -> AttributeString("depo"), AttributeName.withLibraryNS("contactEmail") -> AttributeString("[email protected]"), AttributeName.withLibraryNS("datasetOwner") -> AttributeString("owner"), AttributeName.withLibraryNS("institute") -> AttributeValueList(Seq(AttributeString("one"),AttributeString("two"))), AttributeName.withLibraryNS("indication") -> AttributeString("indication"), AttributeName.withLibraryNS("numSubjects") -> AttributeNumber(123), AttributeName.withLibraryNS("projectName") -> AttributeString("projectName"), AttributeName.withLibraryNS("datatype") -> AttributeValueList(Seq(AttributeString("one"),AttributeString("two"))), AttributeName.withLibraryNS("dataCategory") -> AttributeValueList(Seq(AttributeString("one"),AttributeString("two"))), AttributeName.withLibraryNS("dataUseRestriction") -> AttributeString("dur"), AttributeName.withLibraryNS("studyDesign") -> AttributeString("study"), AttributeName.withLibraryNS("cellType") -> AttributeString("cellType"), AttributeName.withLibraryNS("requiresExternalApproval") -> AttributeBoolean(false), AttributeName.withLibraryNS("technology") -> AttributeValueList(Seq(AttributeString("one"),AttributeString("two"))), AttributeName.withLibraryNS("useLimitationOption") -> AttributeString("questionnaire"), AttributeName.withDefaultNS("_discoverableByGroups") -> AttributeValueList(Seq(AttributeString("one"),AttributeString("two"))) ) WorkspaceDetails( workspaceId=testUUID.toString, namespace="testWorkspaceNamespace", name=wsName, isLocked=false, createdBy="createdBy", createdDate=DateTime.now(), lastModified=DateTime.now(), attributes=Some(defaultAttributes), bucketName="bucketName", workflowCollectionName=Some("wf-collection"), authorizationDomain=Some(Set.empty[ManagedGroupRef]), workspaceVersion=WorkspaceVersions.V2, googleProject = GoogleProjectId("googleProject"), googleProjectNumber = Some(GoogleProjectNumber("googleProjectNumber")), billingAccount = Some(RawlsBillingAccountName("billingAccount")), completedCloneWorkspaceFileTransfer = Option(DateTime.now()) ) } }
broadinstitute/firecloud-orchestration
src/test/scala/org/broadinstitute/dsde/firecloud/service/DataUseRestrictionTestFixtures.scala
Scala
bsd-3-clause
7,250
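The fixture object wires up a spray-json RootJsonFormat for DataUseRestriction via jsonFormat13, so the case class round-trips to the JSON shape the tests compare against. The snippet below is a hedged usage sketch, not part of the fixture file; the field values are arbitrary and the expected JSON shown in comments is what the DefaultJsonProtocol encoding would be expected to produce.

import spray.json._
import org.broadinstitute.dsde.firecloud.service.DataUseRestrictionTestFixtures._

// Serialise a restriction using the implicit impDataUseRestriction format defined above.
val restriction = DataUseRestriction(GRU = true, DS = Seq(9220, 535), NAGR = true)
val asJson: JsValue = restriction.toJson
// asJson.asJsObject.fields("DS") is expected to be JsArray(JsNumber(9220), JsNumber(535))
val roundTripped = asJson.convertTo[DataUseRestriction]  // expected to equal `restriction`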