code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M) |
---|---|---|---|---|---|
package ch.bsisa.hyperbird.security
case class Role(name: String, ID_G: String, Id: String)
| bsisa/hb-api | app/ch/bsisa/hyperbird/security/Role.scala | Scala | gpl-2.0 | 100 |
sealed abstract class Base
object Test {
case object Up extends Base
def foo(d1: Base) =
d1 match {
case Up =>
}
// Sealed subtype: ModuleTypeRef <empty>.this.Test.Up.type
// Pattern: UniqueThisType Test.this.type
}
object Test1 {
sealed abstract class Base
object Base {
case object Down extends Base {
}
case object Up extends Base {
}
def foo(d1: Base, d2: Base) =
(d1, d2) match {
case (Up, Up) | (Down, Down) => false
case (Down, Up) => true
case (Up, Down) => false
}
}
}
object Test2 {
sealed abstract class Base
object Base {
case object Down extends Base {
}
case object Up extends Base {
}
def foo(d1: Base, d2: Base) =
(d1) match {
case Up | Down => false
}
}
}
object Test3 {
sealed abstract class Base
object Base {
case object Down extends Base
def foo(d1: Base, d2: Base) =
(d1, d2) match {
case (Down, Down) => false
}
}
}
object Test4 {
sealed abstract class Base
object Base {
case object Down extends Base {
}
case object Up extends Base {
}
}
import Test4.Base._
def foo(d1: Base, d2: Base) =
(d1, d2) match {
case (Up, Up) | (Down, Down) => false
case (Down, Test4.Base.Up) => true
case (Up, Down) => false
}
}
| som-snytt/dotty | tests/patmat/t7285a.scala | Scala | apache-2.0 | 1,423 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.plans.physical.HashPartitioning
import org.apache.spark.sql.execution.FileSourceScanExec
import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, DisableAdaptiveExecutionSuite, EnableAdaptiveExecutionSuite}
import org.apache.spark.sql.execution.exchange.ShuffleExchangeExec
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
import org.apache.spark.sql.test.{SharedSparkSession, SQLTestUtils}
class DisableUnnecessaryBucketedScanWithoutHiveSupportSuite
extends DisableUnnecessaryBucketedScanSuite
with SharedSparkSession
with DisableAdaptiveExecutionSuite {
protected override def beforeAll(): Unit = {
super.beforeAll()
assert(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "in-memory")
}
}
class DisableUnnecessaryBucketedScanWithoutHiveSupportSuiteAE
extends DisableUnnecessaryBucketedScanSuite
with SharedSparkSession
with EnableAdaptiveExecutionSuite {
protected override def beforeAll(): Unit = {
super.beforeAll()
assert(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "in-memory")
}
}
abstract class DisableUnnecessaryBucketedScanSuite
extends QueryTest
with SQLTestUtils
with AdaptiveSparkPlanHelper {
import testImplicits._
private lazy val df1 =
(0 until 50).map(i => (i % 5, i % 13, i.toString)).toDF("i", "j", "k").as("df1")
private lazy val df2 =
(0 until 50).map(i => (i % 7, i % 11, i.toString)).toDF("i", "j", "k").as("df2")
private def checkDisableBucketedScan(
query: String,
expectedNumScanWithAutoScanEnabled: Int,
expectedNumScanWithAutoScanDisabled: Int): Unit = {
def checkNumBucketedScan(query: String, expectedNumBucketedScan: Int): Unit = {
val plan = sql(query).queryExecution.executedPlan
val bucketedScan = collect(plan) { case s: FileSourceScanExec if s.bucketedScan => s }
assert(bucketedScan.length == expectedNumBucketedScan)
}
withSQLConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED.key -> "true") {
checkNumBucketedScan(query, expectedNumScanWithAutoScanEnabled)
val result = sql(query).collect()
withSQLConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED.key -> "false") {
checkNumBucketedScan(query, expectedNumScanWithAutoScanDisabled)
checkAnswer(sql(query), result)
}
}
}
test("SPARK-32859: disable unnecessary bucketed table scan - basic test") {
withTable("t1", "t2", "t3") {
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("t1")
df2.write.format("parquet").bucketBy(8, "i").saveAsTable("t2")
df2.write.format("parquet").bucketBy(4, "i").saveAsTable("t3")
Seq(
// Read bucketed table
("SELECT * FROM t1", 0, 1),
("SELECT i FROM t1", 0, 1),
("SELECT j FROM t1", 0, 0),
// Filter on bucketed column
("SELECT * FROM t1 WHERE i = 1", 0, 1),
// Filter on non-bucketed column
("SELECT * FROM t1 WHERE j = 1", 0, 1),
// Join with same buckets
("SELECT /*+ broadcast(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i", 0, 2),
("SELECT /*+ shuffle_hash(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i", 2, 2),
("SELECT /*+ merge(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i", 2, 2),
// Join with different buckets
("SELECT /*+ broadcast(t1)*/ * FROM t1 JOIN t3 ON t1.i = t3.i", 0, 2),
("SELECT /*+ shuffle_hash(t1)*/ * FROM t1 JOIN t3 ON t1.i = t3.i", 1, 2),
("SELECT /*+ merge(t1)*/ * FROM t1 JOIN t3 ON t1.i = t3.i", 1, 2),
// Join on non-bucketed column
("SELECT /*+ broadcast(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.j", 0, 2),
("SELECT /*+ shuffle_hash(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.j", 1, 2),
("SELECT /*+ merge(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.j", 1, 2),
("SELECT /*+ broadcast(t1)*/ * FROM t1 JOIN t2 ON t1.j = t2.j", 0, 2),
("SELECT /*+ shuffle_hash(t1)*/ * FROM t1 JOIN t2 ON t1.j = t2.j", 0, 2),
("SELECT /*+ merge(t1)*/ * FROM t1 JOIN t2 ON t1.j = t2.j", 0, 2),
// Aggregate on bucketed column
("SELECT SUM(i) FROM t1 GROUP BY i", 1, 1),
// Aggregate on non-bucketed column
("SELECT SUM(i) FROM t1 GROUP BY j", 0, 1),
("SELECT j, SUM(i), COUNT(j) FROM t1 GROUP BY j", 0, 1)
).foreach { case (query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) =>
checkDisableBucketedScan(query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled)
}
}
}
test("SPARK-32859: disable unnecessary bucketed table scan - multiple joins test") {
withTable("t1", "t2", "t3") {
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("t1")
df2.write.format("parquet").bucketBy(8, "i").saveAsTable("t2")
df2.write.format("parquet").bucketBy(4, "i").saveAsTable("t3")
Seq(
// Multiple joins on bucketed columns
("""
SELECT /*+ broadcast(t1, t3)*/ * FROM t1 JOIN t2 JOIN t3
ON t1.i = t2.i AND t2.i = t3.i
""".stripMargin, 0, 3),
("""
SELECT /*+ broadcast(t1) merge(t3)*/ * FROM t1 JOIN t2 JOIN t3
ON t1.i = t2.i AND t2.i = t3.i
""".stripMargin, 2, 3),
("""
SELECT /*+ merge(t1) broadcast(t3)*/ * FROM t1 JOIN t2 JOIN t3
ON t1.i = t2.i AND t2.i = t3.i
""".stripMargin, 2, 3),
("""
SELECT /*+ merge(t1, t3)*/ * FROM t1 JOIN t2 JOIN t3
ON t1.i = t2.i AND t2.i = t3.i
""".stripMargin, 2, 3),
// Multiple joins on non-bucketed columns
("""
SELECT /*+ broadcast(t1, t3)*/ * FROM t1 JOIN t2 JOIN t3
ON t1.i = t2.j AND t2.j = t3.i
""".stripMargin, 0, 3),
("""
SELECT /*+ merge(t1, t3)*/ * FROM t1 JOIN t2 JOIN t3
ON t1.i = t2.j AND t2.j = t3.i
""".stripMargin, 1, 3),
("""
SELECT /*+ merge(t1, t3)*/ * FROM t1 JOIN t2 JOIN t3
ON t1.j = t2.j AND t2.j = t3.j
""".stripMargin, 0, 3)
).foreach { case (query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) =>
checkDisableBucketedScan(query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled)
}
}
}
test("SPARK-32859: disable unnecessary bucketed table scan - multiple bucketed columns test") {
withTable("t1", "t2", "t3") {
df1.write.format("parquet").bucketBy(8, "i", "j").saveAsTable("t1")
df2.write.format("parquet").bucketBy(8, "i", "j").saveAsTable("t2")
df2.write.format("parquet").bucketBy(4, "i", "j").saveAsTable("t3")
Seq(
// Filter on bucketed columns
("SELECT * FROM t1 WHERE i = 1", 0, 1),
("SELECT * FROM t1 WHERE i = 1 AND j = 1", 0, 1),
// Join on bucketed columns
("""
SELECT /*+ broadcast(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i AND t1.j = t2.j
""".stripMargin, 0, 2),
("""
SELECT /*+ merge(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i AND t1.j = t2.j
""".stripMargin, 2, 2),
("""
SELECT /*+ merge(t1)*/ * FROM t1 JOIN t3 ON t1.i = t3.i AND t1.j = t3.j
""".stripMargin, 1, 2),
("SELECT /*+ merge(t1)*/ * FROM t1 JOIN t2 ON t1.i = t2.i", 0, 2),
// Aggregate on bucketed columns
("SELECT i, j, COUNT(*) FROM t1 GROUP BY i, j", 1, 1),
("SELECT i, COUNT(i) FROM t1 GROUP BY i", 0, 0),
("SELECT i, COUNT(j) FROM t1 GROUP BY i", 0, 1)
).foreach { case (query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) =>
checkDisableBucketedScan(query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled)
}
}
}
test("SPARK-32859: disable unnecessary bucketed table scan - other operators test") {
withTable("t1", "t2", "t3") {
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("t1")
df2.write.format("parquet").bucketBy(8, "i").saveAsTable("t2")
df1.write.format("parquet").saveAsTable("t3")
Seq(
// Operator with interesting partition not in sub-plan
("""
SELECT t1.i FROM t1
UNION ALL
(SELECT t2.i FROM t2 GROUP BY t2.i)
""".stripMargin, 1, 2),
// Non-allowed operator in sub-plan
("""
SELECT COUNT(*)
FROM (SELECT t1.i FROM t1 UNION ALL SELECT t2.i FROM t2)
GROUP BY i
""".stripMargin, 2, 2),
// Multiple [[Exchange]] in sub-plan
("""
SELECT j, SUM(i), COUNT(*) FROM t1 GROUP BY j
DISTRIBUTE BY j
""".stripMargin, 0, 1),
("""
SELECT j, COUNT(*)
FROM (SELECT i, j FROM t1 DISTRIBUTE BY i, j)
GROUP BY j
""".stripMargin, 0, 1),
// No bucketed table scan in plan
("""
SELECT j, COUNT(*)
FROM (SELECT t1.j FROM t1 JOIN t3 ON t1.j = t3.j)
GROUP BY j
""".stripMargin, 0, 0)
).foreach { case (query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled) =>
checkDisableBucketedScan(query, numScanWithAutoScanEnabled, numScanWithAutoScanDisabled)
}
}
}
test("SPARK-33075: not disable bucketed table scan for cached query") {
withTable("t1") {
withSQLConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED.key -> "true") {
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("t1")
spark.catalog.cacheTable("t1")
assertCached(spark.table("t1"))
// Verify cached bucketed table scan not disabled
val partitioning = stripAQEPlan(spark.table("t1").queryExecution.executedPlan)
.outputPartitioning
assert(partitioning match {
case HashPartitioning(Seq(column: AttributeReference), 8) if column.name == "i" => true
case _ => false
})
val aggregateQueryPlan = sql("SELECT SUM(i) FROM t1 GROUP BY i").queryExecution.executedPlan
assert(find(aggregateQueryPlan)(_.isInstanceOf[ShuffleExchangeExec]).isEmpty)
}
}
}
test("Aggregates with no groupby over tables having 1 BUCKET, return multiple rows") {
withTable("t1") {
withSQLConf(SQLConf.AUTO_BUCKETED_SCAN_ENABLED.key -> "true") {
sql(
"""
|CREATE TABLE t1 (`id` BIGINT, `event_date` DATE)
|USING PARQUET
|CLUSTERED BY (id)
|INTO 1 BUCKETS
|""".stripMargin)
sql(
"""
|INSERT INTO TABLE t1 VALUES(1.23, cast("2021-07-07" as date))
|""".stripMargin)
sql(
"""
|INSERT INTO TABLE t1 VALUES(2.28, cast("2021-08-08" as date))
|""".stripMargin)
val df = spark.sql("select sum(id) from t1 where id is not null")
assert(df.count == 1)
checkDisableBucketedScan(query = "SELECT SUM(id) FROM t1 WHERE id is not null",
expectedNumScanWithAutoScanEnabled = 1, expectedNumScanWithAutoScanDisabled = 1)
}
}
}
}
| ueshin/apache-spark | sql/core/src/test/scala/org/apache/spark/sql/sources/DisableUnnecessaryBucketedScanSuite.scala | Scala | apache-2.0 | 11,903 |
package com.softwaremill.codebrag.usecases.reactions
import com.softwaremill.codebrag.domain.{CommitInfo, ReviewedCommit}
import com.softwaremill.codebrag.common.{EventBus, Clock}
import com.typesafe.scalalogging.slf4j.Logging
import com.softwaremill.codebrag.dao.commitinfo.CommitInfoDAO
import com.softwaremill.codebrag.domain.reactions.CommitReviewedEvent
import org.bson.types.ObjectId
import com.softwaremill.codebrag.cache.UserReviewedCommitsCache
/**
 * Handles the user action of marking a given commit as reviewed
*/
class ReviewCommitUseCase(
commitDao: CommitInfoDAO,
reviewedCommitsCache: UserReviewedCommitsCache,
eventBus: EventBus) (implicit clock: Clock) extends Logging {
type ReviewCommitResult = Either[String, Unit]
def execute(repoName: String, sha: String, userId: ObjectId): ReviewCommitResult = {
commitDao.findBySha(repoName, sha) match {
case Some(commit) => Right(review(userId, commit))
case None => Left("Cannot find commit to review")
}
}
private def review(userId: ObjectId, commit: CommitInfo) = {
val reviewedCommit = ReviewedCommit(commit.sha, userId, commit.repoName, clock.nowUtc)
reviewedCommitsCache.markCommitAsReviewed(reviewedCommit)
eventBus.publish(CommitReviewedEvent(commit, userId))
}
}
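// Editor's illustrative sketch (not part of the original file): a typical call site for the
// use case above, e.g. from an HTTP endpoint handling a "mark commit as reviewed" request.
// The names `useCase`, `repoName`, `commitSha` and `currentUserId` are hypothetical.
//
//   useCase.execute(repoName, commitSha, currentUserId) match {
//     case Right(_)  => // commit cached as reviewed, CommitReviewedEvent published
//     case Left(msg) => // no commit with that sha was found: report `msg`
//   }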
| softwaremill/codebrag | codebrag-service/src/main/scala/com/softwaremill/codebrag/usecases/reactions/ReviewCommitUseCase.scala | Scala | agpl-3.0 | 1,297 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.hbase.data
import java.util.Date
import com.typesafe.scalalogging.LazyLogging
import org.geotools.data.{DataStoreFinder, Query, Transaction}
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.hbase.data.HBaseDataStoreParams.{ConnectionParam, HBaseCatalogParam}
import org.locationtech.geomesa.index.conf.QueryHints.{BIN_BATCH_SIZE, BIN_LABEL, BIN_SORT, BIN_TRACK}
import org.locationtech.geomesa.index.conf.QueryProperties
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.BIN_ATTRIBUTE_INDEX
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.{FeatureUtils, SimpleFeatureTypes}
import org.locationtech.geomesa.utils.io.WithClose
import org.opengis.feature.simple.SimpleFeature
class HBaseS3IndexTest extends HBaseTest with LazyLogging {
import scala.collection.JavaConverters._
sequential
step {
logger.info("Starting the HBase S3 Test")
}
"S3Index" should {
"work with HBase" in {
val typeName = "testS3"
val params = Map(ConnectionParam.getName -> connection, HBaseCatalogParam.getName -> catalogTableName)
val ds = DataStoreFinder.getDataStore(params.asJava).asInstanceOf[HBaseDataStore]
ds must not(beNull)
try {
ds.getSchema(typeName) must beNull
ds.createSchema(SimpleFeatureTypes.createType(typeName,
"name:String,track:String,dtg:Date,*geom:Point:srid=4326;geomesa.indices.enabled=s3:geom:dtg"))
val sft = ds.getSchema(typeName)
val features =
(0 until 10).map { i =>
ScalaSimpleFeature.create(sft, s"$i", s"name$i", "track1", s"2010-05-07T0$i:00:00.000Z", s"POINT(4$i 60)")
} ++ (10 until 20).map { i =>
ScalaSimpleFeature.create(sft, s"$i", s"name$i", "track2", s"2010-05-${i}T$i:00:00.000Z", s"POINT(4${i - 10} 60)")
} ++ (20 until 30).map { i =>
ScalaSimpleFeature.create(sft, s"$i", s"name$i", "track3", s"2010-05-${i}T${i-10}:00:00.000Z", s"POINT(6${i - 20} 60)")
}
WithClose(ds.getFeatureWriterAppend(typeName, Transaction.AUTO_COMMIT)) { writer =>
features.foreach(f => FeatureUtils.write(writer, f, useProvidedFid = true))
}
def runQuery(query: Query): Seq[SimpleFeature] =
SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
{ // return all features for inclusive filter
val filter = "bbox(geom, 38, 59, 51, 61)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(10)
features.map(_.getID.toInt) must containTheSameElementsAs(0 to 9)
}
{ // return some features for exclusive geom filter
val filter = "bbox(geom, 38, 59, 45, 61)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(6)
features.map(_.getID.toInt) must containTheSameElementsAs(0 to 5)
}
{ // return some features for exclusive date filter
val filter = "bbox(geom, 38, 59, 51, 61)" +
" AND dtg between '2010-05-07T06:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(4)
features.map(_.getID.toInt) must containTheSameElementsAs(6 to 9)
}
{ // work with whole world filter
val filter = "bbox(geom, -180, -90, 180, 90)" +
" AND dtg between '2010-05-07T05:00:00.000Z' and '2010-05-07T08:00:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(4)
features.map(_.getID.toInt) must containTheSameElementsAs(5 to 8)
}
{ // work across week bounds
val filter = "bbox(geom, 45, 59, 51, 61)" +
" AND dtg between '2010-05-07T06:00:00.000Z' and '2010-05-21T00:00:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(9)
features.map(_.getID.toInt) must containTheSameElementsAs((6 to 9) ++ (15 to 19))
}
{ // work across 2 weeks
val filter = "bbox(geom, 44.5, 59, 50, 61)" +
" AND dtg between '2010-05-10T00:00:00.000Z' and '2010-05-17T23:59:59.999Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(3)
features.map(_.getID.toInt) must containTheSameElementsAs(15 to 17)
}
{ // work with whole world filter across week bounds
val filter = "bbox(geom, -180, -90, 180, 90)" +
" AND dtg between '2010-05-07T06:00:00.000Z' and '2010-05-21T00:00:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(15)
features.map(_.getID.toInt) must containTheSameElementsAs(6 to 20)
}
{ // work with whole world filter across 3 week periods
val filter = "bbox(geom, -180, -90, 180, 90)" +
" AND dtg between '2010-05-08T06:00:00.000Z' and '2010-05-30T00:00:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(20)
features.map(_.getID.toInt) must containTheSameElementsAs(10 to 29)
}
{ // work with small bboxes and date ranges
val filter = "bbox(geom, 40.999, 59.999, 41.001, 60.001)" +
" AND dtg between '2010-05-07T00:59:00.000Z' and '2010-05-07T01:01:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(1)
features.head.getID.toInt mustEqual 1
}
{ // support AND'ed GT/LT for dates
val filter = "bbox(geom, 38, 59, 51, 61)" +
" AND dtg >= '2010-05-07T06:00:00.000Z' AND dtg <= '2010-05-08T00:00:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(4)
features.map(_.getID.toInt) must containTheSameElementsAs(6 to 9)
}
{ // apply secondary filters
val filter = "bbox(geom, 38, 59, 51, 61)" +
" AND dtg between '2010-05-07T05:00:00.000Z' and '2010-05-07T10:00:00.000Z'" +
" AND name = 'name8'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter)))
features must haveSize(1)
features.map(_.getID.toInt) must containTheSameElementsAs(Seq(8))
}
{ // apply transforms
val filter = "bbox(geom, 38, 59, 51, 61)" +
" AND dtg between '2010-05-07T06:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter), Array("name")))
features must haveSize(4)
features.map(_.getID.toInt) must containTheSameElementsAs(6 to 9)
forall(features) { f =>
f.getAttributeCount mustEqual 1
f.getAttribute("name") must not(beNull)
}
}
{ // apply functional transforms
val filter = "bbox(geom, 38, 59, 51, 61)" +
" AND dtg between '2010-05-07T06:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val features = runQuery(new Query(sft.getTypeName, ECQL.toFilter(filter), Array("derived=strConcat('my', name)")))
features must haveSize(4)
features.map(_.getID.toInt) must containTheSameElementsAs(6 to 9)
forall(features) { f =>
f.getAttributeCount mustEqual 1
f.getAttribute("derived").asInstanceOf[String] must beMatching("myname\\\\d")
}
}
{ // optimize for bin format
val filter = "bbox(geom, 38, 59, 51, 61)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-07T12:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter))
query.getHints.put(BIN_TRACK, "name")
query.getHints.put(BIN_BATCH_SIZE, 100)
// reduce our scan ranges so that we get fewer iterator instances and some aggregation
QueryProperties.ScanRangesTarget.threadLocalValue.set("1")
val aggregates = try {
// the same simple feature gets reused - so make sure you access in serial order
SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT))
.map(f => f.getAttribute(BIN_ATTRIBUTE_INDEX).asInstanceOf[Array[Byte]])
.toList
} finally {
QueryProperties.ScanRangesTarget.threadLocalValue.remove()
}
aggregates.size must beLessThan(10) // ensure some aggregation was done
val bin = aggregates.flatMap(a => a.grouped(16).map(BinaryOutputEncoder.decode))
bin must haveSize(10)
bin.map(_.trackId) must containAllOf((0 until 10).map(i => s"name$i".hashCode))
bin.map(_.dtg) must
containAllOf((0 until 10).map(i => features(i).getAttribute("dtg").asInstanceOf[Date].getTime))
forall(bin.map(_.lat))(_ mustEqual 60.0)
bin.map(_.lon) must containAllOf((0 until 10).map(_ + 40.0f))
}
{ // optimize for bin format with sorting
val filter = "bbox(geom, 38, 59, 51, 61)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-07T12:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter))
query.getHints.put(BIN_TRACK, "name")
query.getHints.put(BIN_BATCH_SIZE, 100)
query.getHints.put(BIN_SORT, true)
// reduce our scan ranges so that we get fewer iterator instances and some aggregation
QueryProperties.ScanRangesTarget.threadLocalValue.set("1")
val aggregates = try {
// the same simple feature gets reused - so make sure you access in serial order
SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT))
.map(f => f.getAttribute(BIN_ATTRIBUTE_INDEX).asInstanceOf[Array[Byte]])
.toList
} finally {
QueryProperties.ScanRangesTarget.threadLocalValue.remove()
}
aggregates.size must beLessThan(10) // ensure some aggregation was done
forall(aggregates) { a =>
val window = a.grouped(16).map(BinaryOutputEncoder.decode(_).dtg).sliding(2).filter(_.length > 1)
forall(window.toSeq)(w => w.head must beLessThanOrEqualTo(w(1)))
}
val bin = aggregates.flatMap(a => a.grouped(16).map(BinaryOutputEncoder.decode))
bin must haveSize(10)
bin.map(_.trackId) must containAllOf((0 until 10).map(i => s"name$i".hashCode))
bin.map(_.dtg) must
containAllOf((0 until 10).map(i => features(i).getAttribute("dtg").asInstanceOf[Date].getTime))
forall(bin.map(_.lat))(_ mustEqual 60.0)
bin.map(_.lon) must containAllOf((0 until 10).map(_ + 40.0f))
}
{ // optimize for bin format with label
val filter = "bbox(geom, 38, 59, 51, 61)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-07T12:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter))
query.getHints.put(BIN_TRACK, "name")
query.getHints.put(BIN_LABEL, "name")
query.getHints.put(BIN_BATCH_SIZE, 100)
// reduce our scan ranges so that we get fewer iterator instances and some aggregation
QueryProperties.ScanRangesTarget.threadLocalValue.set("1")
val aggregates = try {
// the same simple feature gets reused - so make sure you access in serial order
SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT))
.map(f => f.getAttribute(BIN_ATTRIBUTE_INDEX).asInstanceOf[Array[Byte]])
.toList
} finally {
QueryProperties.ScanRangesTarget.threadLocalValue.remove()
}
aggregates.size must beLessThan(10) // ensure some aggregation was done
val bin = aggregates.flatMap(a => a.grouped(24).map(BinaryOutputEncoder.decode))
bin must haveSize(10)
bin.map(_.trackId) must containAllOf((0 until 10).map(i => s"name$i".hashCode))
bin.map(_.dtg) must
containAllOf((0 until 10).map(i => features(i).getAttribute("dtg").asInstanceOf[Date].getTime))
forall(bin.map(_.lat))(_ mustEqual 60.0)
bin.map(_.lon) must containAllOf((0 until 10).map(_ + 40.0f))
bin.map(_.label) must containAllOf((0 until 10).map(i => BinaryOutputEncoder.convertToLabel(s"name$i")))
}
{ // optimize for bin format with transforms
val filter = "bbox(geom, 38, 59, 51, 61)" +
" AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-07T12:00:00.000Z'"
val query = new Query(sft.getTypeName, ECQL.toFilter(filter), Array("name", "geom"))
query.getHints.put(BIN_TRACK, "name")
query.getHints.put(BIN_BATCH_SIZE, 100)
// reduce our scan ranges so that we get fewer iterator instances and some aggregation
QueryProperties.ScanRangesTarget.threadLocalValue.set("1")
val aggregates = try {
// the same simple feature gets reused - so make sure you access in serial order
SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT))
.map(f => f.getAttribute(BIN_ATTRIBUTE_INDEX).asInstanceOf[Array[Byte]])
.toList
} finally {
QueryProperties.ScanRangesTarget.threadLocalValue.remove()
}
aggregates.size must beLessThan(10) // ensure some aggregation was done
val bin = aggregates.flatMap(a => a.grouped(16).map(BinaryOutputEncoder.decode))
bin must haveSize(10)
bin.map(_.trackId) must containAllOf((0 until 10).map(i => s"name$i".hashCode))
bin.map(_.dtg) must
containAllOf((0 until 10).map(i => features(i).getAttribute("dtg").asInstanceOf[Date].getTime))
forall(bin.map(_.lat))(_ mustEqual 60.0)
bin.map(_.lon) must containAllOf((0 until 10).map(_ + 40.0f))
}
} finally {
ds.dispose()
}
}
}
}
| elahrvivaz/geomesa | geomesa-hbase/geomesa-hbase-datastore/src/test/scala/org/locationtech/geomesa/hbase/data/HBaseS3IndexTest.scala | Scala | apache-2.0 | 15,292 |
package controllers.sitedata.table
import javax.inject._
import play.api._
import play.api.mvc._
import play.api.data.Form
import play.api.data.Forms._
import play.api.data._
import models.sitedata.SiteInfo
import models.sitedata.SiteInfoDetail
import models.sitedata.SubZone
import models.sitedata.Zone
import models.sitedata.EquipmentModel
import models.sitedata.EquipmentName
import models.sitedata.Petname
import models.sitedata.Modality
import play.api.i18n.Messages
import play.api.i18n.I18nSupport
import play.api.i18n.MessagesApi
import services.sitedata.ISiteInfoService
import services.sitedata.ISiteInfoDetailService
import services.sitedata.ISubZoneService
import services.sitedata.IZoneService
import services.sitedata.IEquipmentModelService
import services.sitedata.IEquipmentNameService
import services.sitedata.IPetnameService
import services.sitedata.IModalityService
import play.Application
import utils.Awaits
import play.api.libs.iteratee.Enumerator
import reports.ReportBuilder
import play.api.Configuration
@Singleton
class PetnameController @Inject() (
val messagesApi: MessagesApi,
val applicationconf: Configuration,
val service_siteinfo: ISiteInfoService,
val service_siteinfodetail: ISiteInfoDetailService,
val service_zone: IZoneService,
val service_subzone: ISubZoneService,
val service_equipmentmodel: IEquipmentModelService,
val service_equipmentname: IEquipmentNameService,
val service_petname: IPetnameService,
val service_modality: IModalityService) extends Controller with I18nSupport {
val petnameForm: Form[Petname] = Form(
mapping(
"id" -> longNumber,
"name" -> text,
"modalityid" -> longNumber,
"spmodalitydir" -> text
)(models.sitedata.Petname.apply)(models.sitedata.Petname.unapply))
def index = Action { implicit request =>
Logger.info("/petname -> PetnameController index called.")
val petname = Awaits.get(5, service_petname.findAll()).getOrElse(Seq())
Ok(views.html.sitedata.petname.index(petname))
}
def blank = Action { implicit request =>
Logger.info("blank called. ")
Ok(views.html.sitedata.petname.create(petnameForm))
}
def details(id: Long) = Action { implicit request =>
Logger.info("details called. id: " + id)
val petname = Awaits.get(5, service_petname.findById(id)).get
Ok(views.html.sitedata.petname.details(petname))
}
  def insert() = Action { implicit request =>
Logger.info("insert called.")
petnameForm.bindFromRequest.fold(
form => {
BadRequest(views.html.sitedata.petname.insert(form))
},
petname => {
service_petname.insert(petname)
Redirect(controllers.sitedata.table.routes.PetnameController.index)
.flashing("success" -> Messages("success.insert", "new petname created"))
})
}
def update(id: Long) = Action { implicit request =>
Logger.info("updated called. id: " + id)
petnameForm.bindFromRequest.fold(
form => {
Ok(views.html.sitedata.petname.update(form))
.flashing("error" -> "Fix the errors!")
},
petname => {
service_petname.update(id, petname)
Redirect(controllers.sitedata.table.routes.PetnameController.index)
.flashing("success" -> Messages("success.update", petname.name))
})
}
def remove(id: Long) = Action {
import play.api.libs.concurrent.Execution.Implicits.defaultContext
val result = Awaits.get(5, service_petname.findById(id))
result.map { petname =>
service_petname.remove(id)
Redirect(controllers.sitedata.table.routes.PetnameController.index)
.flashing("success" -> Messages("success.delete", petname.name))
}.getOrElse(NotFound)
}
def report() = Action {
import play.api.libs.concurrent.Execution.Implicits.defaultContext
val url = applicationconf.getString("slick.dbs.SiteData.db.url").getOrElse("None")
Ok.chunked( Enumerator.fromStream( ReportBuilder.toPdf("Petname.jrxml", url) ) )
.withHeaders(CONTENT_TYPE -> "application/octet-stream")
.withHeaders(CONTENT_DISPOSITION -> "attachment; filename=report-petname.pdf"
)
}
}
| tnddn/iv-web | portal/rest-portal/app/controllers/sitedata/table/PetnameController.scala | Scala | apache-2.0 | 4,195 |
package breeze.stats
package distributions
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import math._
import breeze.numerics.{lgamma,digamma}
import breeze.linalg._
import breeze.optimize._
import breeze.numerics
/**
* The Beta distribution, which is the conjugate prior for the Bernoulli distribution
*
* @author dlwh
* @param a the number of pseudo-observations for true
* @param b the number of pseudo-observations for false
*/
class Beta(a: Double, b: Double)(implicit rand: RandBasis = Rand) extends ContinuousDistr[Double] with Moments[Double, Double] {
require(a > 0.0)
require(b > 0.0)
override def unnormalizedLogPdf(x: Double) = {
require(x >= 0)
require(x <= 1)
(a-1) * log(x) + (b-1) * log(1-x)
}
val logNormalizer = lgamma(a) + lgamma(b) - lgamma(a+b)
private val aGamma = new Gamma(a,1)(rand)
private val bGamma = new Gamma(b,1)(rand)
override def draw():Double = {
// from tjhunter, a corrected version of numpy's rk_beta sampling in mtrand/distributions.c
if(a <= .5 && b <= .5) {
while (true) {
val U = rand.uniform.draw()
val V = rand.uniform.draw()
if (U > 0 && V > 0) {
// Performing the computations in the log-domain
// The exponentiation may fail if a or b are really small
// val X = math.pow(U, 1.0 / a)
val logX = math.log(U) / a
// val Y = math.pow(V, 1.0 / b)
val logY= math.log(V) / b
val logSum = numerics.logSum(logX, logY)
if (logSum <= 0.0) {
return math.exp(logX - logSum)
}
} else {
throw new RuntimeException("Underflow!")
}
}
throw new RuntimeException("Shouldn't be here.")
} else if(a <= 1 && b <= 1) {
while (true) {
val U = rand.uniform.draw()
val V = rand.uniform.draw()
if (U > 0 && V > 0) {
// Performing the computations in the log-domain
// The exponentiation may fail if a or b are really small
val X = math.pow(U, 1.0 / a)
val Y = math.pow(V, 1.0 / b)
val sum = X + Y
if (sum <= 1.0) {
return X / sum
}
} else {
throw new RuntimeException("Underflow!")
}
}
throw new RuntimeException("Shouldn't be here.")
} else {
val ad = aGamma.draw()
val bd = bGamma.draw()
ad / (ad + bd)
}
}
def mean = a / (a + b)
def variance = (a * b) / ( (a + b) * (a+b) * (a+b+1))
def mode = (a - 1) / (a+b - 2)
def entropy = logNormalizer - (a - 1) * digamma(a) - (b-1) * digamma(b) + (a + b - 2) * digamma(a + b)
}
object Beta extends ExponentialFamily[Beta,Double] {
type Parameter = (Double,Double)
case class SufficientStatistic(n: Double, meanLog: Double, meanLog1M: Double) extends distributions.SufficientStatistic[SufficientStatistic] {
def *(weight: Double) = SufficientStatistic(n*weight,meanLog, meanLog1M)
def +(t: SufficientStatistic) = {
val delta = t.meanLog - meanLog
val newMeanLog = meanLog + delta * (t.n /(t.n + n))
val logDelta = t.meanLog1M - meanLog1M
val newMeanLog1M = meanLog1M + logDelta * (t.n /(t.n + n))
SufficientStatistic(n+t.n, newMeanLog, newMeanLog1M)
}
}
def emptySufficientStatistic = SufficientStatistic(0,0,0)
def sufficientStatisticFor(t: Double) = SufficientStatistic(1,math.log(t),math.log1p(-t))
def mle(stats: SufficientStatistic): (Double, Double) = {
import breeze.linalg.DenseVector.TupleIsomorphisms._
val lensed = likelihoodFunction(stats).throughLens[DenseVector[Double]]
val startingA = stats.meanLog.abs // MoM would include variance, meh.
val startingB = stats.meanLog1M.abs // MoM would include variance, meh
val result = minimize(lensed,DenseVector(startingA,startingB))
val res@(a,b) = (result(0),result(1))
res
}
def distribution(ab: Parameter) = new Beta(ab._1,ab._2)
def likelihoodFunction(stats: SufficientStatistic):DiffFunction[(Double,Double)] = new DiffFunction[(Double,Double)]{
import stats.n
def calculate(x: (Double, Double)) = {
val (a,b) = x
if(a < 0 || b < 0) (Double.PositiveInfinity,(0.0,0.0))
else {
val obj = n * (lgamma(a) + lgamma(b) - lgamma(a+b) - (a-1)*stats.meanLog - (b-1) *stats.meanLog1M)
val gradA = n * (digamma(a) - digamma(a+b) - stats.meanLog)
val gradB = n * (digamma(b) - digamma(a+b) - stats.meanLog1M)
(obj,(gradA,gradB))
}
}
}
}
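// Editor's illustrative sketch (not part of the original file): the conjugate-prior
// relationship mentioned in the scaladoc above. Starting from Beta(a, b) and observing
// `heads` successes and `tails` failures of a Bernoulli variable, the posterior is
// Beta(a + heads, b + tails). The prior and the observation counts below are made up.
object BetaConjugacyExample {
  def main(args: Array[String]): Unit = {
    val (a, b) = (1.0, 1.0)                        // uniform prior over the success probability
    val (heads, tails) = (7, 3)                    // hypothetical coin-flip observations
    val prior = new Beta(a, b)
    val posterior = new Beta(a + heads, b + tails)
    println(s"prior mean     = ${prior.mean}")     // 1 / 2 = 0.5
    println(s"posterior mean = ${posterior.mean}") // 8 / 12 = 0.666...
    println(s"posterior draw = ${posterior.draw()}")
  }
}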
| ktakagaki/breeze | src/main/scala/breeze/stats/distributions/Beta.scala | Scala | apache-2.0 | 5,080 |
/**
* Created by jdrews on 2/21/2015.
*
* Represents a log message.
*/
package com.jdrews.logstation.webserver
case class LogMessage(logMessage: String, logFile: String)
| jdrews/logstation | src/main/scala/com/jdrews/logstation/webserver/LogMessage.scala | Scala | apache-2.0 | 177 |
package se.culvertsoft.mgen.visualdesigner.view
import java.awt.BasicStroke
import java.awt.Color
import java.awt.Graphics2D
import java.awt.Point
import java.awt.Stroke
object Graphics2DOps {
implicit class RichGraphics2D(g: Graphics2D) {
def color(c: Color)(f: => Unit) {
val prevColor = g.getColor()
g.setColor(c)
f
g.setColor(prevColor)
}
def stroke(s: Stroke)(f: => Unit) {
val prevStroke = g.getStroke()
g.setStroke(s)
f
g.setStroke(prevStroke)
}
def transl(x: Int, y: Int)(f: => Unit) {
g.translate(x, y)
f
g.translate(-x, -y)
}
def transl(p: Point)(f: => Unit) {
transl(p.x, p.y)(f)
}
def lineWidth(lw: Int)(f: => Unit) {
stroke(new BasicStroke(lw))(f)
}
}
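  // Editor's illustrative sketch (not part of the original file): typical use of the scoped
  // helpers above. Each block runs `f` and then restores the previous colour, stroke or
  // translation, so temporary drawing state does not leak into later painting:
  //
  //   g.color(Color.RED) {
  //     g.lineWidth(3) {
  //       g.drawRect(10, 10, 40, 20)
  //     }
  //   }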
} | culvertsoft/mgen-visualdesigner | src/main/scala/se/culvertsoft/mgen/visualdesigner/view/Graphics2DOps.scala | Scala | gpl-2.0 | 859 |
import scala.collection.immutable._
case class Scrabble() {
private val letterMap = HashMap((List('A', 'E', 'I', 'O', 'U', 'L', 'N', 'R',
'S', 'T').zip(List.fill(10)(1)) ++
List('D', 'G').zip(List.fill(2)(2)) ++
List('B', 'C', 'M', 'P').zip(List.fill(4)(3)) ++
List('F', 'H', 'V', 'W', 'Y').zip(List.fill(5)(4)) ++
List('K').zip(List.fill(1)(5)) ++
List('J', 'X').zip(List.fill(2)(8)) ++
List('Q', 'Z').zip(List.fill(2)(10)): _*))
def scoreLetter(ch: Char): Int = letterMap.get(ch.toUpper).getOrElse(0)
def scoreWord(word: String): Int = word.map(ch => scoreLetter(ch)).sum
}
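// Editor's illustrative sketch (not part of the original solution): expected values under
// standard Scrabble letter scores, usable as a quick smoke test.
object ScrabbleExample extends App {
  val scrabble = Scrabble()
  assert(scrabble.scoreLetter('a') == 1)      // lowercase is uppercased before lookup
  assert(scrabble.scoreWord("cabbage") == 14) // 3 + 1 + 3 + 3 + 1 + 2 + 1
  println("Scrabble score checks passed")
}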
| stanciua/exercism | scala/scrabble-score/src/main/scala/ScrabbleScore.scala | Scala | mit | 601 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.metric
import java.io.File
import scala.reflect.{classTag, ClassTag}
import scala.util.Random
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.expressions.aggregate.{Final, Partial}
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.execution.{FilterExec, RangeExec, SparkPlan, WholeStageCodegenExec}
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.util.{AccumulatorContext, JsonProtocol}
class SQLMetricsSuite extends SharedSparkSession with SQLMetricsTestUtils {
import testImplicits._
/**
* Generates a `DataFrame` by filling randomly generated bytes for hash collision.
*/
private def generateRandomBytesDF(numRows: Int = 65535): DataFrame = {
val random = new Random()
val manyBytes = (0 until numRows).map { _ =>
val byteArrSize = random.nextInt(100)
val bytes = new Array[Byte](byteArrSize)
random.nextBytes(bytes)
(bytes, random.nextInt(100))
}
manyBytes.toSeq.toDF("a", "b")
}
test("LocalTableScanExec computes metrics in collect and take") {
val df1 = spark.createDataset(Seq(1, 2, 3))
val logical = df1.queryExecution.logical
require(logical.isInstanceOf[LocalRelation])
df1.collect()
val metrics1 = df1.queryExecution.executedPlan.collectLeaves().head.metrics
assert(metrics1.contains("numOutputRows"))
assert(metrics1("numOutputRows").value === 3)
val df2 = spark.createDataset(Seq(1, 2, 3)).limit(2)
df2.collect()
val metrics2 = df2.queryExecution.executedPlan.collectLeaves().head.metrics
assert(metrics2.contains("numOutputRows"))
assert(metrics2("numOutputRows").value === 2)
}
test("Filter metrics") {
// Assume the execution plan is
// PhysicalRDD(nodeId = 1) -> Filter(nodeId = 0)
val df = person.filter('age < 25)
testSparkPlanMetrics(df, 1, Map(
0L -> (("Filter", Map(
"number of output rows" -> 1L))))
)
}
test("WholeStageCodegen metrics") {
// Assume the execution plan with node id is
// WholeStageCodegen(nodeId = 0)
// Filter(nodeId = 1)
// Range(nodeId = 2)
// TODO: update metrics in generated operators
val ds = spark.range(10).filter('id < 5)
testSparkPlanMetricsWithPredicates(ds.toDF(), 1, Map(
0L -> (("WholeStageCodegen", Map(
"duration total (min, med, max)" -> {_.toString.matches(timingMetricPattern)})))
), true)
}
test("Aggregate metrics") {
// Assume the execution plan is
// ... -> HashAggregate(nodeId = 2) -> Exchange(nodeId = 1)
// -> HashAggregate(nodeId = 0)
val df = testData2.groupBy().count() // 2 partitions
val expected1 = Seq(
Map("number of output rows" -> 2L,
"avg hash probe bucket list iters (min, med, max)" -> "\n(1, 1, 1)"),
Map("number of output rows" -> 1L,
"avg hash probe bucket list iters (min, med, max)" -> "\n(1, 1, 1)"))
val shuffleExpected1 = Map(
"records read" -> 2L,
"local blocks read" -> 2L,
"remote blocks read" -> 0L,
"shuffle records written" -> 2L)
testSparkPlanMetrics(df, 1, Map(
2L -> (("HashAggregate", expected1(0))),
1L -> (("Exchange", shuffleExpected1)),
0L -> (("HashAggregate", expected1(1))))
)
// 2 partitions and each partition contains 2 keys
val df2 = testData2.groupBy('a).count()
val expected2 = Seq(
Map("number of output rows" -> 4L,
"avg hash probe bucket list iters (min, med, max)" -> "\n(1, 1, 1)"),
Map("number of output rows" -> 3L,
"avg hash probe bucket list iters (min, med, max)" -> "\n(1, 1, 1)"))
val shuffleExpected2 = Map(
"records read" -> 4L,
"local blocks read" -> 4L,
"remote blocks read" -> 0L,
"shuffle records written" -> 4L)
testSparkPlanMetrics(df2, 1, Map(
2L -> (("HashAggregate", expected2(0))),
1L -> (("Exchange", shuffleExpected2)),
0L -> (("HashAggregate", expected2(1))))
)
}
test("Aggregate metrics: track avg probe") {
// The executed plan looks like:
// HashAggregate(keys=[a#61], functions=[count(1)], output=[a#61, count#71L])
// +- Exchange hashpartitioning(a#61, 5)
// +- HashAggregate(keys=[a#61], functions=[partial_count(1)], output=[a#61, count#76L])
// +- Exchange RoundRobinPartitioning(1)
// +- LocalTableScan [a#61]
//
// Assume the execution plan with node id is:
// Wholestage disabled:
// HashAggregate(nodeId = 0)
// Exchange(nodeId = 1)
// HashAggregate(nodeId = 2)
// Exchange (nodeId = 3)
// LocalTableScan(nodeId = 4)
//
// Wholestage enabled:
// WholeStageCodegen(nodeId = 0)
// HashAggregate(nodeId = 1)
// Exchange(nodeId = 2)
// WholeStageCodegen(nodeId = 3)
// HashAggregate(nodeId = 4)
// Exchange(nodeId = 5)
// LocalTableScan(nodeId = 6)
Seq(true, false).foreach { enableWholeStage =>
val df = generateRandomBytesDF().repartition(1).groupBy('a).count()
val nodeIds = if (enableWholeStage) {
Set(4L, 1L)
} else {
Set(2L, 0L)
}
val metrics = getSparkPlanMetrics(df, 1, nodeIds, enableWholeStage).get
nodeIds.foreach { nodeId =>
val probes = metrics(nodeId)._2("avg hash probe bucket list iters (min, med, max)")
probes.toString.stripPrefix("\n(").stripSuffix(")").split(", ").foreach { probe =>
assert(probe.toDouble > 1.0)
}
}
}
}
test("ObjectHashAggregate metrics") {
// Assume the execution plan is
// ... -> ObjectHashAggregate(nodeId = 2) -> Exchange(nodeId = 1)
// -> ObjectHashAggregate(nodeId = 0)
val df = testData2.groupBy().agg(collect_set('a)) // 2 partitions
testSparkPlanMetrics(df, 1, Map(
2L -> (("ObjectHashAggregate", Map("number of output rows" -> 2L))),
1L -> (("Exchange", Map(
"shuffle records written" -> 2L,
"records read" -> 2L,
"local blocks read" -> 2L,
"remote blocks read" -> 0L))),
0L -> (("ObjectHashAggregate", Map("number of output rows" -> 1L))))
)
// 2 partitions and each partition contains 2 keys
val df2 = testData2.groupBy('a).agg(collect_set('a))
testSparkPlanMetrics(df2, 1, Map(
2L -> (("ObjectHashAggregate", Map("number of output rows" -> 4L))),
1L -> (("Exchange", Map(
"shuffle records written" -> 4L,
"records read" -> 4L,
"local blocks read" -> 4L,
"remote blocks read" -> 0L))),
0L -> (("ObjectHashAggregate", Map("number of output rows" -> 3L))))
)
}
test("Sort metrics") {
// Assume the execution plan with node id is
// Sort(nodeId = 0)
// Exchange(nodeId = 1)
// Project(nodeId = 2)
// LocalTableScan(nodeId = 3)
// Because of SPARK-25267, ConvertToLocalRelation is disabled in the test cases of sql/core,
// so Project here is not collapsed into LocalTableScan.
val df = Seq(1, 3, 2).toDF("id").sort('id)
testSparkPlanMetricsWithPredicates(df, 2, Map(
0L -> (("Sort", Map(
"sort time total (min, med, max)" -> {_.toString.matches(timingMetricPattern)},
"peak memory total (min, med, max)" -> {_.toString.matches(sizeMetricPattern)},
"spill size total (min, med, max)" -> {_.toString.matches(sizeMetricPattern)})))
))
}
test("SortMergeJoin metrics") {
// Because SortMergeJoin may skip different rows if the number of partitions is different, this
// test should use the deterministic number of partitions.
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.createOrReplaceTempView("testDataForJoin")
withTempView("testDataForJoin") {
// Assume the execution plan is
// ... -> SortMergeJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = spark.sql(
"SELECT * FROM testData2 JOIN testDataForJoin ON testData2.a = testDataForJoin.a")
testSparkPlanMetrics(df, 1, Map(
0L -> (("SortMergeJoin", Map(
// It's 4 because we only read 3 rows in the first partition and 1 row in the second one
"number of output rows" -> 4L))),
2L -> (("Exchange", Map(
"records read" -> 4L,
"local blocks read" -> 2L,
"remote blocks read" -> 0L,
"shuffle records written" -> 2L))))
)
}
}
test("SortMergeJoin(outer) metrics") {
// Because SortMergeJoin may skip different rows if the number of partitions is different,
// this test should use the deterministic number of partitions.
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.createOrReplaceTempView("testDataForJoin")
withTempView("testDataForJoin") {
// Assume the execution plan is
// ... -> SortMergeJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = spark.sql(
"SELECT * FROM testData2 left JOIN testDataForJoin ON testData2.a = testDataForJoin.a")
testSparkPlanMetrics(df, 1, Map(
0L -> (("SortMergeJoin", Map(
// It's 8 because we read 6 rows in the left and 2 row in the right one
"number of output rows" -> 8L))))
)
val df2 = spark.sql(
"SELECT * FROM testDataForJoin right JOIN testData2 ON testData2.a = testDataForJoin.a")
testSparkPlanMetrics(df2, 1, Map(
0L -> (("SortMergeJoin", Map(
// It's 8 because we read 6 rows in the left and 2 row in the right one
"number of output rows" -> 8L))))
)
}
}
test("BroadcastHashJoin metrics") {
val df1 = Seq((1, "1"), (2, "2")).toDF("key", "value")
val df2 = Seq((1, "1"), (2, "2"), (3, "3"), (4, "4")).toDF("key", "value")
// Assume the execution plan is
// ... -> BroadcastHashJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = df1.join(broadcast(df2), "key")
testSparkPlanMetrics(df, 2, Map(
1L -> (("BroadcastHashJoin", Map(
"number of output rows" -> 2L))))
)
}
test("ShuffledHashJoin metrics") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "40",
SQLConf.SHUFFLE_PARTITIONS.key -> "2",
SQLConf.PREFER_SORTMERGEJOIN.key -> "false") {
val df1 = Seq((1, "1"), (2, "2")).toDF("key", "value")
val df2 = (1 to 10).map(i => (i, i.toString)).toSeq.toDF("key", "value")
// Assume the execution plan is
// Project(nodeId = 0)
// +- ShuffledHashJoin(nodeId = 1)
// :- Exchange(nodeId = 2)
// : +- Project(nodeId = 3)
// : +- LocalTableScan(nodeId = 4)
// +- Exchange(nodeId = 5)
// +- Project(nodeId = 6)
// +- LocalTableScan(nodeId = 7)
val df = df1.join(df2, "key")
testSparkPlanMetrics(df, 1, Map(
1L -> (("ShuffledHashJoin", Map(
"number of output rows" -> 2L))),
2L -> (("Exchange", Map(
"shuffle records written" -> 2L,
"records read" -> 2L))),
5L -> (("Exchange", Map(
"shuffle records written" -> 10L,
"records read" -> 10L))))
)
}
}
test("BroadcastHashJoin(outer) metrics") {
val df1 = Seq((1, "a"), (1, "b"), (4, "c")).toDF("key", "value")
val df2 = Seq((1, "a"), (1, "b"), (2, "c"), (3, "d")).toDF("key2", "value")
// Assume the execution plan is
// ... -> BroadcastHashJoin(nodeId = 0)
val df = df1.join(broadcast(df2), $"key" === $"key2", "left_outer")
testSparkPlanMetrics(df, 2, Map(
0L -> (("BroadcastHashJoin", Map(
"number of output rows" -> 5L))))
)
val df3 = df1.join(broadcast(df2), $"key" === $"key2", "right_outer")
testSparkPlanMetrics(df3, 2, Map(
0L -> (("BroadcastHashJoin", Map(
"number of output rows" -> 6L))))
)
}
test("BroadcastNestedLoopJoin metrics") {
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.createOrReplaceTempView("testDataForJoin")
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
withTempView("testDataForJoin") {
// Assume the execution plan is
// ... -> BroadcastNestedLoopJoin(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = spark.sql(
"SELECT * FROM testData2 left JOIN testDataForJoin ON " +
"testData2.a * testDataForJoin.a != testData2.a + testDataForJoin.a")
testSparkPlanMetrics(df, 3, Map(
1L -> (("BroadcastNestedLoopJoin", Map(
"number of output rows" -> 12L))))
)
}
}
}
test("BroadcastLeftSemiJoinHash metrics") {
val df1 = Seq((1, "1"), (2, "2")).toDF("key", "value")
val df2 = Seq((1, "1"), (2, "2"), (3, "3"), (4, "4")).toDF("key2", "value")
// Assume the execution plan is
// ... -> BroadcastHashJoin(nodeId = 1)
val df = df1.join(broadcast(df2), $"key" === $"key2", "leftsemi")
testSparkPlanMetrics(df, 2, Map(
1L -> (("BroadcastHashJoin", Map(
"number of output rows" -> 2L))))
)
}
test("CartesianProduct metrics") {
withSQLConf(SQLConf.CROSS_JOINS_ENABLED.key -> "true") {
val testDataForJoin = testData2.filter('a < 2) // TestData2(1, 1) :: TestData2(1, 2)
testDataForJoin.createOrReplaceTempView("testDataForJoin")
withTempView("testDataForJoin") {
// Assume the execution plan is
// ... -> CartesianProduct(nodeId = 1) -> TungstenProject(nodeId = 0)
val df = spark.sql(
"SELECT * FROM testData2 JOIN testDataForJoin")
testSparkPlanMetrics(df, 1, Map(
0L -> (("CartesianProduct", Map("number of output rows" -> 12L))))
)
}
}
}
test("SortMergeJoin(left-anti) metrics") {
val anti = testData2.filter("a > 2")
withTempView("antiData") {
anti.createOrReplaceTempView("antiData")
val df = spark.sql(
"SELECT * FROM testData2 ANTI JOIN antiData ON testData2.a = antiData.a")
testSparkPlanMetrics(df, 1, Map(
0L -> (("SortMergeJoin", Map("number of output rows" -> 4L))))
)
}
}
test("save metrics") {
withTempPath { file =>
// person creates a temporary view. get the DF before listing previous execution IDs
val data = person.select('name)
val previousExecutionIds = currentExecutionIds()
// Assume the execution plan is
// PhysicalRDD(nodeId = 0)
data.write.format("json").save(file.getAbsolutePath)
sparkContext.listenerBus.waitUntilEmpty(10000)
val executionIds = currentExecutionIds().diff(previousExecutionIds)
assert(executionIds.size === 1)
val executionId = executionIds.head
val jobs = statusStore.execution(executionId).get.jobs
// Use "<=" because there is a race condition that we may miss some jobs
// TODO Change "<=" to "=" once we fix the race condition that missing the JobStarted event.
assert(jobs.size <= 1)
val metricValues = statusStore.executionMetrics(executionId)
// Because "save" will create a new DataFrame internally, we cannot get the real metric id.
// However, we still can check the value.
assert(metricValues.values.toSeq.exists(_ === "2"))
}
}
test("metrics can be loaded by history server") {
val metric = SQLMetrics.createMetric(sparkContext, "zanzibar")
metric += 10L
val metricInfo = metric.toInfo(Some(metric.value), None)
metricInfo.update match {
case Some(v: Long) => assert(v === 10L)
case Some(v) => fail(s"metric value was not a Long: ${v.getClass.getName}")
case _ => fail("metric update is missing")
}
assert(metricInfo.metadata === Some(AccumulatorContext.SQL_ACCUM_IDENTIFIER))
// After serializing to JSON, the original value type is lost, but we can still
// identify that it's a SQL metric from the metadata
val metricInfoJson = JsonProtocol.accumulableInfoToJson(metricInfo)
val metricInfoDeser = JsonProtocol.accumulableInfoFromJson(metricInfoJson)
metricInfoDeser.update match {
case Some(v: String) => assert(v.toLong === 10L)
case Some(v) => fail(s"deserialized metric value was not a string: ${v.getClass.getName}")
case _ => fail("deserialized metric update is missing")
}
assert(metricInfoDeser.metadata === Some(AccumulatorContext.SQL_ACCUM_IDENTIFIER))
}
test("range metrics") {
val res1 = InputOutputMetricsHelper.run(
spark.range(30).filter(x => x % 3 == 0).toDF()
)
assert(res1 === (30L, 0L, 30L) :: Nil)
val res2 = InputOutputMetricsHelper.run(
spark.range(150).repartition(4).filter(x => x < 10).toDF()
)
assert(res2 === (150L, 0L, 150L) :: (0L, 150L, 10L) :: Nil)
// TODO: test file source V2 as well when its statistics is correctly computed.
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") {
withTempDir { tempDir =>
val dir = new File(tempDir, "pqS").getCanonicalPath
spark.range(10).write.parquet(dir)
spark.read.parquet(dir).createOrReplaceTempView("pqS")
// The executed plan looks like:
// Exchange RoundRobinPartitioning(2)
// +- BroadcastNestedLoopJoin BuildLeft, Cross
// :- BroadcastExchange IdentityBroadcastMode
// : +- Exchange RoundRobinPartitioning(3)
// : +- *Range (0, 30, step=1, splits=2)
// +- *FileScan parquet [id#465L] Batched: true, Format: Parquet, Location: ...(ignored)
val res3 = InputOutputMetricsHelper.run(
spark.range(30).repartition(3).crossJoin(sql("select * from pqS")).repartition(2).toDF()
)
// The query above is executed in the following stages:
// 1. range(30) => (30, 0, 30)
// 2. sql("select * from pqS") => (0, 30, 0)
// 3. crossJoin(...) of 1. and 2. => (10, 0, 300)
// 4. shuffle & return results => (0, 300, 0)
assert(res3 === (30L, 0L, 30L) :: (0L, 30L, 0L) :: (10L, 0L, 300L) :: (0L, 300L, 0L) :: Nil)
}
}
}
test("SPARK-25278: output metrics are wrong for plans repeated in the query") {
val name = "demo_view"
withView(name) {
sql(s"CREATE OR REPLACE VIEW $name AS VALUES 1,2")
val view = spark.table(name)
val union = view.union(view)
testSparkPlanMetrics(union, 1, Map(
0L -> ("Union" -> Map()),
1L -> ("LocalTableScan" -> Map("number of output rows" -> 2L)),
2L -> ("LocalTableScan" -> Map("number of output rows" -> 2L))))
}
}
test("writing data out metrics: parquet") {
testMetricsNonDynamicPartition("parquet", "t1")
}
test("writing data out metrics with dynamic partition: parquet") {
testMetricsDynamicPartition("parquet", "parquet", "t1")
}
private def collectNodeWithinWholeStage[T <: SparkPlan : ClassTag](plan: SparkPlan): Seq[T] = {
val stages = plan.collect {
case w: WholeStageCodegenExec => w
}
assert(stages.length == 1, "The query plan should have one and only one whole-stage.")
val cls = classTag[T].runtimeClass
stages.head.collect {
case n if n.getClass == cls => n.asInstanceOf[T]
}
}
test("SPARK-25602: SparkPlan.getByteArrayRdd should not consume the input when not necessary") {
def checkFilterAndRangeMetrics(
df: DataFrame,
filterNumOutputs: Int,
rangeNumOutputs: Int): Unit = {
val plan = df.queryExecution.executedPlan
val filters = collectNodeWithinWholeStage[FilterExec](plan)
assert(filters.length == 1, "The query plan should have one and only one Filter")
assert(filters.head.metrics("numOutputRows").value == filterNumOutputs)
val ranges = collectNodeWithinWholeStage[RangeExec](plan)
assert(ranges.length == 1, "The query plan should have one and only one Range")
assert(ranges.head.metrics("numOutputRows").value == rangeNumOutputs)
}
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true") {
val df = spark.range(0, 3000, 1, 2).toDF().filter('id % 3 === 0)
df.collect()
checkFilterAndRangeMetrics(df, filterNumOutputs = 1000, rangeNumOutputs = 3000)
df.queryExecution.executedPlan.foreach(_.resetMetrics())
// For each partition, we get 2 rows. Then the Filter should produce 2 rows per-partition,
// and Range should produce 4 rows per-partition ([0, 1, 2, 3] and [15, 16, 17, 18]). Totally
// Filter produces 4 rows, and Range produces 8 rows.
df.queryExecution.toRdd.mapPartitions(_.take(2)).collect()
checkFilterAndRangeMetrics(df, filterNumOutputs = 4, rangeNumOutputs = 8)
// Top-most limit will call `CollectLimitExec.executeCollect`, which will only run the first
// task, so totally the Filter produces 2 rows, and Range produces 4 rows ([0, 1, 2, 3]).
val df2 = df.limit(2)
df2.collect()
checkFilterAndRangeMetrics(df2, filterNumOutputs = 2, rangeNumOutputs = 4)
}
}
test("SPARK-25497: LIMIT within whole stage codegen should not consume all the inputs") {
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true") {
// A special query that only has one partition, so there is no shuffle and the entire query
// can be whole-stage-codegened.
val df = spark.range(0, 1500, 1, 1).limit(10).groupBy('id).count().limit(1).filter('id >= 0)
df.collect()
val plan = df.queryExecution.executedPlan
val ranges = collectNodeWithinWholeStage[RangeExec](plan)
assert(ranges.length == 1, "The query plan should have one and only one Range")
// The Range should only produce the first batch, i.e. 1000 rows.
assert(ranges.head.metrics("numOutputRows").value == 1000)
val aggs = collectNodeWithinWholeStage[HashAggregateExec](plan)
assert(aggs.length == 2, "The query plan should have two and only two Aggregate")
val partialAgg = aggs.filter(_.aggregateExpressions.head.mode == Partial).head
// The partial aggregate should output 10 rows, because its input is 10 rows.
assert(partialAgg.metrics("numOutputRows").value == 10)
val finalAgg = aggs.filter(_.aggregateExpressions.head.mode == Final).head
// The final aggregate should only produce 1 row, because the upstream limit only needs 1 row.
assert(finalAgg.metrics("numOutputRows").value == 1)
val filters = collectNodeWithinWholeStage[FilterExec](plan)
assert(filters.length == 1, "The query plan should have one and only one Filter")
      // The final Filter should produce 1 row, because the input is just one row.
assert(filters.head.metrics("numOutputRows").value == 1)
}
}
test("SPARK-26327: FileSourceScanExec metrics") {
withTable("testDataForScan") {
spark.range(10).selectExpr("id", "id % 3 as p")
.write.partitionBy("p").saveAsTable("testDataForScan")
// The execution plan only has 1 FileScan node.
val df = spark.sql(
"SELECT * FROM testDataForScan WHERE p = 1")
testSparkPlanMetrics(df, 1, Map(
0L -> (("Scan parquet default.testdataforscan", Map(
"number of output rows" -> 3L,
"number of files read" -> 2L))))
)
}
}
test("InMemoryTableScan shows the table name on UI if possible") {
// Show table name on UI
withView("inMemoryTable", "```a``b```") {
sql("CREATE TEMPORARY VIEW inMemoryTable AS SELECT 1 AS c1")
sql("CACHE TABLE inMemoryTable")
testSparkPlanMetrics(spark.table("inMemoryTable"), 1,
Map(1L -> (("Scan In-memory table `inMemoryTable`", Map.empty)))
)
sql("CREATE TEMPORARY VIEW ```a``b``` AS SELECT 2 AS c1")
sql("CACHE TABLE ```a``b```")
testSparkPlanMetrics(spark.table("```a``b```"), 1,
Map(1L -> (("Scan In-memory table ```a``b```", Map.empty)))
)
}
// Show InMemoryTableScan on UI
testSparkPlanMetrics(spark.range(1).cache().select("id"), 1,
Map(1L -> (("InMemoryTableScan", Map.empty)))
)
}
}
| pgandhi999/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/metric/SQLMetricsSuite.scala | Scala | apache-2.0 | 25,037 |
package org.apache.spark.hbase.helpers
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.hbase.Serde
/**
* Created by mharis on 07/08/15.
*/
trait SerdeBool extends Serde[Boolean] {
final override def toBytes = (value: Boolean) => Bytes.toBytes(value)
final override def fromBytes = (bytes: Array[Byte], o: Int, l: Int) => Bytes.toBoolean(bytes)
}
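/** Illustrative sketch (not part of the original source): an analogous Serde for
  * Double would follow the same pattern as SerdeBool above, delegating to HBase's
  * Bytes helpers.
  */
trait SerdeDouble extends Serde[Double] {
  final override def toBytes = (value: Double) => Bytes.toBytes(value)
  final override def fromBytes = (bytes: Array[Byte], o: Int, l: Int) => Bytes.toDouble(bytes, o)
}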
| michal-harish/spark-on-hbase | src/main/scala/org/apache/spark/hbase/helpers/SerdeBool.scala | Scala | apache-2.0 | 373 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.internal.TableEnvironmentInternal
import org.apache.flink.table.data.TimestampData
import org.apache.flink.table.functions.TableFunction
import org.apache.flink.table.planner.plan.stream.sql.RelTimeIndicatorConverterTest.TableFunc
import org.apache.flink.table.planner.utils.TableTestBase
import org.apache.flink.table.types.logical.BigIntType
import org.junit.Test
import java.sql.Timestamp
/**
* Tests for [[org.apache.flink.table.planner.calcite.RelTimeIndicatorConverter]].
*/
class RelTimeIndicatorConverterTest extends TableTestBase {
private val util = streamTestUtil()
util.addDataStream[(Long, Long, Int)](
"MyTable", 'rowtime.rowtime, 'long, 'int, 'proctime.proctime)
util.addDataStream[(Long, Long, Int)]("MyTable1", 'rowtime.rowtime, 'long, 'int)
util.addDataStream[(Long, Int)]("MyTable2", 'long, 'int, 'proctime.proctime)
@Test
def testSimpleMaterialization(): Unit = {
val sqlQuery =
"""
|SELECT rowtime FROM
| (SELECT FLOOR(rowtime TO DAY) AS rowtime, long FROM MyTable WHERE long > 0) t
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testSelectAll(): Unit = {
util.verifyPlan("SELECT * FROM MyTable")
}
@Test
def testFilteringOnRowtime(): Unit = {
val sqlQuery =
"SELECT rowtime FROM MyTable1 WHERE rowtime > CAST('1990-12-02 12:11:11' AS TIMESTAMP(3))"
util.verifyPlan(sqlQuery)
}
@Test
def testGroupingOnRowtime(): Unit = {
util.verifyPlan("SELECT COUNT(long) FROM MyTable GROUP BY rowtime")
}
@Test
def testAggregationOnRowtime(): Unit = {
util.verifyPlan("SELECT MIN(rowtime) FROM MyTable1 GROUP BY long")
}
@Test
def testGroupingOnProctime(): Unit = {
util.verifyPlan("SELECT COUNT(long) FROM MyTable2 GROUP BY proctime")
}
@Test
def testAggregationOnProctime(): Unit = {
util.verifyPlan("SELECT MIN(proctime) FROM MyTable2 GROUP BY long")
}
@Test
def testTableFunction(): Unit = {
util.addFunction("tableFunc", new TableFunc)
val sqlQuery =
"""
|SELECT rowtime, proctime, s
|FROM MyTable, LATERAL TABLE(tableFunc(rowtime, proctime, '')) AS T(s)
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testUnion(): Unit = {
util.verifyPlan("SELECT rowtime FROM MyTable1 UNION ALL SELECT rowtime FROM MyTable1")
}
@Test
def testWindow(): Unit = {
val sqlQuery =
"""
|SELECT TUMBLE_END(rowtime, INTERVAL '10' SECOND),
| long,
| SUM(`int`)
|FROM MyTable1
| GROUP BY TUMBLE(rowtime, INTERVAL '10' SECOND), long
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testWindow2(): Unit = {
val sqlQuery =
"""
|SELECT TUMBLE_END(rowtime, INTERVAL '0.1' SECOND) AS `rowtime`,
| `long`,
| SUM(`int`)
|FROM MyTable1
| GROUP BY `long`, TUMBLE(rowtime, INTERVAL '0.1' SECOND)
|
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testMultiWindow(): Unit = {
val sqlQuery =
"""
|SELECT TUMBLE_END(newrowtime, INTERVAL '30' SECOND), long, sum(`int`) FROM (
| SELECT
| TUMBLE_ROWTIME(rowtime, INTERVAL '10' SECOND) AS newrowtime,
| long,
| sum(`int`) as `int`
| FROM MyTable1
| GROUP BY TUMBLE(rowtime, INTERVAL '10' SECOND), long
|) t GROUP BY TUMBLE(newrowtime, INTERVAL '30' SECOND), long
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testWindowWithAggregationOnRowtime(): Unit = {
val sqlQuery =
"""
|SELECT MIN(rowtime), long FROM MyTable1
|GROUP BY long, TUMBLE(rowtime, INTERVAL '0.1' SECOND)
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testWindowWithAggregationOnRowtimeWithHaving(): Unit = {
val result =
"""
|SELECT MIN(rowtime), long FROM MyTable1
|GROUP BY long, TUMBLE(rowtime, INTERVAL '1' SECOND)
|HAVING QUARTER(TUMBLE_END(rowtime, INTERVAL '1' SECOND)) = 1
""".stripMargin
util.verifyPlan(result)
}
@Test
def testKeepProcessTimeAttrAfterSubGraphOptimize(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
val sql =
"""
|SELECT
| long,
| SUM(`int`)
|FROM MyTable2
| GROUP BY TUMBLE(proctime, INTERVAL '10' SECOND), long
""".stripMargin
val table = util.tableEnv.sqlQuery(sql)
val appendSink1 = util.createAppendTableSink(
Array("long", "sum"),
Array(new BigIntType(), new BigIntType()))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink1", appendSink1)
stmtSet.addInsert("appendSink1", table)
val appendSink2 = util.createAppendTableSink(
Array("long", "sum"),
Array(new BigIntType(), new BigIntType()))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink2", appendSink2)
stmtSet.addInsert("appendSink2", table)
util.verifyPlan(stmtSet)
}
// TODO add temporal table join case
}
object RelTimeIndicatorConverterTest {
class TableFunc extends TableFunction[String] {
val t = new Timestamp(0L)
def eval(time1: TimestampData, time2: Timestamp, string: String): Unit = {
collect(time1.toString + time2.after(t) + string)
}
}
}
| greghogan/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/sql/RelTimeIndicatorConverterTest.scala | Scala | apache-2.0 | 6,389 |
package org.scaladebugger.tool.commands
import java.io.File
import org.scaladebugger.api.profiles.traits.info.ThreadGroupInfo
import org.scaladebugger.api.utils.JDITools
import org.scaladebugger.test.helpers.ParallelMockFunSpec
import org.scalatest.concurrent.Eventually
import test.{ToolConstants, ToolFixtures, ToolTestUtilities}
class ThreadGroupCommandIntegrationSpec extends ParallelMockFunSpec
with ToolFixtures
with ToolTestUtilities
with Eventually
{
implicit override val patienceConfig = PatienceConfig(
timeout = scaled(ToolConstants.EventuallyTimeout),
interval = scaled(ToolConstants.EventuallyInterval)
)
describe("ThreadGroupCommand") {
it("should clear the active thread group if no name provided") {
val testClass = "org.scaladebugger.test.breakpoints.DelayedInit"
val testFile = JDITools.scalaClassStringToFileString(testClass)
val testFileName = new File(testFile).getName
// Create a breakpoint before connecting to the JVM
val q = "\\""
val virtualTerminal = newVirtualTerminal()
virtualTerminal.newInputLine(s"bp $q$testFile$q 10")
withToolRunningUsingTerminal(
className = testClass,
virtualTerminal = virtualTerminal
) { (vt, sm, start) =>
logTimeTaken({
// Assert that we hit the breakpoint
eventually {
validateNextLine(vt, s"Breakpoint hit at $testFileName:10\\n")
}
// Set a fake active thread group
sm.updateActiveThreadGroup(mock[ThreadGroupInfo])
// Clear our thread group
vt.newInputLine(s"threadgroup")
// Verify that the active thread group is empty
eventually {
sm.state.activeThreadGroup should be (None)
}
})
}
}
it("should set the active thread group by name if provided") {
val testClass = "org.scaladebugger.test.breakpoints.DelayedInit"
val testFile = JDITools.scalaClassStringToFileString(testClass)
val testFileName = new File(testFile).getName
val threadGroupName = "main"
// Create a breakpoint before connecting to the JVM
val q = "\\""
val virtualTerminal = newVirtualTerminal()
virtualTerminal.newInputLine(s"bp $q$testFile$q 10")
withToolRunningUsingTerminal(
className = testClass,
virtualTerminal = virtualTerminal
) { (vt, sm, start) =>
logTimeTaken({
// Assert that we hit the breakpoint
eventually {
validateNextLine(vt, s"Breakpoint hit at $testFileName:10\\n")
}
// Set our active thread group
vt.newInputLine(s"threadgroup $q$threadGroupName$q")
// Verify that the active thread group is set
eventually {
sm.state.activeThreadGroup.get.name should be (threadGroupName)
}
})
}
}
}
}
| ensime/scala-debugger | scala-debugger-tool/src/it/scala/org/scaladebugger/tool/commands/ThreadGroupCommandIntegrationSpec.scala | Scala | apache-2.0 | 2,906 |
package edu.gemini.pit
import java.util.logging.{Logger, Level}
package object catalog {
val Log = Logger.getLogger(this.getClass.getPackage.getName)
type Callback = Result => Unit
implicit class StringPimp(val s:String) extends AnyVal {
def toDoubleOption = try {
Some(s.toDouble)
} catch {
case _ : NumberFormatException => None
}
}
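  // Illustrative usage (not part of the original source): "3.14".toDoubleOption
  // returns Some(3.14), while "n/a".toDoubleOption returns None.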
import scala.language.implicitConversions
implicit def pimpSideEffect[A](f: A => Unit) = new SafeSideEffect[A] {
def safe = (a: A) =>
try {
f(a)
} catch {
case t: Throwable => Log.log(Level.WARNING, "Trouble invoking callback function.", t)
}
}
}
package catalog {
trait SafeSideEffect[A] {
def safe: A => Unit
}
} | arturog8m/ocs | bundle/edu.gemini.pit/src/main/scala/edu/gemini/pit/catalog/package.scala | Scala | bsd-3-clause | 745 |
package binconcifartests
import chisel3._
import chisel3.iotesters.{PeekPokeTester, Driver, ChiselFlatSpec}
import scala.util.Random
import binconcifar.PoolLayer
import scala.collection.mutable.ArrayBuffer
class PoolComputeTests( c : PoolLayer ) extends PeekPokeTester( c ) {
val myRand = new Random
val cycs = c.latency*5
def getRndFP() : BigInt = {
val x = 2 * myRand.nextDouble() - 1
BigInt( math.round( x * ( 1 << 4 ) ).toInt )
}
val img = List.fill( cycs ) {
List.fill( c.noIn ) {
List.fill( c.kernShape._1 ) {
List.fill( c.kernShape._2 ){
List.fill( c.kernShape._3 ) { getRndFP() }
}
}
}
}
val poolRes = img.map( poolTasks => {
poolTasks.map( poolTask => {
( 0 until c.kernShape._3 ).map( idx => {
poolTask.reduce( _ ++ _ ).map( _(idx) ).max
})
}).reduce( _ ++ _ )
})
val chosenOutput = ArrayBuffer[List[BigInt]]()
val vldCheck = ArrayBuffer[Boolean]()
var inputPtr = 0
var outputPtr = 0
for ( cyc <- 0 until cycs ) {
val vld = myRand.nextInt(4) != 0
vldCheck += vld
poke( c.io.dataIn.valid, vld )
val imgData = img(inputPtr).reduce( _ ++ _ ).reduce( _ ++ _ ).reduce( _ ++ _ )
for ( dataIn1 <- c.io.dataIn.bits.zip( imgData ) )
poke( dataIn1._1, dataIn1._2 )
if ( vld )
chosenOutput += poolRes(inputPtr).toList
inputPtr += 1
step( 1 )
if ( cyc >= c.latency - 1 ) {
expect( c.io.dataOut.valid, vldCheck( cyc - c.latency + 1 ) )
if ( vldCheck( cyc - c.latency + 1 ) ) {
for ( i <- 0 until chosenOutput( outputPtr ).size )
expect( c.io.dataOut.bits(i), chosenOutput( outputPtr )(i) )
outputPtr += 1
}
}
}
}
class PoolComputeSuite extends ChiselFlatSpec {
behavior of "PoolCompute"
backends foreach {backend =>
it should s"correctly compute the max pool $backend" in {
for ( grpSize <- List( 1, 3, 8 ) ) {
for ( tPut <- 1 until 6 ) {
val kernShape = ( 2, 2, grpSize )
Driver(() => {
new PoolLayer( SInt( 16.W ), tPut, kernShape )
}, backend, true )( c => new PoolComputeTests( c ) ) should be (true)
}
}
}
}
}
| da-steve101/binary_connect_cifar | src/test/scala/PoolComputeSuite.scala | Scala | gpl-3.0 | 2,210 |
package com.twitter.finagle.ssl
import java.lang.reflect.Method
import java.util.logging.Logger
import org.jboss.netty.channel.{
ChannelHandlerContext, ChannelStateEvent, SimpleChannelUpstreamHandler
}
class SslShutdownHandler(o: Object) extends SimpleChannelUpstreamHandler {
private[this] val log = Logger.getLogger(getClass().getName())
private[this] val shutdownMethod: Option[Method] =
try {
Some(o.getClass().getMethod("shutdown"))
} catch {
case _: NoSuchMethodException => None
}
private[this] def shutdownAfterChannelClosure() {
shutdownMethod foreach { method: Method =>
method.invoke(o)
}
}
override def channelClosed(ctx: ChannelHandlerContext, e: ChannelStateEvent) {
shutdownAfterChannelClosure()
super.channelClosed(ctx, e)
}
}
| enachb/finagle_2.9_durgh | finagle-core/src/main/scala/com/twitter/finagle/ssl/SslShutdownHandler.scala | Scala | apache-2.0 | 809 |
// Copyright (C) 2011-2012 the original author or authors.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.scalastyle.scalariform
import org.scalastyle.CombinedAst
import org.scalastyle.CombinedChecker
import org.scalastyle.LineError
import org.scalastyle.Lines
import org.scalastyle.ScalastyleError
import org.scalastyle.scalariform.VisitorHelper.visit
import scalariform.lexer.Tokens.CLASS
import scalariform.lexer.{TokenType, HiddenTokens, Token}
import scalariform.parser.AccessModifier
import scalariform.parser.FullDefOrDcl
import scalariform.parser.FunDefOrDcl
import scalariform.parser.ParamClauses
import scalariform.parser.PatDefOrDcl
import scalariform.parser.TmplDef
import scalariform.parser.Type
import scalariform.parser.TypeDefOrDcl
import scalariform.parser.TypeParamClause
/**
 * Checks that ScalaDoc exists for all accessible members:
 * - classes, traits, case classes and objects
 * - methods
 * - vals, vars and types
 *
 * The ScalaDoc must document the constructor parameters of classes and case classes,
 * as well as the parameters of methods. It must also document the type parameters.
 * Finally, it must include a return description for methods that do not return Unit.
 */
class ScalaDocChecker extends CombinedChecker {
protected val errorKey: String = "scaladoc"
val DefaultIgnoreRegex = "^$"
val skipPrivate = true
val skipQualifiedPrivate = false
val skipProtected = false
val skipQualifiedProtected = false
override def verify(ast: CombinedAst): List[ScalastyleError] = {
val tokens = ast.compilationUnit.tokens
val ignoreRegex = getString("ignoreRegex", DefaultIgnoreRegex)
def trimToTokenOfType(list: List[Token], tokenType: TokenType): List[Token] = {
if (list.isEmpty) {
list
} else {
list.head match {
case Token(`tokenType`, _, _, _) => list
case _ => trimToTokenOfType(list.tail, tokenType)
}
}
}
val ts = trimToTokenOfType(tokens, CLASS)
val ignore = !ts.isEmpty && ts(1).text.matches(ignoreRegex)
ignore match {
case true => Nil
case false => localVisit(skip = false, HiddenTokens(Nil), ast.lines)(ast.compilationUnit.immediateChildren(0))
}
}
import ScalaDocChecker._ // scalastyle:ignore underscore.import import.grouping
/*
* Finds the ScalaDoc hidden in the ``token``, falling back on ``fallback`` if ``token``
* contains no ScalaDoc.
*
* This is useful when including access levels, annotations and such like,
* which are not reported as part of the following token. So,
*
* ```
* /**
* * Contains magic
* */
* @magic protected val foo = 5
* ```
* is interpreted as
*
   * ``FullDefOrDcl`` -> ``PatDefOrDcl``, with the ScalaDoc attached to the ``FullDefOrDcl``, which
* finds its way to us here in ``fallback``.
*/
private def findScalaDoc(token: Token, fallback: HiddenTokens): Option[ScalaDoc] = {
def toScalaDoc(ht: HiddenTokens): Option[ScalaDoc] = ht.rawTokens.find(_.isScalaDocComment).map(ScalaDoc.apply)
toScalaDoc(token.associatedWhitespaceAndComments).orElse(toScalaDoc(fallback))
}
// parse the parameters and report errors for the parameters (constructor or method)
private def paramErrors(line: Int, paramClausesOpt: Option[ParamClauses])(scalaDoc: ScalaDoc): List[ScalastyleError] = {
def params(xs: List[Token]): List[String] = xs match {
// @annotation a: B; @annotation(...) a: B
case Token(_, "@", _, _)::Token(_, annotation, _, _)::
Token(_, paramName, _, _)::Token(_, ":", _, _)::Token(_, _, _, _)::t => paramName :: params(t)
// a: B
case Token(_, paramName, _, _)::Token(_, ":", _, _)::Token(_, _, _, _)::t => paramName :: params(t)
// any other token
case _::t => params(t)
case Nil => Nil
}
val paramNames = paramClausesOpt.map(pc => params(pc.tokens)).getOrElse(Nil)
val missingScalaDocParams = paramNames.filterNot(name => scalaDoc.params.exists(_.name == name))
val extraScalaDocParams = scalaDoc.params.filterNot(param => paramNames.contains(param.name))
val validScalaDocParams = scalaDoc.params.filter(param => paramNames.contains(param.name))
missingScalaDocParams.map(missing => LineError(line, List(missingParam(missing)))) ++
extraScalaDocParams.map(extra => LineError(line, List(extraParam(extra.name)))) ++
validScalaDocParams.filter(_.text.isEmpty).map(empty => LineError(line, List(emptyParam(empty.name))))
// if (!scalaDoc.params.forall(p => paramNames.exists(name => p.name == name && !p.text.isEmpty))) List(LineError(line, List(MalformedParams)))
// else Nil
}
// parse the type parameters and report errors for the parameters (constructor or method)
private def tparamErrors(line: Int, tparamClausesOpt: Option[TypeParamClause])(scalaDoc: ScalaDoc): List[ScalastyleError] = {
def tparams(xs: List[Token]): List[String] = xs match {
// [@foo A, @bar(b) B]
case Token(_, "@", _, _)::Token(_, annotation, _, _)::
Token(tokenType, paramName, _, _)::t if tokenType.name == "VARID" => paramName :: tparams(t)
// [A, B]
case Token(tokenType, paramName, _, _)::t if tokenType.name == "VARID" => paramName :: tparams(t)
// any other token
case _::t => tparams(t)
case Nil => Nil
}
val tparamNames = tparamClausesOpt.map(tc => tparams(tc.tokens)).getOrElse(Nil)
if (tparamNames.size != scalaDoc.typeParams.size) {
// bad param sizes
List(LineError(line, List(MalformedTypeParams)))
} else {
if (!scalaDoc.typeParams.forall(tp => tparamNames.contains(tp.name))) List(LineError(line, List(MalformedTypeParams))) else Nil
}
}
// parse the parameters and report errors for the return types
private def returnErrors(line: Int, returnTypeOpt: Option[(Token, Type)])(scalaDoc: ScalaDoc): List[ScalastyleError] = {
val needsReturn = returnTypeOpt.exists { case (_, tpe) => tpe.firstToken.text != "Unit" }
if (needsReturn && !scalaDoc.returns.isDefined) {
List(LineError(line, List(MalformedReturn)))
} else {
Nil
}
}
/*
* process the AST, picking up only the parts that are interesting to us, that is
* - access modifiers
* - classes, traits, case classes and objects
* - methods
* - vals, vars and types
*
* we do not bother descending down any further
*/
private def localVisit(skip: Boolean, fallback: HiddenTokens, lines: Lines)(ast: Any): List[ScalastyleError] = ast match {
case t: FullDefOrDcl =>
// private, private[xxx];
// protected, protected[xxx];
// check if we are going to include or skip depending on access modifier
val accessModifier = t.modifiers.find {
case AccessModifier(_, _) => true
case _ => false
}
val skip = accessModifier.exists {
case AccessModifier(pop, Some(_)) =>
if (pop.text == "private") skipQualifiedPrivate else skipQualifiedProtected
case AccessModifier(pop, None) =>
if (pop.text == "private") skipPrivate else skipProtected
case _ =>
false
}
// pick the ScalaDoc "attached" to the modifier, which actually means
// ScalaDoc of the following member
val scalaDocs = for {
token <- t.tokens
comment <- token.associatedWhitespaceAndComments
if comment.token.isScalaDocComment
} yield comment
// descend
visit(t, localVisit(skip, HiddenTokens(scalaDocs), lines))
case t: TmplDef =>
// trait Foo, trait Foo[A];
// class Foo, class Foo[A](a: A);
// case class Foo(), case class Foo[A](a: A);
// object Foo;
val (_, line) = lines.findLineAndIndex(t.firstToken.offset).get
// we are checking parameters and type parameters
val errors = if (skip) Nil else findScalaDoc(t.firstToken, fallback).
map { scalaDoc =>
paramErrors(line, t.paramClausesOpt)(scalaDoc) ++
tparamErrors(line, t.typeParamClauseOpt)(scalaDoc)
}.getOrElse(List(LineError(line, List(Missing))))
// and we descend, because we're interested in seeing members of the types
errors ++ visit(t, localVisit(skip, fallback, lines))
case t: FunDefOrDcl =>
// def foo[A, B](a: Int): B = ...
val (_, line) = lines.findLineAndIndex(t.firstToken.offset).get
// we are checking parameters, type parameters and returns
val errors = if (skip) Nil else findScalaDoc(t.firstToken, fallback).
map { scalaDoc =>
paramErrors(line, Some(t.paramClauses))(scalaDoc) ++
tparamErrors(line, t.typeParamClauseOpt)(scalaDoc) ++
returnErrors(line, t.returnTypeOpt)(scalaDoc)
}.
getOrElse(List(LineError(line, List(Missing))))
// we don't descend any further
errors
case t: TypeDefOrDcl =>
// type Foo = ...
val (_, line) = lines.findLineAndIndex(t.firstToken.offset).get
// error is non-existence
val errors = if (skip) Nil else findScalaDoc(t.firstToken, fallback).
map(_ => Nil).
getOrElse(List(LineError(line, List(Missing))))
// we don't descend any further
errors
case t: PatDefOrDcl =>
// val a = ...
// var a = ...
val (_, line) = lines.findLineAndIndex(t.valOrVarToken.offset).get
val errors = if (skip) Nil else findScalaDoc(t.firstToken, fallback).
map(_ => Nil).
getOrElse(List(LineError(line, List(Missing))))
// we don't descend any further
errors
case t: Any =>
// anything else, we descend (unless we stopped above)
visit(t, localVisit(skip, fallback, lines))
}
}
/**
* Contains the ScalaDoc model with trivial parsers
*/
object ScalaDocChecker {
val Missing = "Missing"
def missingParam(name: String): String = "Missing @param " + name
def extraParam(name: String): String = "Extra @param " + name
def emptyParam(name: String): String = "Missing text for @param " + name
val MalformedTypeParams = "Malformed @tparams"
val MalformedReturn = "Malformed @return"
/**
* Companion for the ScalaDoc object that parses its text to pick up its elements
*/
private object ScalaDoc {
private val ParamRegex = "@param\\\\W+(\\\\w+)\\\\W+(.*)".r
private val TypeParamRegex = "@tparam\\\\W+(\\\\w+)\\\\W+(.*)".r
private val ReturnRegex = "@return\\\\W+(.*)".r
private val TagRegex = """\\W*[*]\\W+\\@(\\w+)\\W+(\\w+)(.*)""".r
sealed trait ScalaDocLine {
def isTag: Boolean
}
case class TagSclaDocLine(tag: String, ref: String, rest: String) extends ScalaDocLine {
def isTag: Boolean = true
}
case class RawScalaDocLine(text: String) extends ScalaDocLine {
def isTag: Boolean = false
override val toString = text.replaceFirst("\\\\*\\\\W+", "")
}
/**
* Take the ``raw`` and parse an instance of ``ScalaDoc``
* @param raw the token containing the ScalaDoc
* @return the parsed instance
*/
def apply(raw: Token): ScalaDoc = {
val lines = raw.rawText.split("\\\\n").toList.flatMap(x => x.trim match {
case TagRegex(tag, ref, rest) => Some(TagSclaDocLine(tag, ref, rest))
case "/**" => None
case "*/" => None
case text: Any => Some(RawScalaDocLine(text))
})
def combineScalaDocFor[A](lines: List[ScalaDocLine], tag: String, f: (String, String) => A): List[A] = lines match {
case TagSclaDocLine(`tag`, ref, text)::ls =>
val rawLines = ls.takeWhile(!_.isTag)
f(ref, text + rawLines.mkString(" ")) :: combineScalaDocFor(ls.drop(rawLines.length), tag, f)
case _::ls => combineScalaDocFor(ls, tag, f)
case Nil => Nil
}
val params = combineScalaDocFor(lines, "param", ScalaDocParameter)
val typeParams = combineScalaDocFor(lines, "tparam", ScalaDocParameter)
val returns = combineScalaDocFor(lines, "return", _ + _).headOption
ScalaDoc(raw.rawText, params, typeParams, returns, None)
}
}
/**
* Models a parameter: either plain or type
* @param name the parameter name
* @param text the parameter text
*/
private case class ScalaDocParameter(name: String, text: String)
/**
* Models the parsed ScalaDoc
* @param text arbitrary text
* @param params the parameters
* @param typeParams the type parameters
* @param returns the returns clause, if present
* @param throws the throws clause, if present
*/
private case class ScalaDoc(text: String, params: List[ScalaDocParameter], typeParams: List[ScalaDocParameter],
returns: Option[String], throws: Option[String])
}
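// Illustrative example (not part of the original checker): a method ScalaDoc that
// satisfies the rules enforced above documents every parameter, every type
// parameter, and the return value of any non-Unit method, for instance:
//
//   /**
//    * Looks up a value by key.
//    * @param key the key to look up
//    * @tparam V the type of the stored value
//    * @return the value bound to `key`, if any
//    */
//   def lookup[V](key: String): Option[V] = ...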
| jkerfs/scalastyle | src/main/scala/org/scalastyle/scalariform/ScalaDocChecker.scala | Scala | apache-2.0 | 13,454 |
/*
* Copyright (C) 2013 Alcatel-Lucent.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
* Licensed to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package molecule.examples.io.choice
import molecule._
import molecule.io._
/**
* Alternate reads on two streams of integers.
*
* This process prints only even numbers of one stream and odd numbers of
* the other stream. Also, the stream with the highest throughput (larger batches)
* is consumed faster than the stream with the lowest throughput.
*
* The example terminates when both streams have entirely been
* consumed.
*
* For a less theoretical example of choice, see the stopwatch
* controller.
*
*/
object SimpleChoice extends ProcessType2x1[Int, Int, Int, Unit] { outer =>
def main(i1: Input[Int], i2: Input[Int], o1: Output[Int]) = for {
_ <- (i1 <+> i2) foreach {
case Left(a) =>
if (a % 2 == 0)
o1.write(a)
else
IO()
case Right(b) =>
if (b % 2 != 0)
o1.write(b)
else
IO()
}
_ <- o1.write(-1)
} yield ()
import molecule.platform.Platform
import molecule.channel.Console
import molecule.channel.IChan
def main(args: Array[String]): Unit = {
val src1 = IChan.source((1 to 100).toList, 5)
val src2 = IChan.source((1 to 100).toList, 10)
/**
* Logger channel prints the output on stdout.
*/
val log = Console.logOut[Int]("log") // .smap[Int, Int](1)((i, n) => (i+1, i+":"+n))
val platform = Platform("simple-choice")
platform.launch(SimpleChoice(src1, src2, log)).get_!()
}
}
| molecule-labs/molecule | molecule-io-examples/src/main/scala/molecule/examples/io/choice/SimpleChoice.scala | Scala | apache-2.0 | 2,170 |
package org.jetbrains.plugins.scala.lang.formatting.settings.inference
import com.intellij.psi.codeStyle.CodeStyleScheme
import com.intellij.psi.impl.source.codeStyle.CodeStyleSchemesImpl
import org.jetbrains.plugins.scala.ScalaLanguage
import org.jetbrains.plugins.scala.components.RunOnceStartupActivity
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import scala.jdk.CollectionConverters._
/**
* @author Roman.Shein
* Date: 24.01.2017
*/
//todo: Revalidate, add tests and remove
final class LegacyCodeStyleSettingsComponent extends RunOnceStartupActivity {
override def doRunActivity(): Unit = {
val codeStyleSchemes = CodeStyleSchemesImpl.getSchemeManager.getAllSchemes.asScala
codeStyleSchemes.foreach { scheme =>
val scalaSettings = scheme.getCodeStyleSettings.getCustomSettings(classOf[ScalaCodeStyleSettings])
val commonSettings = scheme.getCodeStyleSettings.getCommonSettings(ScalaLanguage.INSTANCE)
if (commonSettings.CALL_PARAMETERS_LPAREN_ON_NEXT_LINE &&
scalaSettings.CALL_PARAMETERS_NEW_LINE_AFTER_LPAREN == ScalaCodeStyleSettings.NO_NEW_LINE
) {
commonSettings.CALL_PARAMETERS_LPAREN_ON_NEXT_LINE = false
scalaSettings.CALL_PARAMETERS_NEW_LINE_AFTER_LPAREN = ScalaCodeStyleSettings.NEW_LINE_ALWAYS
}
}
}
override protected def doCleanup(): Unit = {}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/formatting/settings/inference/LegacyCodeStyleSettingsComponent.scala | Scala | apache-2.0 | 1,394 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.inject
package guice
import com.google.inject.util.{ Modules => GuiceModules }
import com.google.inject.util.{ Providers => GuiceProviders }
import com.google.inject.Binder
import com.google.inject.CreationException
import com.google.inject.Guice
import com.google.inject.Stage
import com.google.inject.{ Module => GuiceModule }
import java.io.File
import javax.inject.Inject
import javax.inject.Provider
import play.api.inject.{ Binding => PlayBinding }
import play.api.inject.{ Injector => PlayInjector }
import play.api.inject.{ Module => PlayModule }
import play.api.Configuration
import play.api.Environment
import play.api.Mode
import play.api.PlayException
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
class GuiceLoadException(message: String) extends RuntimeException(message)
/**
* A builder for creating Guice-backed Play Injectors.
*/
abstract class GuiceBuilder[Self] protected (
environment: Environment,
configuration: Configuration,
modules: Seq[GuiceableModule],
overrides: Seq[GuiceableModule],
disabled: Seq[Class[_]],
binderOptions: Set[BinderOption],
eagerly: Boolean
) {
import BinderOption._
/**
* Set the environment.
*/
final def in(env: Environment): Self =
copyBuilder(environment = env)
/**
* Set the environment path.
*/
final def in(path: File): Self =
copyBuilder(environment = environment.copy(rootPath = path))
/**
* Set the environment mode.
*/
final def in(mode: Mode): Self =
copyBuilder(environment = environment.copy(mode = mode))
/**
* Set the environment class loader.
*/
final def in(classLoader: ClassLoader): Self =
copyBuilder(environment = environment.copy(classLoader = classLoader))
/**
* Set the dependency initialization to eager.
*/
final def eagerlyLoaded(): Self =
copyBuilder(eagerly = true)
/**
* Add additional configuration.
*/
final def configure(conf: Configuration): Self =
copyBuilder(configuration = conf.withFallback(configuration))
/**
* Add additional configuration.
*/
final def configure(conf: Map[String, Any]): Self =
configure(Configuration.from(conf))
/**
* Add additional configuration.
*/
final def configure(conf: (String, Any)*): Self =
configure(conf.toMap)
private def withBinderOption(opt: BinderOption, enabled: Boolean = false): Self = {
copyBuilder(binderOptions = if (enabled) binderOptions + opt else binderOptions - opt)
}
/**
* Disable circular proxies on the Guice Binder. Without this option, Guice will try to proxy interfaces/traits to
* break a circular dependency.
*
* Circular proxies are disabled by default. Use disableCircularProxies(false) to allow circular proxies.
*/
final def disableCircularProxies(disable: Boolean = true): Self =
withBinderOption(DisableCircularProxies, disable)
/**
* Requires that Guice finds an exactly matching binding annotation.
*
* Disables the error-prone feature in Guice where it can substitute a binding for @Named Foo when injecting @Named("foo") Foo.
*
   * This option is disabled by default.
*/
final def requireExactBindingAnnotations(require: Boolean = true): Self =
withBinderOption(RequireExactBindingAnnotations, require)
/**
* Require @Inject on constructors (even default constructors).
*
* This option is disabled by default.
*/
final def requireAtInjectOnConstructors(require: Boolean = true): Self =
withBinderOption(RequireAtInjectOnConstructors, require)
/**
* Instructs the injector to only inject classes that are explicitly bound in a module.
*
* This option is disabled by default.
*/
final def requireExplicitBindings(require: Boolean = true): Self =
withBinderOption(RequireExplicitBindings, require)
/**
* Add Guice modules, Play modules, or Play bindings.
*
* @see [[GuiceableModuleConversions]] for the automatically available implicit
* conversions to [[GuiceableModule]] from modules and bindings.
*/
final def bindings(bindModules: GuiceableModule*): Self =
copyBuilder(modules = modules ++ bindModules)
/**
* Override bindings using Guice modules, Play modules, or Play bindings.
*
* @see [[GuiceableModuleConversions]] for the automatically available implicit
* conversions to [[GuiceableModule]] from modules and bindings.
*/
final def overrides(overrideModules: GuiceableModule*): Self =
copyBuilder(overrides = overrides ++ overrideModules)
/**
* Disable modules by class.
*/
final def disable(moduleClasses: Class[_]*): Self =
copyBuilder(disabled = disabled ++ moduleClasses)
/**
* Disable module by class.
*/
final def disable[T](implicit tag: ClassTag[T]): Self = disable(tag.runtimeClass)
/**
* Create a Play Injector backed by Guice using this configured builder.
*/
def applicationModule(): GuiceModule = createModule()
/**
* Creation of the Guice Module used by the injector.
* Libraries like Guiceberry and Jukito that want to handle injector creation may find this helpful.
*/
def createModule(): GuiceModule = {
import scala.collection.JavaConverters._
val injectorModule = GuiceableModule.guice(
Seq(
bind[GuiceInjector].toSelf,
bind[GuiceClassLoader].to(new GuiceClassLoader(environment.classLoader)),
bind[PlayInjector].toProvider[GuiceInjectorWithClassLoaderProvider],
// Java API injector is bound here so that it's available in both
// the default application loader and the Java Guice builders
bind[play.inject.Injector].to[play.inject.DelegateInjector]
),
binderOptions
)
val enabledModules = modules.map(_.disable(disabled))
val bindingModules = GuiceableModule.guiced(environment, configuration, binderOptions)(enabledModules) :+ injectorModule
val overrideModules = GuiceableModule.guiced(environment, configuration, binderOptions)(overrides)
GuiceModules.`override`(bindingModules.asJava).`with`(overrideModules.asJava)
}
/**
* Create a Play Injector backed by Guice using this configured builder.
*/
def injector(): PlayInjector = {
try {
val stage = environment.mode match {
case Mode.Prod => Stage.PRODUCTION
case _ if eagerly => Stage.PRODUCTION
case _ => Stage.DEVELOPMENT
}
val guiceInjector = Guice.createInjector(stage, applicationModule())
guiceInjector.getInstance(classOf[PlayInjector])
} catch {
case e: CreationException =>
e.getCause match {
case p: PlayException => throw p
case _ => {
e.getErrorMessages.asScala.foreach(_.getCause match {
case p: PlayException => throw p
case _ => // do nothing
})
throw e
}
}
}
}
/**
* Internal copy method with defaults.
*/
private def copyBuilder(
environment: Environment = environment,
configuration: Configuration = configuration,
modules: Seq[GuiceableModule] = modules,
overrides: Seq[GuiceableModule] = overrides,
disabled: Seq[Class[_]] = disabled,
binderOptions: Set[BinderOption] = binderOptions,
eagerly: Boolean = eagerly
): Self =
newBuilder(environment, configuration, modules, overrides, disabled, binderOptions, eagerly)
/**
* Create a new Self for this immutable builder.
* Provided by builder implementations.
*/
protected def newBuilder(
environment: Environment,
configuration: Configuration,
modules: Seq[GuiceableModule],
overrides: Seq[GuiceableModule],
disabled: Seq[Class[_]],
binderOptions: Set[BinderOption],
eagerly: Boolean
): Self
}
/**
* Default empty builder for creating Guice-backed Injectors.
*/
final class GuiceInjectorBuilder(
environment: Environment = Environment.simple(),
configuration: Configuration = Configuration.empty,
modules: Seq[GuiceableModule] = Seq.empty,
overrides: Seq[GuiceableModule] = Seq.empty,
disabled: Seq[Class[_]] = Seq.empty,
binderOptions: Set[BinderOption] = BinderOption.defaults,
eagerly: Boolean = false
) extends GuiceBuilder[GuiceInjectorBuilder](
environment,
configuration,
modules,
overrides,
disabled,
binderOptions,
eagerly
) {
// extra constructor for creating from Java
def this() = this(environment = Environment.simple())
/**
* Create a Play Injector backed by Guice using this configured builder.
*/
def build(): PlayInjector = injector()
protected def newBuilder(
environment: Environment,
configuration: Configuration,
modules: Seq[GuiceableModule],
overrides: Seq[GuiceableModule],
disabled: Seq[Class[_]],
binderOptions: Set[BinderOption],
eagerly: Boolean
): GuiceInjectorBuilder =
new GuiceInjectorBuilder(environment, configuration, modules, overrides, disabled, binderOptions, eagerly)
}
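// Illustrative usage sketch (not part of the original source): assuming a Play
// module `MyModule` and a bound class `MyComponent` exist, an injector can be
// assembled from this builder roughly as follows:
//
//   val injector = new GuiceInjectorBuilder()
//     .configure("hypothetical.key" -> "value")
//     .bindings(new MyModule)
//     .injector()
//   val component = injector.instanceOf[MyComponent]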
/**
* Magnet pattern for creating Guice modules from Play modules or bindings.
*/
trait GuiceableModule {
def guiced(env: Environment, conf: Configuration, binderOptions: Set[BinderOption]): Seq[GuiceModule]
def disable(classes: Seq[Class[_]]): GuiceableModule
}
/**
* Loading and converting Guice modules.
*/
object GuiceableModule extends GuiceableModuleConversions {
def loadModules(environment: Environment, configuration: Configuration): Seq[GuiceableModule] = {
Modules.locate(environment, configuration).map(guiceable)
}
/**
* Attempt to convert a module of unknown type to a GuiceableModule.
*/
def guiceable(module: Any): GuiceableModule = module match {
case playModule: PlayModule => fromPlayModule(playModule)
case guiceModule: GuiceModule => fromGuiceModule(guiceModule)
case unknown =>
throw new PlayException(
"Unknown module type",
s"Module [$unknown] is not a Play module or a Guice module"
)
}
/**
* Apply GuiceableModules to create Guice modules.
*/
def guiced(env: Environment, conf: Configuration, binderOptions: Set[BinderOption])(
builders: Seq[GuiceableModule]
): Seq[GuiceModule] =
builders.flatMap { module =>
module.guiced(env, conf, binderOptions)
}
}
/**
* Implicit conversions to GuiceableModules.
*/
trait GuiceableModuleConversions {
import scala.language.implicitConversions
implicit def fromGuiceModule(guiceModule: GuiceModule): GuiceableModule = fromGuiceModules(Seq(guiceModule))
implicit def fromGuiceModules(guiceModules: Seq[GuiceModule]): GuiceableModule = new GuiceableModule {
def guiced(env: Environment, conf: Configuration, binderOptions: Set[BinderOption]): Seq[GuiceModule] = guiceModules
def disable(classes: Seq[Class[_]]): GuiceableModule = fromGuiceModules(filterOut(classes, guiceModules))
override def toString = s"GuiceableModule(${guiceModules.mkString(", ")})"
}
implicit def fromPlayModule(playModule: PlayModule): GuiceableModule = fromPlayModules(Seq(playModule))
implicit def fromPlayModules(playModules: Seq[PlayModule]): GuiceableModule = new GuiceableModule {
def guiced(env: Environment, conf: Configuration, binderOptions: Set[BinderOption]): Seq[GuiceModule] =
playModules.map(guice(env, conf, binderOptions))
def disable(classes: Seq[Class[_]]): GuiceableModule = fromPlayModules(filterOut(classes, playModules))
override def toString = s"GuiceableModule(${playModules.mkString(", ")})"
}
implicit def fromPlayBinding(binding: PlayBinding[_]): GuiceableModule = fromPlayBindings(Seq(binding))
implicit def fromPlayBindings(bindings: Seq[PlayBinding[_]]): GuiceableModule = new GuiceableModule {
def guiced(env: Environment, conf: Configuration, binderOptions: Set[BinderOption]): Seq[GuiceModule] =
Seq(guice(bindings, binderOptions))
def disable(classes: Seq[Class[_]]): GuiceableModule = this // no filtering
override def toString = s"GuiceableModule(${bindings.mkString(", ")})"
}
private def filterOut[A](classes: Seq[Class[_]], instances: Seq[A]): Seq[A] =
instances.filterNot(o => classes.exists(_.isAssignableFrom(o.getClass)))
/**
* Convert the given Play module to a Guice module.
*/
def guice(env: Environment, conf: Configuration, binderOptions: Set[BinderOption])(module: PlayModule): GuiceModule =
guice(module.bindings(env, conf).toSeq, binderOptions)
/**
* Convert the given Play bindings to a Guice module.
*/
def guice(bindings: Seq[PlayBinding[_]], binderOptions: Set[BinderOption]): GuiceModule = {
new com.google.inject.AbstractModule {
override def configure(): Unit = {
binderOptions.foreach(_(binder))
for (b <- bindings) {
val binding = b.asInstanceOf[PlayBinding[Any]]
val builder = binder().withSource(binding).bind(GuiceKey(binding.key))
binding.target.foreach {
case ProviderTarget(provider) => builder.toProvider(GuiceProviders.guicify(provider))
case ProviderConstructionTarget(provider) => builder.toProvider(provider)
case ConstructionTarget(implementation) => builder.to(implementation)
case BindingKeyTarget(key) => builder.to(GuiceKey(key))
}
(binding.scope, binding.eager) match {
case (Some(scope), false) => builder.in(scope)
case (None, true) => builder.asEagerSingleton()
case (Some(scope), true) =>
throw new GuiceLoadException("A binding must either declare a scope or be eager: " + binding)
case _ => // do nothing
}
}
}
}
}
}
sealed abstract class BinderOption(configureBinder: Binder => Unit) extends (Binder => Unit) {
def apply(b: Binder) = configureBinder(b)
}
object BinderOption {
val defaults: Set[BinderOption] = Set(DisableCircularProxies)
case object DisableCircularProxies extends BinderOption(_.disableCircularProxies)
case object RequireAtInjectOnConstructors extends BinderOption(_.requireAtInjectOnConstructors)
case object RequireExactBindingAnnotations extends BinderOption(_.requireExactBindingAnnotations)
case object RequireExplicitBindings extends BinderOption(_.requireExplicitBindings)
}
/**
* Conversion from Play BindingKey to Guice Key.
*/
object GuiceKey {
import com.google.inject.Key
def apply[T](key: BindingKey[T]): Key[T] = {
key.qualifier match {
case Some(QualifierInstance(instance)) => Key.get(key.clazz, instance)
case Some(QualifierClass(clazz)) => Key.get(key.clazz, clazz)
case None => Key.get(key.clazz)
}
}
}
/**
* Play Injector backed by a Guice Injector.
*/
class GuiceInjector @Inject() (injector: com.google.inject.Injector) extends PlayInjector {
/**
* Get an instance of the given class from the injector.
*/
def instanceOf[T](implicit ct: ClassTag[T]) = instanceOf(ct.runtimeClass.asInstanceOf[Class[T]])
/**
* Get an instance of the given class from the injector.
*/
def instanceOf[T](clazz: Class[T]) = injector.getInstance(clazz)
/**
* Get an instance bound to the given binding key.
*/
def instanceOf[T](key: BindingKey[T]) = injector.getInstance(GuiceKey(key))
}
/**
* An object that holds a `ClassLoader` for Guice to use. We use this
* simple value object so it can be looked up by its type when we're
* assembling the Guice injector.
*
* @param classLoader The wrapped `ClassLoader`.
*/
class GuiceClassLoader(val classLoader: ClassLoader)
/**
* A provider for a Guice injector that wraps the injector to ensure
* it uses the correct `ClassLoader`.
*
* @param injector The injector to wrap.
* @param guiceClassLoader The `ClassLoader` the injector should use.
*/
class GuiceInjectorWithClassLoaderProvider @Inject() (injector: GuiceInjector, guiceClassLoader: GuiceClassLoader)
extends Provider[Injector] {
override def get(): PlayInjector = new ContextClassLoaderInjector(injector, guiceClassLoader.classLoader)
}
| benmccann/playframework | core/play-guice/src/main/scala/play/api/inject/guice/GuiceInjectorBuilder.scala | Scala | apache-2.0 | 16,470 |
package io.udash.benchmarks.properties
import io.udash._
import japgolly.scalajs.benchmark._
import japgolly.scalajs.benchmark.gui._
object TransformedSeqPropertyListeners extends BenchmarkUtils {
private val seqSize = 50
private val properties: Seq[(String, () => (SeqProperty[Int], ReadableSeqProperty[Int]))] = Seq(
("direct property", () => {
val p = SeqProperty(Seq.tabulate(seqSize)(identity))
(p, p)
}),
("one-way transformed elements", () => {
val p = SeqProperty(Seq.tabulate(seqSize)(identity))
val t = p.transformElements(_ + 1)
(p, t)
}),
("both-ways transformed elements", () => {
val p = SeqProperty(Seq.tabulate(seqSize)(identity))
val t = p.bitransformElements(_ + 1)(_ - 1)
(p, t)
}),
("one-way transformed elements with slow transformer", () => {
val p = SeqProperty(Seq.tabulate(seqSize)(identity))
val t = p.transformElements(slowInc)
(p, t)
}),
("both-ways transformed elements with slow transformer", () => {
val p = SeqProperty(Seq.tabulate(seqSize)(identity))
val t = p.bitransformElements(slowInc)(slowDec)
(p, t)
})
)
private val benchmarks = generateGetSetListenBenchmarks[SeqProperty[Int], ReadableSeqProperty[Int]](properties)(
Seq(20), Seq(0.1, 1, 10), Seq(0, 1, 10, 100),
Seq(
("whole Seq set", (p, i) => p.set(Seq.tabulate(seqSize)(_ + i)), _.get),
("replace part of Seq", replaceElements, _.get)
),
Seq(("empty listener", _.listen(_ => ())))
)
val suite = GuiSuite(
Suite("SeqProperty - transform - set, get & listen")(benchmarks: _*)
)
}
| UdashFramework/udash-core | benchmarks/.js/src/main/scala/io/udash/benchmarks/properties/TransformedSeqPropertyListeners.scala | Scala | apache-2.0 | 1,646 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle.sort
import org.mockito.{Mock, MockitoAnnotations}
import org.mockito.Answers.RETURNS_SMART_NULLS
import org.mockito.Mockito._
import org.scalatest.Matchers
import org.apache.spark.{Partitioner, SharedSparkContext, ShuffleDependency, SparkFunSuite}
import org.apache.spark.memory.MemoryTestingUtils
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.shuffle.{BaseShuffleHandle, IndexShuffleBlockResolver}
import org.apache.spark.shuffle.api.ShuffleExecutorComponents
import org.apache.spark.shuffle.sort.io.LocalDiskShuffleExecutorComponents
import org.apache.spark.storage.BlockManager
import org.apache.spark.util.Utils
class SortShuffleWriterSuite extends SparkFunSuite with SharedSparkContext with Matchers {
@Mock(answer = RETURNS_SMART_NULLS)
private var blockManager: BlockManager = _
private val shuffleId = 0
private val numMaps = 5
private var shuffleHandle: BaseShuffleHandle[Int, Int, Int] = _
private val shuffleBlockResolver = new IndexShuffleBlockResolver(conf)
private val serializer = new JavaSerializer(conf)
private var shuffleExecutorComponents: ShuffleExecutorComponents = _
override def beforeEach(): Unit = {
super.beforeEach()
MockitoAnnotations.initMocks(this)
val partitioner = new Partitioner() {
def numPartitions = numMaps
def getPartition(key: Any) = Utils.nonNegativeMod(key.hashCode, numPartitions)
}
shuffleHandle = {
val dependency = mock(classOf[ShuffleDependency[Int, Int, Int]])
when(dependency.partitioner).thenReturn(partitioner)
when(dependency.serializer).thenReturn(serializer)
when(dependency.aggregator).thenReturn(None)
when(dependency.keyOrdering).thenReturn(None)
new BaseShuffleHandle(shuffleId, numMaps = numMaps, dependency)
}
shuffleExecutorComponents = new LocalDiskShuffleExecutorComponents(
conf, blockManager, shuffleBlockResolver)
}
override def afterAll(): Unit = {
try {
shuffleBlockResolver.stop()
} finally {
super.afterAll()
}
}
test("write empty iterator") {
val context = MemoryTestingUtils.fakeTaskContext(sc.env)
val writer = new SortShuffleWriter[Int, Int, Int](
shuffleBlockResolver,
shuffleHandle,
mapId = 1,
context,
shuffleExecutorComponents)
writer.write(Iterator.empty)
writer.stop(success = true)
val dataFile = shuffleBlockResolver.getDataFile(shuffleId, 1)
val writeMetrics = context.taskMetrics().shuffleWriteMetrics
assert(!dataFile.exists())
assert(writeMetrics.bytesWritten === 0)
assert(writeMetrics.recordsWritten === 0)
}
test("write with some records") {
val context = MemoryTestingUtils.fakeTaskContext(sc.env)
val records = List[(Int, Int)]((1, 2), (2, 3), (4, 4), (6, 5))
val writer = new SortShuffleWriter[Int, Int, Int](
shuffleBlockResolver,
shuffleHandle,
mapId = 2,
context,
shuffleExecutorComponents)
writer.write(records.toIterator)
writer.stop(success = true)
val dataFile = shuffleBlockResolver.getDataFile(shuffleId, 2)
val writeMetrics = context.taskMetrics().shuffleWriteMetrics
assert(dataFile.exists())
assert(dataFile.length() === writeMetrics.bytesWritten)
assert(records.size === writeMetrics.recordsWritten)
}
}
| pgandhi999/spark | core/src/test/scala/org/apache/spark/shuffle/sort/SortShuffleWriterSuite.scala | Scala | apache-2.0 | 4,158 |
package frameless
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeArrayData}
import org.apache.spark.sql.types._
import org.apache.spark.sql.FramelessInternals.UserDefinedType
@SQLUserDefinedType(udt = classOf[UdtEncodedClassUdt])
class UdtEncodedClass(val a: Int, val b: Array[Double]) {
override def equals(other: Any): Boolean = other match {
case that: UdtEncodedClass => a == that.a && java.util.Arrays.equals(b, that.b)
case _ => false
}
override def hashCode(): Int = {
val state = Seq[Any](a, b)
state.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b)
}
override def toString = s"UdtEncodedClass($a, $b)"
}
object UdtEncodedClass {
implicit val udtForUdtEncodedClass = new UdtEncodedClassUdt
}
class UdtEncodedClassUdt extends UserDefinedType[UdtEncodedClass] {
def sqlType: DataType = {
StructType(Seq(
StructField("a", IntegerType, nullable = false),
StructField("b", ArrayType(DoubleType, containsNull = false), nullable = false)
))
}
def serialize(obj: UdtEncodedClass): InternalRow = {
val row = new GenericInternalRow(3)
row.setInt(0, obj.a)
row.update(1, UnsafeArrayData.fromPrimitiveArray(obj.b))
row
}
def deserialize(datum: Any): UdtEncodedClass = datum match {
case row: InternalRow => new UdtEncodedClass(row.getInt(0), row.getArray(1).toDoubleArray())
}
def userClass: Class[UdtEncodedClass] = classOf[UdtEncodedClass]
}
| adelbertc/frameless | dataset/src/test/scala/frameless/UdtEncodedClass.scala | Scala | apache-2.0 | 1,512 |
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package relite
import r._
import r.data._
import r.data.internal._
import r.builtins.{ CallFactory, Primitives }
import r.nodes.ast._
import r.nodes.exec.{ BaseR, RNode }
import r.runtime._;
import org.antlr.runtime._
import java.io._
import scala.collection.JavaConversions._
object Test2 {
def eval(e: ASTNode, frame: Frame) = e match {
case e: FunctionCall =>
println("unknown f: " + e.getName + " / " + e);
println("unknown f: " + e.getArgs.first.getValue) //foreach(_.getValue));
new RLanguage(e)
case _ => println("unknown: " + e); new RLanguage(e) //RInt.RIntFactory.getScalar(42)
}
def main(args: Array[String]): Unit = {
val cf = new CallFactory("foobar", Array("e"), Array("e")) {
def create(call: ASTNode, names: Array[RSymbol], exprs: Array[RNode]): RNode = {
check(call, names, exprs)
val expr = exprs(0)
val ast = expr.getAST()
val ast1: AnyRef = ast // apparently ASTNode member fields are reassigned -- don't make it look like one!
new BaseR(call) {
def execute(frame: Frame): AnyRef = {
val ast = ast1.asInstanceOf[ASTNode]
println("dyn " + ast1 + "/" + System.identityHashCode(ast1))
eval(ast, null)
}
}
}
}
Primitives.add(cf)
val res = RContext.eval(RContext.parseFile(
new ANTLRInputStream(new ByteArrayInputStream("5+5; foobar(Vector.rand(100))".getBytes))))
println(res.pretty)
}
}
| tesendic/Relite | test-src/test2.scala | Scala | agpl-3.0 | 2,484 |
/*
* Copyright 2012 Pellucid and Zenexity
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package datomisca
import scala.language.reflectiveCalls
import scala.collection.JavaConverters._
import java.util.Date
private[datomisca] class ExciseEntity(
val id: AnyRef,
excisionId: TempId = DId(Partition.USER),
attrs: Set[Keyword] = Set(),
before: Option[Either[Date, Long]] = None
) extends TxData with FinalIdentified {
def before(d: Date) = new ExciseEntity(this.id, this.excisionId, this.attrs, Some(Left(d)))
def before(tx: Long) = new ExciseEntity(this.id, this.excisionId, this.attrs, Some(Right(tx)))
lazy val props = {
val builder = Map.newBuilder[AnyRef, AnyRef]
builder += ((Namespace.DB / "id") -> excisionId.toDatomicId)
builder += ((Namespace.DB / "excise") -> id)
if(!attrs.isEmpty)
builder += ((Namespace.DB.EXCISE / "attrs") -> datomic.Util.list(attrs.toSeq:_*))
before foreach {
case Left(d: Date) =>
builder += ((Namespace.DB.EXCISE / "before") -> d)
case Right(tx: Long) =>
builder += ((Namespace.DB.EXCISE / "beforeT") -> (tx: java.lang.Long))
}
builder.result().asJava
}
def toTxData: AnyRef = {
props
}
override def toString = props.toString
}
private[datomisca] class ExciseAttr(
attr: Keyword,
excisionId: TempId = DId(Partition.USER),
before: Option[Either[Date, Long]]
) extends TxData {
def before(d: Date) = new ExciseAttr(this.attr, this.excisionId, Some(Left(d)))
def before(tx: Long) = new ExciseAttr(this.attr, this.excisionId, Some(Right(tx)))
lazy val props =
before match {
case None => // BE CAREFUL it excises All Values of An Attribute
datomic.Util.map(
(Namespace.DB / "id"), excisionId.toDatomicId,
(Namespace.DB / "excise"), attr
)
case Some(Left(d)) =>
datomic.Util.map(
(Namespace.DB / "id"), excisionId.toDatomicId,
(Namespace.DB / "excise"), attr,
(Namespace.DB.EXCISE / "before"), d
)
case Some(Right(tx)) =>
datomic.Util.map(
(Namespace.DB / "id"), excisionId.toDatomicId,
(Namespace.DB / "excise"), attr,
(Namespace.DB.EXCISE / "beforeT"), (tx: java.lang.Long)
)
}
def toTxData: AnyRef = {
props
}
override def toString = props.toString
}
object Excise {
  /** Create operations to partially excise an entity
    * @param id the targeted [[DId]] which can be a Long or a [[LookupRef]]
    * @param excisionId the temporary ID of the excision entity
    * @param attr attribute to be excised from the entity (partial excision)
    */
def entity[T](id: T, excisionId: TempId, attr: Keyword, attrs: Keyword*)(implicit ev: AsPermanentEntityId[T]) =
new ExciseEntity(ev.conv(id), excisionId, (attr +: attrs).toSet)
def entity[T](id: T, attr: Keyword, attrs: Keyword*)(implicit ev: AsPermanentEntityId[T]) =
new ExciseEntity(ev.conv(id), attrs = (attr +: attrs).toSet)
  /** Create operations to excise a full entity
    * @param id the targeted [[DId]] which can be a Long or a [[LookupRef]]
    * @param excisionId the temporary ID of the excision entity
    */
def entity[T](id: T, excisionId: TempId)(implicit ev: AsPermanentEntityId[T]) =
new ExciseEntity(ev.conv(id), excisionId)
def entity[T](id: T)(implicit ev: AsPermanentEntityId[T]) =
new ExciseEntity(ev.conv(id))
  /** Create operations to excise an entity, restricting excision to datoms created before a tx
* @param id the targeted [[DId]] which can be a Long or a [[LookupRef]]
* @param excisionId the temporary ID of the excision entity
* @param before the transaction id before which datoms excision is limited
*/
def entity[T](id: T, excisionId: TempId, before: Long)(implicit ev: AsPermanentEntityId[T]) =
new ExciseEntity(ev.conv(id), excisionId=excisionId, before=Some(Right(before)))
def entity[T](id: T, before: Long)(implicit ev: AsPermanentEntityId[T]) =
new ExciseEntity(ev.conv(id), before=Some(Right(before)))
  /** Create operations to excise an entity, restricting excision to datoms created before a date
* @param id the targeted [[DId]] which can be a Long or a [[LookupRef]]
* @param excisionId the temporary ID of the excision entity
* @param before the instant before which datoms excision is limited
*/
def entity[T](id: T, excisionId: TempId, before: Date)(implicit ev: AsPermanentEntityId[T]) =
new ExciseEntity(ev.conv(id), excisionId=excisionId, before=Some(Left(before)))
def entity[T](id: T, before: Date)(implicit ev: AsPermanentEntityId[T]) =
new ExciseEntity(ev.conv(id), before=Some(Left(before)))
  /** Create operations to excise all values of an attribute, restricting excision to datoms created before a date
    * @param attr the attribute whose values are excised
    * @param excisionId the temporary ID of the excision entity
    * @param before the instant before which datoms excision is limited
    */
def attribute(attr: Keyword, excisionId: TempId, before: Date) =
new ExciseAttr(attr=attr, excisionId=excisionId, before=Some(Left(before)))
def attribute(attr: Keyword, before: Date) =
new ExciseAttr(attr=attr, before=Some(Left(before)))
  /** Create operations to excise all values of an attribute, restricting excision to datoms created before a transaction
    * @param attr the attribute whose values are excised
    * @param excisionId the temporary ID of the excision entity
    * @param before the transaction before which datoms excision is limited
    */
def attribute(attr: Keyword, excisionId: TempId, before: Long) =
new ExciseAttr(attr=attr, excisionId=excisionId, before=Some(Right(before)))
def attribute(attr: Keyword, before: Long) = new ExciseAttr(attr=attr, before=Some(Right(before)))
  /** WARNING: this removes ALL values of this attribute
    * Creates operations to excise all values of an attribute with no time restriction
    * @param attr the attribute whose values are excised
    * @param excisionId the temporary ID of the excision entity
    */
def attribute(attr: Keyword, excisionId: TempId) = new ExciseAttr(attr=attr, excisionId=excisionId, before=None)
def attribute(attr: Keyword) = new ExciseAttr(attr=attr, before=None)
}
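
// Usage sketch (not part of the original file): how the excision builders above
// might be combined. The entity id, the keyword built via Namespace, and the
// Datomic.transact call are illustrative assumptions, not values defined here.
//
//   val partial = Excise.entity(17592186045418L, DId(Partition.USER), Namespace("user") / "ssn")
//   val full    = Excise.entity(17592186045418L)
//   val byAttr  = Excise.attribute(Namespace("user") / "ssn", before = new Date)
//   Datomic.transact(partial, full, byAttr)   // hypothetical transaction submission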
| Enalmada/datomisca | core/src/main/scala/datomisca/excision.scala | Scala | apache-2.0 | 6,645 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.body
import io.gatling.commons.validation._
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.session.{ Expression, StaticStringExpression }
import io.gatling.core.util.Resource
import io.gatling.core.util.cache.Cache
import com.github.benmanes.caffeine.cache.LoadingCache
case class ResourceAndCachedBytes(resource: Resource, cachedBytes: Option[Array[Byte]])
class RawFileBodies(implicit configuration: GatlingConfiguration) {
private val resourceCache: LoadingCache[String, Validation[Resource]] = {
val pathToResource: String => Validation[Resource] = path => Resource.resource(path)
Cache.newConcurrentLoadingCache(configuration.core.rawFileBodiesCacheMaxCapacity, pathToResource)
}
private val bytesCache: LoadingCache[Resource, Option[Array[Byte]]] = {
val resourceToBytes: Resource => Option[Array[Byte]] = resource =>
if (resource.file.length > configuration.core.rawFileBodiesInMemoryMaxSize)
None
else
Some(resource.bytes)
Cache.newConcurrentLoadingCache(configuration.core.rawFileBodiesCacheMaxCapacity, resourceToBytes)
}
def asResourceAndCachedBytes(filePath: Expression[String]): Expression[ResourceAndCachedBytes] =
filePath match {
case StaticStringExpression(path) =>
val resourceAndCachedBytes =
for {
resource <- resourceCache.get(path)
} yield ResourceAndCachedBytes(resource, Some(resource.bytes))
_ => resourceAndCachedBytes
case _ =>
session =>
for {
path <- filePath(session)
resource <- resourceCache.get(path)
} yield ResourceAndCachedBytes(resource, bytesCache.get(resource))
}
}
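
// Usage sketch (not part of the original file): resolving a body path once per call.
// The implicit GatlingConfiguration, the session value and the file name are
// assumptions made for the sake of the example.
//
//   val rawFileBodies = new RawFileBodies()(configuration)
//   val bodyExpr = rawFileBodies.asResourceAndCachedBytes(StaticStringExpression("request-body.json"))
//   bodyExpr(session).map { case ResourceAndCachedBytes(resource, cachedBytes) =>
//     cachedBytes.getOrElse(resource.bytes) // falls back to reading the file when it is too large to cache
//   }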
| wiacekm/gatling | gatling-core/src/main/scala/io/gatling/core/body/RawFileBodies.scala | Scala | apache-2.0 | 2,358 |
import sbt._
import Keys._
object build extends Build {
lazy val k1 = taskKey[Unit]("")
lazy val k2 = taskKey[Unit]("")
val UpdateK1 = Command.command("UpdateK1") { st: State =>
val ex = Project extract st
import ex._
val session2 = BuiltinCommands.setThis(st, ex, Seq(k1 := {}), """k1 := {
|//
|//
|}""".stripMargin).session
val st1 = BuiltinCommands.reapply(session2, structure, st)
// SessionSettings.writeSettings(ex.currentRef, session2, ex.session.original, ex.structure)
SessionSettings.saveAllSettings(st1)
}
lazy val root = Project("root", file(".")) settings(
commands += UpdateK1
)
}
// vim: set ts=4 sw=4 et:
| xeno-by/old-scalameta-sbt | sbt/src/sbt-test/project/session-update-from-cmd/project/build.scala | Scala | bsd-3-clause | 676 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.cluster
import java.nio.ByteBuffer
import kafka.common.{BrokerEndPointNotAvailableException, BrokerNotAvailableException, KafkaException}
import kafka.utils.Json
import org.apache.kafka.common.protocol.SecurityProtocol
/**
* A Kafka broker.
* A broker has an id and a collection of end-points.
* Each end-point is (host, port, protocolType).
*/
object Broker {
  /**
   * Create a broker object from id and JSON string.
   * @param id the broker id
   * @param brokerInfoString the broker registration JSON string
   *
   * Version 1 JSON schema for a broker is:
   * {"version":1,
   *  "host":"localhost",
   *  "port":9092,
   *  "jmx_port":9999,
   *  "timestamp":"2233345666" }
   *
   * The current JSON schema for a broker is:
   * {"version":2,
   *  "host":"localhost",
   *  "port":9092,
   *  "jmx_port":9999,
   *  "timestamp":"2233345666",
   *  "endpoints": ["PLAINTEXT://host1:9092",
   *                "SSL://host1:9093"] }
   */
def createBroker(id: Int, brokerInfoString: String): Broker = {
if(brokerInfoString == null)
throw new BrokerNotAvailableException("Broker id %s does not exist".format(id))
try {
Json.parseFull(brokerInfoString) match {
case Some(m) =>
val brokerInfo = m.asInstanceOf[Map[String, Any]]
val version = brokerInfo("version").asInstanceOf[Int]
val endpoints = version match {
case 1 =>
val host = brokerInfo("host").asInstanceOf[String]
val port = brokerInfo("port").asInstanceOf[Int]
Map(SecurityProtocol.PLAINTEXT -> new EndPoint(host, port, SecurityProtocol.PLAINTEXT))
case 2 =>
val listeners = brokerInfo("endpoints").asInstanceOf[List[String]]
listeners.map(listener => {
val ep = EndPoint.createEndPoint(listener)
(ep.protocolType, ep)
}).toMap
case _ => throw new KafkaException("Unknown version of broker registration. Only versions 1 and 2 are supported." + brokerInfoString)
}
new Broker(id, endpoints)
case None =>
throw new BrokerNotAvailableException("Broker id %d does not exist".format(id))
}
} catch {
case t: Throwable => throw new KafkaException("Failed to parse the broker info from zookeeper: " + brokerInfoString, t)
}
}
/**
*
* @param buffer Containing serialized broker.
* Current serialization is:
* id (int), number of endpoints (int), serialized endpoints
* @return broker object
*/
def readFrom(buffer: ByteBuffer): Broker = {
val id = buffer.getInt
val numEndpoints = buffer.getInt
val endpoints = List.range(0, numEndpoints).map(i => EndPoint.readFrom(buffer))
.map(ep => ep.protocolType -> ep).toMap
new Broker(id, endpoints)
}
}
case class Broker(id: Int, endPoints: Map[SecurityProtocol, EndPoint]) {
override def toString: String = id + " : " + endPoints.values.mkString("(",",",")")
def this(id: Int, host: String, port: Int, protocol: SecurityProtocol = SecurityProtocol.PLAINTEXT) = {
this(id, Map(protocol -> EndPoint(host, port, protocol)))
}
def this(bep: BrokerEndPoint, protocol: SecurityProtocol) = {
this(bep.id, bep.host, bep.port, protocol)
}
def writeTo(buffer: ByteBuffer) {
buffer.putInt(id)
buffer.putInt(endPoints.size)
for(endpoint <- endPoints.values) {
endpoint.writeTo(buffer)
}
}
def sizeInBytes: Int =
4 + /* broker id*/
4 + /* number of endPoints */
endPoints.values.map(_.sizeInBytes).sum /* end points */
  def supportsChannel(protocolType: SecurityProtocol): Boolean = {
endPoints.contains(protocolType)
}
def getBrokerEndPoint(protocolType: SecurityProtocol): BrokerEndPoint = {
val endpoint = endPoints.get(protocolType)
endpoint match {
case Some(endpoint) => new BrokerEndPoint(id, endpoint.host, endpoint.port)
case None =>
throw new BrokerEndPointNotAvailableException("End point %s not found for broker %d".format(protocolType,id))
}
}
}
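
// Usage sketch (not part of the original file): parsing a version-2 registration
// string as documented above; the JSON literal and broker id are assumptions.
//
//   val json = """{"version":2,"host":"host1","port":9092,"jmx_port":9999,"timestamp":"2233345666",
//                  "endpoints":["PLAINTEXT://host1:9092","SSL://host1:9093"]}"""
//   val broker = Broker.createBroker(1, json)
//   broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT)   // BrokerEndPoint(1, "host1", 9092)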
| junrao/kafka | core/src/main/scala/kafka/cluster/Broker.scala | Scala | apache-2.0 | 4,882 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.s2graph.counter.core
import java.text.SimpleDateFormat
import java.util.Calendar
case class TimedQualifier(q: TimedQualifier.IntervalUnit.Value, ts: Long) {
import TimedQualifier.IntervalUnit._
def dateTime: Long = {
val dateFormat = new SimpleDateFormat("yyyyMMddHHmm")
dateFormat.format(ts).toLong
}
def add(amount: Int): TimedQualifier = {
val cal = Calendar.getInstance()
cal.setTimeInMillis(ts)
q match {
case MINUTELY =>
cal.add(Calendar.MINUTE, amount)
case HOURLY =>
cal.add(Calendar.HOUR, amount)
case DAILY =>
cal.add(Calendar.DAY_OF_MONTH, amount)
case MONTHLY =>
cal.add(Calendar.MONTH, amount)
case TOTAL =>
}
copy(ts = cal.getTimeInMillis)
}
}
object TimedQualifier {
object IntervalUnit extends Enumeration {
type IntervalUnit = Value
val TOTAL = Value("t")
val MONTHLY = Value("M")
val DAILY = Value("d")
val HOURLY = Value("H")
val MINUTELY = Value("m")
}
def apply(q: String, ts: Long): TimedQualifier = TimedQualifier(IntervalUnit.withName(q), ts)
import IntervalUnit._
def getTsUnit(intervalUnit: IntervalUnit.IntervalUnit): Long = {
intervalUnit match {
      case MINUTELY => 1 * 60 * 1000L
      case HOURLY => 60 * 60 * 1000L
      case DAILY => 24 * 60 * 60 * 1000L
      case MONTHLY => 31 * 24 * 60 * 60 * 1000L
case v: IntervalUnit.IntervalUnit =>
throw new RuntimeException(s"unsupported operation for ${v.toString}")
}
}
def getQualifiers(intervals: Seq[IntervalUnit], millis: Long): Seq[TimedQualifier] = {
val cal = Calendar.getInstance()
cal.setTimeInMillis(millis)
val newCal = Calendar.getInstance()
newCal.set(cal.get(Calendar.YEAR), cal.get(Calendar.MONTH), 1, 0, 0, 0)
newCal.set(Calendar.MILLISECOND, 0)
val month = newCal.getTimeInMillis
val Seq(day, hour, minute) = {
for {
field <- Seq(Calendar.DATE, Calendar.HOUR_OF_DAY, Calendar.MINUTE)
} yield {
newCal.set(field, cal.get(field))
newCal.getTimeInMillis
}
}
for {
interval <- intervals
} yield {
val ts = interval match {
case MINUTELY => minute
case HOURLY => hour
case DAILY => day
case MONTHLY => month
case TOTAL => 0L
}
TimedQualifier(interval, ts)
}
}
// descending order
def getQualifiersToLimit(intervals: Seq[IntervalUnit], limit: Int, tsOpt: Option[Long] = None): Seq[TimedQualifier] = {
val ts = tsOpt.getOrElse(System.currentTimeMillis())
for {
interval <- intervals
newLimit = if (interval == TOTAL) 1 else limit
i <- 0 until (-newLimit, -1)
} yield {
val newMillis = nextTime(interval, ts, i)
TimedQualifier(interval, newMillis)
}
}
private def nextTime(interval: IntervalUnit, ts: Long, i: Int): Long = {
val newCal = Calendar.getInstance()
newCal.setTimeInMillis(ts)
newCal.set(Calendar.MILLISECOND, 0)
interval match {
case MINUTELY =>
newCal.set(Calendar.SECOND, 0)
newCal.add(Calendar.MINUTE, i)
newCal.getTimeInMillis
case HOURLY =>
newCal.set(Calendar.SECOND, 0)
newCal.set(Calendar.MINUTE, 0)
newCal.add(Calendar.HOUR_OF_DAY, i)
newCal.getTimeInMillis
case DAILY =>
newCal.set(Calendar.SECOND, 0)
newCal.set(Calendar.MINUTE, 0)
newCal.set(Calendar.HOUR_OF_DAY, 0)
newCal.add(Calendar.DAY_OF_MONTH, i)
newCal.getTimeInMillis
case MONTHLY =>
newCal.set(Calendar.SECOND, 0)
newCal.set(Calendar.MINUTE, 0)
newCal.set(Calendar.HOUR_OF_DAY, 0)
newCal.set(Calendar.DAY_OF_MONTH, 1)
newCal.add(Calendar.MONTH, i)
newCal.getTimeInMillis
case TOTAL =>
0L
}
}
def getTimeList(interval: IntervalUnit, from: Long, to: Long, rst: List[Long] = Nil): List[Long] = {
interval match {
case TOTAL => List(0)
case _ =>
val next = nextTime(interval, from, 1)
if (next < from) {
// ignore
getTimeList(interval, next, to, rst)
}
else if (next < to) {
// recall
getTimeList(interval, next, to, rst :+ next)
} else {
// end condition
rst :+ next
}
}
}
// for reader
def getQualifiersToLimit(intervals: Seq[IntervalUnit],
limit: Int,
optFrom: Option[Long],
optTo: Option[Long]): Seq[List[TimedQualifier]] = {
val newLimit = limit - 1
for {
interval <- intervals
} yield {
{
(optFrom, optTo) match {
case (Some(from), Some(to)) =>
getTimeList(interval, from, to)
case (Some(from), None) =>
getTimeList(interval, from, nextTime(interval, from, newLimit))
case (None, Some(to)) =>
getTimeList(interval, nextTime(interval, to, -newLimit), to)
case (None, None) =>
val current = System.currentTimeMillis()
getTimeList(interval, nextTime(interval, current, -newLimit), current)
}
}.map { ts =>
TimedQualifier(interval, ts)
}
}
}
def getTimeRange(intervals: Seq[IntervalUnit],
limit: Int,
optFrom: Option[Long],
optTo: Option[Long]): Seq[(TimedQualifier, TimedQualifier)] = {
val newLimit = limit - 1
val maxInterval = intervals.maxBy {
case MINUTELY => 0
case HOURLY => 1
case DAILY => 2
case MONTHLY => 3
case TOTAL => 4
}
val minInterval = intervals.minBy {
case MINUTELY => 0
case HOURLY => 1
case DAILY => 2
case MONTHLY => 3
case TOTAL => 4
}
val (from, to) = (optFrom, optTo) match {
case (Some(f), Some(t)) =>
(f, t)
case (Some(f), None) =>
(f, nextTime(minInterval, f, newLimit))
case (None, Some(t)) =>
(nextTime(maxInterval, t, -newLimit), t)
case (None, None) =>
val current = System.currentTimeMillis()
(nextTime(maxInterval, current, -newLimit), nextTime(minInterval, current, 0))
}
for {
interval <- intervals
} yield {
(TimedQualifier(interval, from), TimedQualifier(interval, to))
}
}
}
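
// Usage sketch (not part of the original file): the three most recent hourly and
// daily qualifiers relative to "now"; timestamps are truncated to the start of
// each interval by nextTime above. The value names in the comment are assumptions.
//
//   import TimedQualifier.IntervalUnit._
//   TimedQualifier.getQualifiersToLimit(Seq(HOURLY, DAILY), 3)
//   // => Seq(TimedQualifier(H, thisHour), TimedQualifier(H, oneHourAgo), TimedQualifier(H, twoHoursAgo),
//   //        TimedQualifier(d, today), TimedQualifier(d, yesterday), TimedQualifier(d, twoDaysAgo))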
| jongwook/incubator-s2graph | s2counter_core/src/main/scala/org/apache/s2graph/counter/core/TimedQualifier.scala | Scala | apache-2.0 | 7,224 |
package hyperdrive.cj.model
import shapeless._
import shapeless.labelled._
trait DataValueReader[T] {
def readDataValue(value: DataValue): Option[T]
}
object DataValueReader {
// implicit def optionDVR[T](implicit enc: DataValueReader[T]): DataValueReader[Option[T]] = new DataValueReader[Option[T]] {
// def readDataValue(value: DataValue): Option[Option[T]] = Some(enc.readDataValue(value))
// }
implicit val stringDVR: DataValueReader[String] = new DataValueReader[String] {
def readDataValue(value: DataValue): Option[String] = value match {
case StringDataValue(s) => Some(s)
case _ => None
}
}
implicit val boolDVR: DataValueReader[Boolean] = new DataValueReader[Boolean] {
def readDataValue(value: DataValue): Option[Boolean] = value match {
case BooleanDataValue(b) => Some(b)
case _ => None
}
}
implicit val intDVR: DataValueReader[Int] = new DataValueReader[Int] {
def readDataValue(value: DataValue): Option[Int] = value match {
case BigDecimalDataValue(b) => Some(b.intValue)
case _ => None
}
}
implicit val longDVR: DataValueReader[Long] = new DataValueReader[Long] {
def readDataValue(value: DataValue): Option[Long] = value match {
case BigDecimalDataValue(b) => Some(b.longValue)
case _ => None
}
}
implicit val floatDVR: DataValueReader[Float] = new DataValueReader[Float] {
def readDataValue(value: DataValue): Option[Float] = value match {
case BigDecimalDataValue(b) => Some(b.floatValue)
case _ => None
}
}
implicit val doubleDVR: DataValueReader[Double] = new DataValueReader[Double] {
def readDataValue(value: DataValue): Option[Double] = value match {
case BigDecimalDataValue(b) => Some(b.doubleValue)
case _ => None
}
}
}
trait DataReader[T] {
def readData(values: Seq[Data]): Option[T]
}
object DataReader {
def apply[T](implicit dataReader: DataReader[T]): DataReader[T] = dataReader
implicit val hnilDR: DataReader[HNil] = new DataReader[HNil] {
def readData(values: Seq[Data]): Option[HNil] = Some(HNil)
}
implicit def hconsDR[K <: Symbol, V, L <: HList](implicit wit: Witness.Aux[K],
dvr: DataValueReader[V],
tail: DataReader[L]): DataReader[FieldType[K, V] :: L] = new DataReader[FieldType[K, V] :: L] {
def readData(values: Seq[Data]): Option[FieldType[K, V] :: L] = {
val fieldName = wit.value.name
val getValue: Seq[Data] => Option[DataValue] = values => values.find(_.name == fieldName).flatMap(_.value)
for {
hd <- getValue(values).flatMap(dvr.readDataValue(_)).headOption
tl <- tail.readData(values)
} yield hd.asInstanceOf[FieldType[K, V]] :: tl
}
}
implicit def genDR[T, Repr <: HList](implicit lg: LabelledGeneric.Aux[T, Repr],
dr: DataReader[Repr]): DataReader[T] = new DataReader[T] {
def readData(values: Seq[Data]): Option[T] = dr.readData(values).map(lg.from)
}
}
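
// Usage sketch (not part of the original file): deriving a reader for a hypothetical
// case class. The shape of the Data constructor (a name plus an optional DataValue)
// is assumed from how it is used in hconsDR above.
//
//   case class Customer(name: String, age: Int, active: Boolean)
//   val values = Seq(
//     Data("name", Some(StringDataValue("Ada"))),
//     Data("age", Some(BigDecimalDataValue(BigDecimal(36)))),
//     Data("active", Some(BooleanDataValue(true))))
//   DataReader[Customer].readData(values)   // Some(Customer("Ada", 36, true))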
| ScalaConsultants/hyperdrive | src/main/scala/hyperdrive/cj/model/DataReader.scala | Scala | unlicense | 3,137 |
package xfp.fixedpoint
import xfp.utils.{BigRational => Rational}
import Rational._
import AffineUtils._
import collection.mutable.Queue
object FixedPointFormat {
/** Default bitvector length. */
var globalBitLength = 16
/** Default rounding mode. */
var globalRounding = false
/** Default setting for whether to allow unsigned format .*/
var allowUnsignedFormat = false
def apply(b: Int, frac: Int): FixedPointFormat = FixedPointFormat(true, b, frac, false)
/**
Computes the best format for an interval of values while avoiding overflow.
    Returns an unsigned format if possible and allowed, and uses the default rounding.
    Throws a FixedPointOverflowException if the range cannot fit in the given bits
    and overflow checking is enabled.
@param a lower bound of range of values
@param b upper bound of range of values
@param bits bitvector length
*/
def getFormat(a: Rational, b: Rational, bits: Int): FixedPointFormat = {
assert(a <= b,
"Range given must have lower bound (%s) smaller than upper bound (%s)".format(a.toString, b.toString))
val signed = if (allowUnsignedFormat) a < Rational(0) else true
val intBits = bitsNeeded(math.max(math.abs(a.integerPart), math.abs(b.integerPart)))
// We don't allow 0 fractional bits
if (checkForOverflow && intBits >= bits) {
throw FixedPointOverflowException("Number of max bits (%d) exceeded: %d".format(bits, intBits))
}
val fracBits = if (signed) bits - intBits - 1 else bits - intBits
return new FixedPointFormat(signed, bits, fracBits, globalRounding)
}
/**
Computes the best format for an interval of values while avoiding overflow.
    Returns an unsigned format if possible, and uses the default rounding.
Uses the default bit length set in the globalBitLength variable.
@param a lower bound of range of values
@param b upper bound of range of values
*/
def getFormat(a: Rational, b: Rational): FixedPointFormat = getFormat(a, b, globalBitLength)
// Computes the format for the queue as given and then checks that this format can also hold
  // the new quantization error; if not, a larger format is returned.
private[fixedpoint] def getFormat(x0: Rational, queue1: Queue[Deviation], queue2: Queue[Deviation],
bits: Int): FixedPointFormat = {
val radius = sumQueue(queue1) + sumQueue(queue2)
// preliminary format, without quantization errors
val prelim = getFormat(x0 - radius, x0 + radius, bits)
val newRadius = radius + prelim.quantError
val secondary = getFormat(x0 - newRadius, x0 + newRadius)
if (prelim != secondary) {
return secondary
}
else {
return prelim
}
}
/**
Computes the best format needed to represent a rational number (i.e. a constant).
    The result is always a signed format and uses the default bit length
set in the globalBitLength variable.
@param r rational to be represented
*/
def getFormat(r: Rational): FixedPointFormat = {
val tmp = getFormat(r, globalBitLength)
return tmp
}
/**
Computes the best format needed to represent a rational number (i.e. a constant).
The result is always a signed format.
@param r rational to be represented
@param bitLength length of the bitvector that's available
*/
def getFormat(r: Rational, bitLength: Int): FixedPointFormat = {
val fracBits = fractionalBits(math.abs(r.toDouble), bitLength)
val tmp = new FixedPointFormat(true, bitLength, fracBits, globalRounding)
return tmp
}
private def fractionalBits(constant: Double, totalBits: Int): Int = {
return math.ceil(totalBits - 2 - math.log(constant)/math.log(2)).toInt
}
/**
Returns the number of bits needed to represent the given integer.
Assumes 32-bit integers.
*/
private def bitsNeeded(value: Int): Int = {
return 32 - Integer.numberOfLeadingZeros(value)
}
}
/**
* A fixed-point format.
* @param signed sign bit, if true the fixed-point numbers are signed
* @param bits bitvector length
* @param f number of fractional bits
* @param realRounding true if true rounding is used, truncation otherwise
*/
case class FixedPointFormat(val signed: Boolean, val bits: Int, val f: Int, val realRounding: Boolean) {
val weight = math.pow(2, f).asInstanceOf[Long] // denominator of weight really
assert(signed, "Unsigned format!")
/**
Number of integer bits. Can be zero for numbers smaller than 1.
*/
val i = math.max(if (signed) bits - f - 1 else bits - f, 0)
assert(i >= 0, "number of integer bits is negative: %d!".format(i))
assert(f >= 0, "number of fractional bits is negative: %d!".format(f))
/**
The range of this fixedpoint format.
*/
val range: (Rational, Rational) = {
if (signed) {
( Rational(-math.pow(2, bits-1).asInstanceOf[Long], weight),
Rational(math.pow(2, bits-1).asInstanceOf[Long] - 1, weight))
}
else {
(Rational(0, 1), Rational(math.pow(2, bits).asInstanceOf[Long] - 1, weight))
}
}
/**
Determines whether this format includes a certain range.
@param interval range to test
*/
def includesRange(interval: (Rational, Rational)): Boolean = {
assert(interval._1 <= interval._2)
range._1 <= interval._1 && range._2 >= interval._2
}
/**
Checks if a given number (constant) is representable in this format.
    @param d the number to check
*/
def canRepresent(d: Rational): Boolean = {
val tmp = d.toDouble * math.pow(2, f)
if (tmp.toInt == tmp) {
//println("FOUND EXACT CONST: " + d)
return true
}
else
return false
}
/**
Quantization error of this format.
*/
val quantError: Rational =
if (realRounding) Rational(1, math.pow(2, (f + 1)).asInstanceOf[Long])
else Rational(1, math.pow(2, f).asInstanceOf[Long])
override def toString =
"<%d,%d,%d>".format( if (signed) 1 else 0, bits, f)
}
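
// Usage sketch (not part of the original file): choosing a format for a value range
// and inspecting it. The Rational(numerator, denominator) constructor is assumed to
// accept small integer literals.
//
//   val fmt = FixedPointFormat.getFormat(Rational(-3, 2), Rational(5, 2), 16)
//   // the integer part needs 2 bits, so the signed format is <1,16,13>
//   fmt.quantError                                          // 2^-13 (2^-14 with rounding enabled)
//   fmt.includesRange((Rational(-3, 2), Rational(5, 2)))    // true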
| malyzajko/xfp | analysis_tool/src/xfp/fixedpoint/FixedPointFormat.scala | Scala | bsd-3-clause | 5,920 |
/* Code Pulse: a real-time code coverage tool, for more information, see <http://code-pulse.com/>
*
* Copyright (C) 2014-2017 Code Dx, Inc. <https://codedx.com/>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.codedx.codepulse.hq.protocol
/** Base trait for classes that represent data received from an Agent
*/
sealed trait DataMessage {
def content: DataMessageContent
}
object DataMessage {
case class SequencedData(timestamp: Int, sequence: Int, val content: DataMessageContent) extends DataMessage
case class UnsequencedData(val content: DataMessageContent) extends DataMessage
}
/** Base trait for classes that represent actual trace data
*/
sealed trait DataMessageContent
/** Container for the various concrete `DataMessageContent` classes.
 * Each DataMessage class is a POJO representation of the messages
* described by the MessageProtocol document, minus any sequencing info.
*/
object DataMessageContent {
case class MapThreadName(
threadName: String,
threadId: Int,
timestamp: Int)
extends DataMessageContent
case class MapMethodSignature(
methodSig: String,
methodId: Int)
extends DataMessageContent
case class MapSourceLocation(
methodId: Int,
startLine: Int,
endLine: Int,
startCharacter: Short,
endCharacter: Short,
sourceLocationId: Int)
extends DataMessageContent
case class SourceLocationCount(
methodId: Int,
sourceLocationCount: Int)
extends DataMessageContent
case class MapException(
exception: String,
exceptionId: Int)
extends DataMessageContent
case class MethodEntry(
methodId: Int,
timestamp: Int,
threadId: Int)
extends DataMessageContent
case class MethodVisit(
methodId: Int,
sourceLocationId: Int,
timestamp: Int,
threadId: Int)
extends DataMessageContent
case class MethodExit(
methodId: Int,
timestamp: Int,
exceptionThrown: Boolean,
threadId: Int)
extends DataMessageContent
case class Exception(
exceptionId: Int,
methodId: Int,
timestamp: Int,
lineNum: Int,
threadId: Int)
extends DataMessageContent
case class ExceptionBubble(
exceptionId: Int,
methodId: Int,
timestamp: Int,
threadId: Int)
extends DataMessageContent
case class Marker(
key: String,
value: String,
timestamp: Int)
extends DataMessageContent
} | secdec/codepulse | hq/src/main/scala/com/secdec/bytefrog/hq/protocol/DataMessage.scala | Scala | apache-2.0 | 2,811 |
package org.scalatest.examples.funsuite.beforeandafter
import org.scalatest.FunSuite
import org.scalatest.BeforeAndAfter
import collection.mutable.ListBuffer
class ExampleSuite extends FunSuite with BeforeAndAfter {
val builder = new StringBuilder
val buffer = new ListBuffer[String]
before {
builder.append("ScalaTest is ")
}
after {
builder.clear()
buffer.clear()
}
test("testing should be easy") {
builder.append("easy!")
assert(builder.toString === "ScalaTest is easy!")
assert(buffer.isEmpty)
buffer += "sweet"
}
test("testing should be fun") {
builder.append("fun!")
assert(builder.toString === "ScalaTest is fun!")
assert(buffer.isEmpty)
}
} | hubertp/scalatest | examples/src/main/scala/org/scalatest/examples/funsuite/beforeandafter/ExampleSuite.scala | Scala | apache-2.0 | 715 |
package com.twitter.scrooge.backend.lua
import com.twitter.scrooge.ast._
import com.twitter.scrooge.backend.{Generator, GeneratorFactory, ServiceOption, TemplateGenerator}
import com.twitter.scrooge.frontend.ResolvedDocument
import com.twitter.scrooge.mustache.Dictionary.{CodeFragment, v}
import com.twitter.scrooge.mustache.HandlebarLoader
import java.io.File
import com.twitter.scrooge.mustache.Dictionary
object LuaGeneratorFactory extends GeneratorFactory {
def luaCommentFunction(commentStyle: HandlebarLoader.CommentStyle): String = {
import HandlebarLoader._
commentStyle match {
case BlockBegin => "--[["
case BlockContinuation => " "
case BlockEnd => "--]]\\n"
case SingleLineComment => "-- "
}
}
val language = "lua"
val templateLoader: HandlebarLoader =
new HandlebarLoader("/luagen/", ".mustache", luaCommentFunction)
def apply(
doc: ResolvedDocument,
defaultNamespace: String,
languageFlags: Seq[String]
): Generator = new LuaGenerator(
doc,
defaultNamespace,
templateLoader
)
}
class LuaGenerator(
val doc: ResolvedDocument,
val defaultNamespace: String,
val templateLoader: HandlebarLoader)
extends TemplateGenerator(doc) {
import LuaGenerator._
val namespaceLanguage = "lua"
val fileExtension = ".lua"
val languageFlags: Seq[String] = Seq.empty[String]
def templates: HandlebarLoader = templateLoader
override def genConstant(constant: RHS, fieldType: Option[FieldType] = None): CodeFragment = {
constant match {
case NullLiteral => v("nil")
case _ => super.genConstant(constant, fieldType)
}
}
def quoteKeyword(str: String): String =
if (LuaKeywords.contains(str))
s"_$str"
else
str
override def normalizeCase[N <: Node](node: N): N = {
(node match {
case e: EnumField =>
e.copy(sid = e.sid.toUpperCase)
case _ => super.normalizeCase(node)
}).asInstanceOf[N]
}
protected override def namespacedFolder(
destFolder: File,
namespace: String,
dryRun: Boolean
): File = {
val file = new File(destFolder, "lua/" + namespace.replace('.', File.separatorChar))
if (!dryRun) file.mkdirs()
file
}
override def isLazyReadEnabled(t: FunctionType, optional: Boolean): Boolean = false
// For constants support, not implemented
def genList(list: ListRHS, fieldType: Option[FieldType] = None): CodeFragment = v("")
def genSet(set: SetRHS, fieldType: Option[FieldType]): CodeFragment = v("")
def genMap(map: MapRHS, fieldType: Option[FieldType] = None): CodeFragment = v("")
def genEnum(enum: EnumRHS, fieldType: Option[FieldType] = None): CodeFragment = v("")
def genStruct(struct: StructRHS, fieldType: Option[FieldType] = None): CodeFragment = v("")
def genUnion(struct: UnionRHS, fieldType: Option[FieldType] = None): CodeFragment = v("")
// For mutability/immutability support, not implemented
def genToImmutable(t: FieldType): CodeFragment = v("")
def genToImmutable(f: Field): CodeFragment = v("")
def toMutable(t: FieldType): (String, String) = ("", "")
def toMutable(f: Field): (String, String) = ("", "")
private[this] def genComponentType(part: String, valueType: FieldType): CodeFragment =
valueType match {
case t: ContainerType => v(s"$part = { ${genType(t)} }")
case t: StructType => v(s"$part = ${genID(t.sid.toTitleCase)}")
case t: EnumType => v(s"$part = { ttype = 'enum', value = ${genID(t.sid.toTitleCase)} }")
case _ => v(s"$part = '${genPrimitiveType(valueType)}'")
}
def genType(t: FunctionType, immutable: Boolean = false): CodeFragment = t match {
case bt: BaseType => v(s"ttype = '${genPrimitiveType(bt)}'")
case StructType(st, _) => v(s"ttype = 'struct', fields = ${genID(st.sid.toTitleCase)}.fields")
case EnumType(et, _) => v(s"ttype = 'enum', value = ${genID(et.sid.toTitleCase)}")
case ListType(valueType, _) => v(s"ttype = 'list', ${genComponentType("value", valueType)}")
case MapType(keyType, valueType, _) =>
v(
s"ttype = 'map', ${genComponentType("key", keyType)}, ${genComponentType("value", valueType)}"
)
case SetType(valueType, _) => v(s"ttype = 'set', ${genComponentType("value", valueType)}")
case _ => v("")
}
def genPrimitiveType(t: FunctionType): CodeFragment = t match {
case Void => v("void")
case TBool => v("bool")
case TByte => v("byte")
case TDouble => v("double")
case TI16 => v("i16")
case TI32 => v("i32")
case TI64 => v("i64")
case TString => v("string")
case TBinary => v("binary")
case _ => v("")
}
// Not used for Lua
def genFieldType(f: Field): CodeFragment = v("")
// For functions (services) -- not supported in Lua
def genFieldParams(fields: Seq[Field], asVal: Boolean = false): CodeFragment =
v(
fields
.map { f => genID(f.sid).toData }
.mkString(", ")
)
// Use "lua" namespace if defined, otherwise default to "java" namespace, but replace "thriftjava"
// with "thriftlua"
override def getNamespace(doc: Document): Identifier = {
def replaceThriftJavaWithThriftLua(s: String) = s.replaceAllLiterally("thriftjava", "thriftlua")
doc
.namespace(namespaceLanguage)
.orElse {
// If we don't have a lua namespace, fall back to the java one
doc
.namespace("java")
.map {
case SimpleID(name, origName) =>
SimpleID(replaceThriftJavaWithThriftLua(name), origName)
case QualifiedID(names) =>
QualifiedID(
names.dropRight(1) ++ names.takeRight(1).map(replaceThriftJavaWithThriftLua)
)
}
}
.getOrElse(SimpleID(defaultNamespace))
}
// Finds all struct types that may be referenced by the given struct or by container types (list,
// map, set) including nested container types to arbitrary depths.
// `excludeSelfType` is the SimpleID of the self type such that we avoid adding a require statement
// for self-type references that were introduced in go/rb/873802.
private[this] def findRequireableStructTypes(
ft: FieldType,
excludeSelfType: SimpleID
): Seq[NamedType] = {
ft match {
case t: NamedType if (excludeSelfType == t.sid) => Nil
case t: StructType => Seq(t)
case t: EnumType => Seq(t)
case ListType(t, _) => findRequireableStructTypes(t, excludeSelfType)
case MapType(keyType, valueType, _) =>
findRequireableStructTypes(keyType, excludeSelfType) ++ findRequireableStructTypes(
valueType,
excludeSelfType
)
case SetType(t, _) => findRequireableStructTypes(t, excludeSelfType)
case _ => Nil
}
}
private[this] def genRequireStatement(t: NamedType, namespace: Option[Identifier]): String = {
val typeName = t.sid.toTitleCase.fullName
val qualifiedName = qualifyNamedType(t, namespace).fullName
s"local $typeName = require '$qualifiedName'"
}
override def structDict(
struct: StructLike,
namespace: Option[Identifier],
includes: Seq[Include],
serviceOptions: Set[ServiceOption],
genAdapt: Boolean,
toplevel: Boolean = false,
validator: Option[Identifier]
): Dictionary = {
val dictionary = super.structDict(struct, namespace, includes, serviceOptions, genAdapt)
// Struct or Enum types referenced in the struct that need a `require` statement at the top of the lua file
val requireStatements = struct.fields
.map(_.fieldType)
.flatMap(findRequireableStructTypes(_, struct.sid))
.map(genRequireStatement(_, namespace))
.distinct
.sorted
dictionary.update("requireStatements", requireStatements.mkString("\\n"))
dictionary
}
// Finagle support, not implemented
def genBaseFinagleService: CodeFragment = v("")
def getParentFinagleService(p: ServiceParent): CodeFragment = v("")
def getParentFinagleClient(p: ServiceParent): CodeFragment = v("")
}
private[this] object LuaGenerator {
object LuaKeywords {
private[this] val keywords = Set(
"and",
"break",
"do",
"else",
"elseif",
"end",
"false",
"goto",
"for",
"function",
"if",
"in",
"local",
"nil",
"not",
"or",
"repeat",
"return",
"then",
"true",
"until",
"while"
)
def contains(str: String): Boolean = keywords.contains(str.toLowerCase)
}
}
| twitter/scrooge | scrooge-generator/src/main/scala/com/twitter/scrooge/backend/lua/LuaGenerator.scala | Scala | apache-2.0 | 8,504 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import javax.servlet.http.HttpServletRequest
import scala.xml.Node
import org.apache.spark.ui.{WebUIPage, UIUtils}
private[history] class HistoryPage(parent: HistoryServer) extends WebUIPage("") {
private val pageSize = 20
private val plusOrMinus = 2
def render(request: HttpServletRequest): Seq[Node] = {
val requestedPage = Option(request.getParameter("page")).getOrElse("1").toInt
val requestedFirst = (requestedPage - 1) * pageSize
val requestedIncomplete =
Option(request.getParameter("showIncomplete")).getOrElse("false").toBoolean
val allApps = parent.getApplicationList()
.filter(_.attempts.head.completed != requestedIncomplete)
val allAppsSize = allApps.size
val actualFirst = if (requestedFirst < allAppsSize) requestedFirst else 0
val appsToShow = allApps.slice(actualFirst, actualFirst + pageSize)
val actualPage = (actualFirst / pageSize) + 1
val last = Math.min(actualFirst + pageSize, allAppsSize) - 1
val pageCount = allAppsSize / pageSize + (if (allAppsSize % pageSize > 0) 1 else 0)
val secondPageFromLeft = 2
val secondPageFromRight = pageCount - 1
val hasMultipleAttempts = appsToShow.exists(_.attempts.size > 1)
val appTable =
if (hasMultipleAttempts) {
        // Sorting is disabled here as table sorting on rowspan has issues.
// ref. SPARK-10172
UIUtils.listingTable(appWithAttemptHeader, appWithAttemptRow,
appsToShow, sortable = false)
} else {
UIUtils.listingTable(appHeader, appRow, appsToShow)
}
val providerConfig = parent.getProviderConfig()
val content =
<div class="row-fluid">
<div class="span12">
<ul class="unstyled">
{providerConfig.map { case (k, v) => <li><strong>{k}:</strong> {v}</li> }}
</ul>
{
// This displays the indices of pages that are within `plusOrMinus` pages of
// the current page. Regardless of where the current page is, this also links
// to the first and last page. If the current page +/- `plusOrMinus` is greater
// than the 2nd page from the first page or less than the 2nd page from the last
// page, `...` will be displayed.
if (allAppsSize > 0) {
val leftSideIndices =
rangeIndices(actualPage - plusOrMinus until actualPage, 1 < _, requestedIncomplete)
val rightSideIndices =
rangeIndices(actualPage + 1 to actualPage + plusOrMinus, _ < pageCount,
requestedIncomplete)
<h4>
Showing {actualFirst + 1}-{last + 1} of {allAppsSize}
{if (requestedIncomplete) "(Incomplete applications)"}
<span style="float: right">
{
if (actualPage > 1) {
                    <a href={makePageLink(actualPage - 1, requestedIncomplete)}>&lt; </a>
<a href={makePageLink(1, requestedIncomplete)}>1</a>
}
}
{if (actualPage - plusOrMinus > secondPageFromLeft) " ... "}
{leftSideIndices}
{actualPage}
{rightSideIndices}
{if (actualPage + plusOrMinus < secondPageFromRight) " ... "}
{
if (actualPage < pageCount) {
<a href={makePageLink(pageCount, requestedIncomplete)}>{pageCount}</a>
                    <a href={makePageLink(actualPage + 1, requestedIncomplete)}> &gt;</a>
}
}
</span>
</h4> ++
appTable
} else if (requestedIncomplete) {
<h4>No incomplete applications found!</h4>
} else {
<h4>No completed applications found!</h4> ++
<p>Did you specify the correct logging directory?
Please verify your setting of <span style="font-style:italic">
spark.history.fs.logDirectory</span> and whether you have the permissions to
access it.<br /> It is also possible that your application did not run to
completion or did not stop the SparkContext.
</p>
}
}
<a href={makePageLink(actualPage, !requestedIncomplete)}>
{
if (requestedIncomplete) {
"Back to completed applications"
} else {
"Show incomplete applications"
}
}
</a>
</div>
</div>
UIUtils.basicSparkPage(content, "History Server")
}
private val appHeader = Seq(
"App ID",
"App Name",
"Started",
"Completed",
"Duration",
"Spark User",
"Last Updated")
private val appWithAttemptHeader = Seq(
"App ID",
"App Name",
"Attempt ID",
"Started",
"Completed",
"Duration",
"Spark User",
"Last Updated")
private def rangeIndices(
range: Seq[Int],
condition: Int => Boolean,
showIncomplete: Boolean): Seq[Node] = {
range.filter(condition).map(nextPage =>
<a href={makePageLink(nextPage, showIncomplete)}> {nextPage} </a>)
}
private def attemptRow(
renderAttemptIdColumn: Boolean,
info: ApplicationHistoryInfo,
attempt: ApplicationAttemptInfo,
isFirst: Boolean): Seq[Node] = {
val uiAddress = HistoryServer.getAttemptURI(info.id, attempt.attemptId)
val startTime = UIUtils.formatDate(attempt.startTime)
val endTime = if (attempt.endTime > 0) UIUtils.formatDate(attempt.endTime) else "-"
val duration =
if (attempt.endTime > 0) {
UIUtils.formatDuration(attempt.endTime - attempt.startTime)
} else {
"-"
}
val lastUpdated = UIUtils.formatDate(attempt.lastUpdated)
<tr>
{
if (isFirst) {
if (info.attempts.size > 1 || renderAttemptIdColumn) {
<td rowspan={info.attempts.size.toString} style="background-color: #ffffff">
<a href={uiAddress}>{info.id}</a></td>
<td rowspan={info.attempts.size.toString} style="background-color: #ffffff">
{info.name}</td>
} else {
<td><a href={uiAddress}>{info.id}</a></td>
<td>{info.name}</td>
}
} else {
Nil
}
}
{
if (renderAttemptIdColumn) {
if (info.attempts.size > 1 && attempt.attemptId.isDefined) {
<td><a href={HistoryServer.getAttemptURI(info.id, attempt.attemptId)}>
{attempt.attemptId.get}</a></td>
} else {
<td> </td>
}
} else {
Nil
}
}
<td sorttable_customkey={attempt.startTime.toString}>{startTime}</td>
<td sorttable_customkey={attempt.endTime.toString}>{endTime}</td>
<td sorttable_customkey={(attempt.endTime - attempt.startTime).toString}>
{duration}</td>
<td>{attempt.sparkUser}</td>
<td sorttable_customkey={attempt.lastUpdated.toString}>{lastUpdated}</td>
</tr>
}
private def appRow(info: ApplicationHistoryInfo): Seq[Node] = {
attemptRow(false, info, info.attempts.head, true)
}
private def appWithAttemptRow(info: ApplicationHistoryInfo): Seq[Node] = {
attemptRow(true, info, info.attempts.head, true) ++
info.attempts.drop(1).flatMap(attemptRow(true, info, _, false))
}
private def makePageLink(linkPage: Int, showIncomplete: Boolean): String = {
"/?" + Array(
"page=" + linkPage,
"showIncomplete=" + showIncomplete
).mkString("&")
}
}
| tophua/spark1.52 | core/src/main/scala/org/apache/spark/deploy/history/HistoryPage.scala | Scala | apache-2.0 | 8,571 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.util.Properties
import java.util.concurrent.ExecutionException
import kafka.log.LogConfig
import kafka.utils.TestUtils
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.kafka.common.errors.{InvalidTimestampException, SerializationException}
import org.apache.kafka.common.record.TimestampType
import org.junit.Assert._
import org.junit.Test
class PlaintextProducerSendTest extends BaseProducerSendTest {
@Test(expected = classOf[SerializationException])
def testWrongSerializer() {
val producerProps = new Properties()
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer")
val producer = registerProducer(new KafkaProducer(producerProps))
val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, "key".getBytes, "value".getBytes)
producer.send(record)
}
@Test
def testBatchSizeZero() {
val producer = createProducer(brokerList = brokerList,
lingerMs = Int.MaxValue,
deliveryTimeoutMs = Int.MaxValue,
batchSize = 0)
sendAndVerify(producer)
}
@Test
def testSendCompressedMessageWithLogAppendTime() {
val producer = createProducer(brokerList = brokerList,
compressionType = "gzip",
lingerMs = Int.MaxValue,
deliveryTimeoutMs = Int.MaxValue)
sendAndVerifyTimestamp(producer, TimestampType.LOG_APPEND_TIME)
}
@Test
def testSendNonCompressedMessageWithLogAppendTime() {
val producer = createProducer(brokerList = brokerList, lingerMs = Int.MaxValue, deliveryTimeoutMs = Int.MaxValue)
sendAndVerifyTimestamp(producer, TimestampType.LOG_APPEND_TIME)
}
/**
* testAutoCreateTopic
*
* The topic should be created upon sending the first message
*/
@Test
def testAutoCreateTopic() {
val producer = createProducer(brokerList)
try {
// Send a message to auto-create the topic
val record = new ProducerRecord(topic, null, "key".getBytes, "value".getBytes)
assertEquals("Should have offset 0", 0L, producer.send(record).get.offset)
// double check that the topic is created with leader elected
TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, 0)
} finally {
producer.close()
}
}
@Test
def testSendWithInvalidCreateTime() {
val topicProps = new Properties()
topicProps.setProperty(LogConfig.MessageTimestampDifferenceMaxMsProp, "1000")
createTopic(topic, 1, 2, topicProps)
val producer = createProducer(brokerList = brokerList)
try {
producer.send(new ProducerRecord(topic, 0, System.currentTimeMillis() - 1001, "key".getBytes, "value".getBytes)).get()
fail("Should throw CorruptedRecordException")
} catch {
case e: ExecutionException => assertTrue(e.getCause.isInstanceOf[InvalidTimestampException])
} finally {
producer.close()
}
// Test compressed messages.
val compressedProducer = createProducer(brokerList = brokerList, compressionType = "gzip")
try {
compressedProducer.send(new ProducerRecord(topic, 0, System.currentTimeMillis() - 1001, "key".getBytes, "value".getBytes)).get()
fail("Should throw CorruptedRecordException")
} catch {
case e: ExecutionException => assertTrue(e.getCause.isInstanceOf[InvalidTimestampException])
} finally {
compressedProducer.close()
}
}
}
| KevinLiLu/kafka | core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala | Scala | apache-2.0 | 4,459 |
/**
*
*/
package com.labjack
import com.sun.jna._
import com.sun.jna.win32.StdCallLibrary
import com.sun.jna.ptr._
/**
* LJM is the wrapper object to the LJM library's functions and constants.
* Refer to the LabJackM.h header file or online User's Guide for functions
* and constants documentation:<br>
* <br>
* <a href="http://labjack.com/support/ljm/users-guide">http://labjack.com/support/ljm/users-guide</a><br>
* <p>LJM library C to Scala differences:
* <ul>
* <li>C functions are implemented in the LJM object as static methods. The
 * function name's "LJM_" prefix has been removed and the first letter has
 * been changed to lowercase.
* <li>C constants can be found in the LJM.Constants object. The constant name's
* "LJM_" prefix have been removed.
* <li>C error code constants can be found in the LJM.Errors object. The constant
* name's "LJME_" prefix have been removed.
* <li>C function parameter names have had the first letter changed to lowercase.
* <li>If the wrapper method detects an error it will throw a LJMException
* exception, setting the error and errorAddress values, and exception message.
 * <li>C parameters that are passed by reference are implemented in Scala as
* arrays, or JNA classes IntByReference, DoubleByReference, or LongByReference
* for single value references.
 * <li>C string parameters are implemented in Scala as a String, and pass-by-reference
 * strings are a JNA class Pointer.<br>
* <li>When using a JNA Pointer for C strings, the JNA Memory class needs to be used
* to construct the pointer and allocate memory. For example:<br>
* <br>
* val stringPtr: Pointer = new Memory(100) //Allocates 100 bytes of memory which is a 100 character C string.
* </ul>
*
* <p>Version History
* <ul>
* <li>0.91 - Added Linux and Mac OS X support.
* <li>0.90 - Initial release tested with LJM v1.1.1. Windows support only.
* </ul>
 * @author LabJack Corporation
* @version 0.91
*
 * Ported to Scala from Java by porl
*/
object LJM {
private def handleError(error: Int) {
if (error != Errors.NOERROR) {
throw new LJMException(error)
}
}
private def handleError(error: Int, errorAddress: Int) {
if (error != Errors.NOERROR) throw new LJMException(error, errorAddress)
}
def listAll(deviceType: Int, connectionType: Int, numFound: IntByReference, aDeviceTypes: Array[Int], aConnectionTypes: Array[Int], aSerialNumbers: Array[Int], aIPAddresses: Array[Int]) = {
val error = LabJackM.INSTANCE.LJM_ListAll(deviceType, connectionType, numFound, aDeviceTypes, aConnectionTypes, aSerialNumbers, aIPAddresses)
handleError(error)
error
}
def listAllS(deviceType: String, connectionType: String, numFound: IntByReference, aDeviceTypes: Array[Int], aConnectionTypes: Array[Int], aSerialNumbers: Array[Int], aIPAddresses: Array[Int]) = {
val error = LabJackM.INSTANCE.LJM_ListAllS(deviceType, connectionType, numFound, aDeviceTypes, aConnectionTypes, aSerialNumbers, aIPAddresses)
handleError(error)
error
}
def openS(deviceType: String, connectionType: String, identifier: String, handle: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_OpenS(deviceType, connectionType, identifier, handle)
handleError(error)
error
}
def open(deviceType: Int, connectionType: Int, identifier: String, handle: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_Open(deviceType, connectionType, identifier, handle)
handleError(error)
error
}
def getHandleInfo(handle: Int, deviceType: IntByReference, connectionType: IntByReference, serialNumber: IntByReference, ipAddress: IntByReference, port: IntByReference, maxBytesPerMB: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_GetHandleInfo(handle, deviceType, connectionType, serialNumber, ipAddress, port, maxBytesPerMB)
handleError(error)
error
}
def close(handle: Int) = {
val error = LabJackM.INSTANCE.LJM_Close(handle)
handleError(error)
error
}
def closeAll = {
val error = LabJackM.INSTANCE.LJM_CloseAll
handleError(error)
error
}
def eWriteAddress(handle: Int, address: Int, `type`: Int, value: Double) = {
val error = LabJackM.INSTANCE.LJM_eWriteAddress(handle, address, `type`, value)
handleError(error)
error
}
def eReadAddress(handle: Int, address: Int, `type`: Int, value: DoubleByReference) = {
val error = LabJackM.INSTANCE.LJM_eReadAddress(handle, address, `type`, value)
handleError(error)
error
}
def eWriteName(handle: Int, name: String, value: Double) = {
val error = LabJackM.INSTANCE.LJM_eWriteName(handle, name, value)
handleError(error)
error
}
def eReadName(handle: Int, name: String, value: DoubleByReference) = {
val error = LabJackM.INSTANCE.LJM_eReadName(handle, name, value)
handleError(error)
error
}
def eReadAddresses(handle: Int, numFrames: Int, aAddresses: Array[Int], aTypes: Array[Int], aValues: Array[Double], errorAddress: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_eReadAddresses(handle, numFrames, aAddresses, aTypes, aValues, errorAddress)
handleError(error, errorAddress.getValue)
error
}
def eReadNames(handle: Int, numFrames: Int, aNames: Array[String], aValues: Array[Double], errorAddress: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_eReadNames(handle, numFrames, aNames, aValues, errorAddress)
handleError(error, errorAddress.getValue)
error
}
def eWriteAddresses(handle: Int, numFrames: Int, aAddresses: Array[Int], aTypes: Array[Int], aValues: Array[Double], errorAddress: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_eWriteAddresses(handle, numFrames, aAddresses, aTypes, aValues, errorAddress)
handleError(error, errorAddress.getValue)
error
}
def eWriteNames(handle: Int, numFrames: Int, aNames: Array[String], aValues: Array[Double], errorAddress: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_eWriteNames(handle, numFrames, aNames, aValues, errorAddress)
handleError(error, errorAddress.getValue)
error
}
def eAddresses(handle: Int, numFrames: Int, aAddresses: Array[Int], aTypes: Array[Int], aWrites: Array[Int], aNumValues: Array[Int], aValues: Array[Double], errorAddress: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_eAddresses(handle, numFrames, aAddresses, aTypes, aWrites, aNumValues, aValues, errorAddress)
handleError(error, errorAddress.getValue)
error
}
def eNames(handle: Int, numFrames: Int, aNames: Array[String], aWrites: Array[Int], aNumValues: Array[Int], aValues: Array[Double], errorAddress: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_eNames(handle, numFrames, aNames, aWrites, aNumValues, aValues, errorAddress)
handleError(error, errorAddress.getValue)
error
}
def eReadNameString(handle: Int, name: String, string: Pointer) = {
val error = LabJackM.INSTANCE.LJM_eReadNameString(handle, name, string)
handleError(error)
error
}
def eReadAddressString(handle: Int, address: Int, string: Pointer) = {
val error = LabJackM.INSTANCE.LJM_eReadAddressString(handle, address, string)
handleError(error)
error
}
def eWriteNameString(handle: Int, name: String, string: String) = {
val error = LabJackM.INSTANCE.LJM_eWriteNameString(handle, name, string)
handleError(error)
error
}
def eWriteAddressString(handle: Int, address: Int, string: String) = {
val error = LabJackM.INSTANCE.LJM_eWriteAddressString(handle, address, string)
handleError(error)
error
}
def eStreamStart(handle: Int, scansPerRead: Int, numAddresses: Int, aScanList: Array[Int], scanRate: DoubleByReference) = {
val error = LabJackM.INSTANCE.LJM_eStreamStart(handle, scansPerRead, numAddresses, aScanList, scanRate)
handleError(error)
error
}
def eStreamRead(handle: Int, aData: Array[Double], deviceScanBacklog: IntByReference, ljmScanBacklog: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_eStreamRead(handle, aData, deviceScanBacklog, ljmScanBacklog)
handleError(error)
error
}
def eStreamStop(handle: Int) = {
val error = LabJackM.INSTANCE.LJM_eStreamStop(handle)
handleError(error)
error
}
def writeRaw(handle: Int, data: Array[Byte], numBytes: Int) = {
val error = LabJackM.INSTANCE.LJM_WriteRaw(handle, data, numBytes)
handleError(error)
error
}
def readRaw(handle: Int, data: Array[Byte], numBytes: Int) = {
val error = LabJackM.INSTANCE.LJM_ReadRaw(handle, data, numBytes)
handleError(error)
error
}
def addressesToMBFB(maxBytesPerMBFB: Int, aAddresses: Array[Int], aTypes: Array[Int], aWrites: Array[Int], aNumValues: Array[Int], aValues: Array[Double], numFrames: IntByReference, aMBFBCommand: Array[Byte]) = {
val error = LabJackM.INSTANCE.LJM_AddressesToMBFB(maxBytesPerMBFB, aAddresses, aTypes, aWrites, aNumValues, aValues, numFrames, aMBFBCommand)
handleError(error)
error
}
def mbfbComm(handle: Int, unitID: Byte, aMBFB: Array[Byte], errorAddress: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_MBFBComm(handle, unitID, aMBFB, errorAddress)
handleError(error, errorAddress.getValue)
error
}
def updateValues(aMBFBResponse: Array[Byte], aTypes: Array[Int], aWrites: Array[Int], aNumValues: Array[Int], numFrames: Int, aValues: Array[Double]) = {
val error = LabJackM.INSTANCE.LJM_UpdateValues(aMBFBResponse, aTypes, aWrites, aNumValues, numFrames, aValues)
handleError(error)
error
}
def namesToAddresses(numFrames: Int, aNames: Array[String], aAddresses: Array[Int], aTypes: Array[Int]) = {
val error = LabJackM.INSTANCE.LJM_NamesToAddresses(numFrames, aNames, aAddresses, aTypes)
handleError(error)
error
}
def nameToAddress(name: String, address: IntByReference, `type`: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_NameToAddress(name, address, `type`)
handleError(error)
error
}
def addressesToTypes(numAddresses: Int, aAddresses: Array[Int], aTypes: Array[Int]) = {
val error = LabJackM.INSTANCE.LJM_AddressesToTypes(numAddresses, aAddresses, aTypes)
handleError(error)
error
}
def addressToType(address: Int, `type`: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_AddressToType(address, `type`)
handleError(error)
error
}
def errorToString(errorCode: Int, errorString: Pointer) {
LabJackM.INSTANCE.LJM_ErrorToString(errorCode, errorString)
}
def loadConstants() {
LabJackM.INSTANCE.LJM_LoadConstants()
}
def loadConstantsFromFile(fileName: String) = {
val error = LabJackM.INSTANCE.LJM_LoadConstantsFromFile(fileName)
handleError(error)
error
}
def loadConstantsFromString(jsonString: String) = {
val error = LabJackM.INSTANCE.LJM_LoadConstantsFromString(jsonString)
handleError(error)
error
}
def tcVoltsToTemp(tcType: Int, tcVolts: Double, cjTempK: Double, pTCTempK: DoubleByReference) = {
val error = LabJackM.INSTANCE.LJM_TCVoltsToTemp(tcType, tcVolts, cjTempK, pTCTempK)
handleError(error)
error
}
def float32ToByteArray(aFLOAT32: Array[Float], registerOffset: Int, numFLOAT32: Int, aBytes: Array[Byte]) {
LabJackM.INSTANCE.LJM_FLOAT32ToByteArray(aFLOAT32, registerOffset, numFLOAT32, aBytes)
}
def byteArrayToFLOAT32(aBytes: Array[Byte], registerOffset: Int, numFLOAT32: Int, aFLOAT32: Array[Float]) {
LabJackM.INSTANCE.LJM_ByteArrayToFLOAT32(aBytes, registerOffset, numFLOAT32, aFLOAT32)
}
def uint16ToByteArray(aUINT16: Array[Short], registerOffset: Int, numUINT16: Int, aBytes: Array[Byte]) {
LabJackM.INSTANCE.LJM_UINT16ToByteArray(aUINT16, registerOffset, numUINT16, aBytes)
}
def byteArrayToUINT16(aBytes: Array[Byte], registerOffset: Int, numUINT16: Int, aUINT16: Array[Short]) {
LabJackM.INSTANCE.LJM_ByteArrayToUINT16(aBytes, registerOffset, numUINT16, aUINT16)
}
def uint32ToByteArray(aUINT32: Array[Int], registerOffset: Int, numUINT32: Int, aBytes: Array[Byte]) {
LabJackM.INSTANCE.LJM_UINT32ToByteArray(aUINT32, registerOffset, numUINT32, aBytes)
}
def byteArrayToUINT32(aBytes: Array[Byte], registerOffset: Int, numUINT32: Int, aUINT32: Array[Int]) {
LabJackM.INSTANCE.LJM_ByteArrayToUINT32(aBytes, registerOffset, numUINT32, aUINT32)
}
def int32ToByteArray(aINT32: Array[Int], registerOffset: Int, numINT32: Int, aBytes: Array[Byte]) {
LabJackM.INSTANCE.LJM_INT32ToByteArray(aINT32, registerOffset, numINT32, aBytes)
}
def byteArrayToINT32(aBytes: Array[Byte], registerOffset: Int, numINT32: Int, aINT32: Array[Int]) {
LabJackM.INSTANCE.LJM_ByteArrayToINT32(aBytes, registerOffset, numINT32, aINT32)
}
def numberToIP(number: Int, ipv4String: Pointer) = {
val error = LabJackM.INSTANCE.LJM_NumberToIP(number, ipv4String)
handleError(error)
error
}
def ipToNumber(ipv4String: String, number: IntByReference) = {
val error = LabJackM.INSTANCE.LJM_IPToNumber(ipv4String, number)
handleError(error)
error
}
def numberToMAC(number: Long, macString: Pointer) = {
val error = LabJackM.INSTANCE.LJM_NumberToMAC(number, macString)
handleError(error)
error
}
def macToNumber(macString: String, number: LongByReference) = {
val error = LabJackM.INSTANCE.LJM_MACToNumber(macString, number)
handleError(error)
error
}
def writeLibraryConfigS(parameter: String, value: Double) = {
val error = LabJackM.INSTANCE.LJM_WriteLibraryConfigS(parameter, value)
handleError(error)
error
}
def writeLibraryConfigStringS(parameter: String, string: String) = {
val error = LabJackM.INSTANCE.LJM_WriteLibraryConfigStringS(parameter, string)
handleError(error)
error
}
def readLibraryConfigS(parameter: String, value: DoubleByReference) = {
val error = LabJackM.INSTANCE.LJM_ReadLibraryConfigS(parameter, value)
handleError(error)
error
}
def readLibraryConfigStringS(parameter: String, string: Pointer) = {
val error = LabJackM.INSTANCE.LJM_ReadLibraryConfigStringS(parameter, string)
handleError(error)
error
}
def loadConfigurationFile(fileName: String) = {
val error = LabJackM.INSTANCE.LJM_LoadConfigurationFile(fileName)
handleError(error)
error
}
def log(level: Int, string: String) = {
val error = LabJackM.INSTANCE.LJM_Log(level, string)
handleError(error)
error
}
def resetLog = {
val error = LabJackM.INSTANCE.LJM_ResetLog
handleError(error)
error
}
object LabJackM {
val INSTANCE = Native.loadLibrary("LabJackM", if (Platform.isWindows) classOf[WindowsLabJackM] else classOf[LabJackM]).asInstanceOf[LabJackM]
}
trait LabJackM extends Library {
def LJM_ListAll(DeviceType: Int, ConnectionType: Int, NumFound: IntByReference, aDeviceTypes: Array[Int], aConnectionTypes: Array[Int], aSerialNumbers: Array[Int], aIPAddresses: Array[Int]): Int
def LJM_ListAllS(DeviceType: String, ConnectionType: String, NumFound: IntByReference, aDeviceTypes: Array[Int], aConnectionTypes: Array[Int], aSerialNumbers: Array[Int], aIPAddresses: Array[Int]): Int
def LJM_OpenS(DeviceType: String, ConnectionType: String, Identifier: String, Handle: IntByReference): Int
def LJM_Open(DeviceType: Int, ConnectionType: Int, Identifier: String, Handle: IntByReference): Int
def LJM_GetHandleInfo(Handle: Int, DeviceType: IntByReference, ConnectionType: IntByReference, SerialNumber: IntByReference, IPAddress: IntByReference, Port: IntByReference, MaxBytesPerMB: IntByReference): Int
def LJM_Close(Handle: Int): Int
def LJM_CloseAll: Int
def LJM_eWriteAddress(Handle: Int, Address: Int, Type: Int, Value: Double): Int
def LJM_eReadAddress(Handle: Int, Address: Int, Type: Int, Value: DoubleByReference): Int
def LJM_eWriteName(Handle: Int, Name: String, Value: Double): Int
def LJM_eReadName(Handle: Int, Name: String, Value: DoubleByReference): Int
def LJM_eReadAddresses(Handle: Int, NumFrames: Int, aAddresses: Array[Int], aTypes: Array[Int], aValues: Array[Double], ErrorAddress: IntByReference): Int
def LJM_eReadNames(Handle: Int, NumFrames: Int, aNames: Array[String], aValues: Array[Double], ErrorAddress: IntByReference): Int
def LJM_eWriteAddresses(Handle: Int, NumFrames: Int, aAddresses: Array[Int], aTypes: Array[Int], aValues: Array[Double], ErrorAddress: IntByReference): Int
def LJM_eWriteNames(Handle: Int, NumFrames: Int, aNames: Array[String], aValues: Array[Double], ErrorAddress: IntByReference): Int
def LJM_eAddresses(Handle: Int, NumFrames: Int, aAddresses: Array[Int], aTypes: Array[Int], aWrites: Array[Int], aNumValues: Array[Int], aValues: Array[Double], ErrorAddress: IntByReference): Int
def LJM_eNames(Handle: Int, NumFrames: Int, aNames: Array[String], aWrites: Array[Int], aNumValues: Array[Int], aValues: Array[Double], ErrorAddress: IntByReference): Int
def LJM_eReadNameString(Handle: Int, Name: String, string: Pointer): Int
def LJM_eReadAddressString(Handle: Int, Address: Int, string: Pointer): Int
def LJM_eWriteNameString(Handle: Int, Name: String, string: String): Int
def LJM_eWriteAddressString(Handle: Int, Address: Int, string: String): Int
def LJM_eStreamStart(Handle: Int, ScansPerRead: Int, NumAddresses: Int, aScanList: Array[Int], ScanRate: DoubleByReference): Int
def LJM_eStreamRead(Handle: Int, aData: Array[Double], DeviceScanBacklog: IntByReference, LJMScanBacklog: IntByReference): Int
def LJM_eStreamStop(Handle: Int): Int
def LJM_WriteRaw(Handle: Int, Data: Array[Byte], NumBytes: Int): Int
def LJM_ReadRaw(Handle: Int, Data: Array[Byte], NumBytes: Int): Int
def LJM_AddressesToMBFB(MaxBytesPerMBFB: Int, aAddresses: Array[Int], aTypes: Array[Int], aWrites: Array[Int], aNumValues: Array[Int], aValues: Array[Double], NumFrames: IntByReference, aMBFBCommand: Array[Byte]): Int
def LJM_MBFBComm(Handle: Int, UnitID: Byte, aMBFB: Array[Byte], ErrorAddress: IntByReference): Int
def LJM_UpdateValues(aMBFBResponse: Array[Byte], aTypes: Array[Int], aWrites: Array[Int], aNumValues: Array[Int], NumFrames: Int, aValues: Array[Double]): Int
def LJM_NamesToAddresses(NumFrames: Int, aNames: Array[String], aAddresses: Array[Int], aTypes: Array[Int]): Int
def LJM_NameToAddress(Name: String, Address: IntByReference, Type: IntByReference): Int
def LJM_AddressesToTypes(NumAddresses: Int, aAddresses: Array[Int], aTypes: Array[Int]): Int
def LJM_AddressToType(Address: Int, Type: IntByReference): Int
def LJM_ErrorToString(ErrorCode: Int, ErrorString: Pointer)
def LJM_LoadConstants()
def LJM_LoadConstantsFromFile(FileName: String): Int
def LJM_LoadConstantsFromString(JsonString: String): Int
def LJM_TCVoltsToTemp(TCType: Int, TCVolts: Double, CJTempK: Double, pTCTempK: DoubleByReference): Int
def LJM_FLOAT32ToByteArray(aFLOAT32: Array[Float], RegisterOffset: Int, NumFLOAT32: Int, aBytes: Array[Byte])
def LJM_ByteArrayToFLOAT32(aBytes: Array[Byte], RegisterOffset: Int, NumFLOAT32: Int, aFLOAT32: Array[Float])
def LJM_UINT16ToByteArray(aUINT16: Array[Short], RegisterOffset: Int, NumUINT16: Int, aBytes: Array[Byte])
def LJM_ByteArrayToUINT16(aBytes: Array[Byte], RegisterOffset: Int, NumUINT16: Int, aUINT16: Array[Short])
def LJM_UINT32ToByteArray(aUINT32: Array[Int], RegisterOffset: Int, NumUINT32: Int, aBytes: Array[Byte])
def LJM_ByteArrayToUINT32(aBytes: Array[Byte], RegisterOffset: Int, NumUINT32: Int, aUINT32: Array[Int])
def LJM_INT32ToByteArray(aINT32: Array[Int], RegisterOffset: Int, NumINT32: Int, aBytes: Array[Byte])
def LJM_ByteArrayToINT32(aBytes: Array[Byte], RegisterOffset: Int, NumINT32: Int, aINT32: Array[Int])
def LJM_NumberToIP(Number: Int, IPv4String: Pointer): Int
def LJM_IPToNumber(IPv4String: String, Number: IntByReference): Int
def LJM_NumberToMAC(Number: Long, MACString: Pointer): Int
def LJM_MACToNumber(MACString: String, Number: LongByReference): Int
def LJM_WriteLibraryConfigS(Parameter: String, Value: Double): Int
def LJM_WriteLibraryConfigStringS(Parameter: String, string: String): Int
def LJM_ReadLibraryConfigS(Parameter: String, Value: DoubleByReference): Int
def LJM_ReadLibraryConfigStringS(Parameter: String, string: Pointer): Int
def LJM_LoadConfigurationFile(FileName: String): Int
def LJM_Log(Level: Int, string: String): Int
def LJM_ResetLog: Int
}
private trait WindowsLabJackM extends LabJackM with StdCallLibrary {
}
/**
* Errors is an object containing the error constants from the LJM library.
* The "LJME_" prefix have been removed from the original names.
*/
object Errors {
val NOERROR = 0
val WARNINGS_BEGIN = 200
val WARNINGS_END = 399
val FRAMES_OMITTED_DUE_TO_PACKET_SIZE = 201
val DEBUG_LOG_FAILURE = 202
val USING_DEFAULT_CALIBRATION = 203
val MODBUS_ERRORS_BEGIN = 1200
val MODBUS_ERRORS_END = 1216
val MBE1_ILLEGAL_FUNCTION = 1201
val MBE2_ILLEGAL_DATA_ADDRESS = 1202
val MBE3_ILLEGAL_DATA_VALUE = 1203
val MBE4_SLAVE_DEVICE_FAILURE = 1204
val MBE5_ACKNOWLEDGE = 1205
val MBE6_SLAVE_DEVICE_BUSY = 1206
val MBE8_MEMORY_PARITY_ERROR = 1208
val MBE10_GATEWAY_PATH_UNAVAILABLE = 1210
val MBE11_GATEWAY_TARGET_NO_RESPONSE = 1211
val LIBRARY_ERRORS_BEGIN = 1220
val LIBRARY_ERRORS_END = 1399
val UNKNOWN_ERROR = 1221
val INVALID_DEVICE_TYPE = 1222
val INVALID_HANDLE = 1223
val DEVICE_NOT_OPEN = 1224
val STREAM_NOT_INITIALIZED = 1225
val DEVICE_NOT_FOUND = 1227
val DEVICE_ALREADY_OPEN = 1229
val COULD_NOT_CLAIM_DEVICE = 1230
val CANNOT_CONNECT = 1231
val SOCKET_LEVEL_ERROR = 1233
val CANNOT_OPEN_DEVICE = 1236
val CANNOT_DISCONNECT = 1237
val WINSOCK_FAILURE = 1238
val DEVICE_RECONNECT_FAILED = 1239
val INVALID_ADDRESS = 1250
val INVALID_CONNECTION_TYPE = 1251
val INVALID_DIRECTION = 1252
val INVALID_FUNCTION = 1253
val INVALID_NUM_REGISTERS = 1254
val INVALID_PARAMETER = 1255
val INVALID_PROTOCOL_ID = 1256
val INVALID_TRANSACTION_ID = 1257
val INVALID_VALUE_TYPE = 1259
val MEMORY_ALLOCATION_FAILURE = 1260
val NO_COMMAND_BYTES_SENT = 1261
val INCORRECT_NUM_COMMAND_BYTES_SENT = 1262
val NO_RESPONSE_BYTES_RECEIVED = 1263
val INCORRECT_NUM_RESPONSE_BYTES_RECEIVED = 1264
val MIXED_FORMAT_IP_ADDRESS = 1265
val UNKNOWN_IDENTIFIER = 1266
val NOT_IMPLEMENTED = 1267
val INVALID_INDEX = 1268
val INVALID_LENGTH = 1269
val ERROR_BIT_SET = 1270
val INVALID_MAXBYTESPERMBFB = 1271
val NULL_POINTER = 1272
val NULL_OBJ = 1273
val RESERVED_NAME = 1274
val UNPARSABLE_DEVICE_TYPE = 1275
val UNPARSABLE_CONNECTION_TYPE = 1276
val UNPARSABLE_IDENTIFIER = 1277
val PACKET_SIZE_TOO_LARGE = 1278
val TRANSACTION_ID_ERR = 1279
val PROTOCOL_ID_ERR = 1280
val LENGTH_ERR = 1281
val UNIT_ID_ERR = 1282
val FUNCTION_ERR = 1283
val STARTING_REG_ERR = 1284
val NUM_REGS_ERR = 1285
val NUM_BYTES_ERR = 1286
val CONFIG_FILE_NOT_FOUND = 1289
val CONFIG_PARSING_ERROR = 1290
val INVALID_NUM_VALUES = 1291
val CONSTANTS_FILE_NOT_FOUND = 1292
val INVALID_CONSTANTS_FILE = 1293
val INVALID_NAME = 1294
val OVERSPECIFIED_PORT = 1296
val INTENT_NOT_READY = 1297
val ATTR_LOAD_COMM_FAILURE = 1298
val INVALID_CONFIG_NAME = 1299
val ERROR_RETRIEVAL_FAILURE = 1300
val LJM_BUFFER_FULL = 1301
val COULD_NOT_START_STREAM = 1302
val STREAM_NOT_RUNNING = 1303
val UNABLE_TO_STOP_STREAM = 1304
val INVALID_VALUE = 1305
val SYNCHRONIZATION_TIMEOUT = 1306
val OLD_FIRMWARE = 1307
}
/**
* Constants is an object containing the constants from the LJM library.
* The "LJM_" prefix have been removed from the original names.
*/
object Constants {
val READ = 0
val WRITE = 1
val UINT16 = 0
val UINT32 = 1
val INT32 = 2
val FLOAT32 = 3
val BYTE = 99
val STRING = 98
val STRING_MAX_SIZE = 49
val STRING_ALLOCATION_SIZE = 50
val INVALID_NAME_ADDRESS = -1
val MAX_NAME_SIZE = 256
val MAC_STRING_SIZE = 18
val IPv4_STRING_SIZE = 16
val dtANY = 0
val dtUE9 = 9
val dtU3 = 3
val dtU6 = 6
val dtT7 = 7
val dtSKYMOTE_BRIDGE = 1000
val dtDIGIT = 200
val ctANY = 0
val ctUSB = 1
val ctTCP = 2
val ctETHERNET = 3
val ctWIFI = 4
val NO_IP_ADDRESS = 0
val NO_PORT = 0
val DEFAULT_PORT = 502
val DEMO_MODE = "-1"
val idANY = 0
val DEFAULT_FEEDBACK_ALLOCATION_SIZE = 62
val USE_DEFAULT_MAXBYTESPERMBFB = 0
val LIST_ALL_SIZE = 128
val MAX_USB_PACKET_NUM_BYTES = 64
val MAX_TCP_PACKET_NUM_BYTES_T7 = 1040
val MAX_TCP_PACKET_NUM_BYTES_NON_T7 = 64
val NO_TIMEOUT = 0
val DEFAULT_TIMEOUT = 20000
val DUMMY_VALUE = -9999
val GND = 199
val ttB = 6001
val ttE = 6002
val ttJ = 6003
val ttK = 6004
val ttN = 6005
val ttR = 6006
val ttS = 6007
val ttT = 6008
val SEND_RECEIVE_TIMEOUT_MS = "LJM_SEND_RECEIVE_TIMEOUT_MS"
val OPEN_TCP_DEVICE_TIMEOUT_MS = "LJM_OPEN_TCP_DEVICE_TIMEOUT_MS"
val DEBUG_LOG_MODE = "LJM_DEBUG_LOG_MODE"
val DEBUG_LOG_MODE_NEVER = 1.0
val DEBUG_LOG_MODE_CONTINUOUS = 2.0
val DEBUG_LOG_MODE_ON_ERROR = 3.0
val DEBUG_LOG_LEVEL = "LJM_DEBUG_LOG_LEVEL"
val STREAM_PACKET = 1.0
val TRACE = 2.0
val DEBUG = 4.0
val INFO = 6.0
val PACKET = 7.0
val WARNING = 8.0
val USER = 9.0
val ERROR = 10.0
val FATAL = 12.0
val DEBUG_LOG_BUFFER_MAX_SIZE = "LJM_DEBUG_LOG_BUFFER_MAX_SIZE"
val DEBUG_LOG_SLEEP_TIME_MS = "LJM_DEBUG_LOG_SLEEP_TIME_MS"
val LIBRARY_VERSION = "LJM_LIBRARY_VERSION"
val ALLOWS_AUTO_MULTIPLE_FEEDBACKS = "LJM_ALLOWS_AUTO_MULTIPLE_FEEDBACKS"
val ALLOWS_AUTO_CONDENSE_ADDRESSES = "LJM_ALLOWS_AUTO_CONDENSE_ADDRESSES"
val OPEN_MODE = "LJM_OPEN_MODE"
val KEEP_OPEN = 1.0
val OPEN_CLOSE = 2.0
val MODBUS_MAP_CONSTANTS_FILE = "LJM_MODBUS_MAP_CONSTANTS_FILE"
val ERROR_CONSTANTS_FILE = "LJM_ERROR_CONSTANTS_FILE"
val DEBUG_LOG_FILE = "LJM_DEBUG_LOG_FILE"
val CONSTANTS_FILE = "LJM_CONSTANTS_FILE"
val DEBUG_LOG_FILE_MAX_SIZE = "LJM_DEBUG_LOG_FILE_MAX_SIZE"
val STREAM_TRANSFERS_PER_SECOND = "LJM_STREAM_TRANSFERS_PER_SECOND"
val RETRY_ON_TRANSACTION_ID_MISMATCH = "LJM_RETRY_ON_TRANSACTION_ID_MISMATCH"
val OLD_FIRMWARE_CHECK = "LJM_OLD_FIRMWARE_CHECK"
}
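// Illustrative usage sketch (not part of the original wrapper): how a handle obtained through
// LJM_OpenS could be combined with the multi-frame helpers above. The register names are
// examples only; each wrapper call already routes its return code through handleError.
//
//   val handle = new IntByReference(0)
//   LabJackM.INSTANCE.LJM_OpenS("ANY", "ANY", "ANY", handle)
//   val names = Array("AIN0", "AIN1")
//   val values = new Array[Double](names.length)
//   val errorAddress = new IntByReference(-1)
//   eReadNames(handle.getValue, names.length, names, values, errorAddress)
//   LabJackM.INSTANCE.LJM_Close(handle.getValue)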
| porl/labjackscala | com/labjack/LJM.scala | Scala | mit | 26,766 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.jms.sink
import java.util
import com.datamountaineer.streamreactor.connect.errors.ErrorPolicyEnum
import com.datamountaineer.streamreactor.connect.jms.config.{JMSConfig, JMSConfigConstants, JMSSettings}
import com.datamountaineer.streamreactor.connect.jms.sink.writer.JMSWriter
import com.datamountaineer.streamreactor.connect.utils.{ProgressCounter, JarManifest}
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}
import scala.collection.JavaConversions._
/**
* <h1>JMSSinkTask</h1>
*
 * Kafka Connect JMS sink task. Called by the framework to put records to the target sink.
**/
class JMSSinkTask extends SinkTask with StrictLogging {
var writer: Option[JMSWriter] = None
val progressCounter = new ProgressCounter
private var enableProgress: Boolean = false
private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
/**
 * Parse the configuration and set up the writer.
**/
override def start(props: util.Map[String, String]): Unit = {
logger.info(scala.io.Source.fromInputStream(getClass.getResourceAsStream("/jms-sink-ascii.txt")).mkString + s" v $version")
logger.info(manifest.printManifest())
JMSConfig.config.parse(props)
val sinkConfig = new JMSConfig(props)
val settings = JMSSettings(sinkConfig, sink = true)
enableProgress = sinkConfig.getBoolean(JMSConfigConstants.PROGRESS_COUNTER_ENABLED)
// if the error policy is RETRY, set the retry interval
if (settings.errorPolicy.equals(ErrorPolicyEnum.RETRY)) {
context.timeout(sinkConfig.getInt(JMSConfigConstants.ERROR_RETRY_INTERVAL).toLong)
}
writer = Some(JMSWriter(settings))
}
/**
 * Pass the SinkRecords to the writer for writing.
**/
override def put(records: util.Collection[SinkRecord]): Unit = {
val seq = records.toVector
writer.foreach(w => w.write(seq))
if (enableProgress) {
progressCounter.update(seq)
}
}
/**
* Clean up connections
**/
override def stop(): Unit = {
logger.info("Stopping JMS sink.")
writer.foreach(w => w.close())
}
override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {
// TODO: have the writer expose an isBusy check; an await could then be implemented internally with a CountDownLatch
}
override def version: String = manifest.version()
}
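// Hedged lifecycle sketch (for illustration only; the Connect framework normally drives these
// calls, and the configuration keys live in JMSConfigConstants rather than being shown here):
//
//   val task = new JMSSinkTask
//   task.start(props)      // props: java.util.Map[String, String] with the JMS connection/KCQL settings
//   task.put(records)      // records: java.util.Collection[SinkRecord] delivered by the framework
//   task.stop()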
| CodeSmell/stream-reactor | kafka-connect-jms/src/main/scala/com/datamountaineer/streamreactor/connect/jms/sink/JMSSinkTask.scala | Scala | apache-2.0 | 3,126 |
package com.allaboutscala.learn.spark.functions
import com.allaboutscala.learn.spark.utils.Context
/**
* Created by Nadim Bahadoor on 28/06/2016.
*
* Tutorial: Learn How To Use Apache Spark
*
* [[http://allaboutscala.com/big-data/spark/]]
*
* Copyright 2016 Nadim Bahadoor (http://allaboutscala.com)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* [http://www.apache.org/licenses/LICENSE-2.0]
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
object Tutorial_14_DataFrameStringFunctions extends App with Context {
val donuts = Seq(("plain donut", 1.50, "2018-04-17"), ("vanilla donut", 2.0, "2018-04-01"), ("glazed donut", 2.50, "2018-04-02"))
val df = sparkSession
.createDataFrame(donuts)
.toDF("Donut Name", "Price", "Purchase Date")
import org.apache.spark.sql.functions._
import sparkSession.sqlContext.implicits._
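// Each withColumn below demonstrates a built-in string function from org.apache.spark.sql.functions.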
df
.withColumn("Contains plain", instr($"Donut Name", "donut"))
.withColumn("Length", length($"Donut Name"))
.withColumn("Trim", trim($"Donut Name"))
.withColumn("LTrim", ltrim($"Donut Name"))
.withColumn("RTrim", rtrim($"Donut Name"))
.withColumn("Reverse", reverse($"Donut Name"))
.withColumn("Substring", substring($"Donut Name", 0, 5))
.withColumn("IsNull", isnull($"Donut Name"))
.withColumn("Concat", concat_ws(" - ", $"Donut Name", $"Price"))
.withColumn("InitCap", initcap($"Donut Name"))
.show()
}
| nadimbahadoor/learn-spark | source-code/learn-spark/src/main/scala/com/allaboutscala/learn/spark/functions/Tutorial_14_DataFrameStringFunctions.scala | Scala | apache-2.0 | 1,841 |
/**
* Copyright (C) 2009-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.io
import java.net.InetSocketAddress
import java.nio.channels.{ SelectionKey, DatagramChannel }
import akka.actor.{ ActorRef, ActorLogging, Actor }
import akka.io.Udp.{ CommandFailed, Send }
import akka.io.SelectionHandler._
import scala.util.control.NonFatal
/**
* INTERNAL API
*/
private[io] trait WithUdpSend {
me: Actor with ActorLogging ⇒
private var pendingSend: Send = null
private var pendingCommander: ActorRef = null
// If the first send attempt fails, we allow one more try once the channel is selected writable, but no more.
// This flag signals that the pending send has already been attempted once.
private var retriedSend = false
private def hasWritePending = pendingSend ne null
def channel: DatagramChannel
def udp: UdpExt
val settings = udp.settings
import settings._
def sendHandlers(registration: ChannelRegistration): Receive = {
case send: Send if hasWritePending ⇒
if (TraceLogging) log.debug("Dropping write because queue is full")
sender() ! CommandFailed(send)
case send: Send if send.payload.isEmpty ⇒
if (send.wantsAck)
sender() ! send.ack
case send: Send ⇒
pendingSend = send
pendingCommander = sender()
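      // Unresolved hostnames are resolved through the Dns extension before sending; a failed
      // resolution or a failing send is reported back to the original sender as CommandFailed.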
if (send.target.isUnresolved) {
Dns.resolve(send.target.getHostName)(context.system, self) match {
case Some(r) ⇒
try {
pendingSend = pendingSend.copy(target = new InetSocketAddress(r.addr, pendingSend.target.getPort))
doSend(registration)
} catch {
case NonFatal(e) ⇒
sender() ! CommandFailed(send)
log.debug(
"Failure while sending UDP datagram to remote address [{}]: {}",
send.target, e)
retriedSend = false
pendingSend = null
pendingCommander = null
}
case None ⇒
sender() ! CommandFailed(send)
log.debug(
"Name resolution failed for remote address [{}]",
send.target)
retriedSend = false
pendingSend = null
pendingCommander = null
}
} else {
doSend(registration)
}
case ChannelWritable ⇒ if (hasWritePending) doSend(registration)
}
private def doSend(registration: ChannelRegistration): Unit = {
val buffer = udp.bufferPool.acquire()
try {
buffer.clear()
pendingSend.payload.copyToBuffer(buffer)
buffer.flip()
val writtenBytes = channel.send(buffer, pendingSend.target)
if (TraceLogging) log.debug("Wrote [{}] bytes to channel", writtenBytes)
// Datagram channel either sends the whole message, or nothing
if (writtenBytes == 0) {
if (retriedSend) {
pendingCommander ! CommandFailed(pendingSend)
retriedSend = false
pendingSend = null
pendingCommander = null
} else {
registration.enableInterest(SelectionKey.OP_WRITE)
retriedSend = true
}
} else {
if (pendingSend.wantsAck) pendingCommander ! pendingSend.ack
retriedSend = false
pendingSend = null
pendingCommander = null
}
} finally {
udp.bufferPool.release(buffer)
}
}
}
| rorygraves/perf_tester | corpus/akka/akka-actor/src/main/scala/akka/io/WithUdpSend.scala | Scala | apache-2.0 | 3,363 |
package com.github.gdefacci.briscola
import argonaut._
import scalaz.{ -\/, \/, \/- }
import org.obl.raz.Path
import com.github.gdefacci.briscola.web.util.ArgonautHelper
import com.github.gdefacci.briscola.presentation.sitemap.SiteMap
import com.github.gdefacci.briscola.presentation._
import com.github.gdefacci.briscola.presentation.competition._
import com.github.gdefacci.briscola.presentation.game._
import com.github.gdefacci.briscola.presentation.player._
import com.github.gdefacci.briscola.web.util.ArgonautHttp4sDecodeHelper.ScalazDecodeResult
import argonaut.JString
object TestDecoders {
import CommonJsonDecoders._
implicit val pathDecoder = ArgonautHelper.pathDecoder
case class OutPlayer(self: Path, name: String)
implicit lazy val siteMapDecode = DecodeJson.derive[SiteMap]
object PrivatePlayer {
implicit lazy val playerDecode = DecodeJson.derive[Player]
}
implicit lazy val outPlayerDecode = DecodeJson.derive[OutPlayer]
lazy val playersDecode = DecodeJson[Collection[OutPlayer]] { j =>
j.as[Map[String, Json]].flatMap { mp =>
mp.get("members") match {
case None =>
DecodeResult.fail(s"missing 'mebers' property ${j}", j.history)
case Some(j) =>
j.as[Seq[Json]].flatMap { seq =>
val z: (String, CursorHistory) \/ Seq[OutPlayer] = \/-(Nil)
seq.foldLeft(z) { (acc, ji) =>
acc.flatMap { players =>
outPlayerDecode.decodeJson(ji).toDisjunction.map(players :+ _)
}
} match {
case -\/((str, curs)) => DecodeResult.fail(str, curs)
case \/-(pls) => DecodeResult.ok(Collection(pls))
}
}
}
}
}
implicit lazy val competitionStateDecode = {
implicit val matchKindDecode = {
val singleMatchDecode = DecodeJson[MatchKind] { j =>
(j --\ "kind" ).as[String].flatMap {
case "singleMatch" => DecodeResult.ok[MatchKind](SingleMatch)
case x => DecodeResult.fail[MatchKind]("no SingleMatch", j.history)
}
}
singleMatchDecode |||
DecodeJson.derive[NumberOfGamesMatchKind].map(d => d: MatchKind) |||
DecodeJson.derive[TargetPointsMatchKind].map(d => d: MatchKind)
}
implicit val competitionStartDeadline = {
val allPlayersDecode = DecodeJson[CompetitionStartDeadline] { j =>
(j --\ "kind").as[String].flatMap {
case "allPlayers" => DecodeResult.ok(AllPlayers)
case x => DecodeResult.fail("no AllPlayers", j.history)
}
}
allPlayersDecode ||| DecodeJson.derive[OnPlayerCount].map(d => d: CompetitionStartDeadline)
}
implicit lazy val competitionDecode = DecodeJson.derive[Competition]
implicit lazy val competitionStateKindDecoder = ArgonautHelper.enumDecoder(CompetitionStateKind)
DecodeJson.derive[CompetitionState]
}
implicit lazy val byteDecode = DecodeJson[Byte] (js => js.as[Int].map(_.toByte))
implicit lazy val dropReason:DecodeJson[DropReason] = {
lazy val playerLeftDropReason = ofKind(DropReasonKind.playerLeft, DecodeJson.derive[PlayerLeft])
playerLeftDropReason.map( pl => pl:DropReason )
}
import GameJsonDecoders.seedDecoder
implicit val cardDecode = DecodeJson.derive[Card]
implicit val moveDecode = DecodeJson.derive[Move]
implicit val scoreDecode = DecodeJson.derive[Score]
implicit val gameResultDecode:DecodeJson[GameResult] = {
implicit val teamScoreDecode = DecodeJson.derive[TeamScore]
implicit val teamsGameResultDecode = DecodeJson.derive[TeamsGameResult]
implicit val playerFinalStateDecode = DecodeJson.derive[PlayerFinalState]
implicit val playersGameResultDecode = DecodeJson.derive[PlayersGameResult]
teamsGameResultDecode.map(p => p:GameResult) ||| playersGameResultDecode
}
implicit val playerStateDecode = DecodeJson.derive[PlayerState]
implicit val finishedGameStateDecode = ofKind(GameStateKind.finished, DecodeJson.derive[FinalGameState])
implicit val activeGameStateDecode = ofKind(GameStateKind.active, DecodeJson.derive[ActiveGameState])
implicit val droppedGameStateDecode = ofKind(GameStateKind.dropped, DecodeJson.derive[DroppedGameState])
implicit lazy val gameStateDecode:DecodeJson[GameState] = finishedGameStateDecode.map(p => p:GameState) |||
activeGameStateDecode.map(p => p:GameState) |||
droppedGameStateDecode.map(p => p:GameState)
def decode[T](str: String)(implicit dj: DecodeJson[T]) = {
\/.fromEither(JsonParser.parse(str)).flatMap(dj.decodeJson(_).toDisjunction)
}
private class JsonDecodePF[T](implicit dj: DecodeJson[T]) {
def unapply(text:String):Option[T] = {
\/.fromEither(JsonParser.parse(text)).flatMap( dj.decodeJson(_).toDisjunction ).toOption
}
}
def decodePF[T](implicit dj: DecodeJson[T]): PartialFunction[String, T] = {
val Dec = new JsonDecodePF[T]
return {
case Dec(v) => v
}
}
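// Hedged usage note: the resulting PartialFunction composes with collect, e.g.
//   jsonStrings.collect(decodePF[OutPlayer])
// keeps only the strings that parse and decode successfully.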
def ofKind[E <: Enumeration, T <: ADT[E]](e:E#Value, dj:DecodeJson[T]):DecodeJson[T] = DecodeJson[T] { js =>
val kstr = js.get[String]("kind")
val toDecode = kstr.map(_ == e.toString).getOr(false)
if (toDecode) dj.decode(js)
else DecodeResult.fail(s"not a $e, got $js", js.history)
}
object CompetitionEventDecoders {
implicit lazy val CreatedCompetitionDecode = ofKind(CompetitionEventKind.createdCompetition, DecodeJson.derive[CreatedCompetition])
implicit lazy val CompetitionAcceptedDecode = ofKind(CompetitionEventKind.playerAccepted, DecodeJson.derive[CompetitionAccepted])
implicit lazy val CompetitionDeclinedDecode = ofKind(CompetitionEventKind.playerDeclined, DecodeJson.derive[CompetitionDeclined])
}
object GameEventDecoders {
implicit lazy val GameStartedDecode = ofKind(BriscolaEventKind.gameStarted, DecodeJson.derive[GameStarted])
implicit lazy val GameDroppedDecode = ofKind(BriscolaEventKind.gameDropped, DecodeJson.derive[GameDropped])
implicit lazy val CardPlayedDecode = ofKind(BriscolaEventKind.cardPlayed, DecodeJson.derive[CardPlayed])
implicit lazy val briscolaEventDecode:DecodeJson[BriscolaEvent] = GameStartedDecode.map(p => p:BriscolaEvent) ||| GameDroppedDecode ||| CardPlayedDecode
}
object PlayerEventDecoders {
implicit lazy val PlayerLogOnDecode = ofKind(PlayerEventKind.playerLogOn, DecodeJson.derive[PlayerLogOn])
implicit lazy val PlayerLogOffDecode = ofKind(PlayerEventKind.playerLogOff, DecodeJson.derive[PlayerLogOff])
}
implicit def stateAndEventDecoder[E, S](implicit decodeEvent: DecodeJson[E], decodeState: DecodeJson[S]): DecodeJson[EventAndState[E, S]] = {
DecodeJson[EventAndState[E, S]] { j =>
j.as[Map[String, Json]].flatMap { mp =>
for {
ev <- decodeEvent.decodeJson(mp("event"))
state <- decodeState.decodeJson(mp("state"))
} yield EventAndState[E, S](ev, state)
}
}
}
}
| gdefacci/briscola | ddd-briscola-web/src/test/scala/com/github/gdefacci/briscola/TestDecoders.scala | Scala | bsd-3-clause | 6,982 |
/*
* Copyright (c) 2010 e.e d3si9n
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package scalaxb.compiler.xsd
import javax.xml.namespace.QName
trait XsTypeSymbol extends scala.xml.TypeSymbol {
val name: String
override def toString(): String = name
}
object XsAnyType extends XsTypeSymbol {
val name = "XsAnyType"
}
object XsNillableAny extends XsTypeSymbol {
val name = "XsNillableAny"
}
object XsLongAll extends XsTypeSymbol {
val name = "XsLongAll"
}
object XsLongAttribute extends XsTypeSymbol {
val name = "XsLongAttribute"
}
object XsInterNamespace extends XsTypeSymbol {
val name = "XsInterNamespace"
}
object XsAnyAttribute extends XsTypeSymbol {
val name = "XsAnyAttribute"
}
object XsMixed extends XsTypeSymbol {
val name = "XsMixed"
}
case class XsWildcard(namespaceConstraint: List[String]) extends XsTypeSymbol {
val name = "XsWildcard(" + namespaceConstraint.mkString(",") + ")"
}
case class XsDataRecord(member: XsTypeSymbol) extends XsTypeSymbol {
val name = "XsDataRecord(" + member + ")"
}
object ReferenceTypeSymbol {
def unapply(value: ReferenceTypeSymbol): Option[TypeDecl] = Some(value.decl)
def apply(namespace: Option[String], localpart: String) =
new ReferenceTypeSymbol(new QName(namespace.orNull, localpart))
}
class ReferenceTypeSymbol(val qname: QName) extends XsTypeSymbol {
val namespace = masked.scalaxb.Helper.nullOrEmpty(qname.getNamespaceURI)
val localPart = qname.getLocalPart
val name: String = (namespace map {"{%s}" format _} getOrElse("")) + localPart
var decl: TypeDecl = null
override def toString(): String = {
if (decl == null) "ReferenceTypeSymbol(" + qname.toString + ",null)"
else "ReferenceTypeSymbol(" + qname.toString + ")"
}
}
object AnyType {
def unapply(value: XsTypeSymbol): Option[XsTypeSymbol] = value match {
case x: XsWildcard => Some(x)
case XsAnyType => Some(XsAnyType)
case XsAnySimpleType => Some(XsAnySimpleType)
case _ => None
}
}
case class XsXMLFormat(member: Decl) extends XsTypeSymbol {
val name = "XsXMLFormat(" + (member match {
case decl: ComplexTypeDecl => decl.name
case group: AttributeGroupDecl => group.name
case _ => "_"
}) + ")"
}
class BuiltInSimpleTypeSymbol(val name: String) extends XsTypeSymbol
case class AttributeGroupSymbol(namespace: Option[String],
name: String) extends XsTypeSymbol
abstract class DerivSym
case class Extends(sym: XsTypeSymbol) extends DerivSym
case class Restricts(sym: XsTypeSymbol) extends DerivSym
object XsAnySimpleType extends BuiltInSimpleTypeSymbol("XsAnySimpleType") {}
object XsUnknown extends BuiltInSimpleTypeSymbol("String") {}
object XsDuration extends BuiltInSimpleTypeSymbol("javax.xml.datatype.Duration") {}
object XsDateTime extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsTime extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsDate extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsGYearMonth extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsGYear extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsGMonthDay extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsGDay extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsGMonth extends BuiltInSimpleTypeSymbol("javax.xml.datatype.XMLGregorianCalendar") {}
object XsBoolean extends BuiltInSimpleTypeSymbol("Boolean") {}
object XsFloat extends BuiltInSimpleTypeSymbol("Float") {}
object XsBase64Binary extends BuiltInSimpleTypeSymbol("scalaxb.Base64Binary") {}
object XsHexBinary extends BuiltInSimpleTypeSymbol("scalaxb.HexBinary") {}
object XsDouble extends BuiltInSimpleTypeSymbol("Double") {}
object XsAnyURI extends BuiltInSimpleTypeSymbol("java.net.URI") {}
object XsQName extends BuiltInSimpleTypeSymbol("javax.xml.namespace.QName") {}
object XsNOTATION extends BuiltInSimpleTypeSymbol("javax.xml.namespace.QName") {}
object XsString extends BuiltInSimpleTypeSymbol("String") {}
object XsNormalizedString extends BuiltInSimpleTypeSymbol("String") {}
object XsToken extends BuiltInSimpleTypeSymbol("String") {}
object XsLanguage extends BuiltInSimpleTypeSymbol("String") {}
object XsName extends BuiltInSimpleTypeSymbol("String") {}
object XsNMTOKEN extends BuiltInSimpleTypeSymbol("String") {}
object XsNMTOKENS extends BuiltInSimpleTypeSymbol("Seq[String]") {}
object XsNCName extends BuiltInSimpleTypeSymbol("String") {}
object XsID extends BuiltInSimpleTypeSymbol("String") {}
object XsIDREF extends BuiltInSimpleTypeSymbol("String") {}
object XsIDREFS extends BuiltInSimpleTypeSymbol("Seq[String]") {}
object XsENTITY extends BuiltInSimpleTypeSymbol("String") {}
object XsENTITIES extends BuiltInSimpleTypeSymbol("Seq[String]") {}
object XsDecimal extends BuiltInSimpleTypeSymbol("BigDecimal") {}
object XsInteger extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsNonPositiveInteger extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsNegativeInteger extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsNonNegativeInteger extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsPositiveInteger extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsLong extends BuiltInSimpleTypeSymbol("Long") {}
object XsUnsignedLong extends BuiltInSimpleTypeSymbol("BigInt") {}
object XsInt extends BuiltInSimpleTypeSymbol("Int") {}
object XsUnsignedInt extends BuiltInSimpleTypeSymbol("Long") {}
object XsShort extends BuiltInSimpleTypeSymbol("Short") {}
object XsUnsignedShort extends BuiltInSimpleTypeSymbol("Int") {}
object XsByte extends BuiltInSimpleTypeSymbol("Byte") {}
object XsUnsignedByte extends BuiltInSimpleTypeSymbol("Int") {}
object XsTypeSymbol {
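// Infix type alias: "A =>? B" is a PartialFunction[A, B].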
type =>?[A, B] = PartialFunction[A, B]
val LOCAL_ELEMENT = "http://scalaxb.org/local-element"
val toTypeSymbol: String =>? XsTypeSymbol = {
case "anyType" => XsAnyType
case "anySimpleType" => XsAnySimpleType
case "duration" => XsDuration
case "dateTime" => XsDateTime
case "time" => XsTime
case "date" => XsDate
case "gYearMonth" => XsGYearMonth
case "gYear" => XsGYear
case "gMonthDay" => XsGMonthDay
case "gDay" => XsGDay
case "gMonth" => XsGMonth
case "boolean" => XsBoolean
case "float" => XsFloat
case "base64Binary" => XsBase64Binary
case "hexBinary" => XsHexBinary
case "double" => XsDouble
case "anyURI" => XsAnyURI
case "QName" => XsQName
case "NOTATION" => XsNOTATION
case "string" => XsString
case "normalizedString" => XsNormalizedString
case "token" => XsToken
case "language" => XsLanguage
case "Name" => XsName
case "NMTOKEN" => XsNMTOKEN
case "NMTOKENS" => XsNMTOKENS
case "NCName" => XsNCName
case "ID" => XsID
case "IDREF" => XsIDREF
case "IDREFS" => XsIDREFS
case "ENTITY" => XsENTITY
case "ENTITIES" => XsENTITIES
case "decimal" => XsDecimal
case "integer" => XsInteger
case "nonPositiveInteger" => XsNonPositiveInteger
case "negativeInteger" => XsNegativeInteger
case "nonNegativeInteger" => XsNonNegativeInteger
case "positiveInteger" => XsPositiveInteger
case "long" => XsLong
case "unsignedLong" => XsUnsignedLong
case "int" => XsInt
case "unsignedInt" => XsUnsignedInt
case "short" => XsShort
case "unsignedShort" => XsUnsignedShort
case "byte" => XsByte
case "unsignedByte" => XsUnsignedByte
}
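// For example, toTypeSymbol("unsignedInt") yields XsUnsignedInt. The mapping is a PartialFunction,
// so callers should use isDefinedAt or lift for names outside the XSD built-in simple types.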
}
| justjoheinz/scalaxb | cli/src/main/scala/scalaxb/compiler/xsd/XsTypeSymbol.scala | Scala | mit | 9,231 |
import sbt._
import sbt.Keys._
import org.scalajs.sbtplugin.ScalaJSPlugin.autoImport._
object deps {
// https://github.com/lihaoyi/scalatags
val scalatags = "0.5.2"
// https://github.com/lloydmeta/enumeratum
val enumeratum = "1.3.7"
val akka = "2.5.1"
val akkaHttp = "10.0.5"
val jena = "3.0.1"
// https://github.com/scalaj/scalaj-http
val scalaj = "2.3.0"
// https://github.com/typesafehub/scala-logging
val scalaLogging = "3.1.0"
// https://github.com/ochrons/boopickle
val boopickle = "1.1.0"
// https://github.com/msgpack4z/msgpack4z-core
val msgpack4zCore = "0.3.7"
// https://github.com/msgpack4z/msgpack4z-java07
val msgpack4zJava = "0.3.5"
// https://github.com/antonkulaga/codemirror-facade
val codemirror = "5.5-0.5"
val jquery = "0.8.0"
val junitInterface = "0.11"
val coursier = "1.0.0-RC1"
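// These groups are presumably consumed from the build definition as, e.g.,
// libraryDependencies ++= deps.backend.value; Def.setting is used so that
// scalaVersion.value can be referenced inside the groups that need it.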
lazy val protocol = Def.setting(Seq(
"me.chrons" %%% "boopickle" % boopickle
))
lazy val backend = Def.setting(Seq(
"com.typesafe.akka" %% "akka-http-core" % akkaHttp,
"com.typesafe.akka" %% "akka-http" % akkaHttp,
"com.typesafe.akka" %% "akka-http-spray-json" % akkaHttp,
"com.typesafe.akka" %% "akka-stream" % akka,
"com.typesafe.akka" %% "akka-slf4j" % akka,
"com.lihaoyi" %%% "scalatags" % scalatags,
"org.apache.jena" % "apache-jena-libs" % jena,
"io.get-coursier" %% "coursier" % coursier,
"io.get-coursier" %% "coursier-cache" % coursier,
"org.scalaj" %% "scalaj-http" % scalaj,
"com.novocode" % "junit-interface" % junitInterface % "test",
"com.typesafe.akka" %% "akka-http-testkit" % akkaHttp % "test"
))
lazy val nvim = Def.setting(Seq(
"com.github.xuwei-k" %% "msgpack4z-core" % msgpack4zCore,
"com.github.xuwei-k" % "msgpack4z-java" % msgpack4zJava,
"com.beachape" %% "enumeratum" % enumeratum,
"org.scala-lang" % "scala-reflect" % scalaVersion.value,
"com.typesafe.scala-logging" %% "scala-logging" % scalaLogging
))
lazy val sjs = Def.setting(Seq(
"be.doeraene" %%% "scalajs-jquery" % jquery,
"org.denigma" %%% "codemirror-facade" % codemirror,
"com.lihaoyi" %%% "scalatags" % scalatags
))
lazy val webUi = Def.setting(Seq(
"be.doeraene" %%% "scalajs-jquery" % jquery,
"com.lihaoyi" %%% "scalatags" % scalatags
))
lazy val webjars = Def.setting(Seq(
"org.webjars" % "codemirror" % "5.5" / "codemirror.js",
// https://github.com/chjj/marked
"org.webjars.bower" % "marked" % "0.3.3" / "marked.js",
"org.webjars" % "d3js" % "3.5.5-1" / "d3.js",
// https://github.com/fgnass/spin.js
"org.webjars.bower" % "spin.js" % "2.3.1" / "spin.js"
))
lazy val firefoxPlugin = Def.setting(Seq(
"be.doeraene" %%% "scalajs-jquery" % jquery,
"com.lihaoyi" %%% "scalatags" % scalatags
))
lazy val scalacConverter = Def.setting(Seq(
"org.scala-lang" % "scala-compiler" % scalaVersion.value,
"org.scala-refactoring" %% "org.scala-refactoring.library" % "0.11.0" cross CrossVersion.full
))
lazy val javacConverter = Def.setting(Seq(
"org.ow2.asm" % "asm-commons" % "5.0.4",
"org.ow2.asm" % "asm-util" % "5.0.4"
))
lazy val dotcConverter = Def.setting(Seq(
"ch.epfl.lamp" %% "dotty" % "0.1-SNAPSHOT",
"me.d-d" % "scala-compiler" % "2.11.5-20160322-171045-e19b30b3cd"
))
lazy val scalaCompilerService = Def.setting(Seq(
"com.novocode" % "junit-interface" % junitInterface % "test"
))
lazy val nlp = Def.setting(Seq(
// https://github.com/extjwnl/extjwnl
"net.sf.extjwnl" % "extjwnl" % "1.9.1",
"net.sf.extjwnl" % "extjwnl-data-wn31" % "1.2",
"org.parboiled" %% "parboiled" % "2.1.3"
))
lazy val amoraSbtPlugin = Def.setting(Seq(
"io.get-coursier" %% "coursier" % coursier,
"io.get-coursier" %% "coursier-cache" % coursier
))
}
| sschaef/tooling-research | project/deps.scala | Scala | mit | 5,561 |
package feh.tec.visual.api
import java.awt.{Window, Component}
/**
* Draws `app` in `drawComponent` of `appWindow`
*/
trait AwtWindowedApp {
def appWindow: Window
def drawComponent: Component
def app: AgentApp with AppBasicControlApi
}
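// Hedged sketch of a concrete wiring (names and constructor arguments are hypothetical;
// constructing an AgentApp with AppBasicControlApi is application-specific):
//
//   class SwingWindowedApp(val appWindow: java.awt.Frame,
//                          val drawComponent: java.awt.Canvas,
//                          val app: AgentApp with AppBasicControlApi) extends AwtWindowedApp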
| fehu/agent-tareas | agent/src/main/scala/feh/tec/visual/api/AwtWindowedApp.scala | Scala | mit | 246 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import java.util.TimeZone
import scala.collection.JavaConverters._
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
import org.apache.log4j.Level
import org.scalatest.matchers.must.Matchers
import org.apache.spark.api.python.PythonEvalType
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{AliasIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Count, Sum}
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser.parsePlan
import org.apache.spark.sql.catalyst.plans.{Cross, Inner}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning, RangePartitioning, RoundRobinPartitioning}
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.connector.catalog.InMemoryTable
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
class AnalysisSuite extends AnalysisTest with Matchers {
import org.apache.spark.sql.catalyst.analysis.TestRelations._
test("fail for unresolved plan") {
intercept[AnalysisException] {
// `testRelation` does not have column `b`.
testRelation.select('b).analyze
}
}
test("fail if a leaf node has char/varchar type output") {
val schema1 = new StructType().add("c", CharType(5))
val schema2 = new StructType().add("c", VarcharType(5))
val schema3 = new StructType().add("c", ArrayType(CharType(5)))
Seq(schema1, schema2, schema3).foreach { schema =>
val table = new InMemoryTable("t", schema, Array.empty, Map.empty[String, String].asJava)
intercept[IllegalStateException] {
DataSourceV2Relation(
table, schema.toAttributes, None, None, CaseInsensitiveStringMap.empty()).analyze
}
}
}
test("union project *") {
val plan = (1 to 120)
.map(_ => testRelation)
.fold[LogicalPlan](testRelation) { (a, b) =>
a.select(UnresolvedStar(None)).select($"a").union(b.select(UnresolvedStar(None)))
}
assertAnalysisSuccess(plan)
}
test("check project's resolved") {
assert(Project(testRelation.output, testRelation).resolved)
assert(!Project(Seq(UnresolvedAttribute("a")), testRelation).resolved)
val explode = Explode(AttributeReference("a", IntegerType, nullable = true)())
assert(!Project(Seq(Alias(explode, "explode")()), testRelation).resolved)
assert(!Project(Seq(Alias(count(Literal(1)), "count")()), testRelation).resolved)
}
test("analyze project") {
checkAnalysis(
Project(Seq(UnresolvedAttribute("a")), testRelation),
Project(testRelation.output, testRelation))
checkAnalysisWithoutViewWrapper(
Project(Seq(UnresolvedAttribute("TbL.a")),
SubqueryAlias("TbL", UnresolvedRelation(TableIdentifier("TaBlE")))),
Project(testRelation.output, testRelation))
assertAnalysisError(
Project(Seq(UnresolvedAttribute("tBl.a")),
SubqueryAlias("TbL", UnresolvedRelation(TableIdentifier("TaBlE")))),
Seq("cannot resolve"))
checkAnalysisWithoutViewWrapper(
Project(Seq(UnresolvedAttribute("TbL.a")),
SubqueryAlias("TbL", UnresolvedRelation(TableIdentifier("TaBlE")))),
Project(testRelation.output, testRelation),
caseSensitive = false)
checkAnalysisWithoutViewWrapper(
Project(Seq(UnresolvedAttribute("tBl.a")),
SubqueryAlias("TbL", UnresolvedRelation(TableIdentifier("TaBlE")))),
Project(testRelation.output, testRelation),
caseSensitive = false)
}
test("resolve sort references - filter/limit") {
val a = testRelation2.output(0)
val b = testRelation2.output(1)
val c = testRelation2.output(2)
// Case 1: one missing attribute is in the leaf node and another is in the unary node
val plan1 = testRelation2
.where($"a" > "str").select($"a", $"b")
.where($"b" > "str").select($"a")
.sortBy($"b".asc, $"c".desc)
val expected1 = testRelation2
.where(a > "str").select(a, b, c)
.where(b > "str").select(a, b, c)
.sortBy(b.asc, c.desc)
.select(a)
checkAnalysis(plan1, expected1)
// Case 2: all the missing attributes are in the leaf node
val plan2 = testRelation2
.where($"a" > "str").select($"a")
.where($"a" > "str").select($"a")
.sortBy($"b".asc, $"c".desc)
val expected2 = testRelation2
.where(a > "str").select(a, b, c)
.where(a > "str").select(a, b, c)
.sortBy(b.asc, c.desc)
.select(a)
checkAnalysis(plan2, expected2)
}
test("resolve sort references - join") {
val a = testRelation2.output(0)
val b = testRelation2.output(1)
val c = testRelation2.output(2)
val h = testRelation3.output(3)
// Case: join itself can resolve all the missing attributes
val plan = testRelation2.join(testRelation3)
.where($"a" > "str").select($"a", $"b")
.sortBy($"c".desc, $"h".asc)
val expected = testRelation2.join(testRelation3)
.where(a > "str").select(a, b, c, h)
.sortBy(c.desc, h.asc)
.select(a, b)
checkAnalysis(plan, expected)
}
test("resolve sort references - aggregate") {
val a = testRelation2.output(0)
val b = testRelation2.output(1)
val c = testRelation2.output(2)
val alias_a3 = count(a).as("a3")
val alias_b = b.as("aggOrder")
// Case 1: when the child of Sort is not Aggregate,
// the sort reference is handled by the rule ResolveSortReferences
val plan1 = testRelation2
.groupBy($"a", $"c", $"b")($"a", $"c", count($"a").as("a3"))
.select($"a", $"c", $"a3")
.orderBy($"b".asc)
val expected1 = testRelation2
.groupBy(a, c, b)(a, c, alias_a3, b)
.select(a, c, alias_a3.toAttribute, b)
.orderBy(b.asc)
.select(a, c, alias_a3.toAttribute)
checkAnalysis(plan1, expected1)
// Case 2: when the child of Sort is Aggregate,
// the sort reference is handled by the rule ResolveAggregateFunctions
val plan2 = testRelation2
.groupBy($"a", $"c", $"b")($"a", $"c", count($"a").as("a3"))
.orderBy($"b".asc)
val expected2 = testRelation2
.groupBy(a, c, b)(a, c, alias_a3, alias_b)
.orderBy(alias_b.toAttribute.asc)
.select(a, c, alias_a3.toAttribute)
checkAnalysis(plan2, expected2)
}
test("resolve relations") {
assertAnalysisError(UnresolvedRelation(TableIdentifier("tAbLe")), Seq())
checkAnalysisWithoutViewWrapper(UnresolvedRelation(TableIdentifier("TaBlE")), testRelation)
checkAnalysisWithoutViewWrapper(
UnresolvedRelation(TableIdentifier("tAbLe")), testRelation, caseSensitive = false)
checkAnalysisWithoutViewWrapper(
UnresolvedRelation(TableIdentifier("TaBlE")), testRelation, caseSensitive = false)
}
test("divide should be casted into fractional types") {
val plan = getAnalyzer.execute(
testRelation2.select(
$"a" / Literal(2) as "div1",
$"a" / $"b" as "div2",
$"a" / $"c" as "div3",
$"a" / $"d" as "div4",
$"e" / $"e" as "div5"))
val pl = plan.asInstanceOf[Project].projectList
assert(pl(0).dataType == DoubleType)
assert(pl(1).dataType == DoubleType)
assert(pl(2).dataType == DoubleType)
assert(pl(3).dataType == DoubleType)
assert(pl(4).dataType == DoubleType)
}
test("pull out nondeterministic expressions from RepartitionByExpression") {
val plan = RepartitionByExpression(Seq(Rand(33)), testRelation, numPartitions = 10)
val projected = Alias(Rand(33), "_nondeterministic")()
val expected =
Project(testRelation.output,
RepartitionByExpression(Seq(projected.toAttribute),
Project(testRelation.output :+ projected, testRelation),
numPartitions = 10))
checkAnalysis(plan, expected)
}
test("pull out nondeterministic expressions from Sort") {
val plan = Sort(Seq(SortOrder(Rand(33), Ascending)), false, testRelation)
val projected = Alias(Rand(33), "_nondeterministic")()
val expected =
Project(testRelation.output,
Sort(Seq(SortOrder(projected.toAttribute, Ascending)), false,
Project(testRelation.output :+ projected, testRelation)))
checkAnalysis(plan, expected)
}
test("SPARK-9634: cleanup unnecessary Aliases in LogicalPlan") {
val a = testRelation.output.head
var plan = testRelation.select(((a + 1).as("a+1") + 2).as("col"))
var expected = testRelation.select((a + 1 + 2).as("col"))
checkAnalysis(plan, expected)
plan = testRelation.groupBy(a.as("a1").as("a2"))((min(a).as("min_a") + 1).as("col"))
expected = testRelation.groupBy(a)((min(a) + 1).as("col"))
checkAnalysis(plan, expected)
// CreateStruct is a special case: we should not trim the Alias for it.
plan = testRelation.select(CreateStruct(Seq(a, (a + 1).as("a+1"))).as("col"))
expected = testRelation.select(CreateNamedStruct(Seq(
Literal(a.name), a,
Literal("a+1"), (a + 1))).as("col"))
checkAnalysis(plan, expected)
}
test("Analysis may leave unnecessary aliases") {
val att1 = testRelation.output.head
var plan = testRelation.select(
CreateStruct(Seq(att1, ((att1.as("aa")) + 1).as("a_plus_1"))).as("col"),
att1
)
val prevPlan = getAnalyzer.execute(plan)
plan = prevPlan.select(CreateArray(Seq(
CreateStruct(Seq(att1, (att1 + 1).as("a_plus_1"))).as("col1"),
/** alias should be eliminated by [[CleanupAliases]] */
"col".attr.as("col2")
)).as("arr"))
plan = getAnalyzer.execute(plan)
val expectedPlan = prevPlan.select(
CreateArray(Seq(
CreateNamedStruct(Seq(
Literal(att1.name), att1,
Literal("a_plus_1"), (att1 + 1))),
Symbol("col").struct(prevPlan.output(0).dataType.asInstanceOf[StructType]).notNull
)).as("arr")
)
checkAnalysis(plan, expectedPlan)
}
test("SPARK-10534: resolve attribute references in order by clause") {
val a = testRelation2.output(0)
val c = testRelation2.output(2)
val plan = testRelation2.select($"c").orderBy(Floor($"a").asc)
val expected = testRelation2.select(c, a)
.orderBy(Floor(Cast(a, DoubleType, Option(TimeZone.getDefault().getID))).asc).select(c)
checkAnalysis(plan, expected)
}
test("self intersect should resolve duplicate expression IDs") {
val plan = testRelation.intersect(testRelation, isAll = false)
assertAnalysisSuccess(plan)
}
test("SPARK-8654: invalid CAST in NULL IN(...) expression") {
val plan = Project(Alias(In(Literal(null), Seq(Literal(1), Literal(2))), "a")() :: Nil,
LocalRelation()
)
assertAnalysisSuccess(plan)
}
test("SPARK-8654: different types in inlist but can be converted to a common type") {
val plan = Project(Alias(In(Literal(null), Seq(Literal(1), Literal(1.2345))), "a")() :: Nil,
LocalRelation()
)
assertAnalysisSuccess(plan)
}
test("SPARK-8654: check type compatibility error") {
val plan = Project(Alias(In(Literal(null), Seq(Literal(true), Literal(1))), "a")() :: Nil,
LocalRelation()
)
assertAnalysisError(plan, Seq("data type mismatch: Arguments must be same type"))
}
test("SPARK-11725: correctly handle null inputs for ScalaUDF") {
def resolvedEncoder[T : TypeTag](): ExpressionEncoder[T] = {
ExpressionEncoder[T]().resolveAndBind()
}
val testRelation = LocalRelation(
AttributeReference("a", StringType)(),
AttributeReference("b", DoubleType)(),
AttributeReference("c", ShortType)(),
AttributeReference("d", DoubleType, nullable = false)())
val string = testRelation.output(0)
val double = testRelation.output(1)
val short = testRelation.output(2)
val nonNullableDouble = testRelation.output(3)
val nullResult = Literal.create(null, StringType)
def checkUDF(udf: Expression, transformed: Expression): Unit = {
checkAnalysis(
Project(Alias(udf, "")() :: Nil, testRelation),
Project(Alias(transformed, "")() :: Nil, testRelation)
)
}
// non-primitive parameters do not need special null handling
val udf1 = ScalaUDF((s: String) => "x", StringType, string :: Nil,
Option(resolvedEncoder[String]()) :: Nil)
val expected1 = udf1
checkUDF(udf1, expected1)
// only the primitive parameter needs special null handling
val udf2 = ScalaUDF((s: String, d: Double) => "x", StringType, string :: double :: Nil,
Option(resolvedEncoder[String]()) :: Option(resolvedEncoder[Double]()) :: Nil)
val expected2 =
If(IsNull(double), nullResult, udf2.copy(children = string :: KnownNotNull(double) :: Nil))
checkUDF(udf2, expected2)
// special null handling should apply to all primitive parameters
val udf3 = ScalaUDF((s: Short, d: Double) => "x", StringType, short :: double :: Nil,
Option(resolvedEncoder[Short]()) :: Option(resolvedEncoder[Double]()) :: Nil)
val expected3 = If(
IsNull(short) || IsNull(double),
nullResult,
udf3.copy(children = KnownNotNull(short) :: KnownNotNull(double) :: Nil))
checkUDF(udf3, expected3)
// we can skip special null handling for primitive parameters that are not nullable
val udf4 = ScalaUDF(
(s: Short, d: Double) => "x",
StringType,
short :: nonNullableDouble :: Nil,
Option(resolvedEncoder[Short]()) :: Option(resolvedEncoder[Double]()) :: Nil)
val expected4 = If(
IsNull(short),
nullResult,
udf4.copy(children = KnownNotNull(short) :: nonNullableDouble :: Nil))
checkUDF(udf4, expected4)
}
test("SPARK-24891 Fix HandleNullInputsForUDF rule") {
val a = testRelation.output(0)
val func = (x: Int, y: Int) => x + y
val udf1 = ScalaUDF(func, IntegerType, a :: a :: Nil,
Option(ExpressionEncoder[java.lang.Integer]()) ::
Option(ExpressionEncoder[java.lang.Integer]()) :: Nil)
val udf2 = ScalaUDF(func, IntegerType, a :: udf1 :: Nil,
Option(ExpressionEncoder[java.lang.Integer]()) ::
Option(ExpressionEncoder[java.lang.Integer]()) :: Nil)
val plan = Project(Alias(udf2, "")() :: Nil, testRelation)
comparePlans(plan.analyze, plan.analyze.analyze)
}
test("SPARK-11863 mixture of aliases and real columns in order by clause - tpcds 19,55,71") {
val a = testRelation2.output(0)
val c = testRelation2.output(2)
val alias1 = a.as("a1")
val alias2 = c.as("a2")
val alias3 = count(a).as("a3")
val plan = testRelation2
.groupBy($"a", $"c")($"a".as("a1"), $"c".as("a2"), count($"a").as("a3"))
.orderBy($"a1".asc, $"c".asc)
val expected = testRelation2
.groupBy(a, c)(alias1, alias2, alias3)
.orderBy(alias1.toAttribute.asc, alias2.toAttribute.asc)
.select(alias1.toAttribute, alias2.toAttribute, alias3.toAttribute)
checkAnalysis(plan, expected)
}
test("Eliminate the unnecessary union") {
val plan = Union(testRelation :: Nil)
val expected = testRelation
checkAnalysis(plan, expected)
}
test("SPARK-12102: Ignore nullability when comparing two sides of case") {
val relation = LocalRelation(Symbol("a").struct(Symbol("x").int),
Symbol("b").struct(Symbol("x").int.withNullability(false)))
val plan = relation.select(
CaseWhen(Seq((Literal(true), Symbol("a").attr)), Symbol("b")).as("val"))
assertAnalysisSuccess(plan)
}
test("Keep attribute qualifiers after dedup") {
val input = LocalRelation(Symbol("key").int, Symbol("value").string)
val query =
Project(Seq($"x.key", $"y.key"),
Join(
Project(Seq($"x.key"), SubqueryAlias("x", input)),
Project(Seq($"y.key"), SubqueryAlias("y", input)),
Cross, None, JoinHint.NONE))
assertAnalysisSuccess(query)
}
private def assertExpressionType(
expression: Expression,
expectedDataType: DataType): Unit = {
val afterAnalyze =
Project(Seq(Alias(expression, "a")()), OneRowRelation()).analyze.expressions.head
if (!afterAnalyze.dataType.equals(expectedDataType)) {
fail(
s"""
|data type of expression $expression doesn't match expected:
|Actual data type:
|${afterAnalyze.dataType}
|
|Expected data type:
|${expectedDataType}
""".stripMargin)
}
}
test("SPARK-15776: test whether Divide expression's data type can be deduced correctly by " +
"analyzer") {
assertExpressionType(sum(Divide(1, 2)), DoubleType)
assertExpressionType(sum(Divide(1.0, 2)), DoubleType)
assertExpressionType(sum(Divide(1, 2.0)), DoubleType)
assertExpressionType(sum(Divide(1.0, 2.0)), DoubleType)
assertExpressionType(sum(Divide(1, 2.0f)), DoubleType)
assertExpressionType(sum(Divide(1.0f, 2)), DoubleType)
assertExpressionType(sum(Divide(1, Decimal(2))), DecimalType(22, 11))
assertExpressionType(sum(Divide(Decimal(1), 2)), DecimalType(26, 6))
assertExpressionType(sum(Divide(Decimal(1), 2.0)), DoubleType)
assertExpressionType(sum(Divide(1.0, Decimal(2.0))), DoubleType)
}
test("SPARK-18058: union and set operations shall not care about the nullability" +
" when comparing column types") {
val firstTable = LocalRelation(
AttributeReference("a",
StructType(Seq(StructField("a", IntegerType, nullable = true))), nullable = false)())
val secondTable = LocalRelation(
AttributeReference("a",
StructType(Seq(StructField("a", IntegerType, nullable = false))), nullable = false)())
val unionPlan = Union(firstTable, secondTable)
assertAnalysisSuccess(unionPlan)
val r1 = Except(firstTable, secondTable, isAll = false)
val r2 = Intersect(firstTable, secondTable, isAll = false)
assertAnalysisSuccess(r1)
assertAnalysisSuccess(r2)
}
test("resolve as with an already existed alias") {
checkAnalysis(
Project(Seq(UnresolvedAttribute("tbl2.a")),
SubqueryAlias("tbl", testRelation).as("tbl2")),
Project(testRelation.output, testRelation),
caseSensitive = false)
checkAnalysis(SubqueryAlias("tbl", testRelation).as("tbl2"), testRelation)
}
test("SPARK-20311 range(N) as alias") {
def rangeWithAliases(args: Seq[Int], outputNames: Seq[String]): LogicalPlan = {
SubqueryAlias("t", UnresolvedTableValuedFunction("range", args.map(Literal(_)), outputNames))
.select(star())
}
assertAnalysisSuccess(rangeWithAliases(3 :: Nil, "a" :: Nil))
assertAnalysisSuccess(rangeWithAliases(1 :: 4 :: Nil, "b" :: Nil))
assertAnalysisSuccess(rangeWithAliases(2 :: 6 :: 2 :: Nil, "c" :: Nil))
assertAnalysisError(
rangeWithAliases(3 :: Nil, "a" :: "b" :: Nil),
Seq("Number of given aliases does not match number of output columns. "
+ "Function name: range; number of aliases: 2; number of output columns: 1."))
}
test("SPARK-20841 Support table column aliases in FROM clause") {
def tableColumnsWithAliases(outputNames: Seq[String]): LogicalPlan = {
UnresolvedSubqueryColumnAliases(
outputNames,
SubqueryAlias("t", UnresolvedRelation(TableIdentifier("TaBlE3")))
).select(star())
}
assertAnalysisSuccess(tableColumnsWithAliases("col1" :: "col2" :: "col3" :: "col4" :: Nil))
assertAnalysisError(
tableColumnsWithAliases("col1" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 1; number of columns: 4."))
assertAnalysisError(
tableColumnsWithAliases("col1" :: "col2" :: "col3" :: "col4" :: "col5" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 5; number of columns: 4."))
}
test("SPARK-20962 Support subquery column aliases in FROM clause") {
def tableColumnsWithAliases(outputNames: Seq[String]): LogicalPlan = {
UnresolvedSubqueryColumnAliases(
outputNames,
SubqueryAlias(
"t",
UnresolvedRelation(TableIdentifier("TaBlE3")))
).select(star())
}
assertAnalysisSuccess(tableColumnsWithAliases("col1" :: "col2" :: "col3" :: "col4" :: Nil))
assertAnalysisError(
tableColumnsWithAliases("col1" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 1; number of columns: 4."))
assertAnalysisError(
tableColumnsWithAliases("col1" :: "col2" :: "col3" :: "col4" :: "col5" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 5; number of columns: 4."))
}
test("SPARK-20963 Support aliases for join relations in FROM clause") {
def joinRelationWithAliases(outputNames: Seq[String]): LogicalPlan = {
val src1 = LocalRelation(Symbol("id").int, Symbol("v1").string).as("s1")
val src2 = LocalRelation(Symbol("id").int, Symbol("v2").string).as("s2")
UnresolvedSubqueryColumnAliases(
outputNames,
SubqueryAlias(
"dst",
src1.join(src2, Inner, Option(Symbol("s1.id") === Symbol("s2.id"))))
).select(star())
}
assertAnalysisSuccess(joinRelationWithAliases("col1" :: "col2" :: "col3" :: "col4" :: Nil))
assertAnalysisError(
joinRelationWithAliases("col1" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 1; number of columns: 4."))
assertAnalysisError(
joinRelationWithAliases("col1" :: "col2" :: "col3" :: "col4" :: "col5" :: Nil),
Seq("Number of column aliases does not match number of columns. " +
"Number of column aliases: 5; number of columns: 4."))
}
test("SPARK-22614 RepartitionByExpression partitioning") {
def checkPartitioning[T <: Partitioning: ClassTag](
numPartitions: Int, exprs: Expression*): Unit = {
val partitioning = RepartitionByExpression(exprs, testRelation2, numPartitions).partitioning
val clazz = implicitly[ClassTag[T]].runtimeClass
assert(clazz.isInstance(partitioning))
}
checkPartitioning[HashPartitioning](numPartitions = 10, exprs = Literal(20))
checkPartitioning[HashPartitioning](numPartitions = 10,
exprs = Symbol("a").attr, Symbol("b").attr)
checkPartitioning[RangePartitioning](numPartitions = 10,
exprs = SortOrder(Literal(10), Ascending))
checkPartitioning[RangePartitioning](numPartitions = 10,
exprs = SortOrder(Symbol("a").attr, Ascending), SortOrder(Symbol("b").attr, Descending))
checkPartitioning[RoundRobinPartitioning](numPartitions = 10, exprs = Seq.empty: _*)
intercept[IllegalArgumentException] {
checkPartitioning(numPartitions = 0, exprs = Literal(20))
}
intercept[IllegalArgumentException] {
checkPartitioning(numPartitions = -1, exprs = Literal(20))
}
intercept[IllegalArgumentException] {
checkPartitioning(numPartitions = 10, exprs =
SortOrder(Symbol("a").attr, Ascending), Symbol("b").attr)
}
}
test("SPARK-24208: analysis fails on self-join with FlatMapGroupsInPandas") {
val pythonUdf = PythonUDF("pyUDF", null,
StructType(Seq(StructField("a", LongType))),
Seq.empty,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
true)
val output = pythonUdf.dataType.asInstanceOf[StructType].toAttributes
val project = Project(Seq(UnresolvedAttribute("a")), testRelation)
val flatMapGroupsInPandas = FlatMapGroupsInPandas(
Seq(UnresolvedAttribute("a")), pythonUdf, output, project)
val left = SubqueryAlias("temp0", flatMapGroupsInPandas)
val right = SubqueryAlias("temp1", flatMapGroupsInPandas)
val join = Join(left, right, Inner, None, JoinHint.NONE)
assertAnalysisSuccess(
Project(Seq(UnresolvedAttribute("temp0.a"), UnresolvedAttribute("temp1.a")), join))
}
test("SPARK-34319: analysis fails on self-join with FlatMapCoGroupsInPandas") {
val pythonUdf = PythonUDF("pyUDF", null,
StructType(Seq(StructField("a", LongType))),
Seq.empty,
PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
true)
val output = pythonUdf.dataType.asInstanceOf[StructType].toAttributes
val project1 = Project(Seq(UnresolvedAttribute("a")), testRelation)
val project2 = Project(Seq(UnresolvedAttribute("a")), testRelation2)
val flatMapGroupsInPandas = FlatMapCoGroupsInPandas(
1,
1,
pythonUdf,
output,
project1,
project2)
val left = SubqueryAlias("temp0", flatMapGroupsInPandas)
val right = SubqueryAlias("temp1", flatMapGroupsInPandas)
val join = Join(left, right, Inner, None, JoinHint.NONE)
assertAnalysisSuccess(
Project(Seq(UnresolvedAttribute("temp0.a"), UnresolvedAttribute("temp1.a")), join))
}
test("SPARK-34319: analysis fails on self-join with MapInPandas") {
val pythonUdf = PythonUDF("pyUDF", null,
StructType(Seq(StructField("a", LongType))),
Seq.empty,
PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
true)
val output = pythonUdf.dataType.asInstanceOf[StructType].toAttributes
val project = Project(Seq(UnresolvedAttribute("a")), testRelation)
val mapInPandas = MapInPandas(
pythonUdf,
output,
project)
val left = SubqueryAlias("temp0", mapInPandas)
val right = SubqueryAlias("temp1", mapInPandas)
val join = Join(left, right, Inner, None, JoinHint.NONE)
assertAnalysisSuccess(
Project(Seq(UnresolvedAttribute("temp0.a"), UnresolvedAttribute("temp1.a")), join))
}
test("SPARK-34741: Avoid ambiguous reference in MergeIntoTable") {
val cond = 'a > 1
assertAnalysisError(
MergeIntoTable(
testRelation,
testRelation,
cond,
UpdateAction(Some(cond), Assignment('a, 'a) :: Nil) :: Nil,
Nil
),
"Reference 'a' is ambiguous" :: Nil)
}
test("SPARK-24488 Generator with multiple aliases") {
assertAnalysisSuccess(
listRelation.select(Explode($"list").as("first_alias").as("second_alias")))
assertAnalysisSuccess(
listRelation.select(MultiAlias(MultiAlias(
PosExplode($"list"), Seq("first_pos", "first_val")), Seq("second_pos", "second_val"))))
}
test("SPARK-24151: CURRENT_DATE, CURRENT_TIMESTAMP should be case insensitive") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "true") {
val input = Project(Seq(
UnresolvedAttribute("current_date"),
UnresolvedAttribute("CURRENT_DATE"),
UnresolvedAttribute("CURRENT_TIMESTAMP"),
UnresolvedAttribute("current_timestamp")), testRelation)
val expected = Project(Seq(
Alias(CurrentDate(), toPrettySQL(CurrentDate()))(),
Alias(CurrentDate(), toPrettySQL(CurrentDate()))(),
Alias(CurrentTimestamp(), toPrettySQL(CurrentTimestamp()))(),
Alias(CurrentTimestamp(), toPrettySQL(CurrentTimestamp()))()), testRelation).analyze
checkAnalysis(input, expected)
}
}
test("CTE with non-existing column alias") {
assertAnalysisError(parsePlan("WITH t(x) AS (SELECT 1) SELECT * FROM t WHERE y = 1"),
Seq("cannot resolve 'y' given input columns: [t.x]"))
}
test("CTE with non-matching column alias") {
assertAnalysisError(parsePlan("WITH t(x, y) AS (SELECT 1) SELECT * FROM t WHERE x = 1"),
Seq("Number of column aliases does not match number of columns. Number of column aliases: " +
"2; number of columns: 1."))
}
test("SPARK-28251: Insert into non-existing table error message is user friendly") {
assertAnalysisError(parsePlan("INSERT INTO test VALUES (1)"),
Seq("Table not found: test"))
}
test("check CollectMetrics resolved") {
val a = testRelation.output.head
val sum = Sum(a).toAggregateExpression().as("sum")
val random_sum = Sum(Rand(1L)).toAggregateExpression().as("rand_sum")
val literal = Literal(1).as("lit")
// Ok
assert(CollectMetrics("event", literal :: sum :: random_sum :: Nil, testRelation).resolved)
// Bad name
assert(!CollectMetrics("", sum :: Nil, testRelation).resolved)
assertAnalysisError(CollectMetrics("", sum :: Nil, testRelation),
"observed metrics should be named" :: Nil)
// No columns
assert(!CollectMetrics("evt", Nil, testRelation).resolved)
def checkAnalysisError(exprs: Seq[NamedExpression], errors: String*): Unit = {
assertAnalysisError(CollectMetrics("event", exprs, testRelation), errors)
}
// Unwrapped attribute
checkAnalysisError(
a :: Nil,
"Attribute", "can only be used as an argument to an aggregate function")
// Unwrapped non-deterministic expression
checkAnalysisError(
Rand(10).as("rnd") :: Nil,
"non-deterministic expression", "can only be used as an argument to an aggregate function")
// Distinct aggregate
checkAnalysisError(
Sum(a).toAggregateExpression(isDistinct = true).as("sum") :: Nil,
"distinct aggregates are not allowed in observed metrics, but found")
// Nested aggregate
checkAnalysisError(
Sum(Sum(a).toAggregateExpression()).toAggregateExpression().as("sum") :: Nil,
"nested aggregates are not allowed in observed metrics, but found")
// Windowed aggregate
val windowExpr = WindowExpression(
RowNumber(),
WindowSpecDefinition(Nil, a.asc :: Nil,
SpecifiedWindowFrame(RowFrame, UnboundedPreceding, CurrentRow)))
checkAnalysisError(
windowExpr.as("rn") :: Nil,
"window expressions are not allowed in observed metrics, but found")
}
test("check CollectMetrics duplicates") {
val a = testRelation.output.head
val sum = Sum(a).toAggregateExpression().as("sum")
val count = Count(Literal(1)).toAggregateExpression().as("cnt")
// Same result - duplicate names are allowed
assertAnalysisSuccess(Union(
CollectMetrics("evt1", count :: Nil, testRelation) ::
CollectMetrics("evt1", count :: Nil, testRelation) :: Nil))
// Same children, structurally different metrics - fail
assertAnalysisError(Union(
CollectMetrics("evt1", count :: Nil, testRelation) ::
CollectMetrics("evt1", sum :: Nil, testRelation) :: Nil),
"Multiple definitions of observed metrics" :: "evt1" :: Nil)
// Different children, same metrics - fail
val b = Symbol("b").string
val tblB = LocalRelation(b)
assertAnalysisError(Union(
CollectMetrics("evt1", count :: Nil, testRelation) ::
CollectMetrics("evt1", count :: Nil, tblB) :: Nil),
"Multiple definitions of observed metrics" :: "evt1" :: Nil)
// Subquery different tree - fail
val subquery = Aggregate(Nil, sum :: Nil, CollectMetrics("evt1", count :: Nil, testRelation))
val query = Project(
b :: ScalarSubquery(subquery, Nil).as("sum") :: Nil,
CollectMetrics("evt1", count :: Nil, tblB))
assertAnalysisError(query, "Multiple definitions of observed metrics" :: "evt1" :: Nil)
// Aggregate with filter predicate - fail
val sumWithFilter = sum.transform {
case a: AggregateExpression => a.copy(filter = Some(true))
}.asInstanceOf[NamedExpression]
assertAnalysisError(
CollectMetrics("evt1", sumWithFilter :: Nil, testRelation),
"aggregates with filter predicate are not allowed" :: Nil)
}
test("Analysis exceed max iterations") {
    // RuleExecutor only throws an exception or logs a warning when the rule is supposed
    // to run more than once.
val maxIterations = 2
withSQLConf(SQLConf.ANALYZER_MAX_ITERATIONS.key -> maxIterations.toString) {
val testAnalyzer = new Analyzer(
new SessionCatalog(new InMemoryCatalog, FunctionRegistry.builtin))
val plan = testRelation2.select(
$"a" / Literal(2) as "div1",
$"a" / $"b" as "div2",
$"a" / $"c" as "div3",
$"a" / $"d" as "div4",
$"e" / $"e" as "div5")
val message = intercept[RuntimeException] {
testAnalyzer.execute(plan)
}.getMessage
assert(message.startsWith(s"Max iterations ($maxIterations) reached for batch Resolution, " +
s"please set '${SQLConf.ANALYZER_MAX_ITERATIONS.key}' to a larger value."))
}
}
test("SPARK-30886 Deprecate two-parameter TRIM/LTRIM/RTRIM") {
Seq("trim", "ltrim", "rtrim").foreach { f =>
val logAppender = new LogAppender("deprecated two-parameter TRIM/LTRIM/RTRIM functions")
def check(count: Int): Unit = {
val message = "Two-parameter TRIM/LTRIM/RTRIM function signatures are deprecated."
assert(logAppender.loggingEvents.size == count)
assert(logAppender.loggingEvents.exists(
e => e.getLevel == Level.WARN &&
e.getRenderedMessage.contains(message)))
}
withLogAppender(logAppender) {
val testAnalyzer1 = new Analyzer(
new SessionCatalog(new InMemoryCatalog, FunctionRegistry.builtin))
val plan1 = testRelation2.select(
UnresolvedFunction(f, $"a" :: Nil, isDistinct = false))
testAnalyzer1.execute(plan1)
        // The one-parameter form is not deprecated.
assert(logAppender.loggingEvents.isEmpty)
val plan2 = testRelation2.select(
UnresolvedFunction(f, $"a" :: $"b" :: Nil, isDistinct = false))
testAnalyzer1.execute(plan2)
// Deprecation warning is printed out once.
check(1)
val plan3 = testRelation2.select(
UnresolvedFunction(f, $"b" :: $"a" :: Nil, isDistinct = false))
testAnalyzer1.execute(plan3)
// There is no change in the log.
check(1)
// New analyzer from new SessionState
val testAnalyzer2 = new Analyzer(
new SessionCatalog(new InMemoryCatalog, FunctionRegistry.builtin))
val plan4 = testRelation2.select(
UnresolvedFunction(f, $"c" :: $"d" :: Nil, isDistinct = false))
testAnalyzer2.execute(plan4)
// Additional deprecation warning from new analyzer
check(2)
val plan5 = testRelation2.select(
UnresolvedFunction(f, $"c" :: $"d" :: Nil, isDistinct = false))
testAnalyzer2.execute(plan5)
// There is no change in the log.
check(2)
}
}
}
test("SPARK-32131: Fix wrong column index when we have more than two columns" +
" during union and set operations" ) {
val firstTable = LocalRelation(
AttributeReference("a", StringType)(),
AttributeReference("b", DoubleType)(),
AttributeReference("c", IntegerType)(),
AttributeReference("d", FloatType)())
val secondTable = LocalRelation(
AttributeReference("a", StringType)(),
AttributeReference("b", TimestampType)(),
AttributeReference("c", IntegerType)(),
AttributeReference("d", FloatType)())
val thirdTable = LocalRelation(
AttributeReference("a", StringType)(),
AttributeReference("b", DoubleType)(),
AttributeReference("c", TimestampType)(),
AttributeReference("d", FloatType)())
val fourthTable = LocalRelation(
AttributeReference("a", StringType)(),
AttributeReference("b", DoubleType)(),
AttributeReference("c", IntegerType)(),
AttributeReference("d", TimestampType)())
val r1 = Union(firstTable, secondTable)
val r2 = Union(firstTable, thirdTable)
val r3 = Union(firstTable, fourthTable)
val r4 = Except(firstTable, secondTable, isAll = false)
val r5 = Intersect(firstTable, secondTable, isAll = false)
assertAnalysisError(r1,
Seq("Union can only be performed on tables with the compatible column types. " +
"timestamp <> double at the second column of the second table"))
assertAnalysisError(r2,
Seq("Union can only be performed on tables with the compatible column types. " +
"timestamp <> int at the third column of the second table"))
assertAnalysisError(r3,
Seq("Union can only be performed on tables with the compatible column types. " +
"timestamp <> float at the 4th column of the second table"))
assertAnalysisError(r4,
Seq("Except can only be performed on tables with the compatible column types. " +
"timestamp <> double at the second column of the second table"))
assertAnalysisError(r5,
Seq("Intersect can only be performed on tables with the compatible column types. " +
"timestamp <> double at the second column of the second table"))
}
test("SPARK-31975: Throw user facing error when use WindowFunction directly") {
assertAnalysisError(testRelation2.select(RowNumber()),
Seq("Window function row_number() requires an OVER clause."))
assertAnalysisError(testRelation2.select(Sum(RowNumber())),
Seq("Window function row_number() requires an OVER clause."))
assertAnalysisError(testRelation2.select(RowNumber() + 1),
Seq("Window function row_number() requires an OVER clause."))
}
test("SPARK-32237: Hint in CTE") {
val plan = With(
Project(
Seq(UnresolvedAttribute("cte.a")),
UnresolvedRelation(TableIdentifier("cte"))
),
Seq(
(
"cte",
SubqueryAlias(
AliasIdentifier("cte"),
UnresolvedHint(
"REPARTITION",
Seq(Literal(3)),
Project(testRelation.output, testRelation)
)
)
)
)
)
assertAnalysisSuccess(plan)
}
test("SPARK-33197: Make sure changes to ANALYZER_MAX_ITERATIONS take effect at runtime") {
    // RuleExecutor only throws an exception or logs a warning when the rule is supposed
    // to run more than once.
val maxIterations = 2
val maxIterationsEnough = 5
withSQLConf(SQLConf.ANALYZER_MAX_ITERATIONS.key -> maxIterations.toString) {
val testAnalyzer = new Analyzer(
new SessionCatalog(new InMemoryCatalog, FunctionRegistry.builtin))
val plan = testRelation2.select(
$"a" / Literal(2) as "div1",
$"a" / $"b" as "div2",
$"a" / $"c" as "div3",
$"a" / $"d" as "div4",
$"e" / $"e" as "div5")
val message1 = intercept[RuntimeException] {
testAnalyzer.execute(plan)
}.getMessage
assert(message1.startsWith(s"Max iterations ($maxIterations) reached for batch Resolution, " +
s"please set '${SQLConf.ANALYZER_MAX_ITERATIONS.key}' to a larger value."))
withSQLConf(SQLConf.ANALYZER_MAX_ITERATIONS.key -> maxIterationsEnough.toString) {
try {
testAnalyzer.execute(plan)
} catch {
case ex: AnalysisException
if ex.getMessage.contains(SQLConf.ANALYZER_MAX_ITERATIONS.key) =>
fail("analyzer.execute should not reach max iterations.")
}
}
val message2 = intercept[RuntimeException] {
testAnalyzer.execute(plan)
}.getMessage
assert(message2.startsWith(s"Max iterations ($maxIterations) reached for batch Resolution, " +
s"please set '${SQLConf.ANALYZER_MAX_ITERATIONS.key}' to a larger value."))
}
}
test("SPARK-33733: PullOutNondeterministic should check and collect deterministic field") {
val reflect =
CallMethodViaReflection(Seq("java.lang.Math", "abs", testRelation.output.head))
val udf = ScalaUDF(
(s: String) => s,
StringType,
Literal.create(null, StringType) :: Nil,
Option(ExpressionEncoder[String]().resolveAndBind()) :: Nil,
udfDeterministic = false)
Seq(reflect, udf).foreach { e: Expression =>
val plan = Sort(Seq(e.asc), false, testRelation)
val projected = Alias(e, "_nondeterministic")()
val expect =
Project(testRelation.output,
Sort(Seq(projected.toAttribute.asc), false,
Project(testRelation.output :+ projected,
testRelation)))
checkAnalysis(plan, expect)
}
}
test("SPARK-33857: Unify the default seed of random functions") {
Seq(new Rand(), new Randn(), Shuffle(Literal(Array(1))), Uuid()).foreach { r =>
assert(r.seedExpression == UnresolvedSeed)
val p = getAnalyzer.execute(Project(Seq(r.as("r")), testRelation))
assert(
p.asInstanceOf[Project].projectList.head.asInstanceOf[Alias]
.child.asInstanceOf[ExpressionWithRandomSeed]
.seedExpression.isInstanceOf[Literal]
)
}
}
test("SPARK-22748: Analyze __grouping__id as a literal function") {
assertAnalysisSuccess(parsePlan(
"""
|SELECT grouping__id FROM (
| SELECT grouping__id FROM (
| SELECT a, b, count(1), grouping__id FROM TaBlE2
| GROUP BY a, b WITH ROLLUP
| )
|)
""".stripMargin), false)
assertAnalysisSuccess(parsePlan(
"""
|SELECT grouping__id FROM (
| SELECT a, b, count(1), grouping__id FROM TaBlE2
| GROUP BY a, b WITH CUBE
|)
""".stripMargin), false)
assertAnalysisSuccess(parsePlan(
"""
|SELECT grouping__id FROM (
| SELECT a, b, count(1), grouping__id FROM TaBlE2
| GROUP BY a, b GROUPING SETS ((a, b), ())
|)
""".stripMargin), false)
assertAnalysisSuccess(parsePlan(
"""
|SELECT a, b, count(1) FROM TaBlE2
| GROUP BY CUBE(a, b) HAVING grouping__id > 0
""".stripMargin), false)
assertAnalysisSuccess(parsePlan(
"""
|SELECT * FROM (
| SELECT a, b, count(1), grouping__id FROM TaBlE2
| GROUP BY a, b GROUPING SETS ((a, b), ())
|) WHERE grouping__id > 0
""".stripMargin), false)
assertAnalysisSuccess(parsePlan(
"""
|SELECT * FROM (
| SELECT a, b, count(1), grouping__id FROM TaBlE2
| GROUP BY a, b GROUPING SETS ((a, b), ())
|) ORDER BY grouping__id > 0
""".stripMargin), false)
assertAnalysisSuccess(parsePlan(
"""
|SELECT a, b, count(1) FROM TaBlE2
| GROUP BY a, b GROUPING SETS ((a, b), ())
| ORDER BY grouping__id > 0
""".stripMargin), false)
assertAnalysisError(parsePlan(
"""
|SELECT grouping__id FROM (
| SELECT a, b, count(1), grouping__id FROM TaBlE2
| GROUP BY a, b
|)
""".stripMargin),
Seq("grouping_id() can only be used with GroupingSets/Cube/Rollup"),
false)
}
}
| cloud-fan/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisSuite.scala | Scala | apache-2.0 | 43,542 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras
import com.intel.analytics.bigdl.nn.Maxout
import com.intel.analytics.bigdl.tensor.Tensor
class MaxoutSpec extends KerasBaseSpec {
"Maxout" should "generate corrent result when batchsize == 1" in {
val inputSize = 2
val outputSize = 4
val maxoutNumber = 3
val batchSize = 1
val sigmoidCode =
s"""
|input_tensor = Input(shape=[${inputSize}])
|input = np.random.uniform(0, 1, [${batchSize}, ${inputSize}])
|output_tensor = MaxoutDense(output_dim=${outputSize}, input_dim=${inputSize},
|nb_feature=${maxoutNumber})(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val maxout = Maxout[Float](inputSize, outputSize, maxoutNumber)
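    // The weight converter below reshapes the Keras MaxoutDense kernel, of shape
    // [nb_feature, input_dim, output_dim] (see the commented-out w1 in the second test),
    // into the single flat [inputSize, maxoutNumber * outputSize] matrix expected by
    // BigDL's Maxout: each feature slice is transposed and copied block by block into
    // the flat weight storage, and the bias (when present) is passed through unchanged.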
val wc = (data: Array[Tensor[Float]]) => {
val out = new Array[Tensor[Float]](data.length)
out(0) = Tensor(inputSize, maxoutNumber * outputSize)
val weight = out.head.storage().array()
var index = 0
for (i <- 1 to maxoutNumber) {
val sliceW = data(0).select(1, i).t.clone().storage().array()
System.arraycopy(sliceW, 0, weight, index, sliceW.size)
index += sliceW.size
}
if (data.length > 1) {
out(1) = data(1)
}
out
}
checkOutputAndGrad(maxout, sigmoidCode, weightConverter = wc)
}
"Maxout" should "generate corrent result when batchsize != 1" in {
val inputSize = 5
val outputSize = 4
val maxoutNumber = 3
val batchSize = 4
val sigmoidCode =
s"""
|#w1 = np.array([[[1.0, 2.0, 3.0, 4.0],
|# [5, 6, 7, 8.0]],
|# [[-1, -2, -3, -4],
|# [-5, -6, -7, -8]],
|# [[9, 10, 11, 12],
|# [-9, -10, -11, -12]]])
|#b = np.array([[ 0.0, 0.0, 0.0, 0.0],
|# [ 0.0, 0.0, 0.0, 0.0],
|# [ 0.0, 0.0, 0.0, 0.0]])
|# w = [w1, b]
|
|input_tensor = Input(shape=[${inputSize}])
|input = np.random.uniform(0, 1, [${batchSize}, ${inputSize}])
|#output_tensor=MaxoutDense(output_dim=4,input_dim=2,nb_feature=3,weights=w)(input_tensor)
|output_tensor = MaxoutDense(output_dim=${outputSize}, input_dim=${inputSize},
|nb_feature=${maxoutNumber})(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val maxout = Maxout[Float](inputSize, outputSize, maxoutNumber)
val wc = (data: Array[Tensor[Float]]) => {
val out = new Array[Tensor[Float]](data.length)
out(0) = Tensor(inputSize, maxoutNumber * outputSize)
val weight = out.head.storage().array()
var index = 0
for (i <- 1 to maxoutNumber) {
val sliceW = data(0).select(1, i).t.clone().storage().array()
System.arraycopy(sliceW, 0, weight, index, sliceW.size)
index += sliceW.size
}
if (data.length > 1) {
out(1) = data(1)
}
out
}
checkOutputAndGrad(maxout, sigmoidCode, weightConverter = wc)
}
}
| qiuxin2012/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/nn/MaxoutSpec.scala | Scala | apache-2.0 | 3,709 |
package com.agecomp.dts
import com.agecomp.Environment
import com.agecomp.AgentRef
import com.agecomp.InputComponent
import scala.concurrent.duration._
import scala.language.postfixOps
import akka.actor.PoisonPill
import javafx.stage.Stage
class FixedTimeEnvironment(val fps: Double, stage: Stage) extends Environment {
import context._
def sendPerceptions() = {}
var step = 0
def timestep() {
var period = 1000/fps
if (step == 0) {
period = 5000
}
context.system.scheduler.scheduleOnce(period millis, self, "Tick")
manage
processors.foreach(p => p.run(step))
step += 1
val agents = scene.container("com.agecomp.AgentRef")
sendPerceptions()
}
def cache(m: InputComponent, name: String) {
scene.addComponent(name.toInt, m)
}
def receive = {
case "Tick" => timestep()
case m: InputComponent => cache(m, sender.path.name)
case _ => println("Unrecognized message")
}
}
| abdielbrilhante/agecomp | src/main/scala/com/agecomp/dts/FixedTimeEnvironment.scala | Scala | mit | 950 |
/**
* (c) Copyright 2013 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.schema.shell.util
import java.io.File
import java.io.PrintStream
import scala.collection.mutable.Buffer
import scala.sys.process._ // Include the process builder/execution DSL.
import org.kiji.annotations.ApiAudience
import org.kiji.annotations.ApiStability
import org.kiji.annotations.Inheritance
import org.kiji.schema.shell.Environment
import org.kiji.schema.shell.JarLocation
import org.kiji.schema.shell.LocalJarFile
/**
* Utility for DDLCommands that need to run code in a subprocess. This trait
* defines one method `forkJvm()` that allows the caller to invoke
* an arbitrary main() method in a Java subprocess.
*/
@ApiAudience.Framework
@ApiStability.Experimental
@Inheritance.Extensible
trait ForkJvm {
/**
* Spawns a Java subprocess to run the specified main method with an argv array.
*
* <p>This method spawns a Java process with the following specification:</p>
* <ul>
* <li>The Java process will be invoked from the `bin/java` program in
* the `java.home` system property.</li>
* <li>The current operating system environment variables for this process will be sent
* to the subprocess.</li>
* <li>stdout and stderr will be forwarded from the current process' handles, although you
* may override this and capture stdout if you specify the stdout argument to this method.
* stdin is suppressed in the subprocess.</li>
* <li>The subprocess' main method will be specified by the mainMethod parameter.</li>
* <li>The classpath will include all libJars from the specified environment, followed
* by the entries in this process' java.class.path system property.</li>
* <li>No specific Java system properties are specified by default. System properties
* may be set by specifying them using `"-Dprop=val"` entries in the jvmArgs
* argument.</li>
* </ul>
*
* <p>This method blocks until the subprocess terminates, and then returns its exit status.</p>
*
* @param env the current Kiji shell environment.
* @param mainClass the name of the main Java class to run in the subprocess.
* @param jvmArgs any arguments to specify to the JVM itself (e.g., system properties).
* The `-classpath` argument will be provided by this method, but you may
* include other arguments here if you wish.
* @param userArgs the list of argv elements to forward to the main class.
* @param stdout the PrintStream to use for stdout (e.g., System.out).
* @return the exit status from the child process. Per POSIX, 0 is success, nonzero is failure.
*/
def forkJvm(env: Environment, mainClass: String, jvmArgs: List[String],
userArgs: List[String], stdout: PrintStream = System.out): Int = {
val argv: Buffer[String] = Buffer() // Construct the argv to execute in this buffer.
// Start with the path to $JAVA_HOME/bin/java.
val javaHome: String = System.getProperty("java.home")
val dirSep: String = System.getProperty("file.separator")
val javaBin: String = javaHome + dirSep + "bin" + dirSep + "java"
argv.append(javaBin)
// If there is a boot classpath (e.g., including scala), add it to argv here.
val bootClassPath: String = Option(System.getProperty("sun.boot.class.path")).getOrElse("")
if (!bootClassPath.isEmpty()) {
// Using '/a:', specify that the elements listed here are to be appended to the JVM's
// internal bootclasspath. (See 'java -X -help')
argv.append("-Xbootclasspath/a:" + bootClassPath)
}
// Child JVM's classpath contains the libJars, followed by our own classpath.
val sysClasspath: String = System.getProperty("java.class.path")
val libJarClasspath: String = libJarsToClasspath(env.libJars) // Terminated by ':' or empty.
val childClasspath: String = libJarClasspath + sysClasspath
argv.append("-classpath")
argv.append(childClasspath)
// Add user jvm args, then the main class to execute, followed by the user args.
argv.appendAll(jvmArgs)
argv.append(mainClass)
argv.appendAll(userArgs)
// At this point, the buffer should contain something like
// $JAVA_HOME/bin/java -Xbootclasspath/a:...scala-rt.jar -classpath foo.jar:bar.jar \
// jvmarg1 jvmarg2 jvmarg3... MyMain arg1 arg2 arg3...
// Redirect the subprocess' stdout and stderr to our own, unless the caller of this method
// specified a non-default value for the "stdout" argument.
val outputLogger: ProcessLogger = ProcessLogger(
line => stdout.println(line),
line => System.err.println(line))
// The ProcessBuilder object in the scala.sys.process package lets you run
// someSeq! to block and return the exit code. The current OS environment variables
    // are exported to the subprocess, and the I/O streams are redirected through the outputLogger.
return argv ! outputLogger
}
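
  // A minimal usage sketch (hypothetical class and argument names; only the forkJvm()
  // signature above is assumed). A DDLCommand mixing in this trait could launch an
  // external driver roughly like this:
  //
  //   val status = forkJvm(env, "org.example.MyToolMain",
  //     jvmArgs  = List("-Dorg.example.conf=/etc/mytool.conf"),
  //     userArgs = List("--table", "users"))
  //   if (status != 0) { /* report the failure to the shell user */ }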
/**
* Process a set of JarLocation objects and reify this to a string suitable for use in
* a Java `-classpath` argument.
*
* @param libJars is list of JarLocation objects to reify, i.e., from the current Environment.
* @return a colon-separated list of paths to local jar files to load on the classpath.
* If non-empty, this list will terminate with a ':' character.
*/
private[shell] def libJarsToClasspath(libJars: List[JarLocation]): String = {
val localPaths: List[String] = libJars.map({ libJar: JarLocation =>
libJar match {
case LocalJarFile(path) => new File(path).getAbsolutePath()
}
})
// Delimiter between classpath entries, ":" on most systems.
val pathSep: String = System.getProperty("path.separator")
// Fold over the individual elements, concatenating them, delimited by ':' characters.
// Use foldRight so it terminates in a ":" rather than starts with one.
val finalClasspath: String = localPaths.foldRight("")({ (classpath: String, path: String) =>
classpath + pathSep + path
})
return finalClasspath
}
}
| kijiproject/kiji-schema-shell | src/main/scala/org/kiji/schema/shell/util/ForkJvm.scala | Scala | apache-2.0 | 6,741 |
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.reactBootstrap
import scala.scalajs.js
import scala.scalajs.js._
import com.glipka.easyReactJS.react._
import ReactBootstrap._
@js.native trait InputGroupButtonProps extends HTMLProps[InputGroupButton] with js.Any{ }
| glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/reactBootstrap/InputGroupButton.scala | Scala | apache-2.0 | 853 |
import scala.reflect.runtime.universe._
import scala.tools.reflect.Eval
object Test extends dotty.runtime.LegacyApp {
reify {
val RegexParser = """(.*) \\d+([A-Z]+) \\| (.*) \\|.*""".r
val RegexParser(name, shortname, value) = "American Dollar 1USD | 2,8567 | sometext"
println("name = %s, shortname = %s, value = %s".format(name, shortname, value))
}.eval
}
| yusuke2255/dotty | tests/disabled/macro/run/t5273_2b_oldpatmat.scala | Scala | bsd-3-clause | 373 |
package service
import util.Directory._
import util.ControlUtil._
import SystemSettingsService._
import javax.servlet.http.HttpServletRequest
trait SystemSettingsService {
def baseUrl(implicit request: HttpServletRequest): String = loadSystemSettings().baseUrl(request)
def saveSystemSettings(settings: SystemSettings): Unit = {
defining(new java.util.Properties()){ props =>
settings.baseUrl.foreach(x => props.setProperty(BaseURL, x.replaceFirst("/\\Z", "")))
props.setProperty(AllowAccountRegistration, settings.allowAccountRegistration.toString)
props.setProperty(Gravatar, settings.gravatar.toString)
props.setProperty(Notification, settings.notification.toString)
props.setProperty(Ssh, settings.ssh.toString)
settings.sshPort.foreach(x => props.setProperty(SshPort, x.toString))
if(settings.notification) {
settings.smtp.foreach { smtp =>
props.setProperty(SmtpHost, smtp.host)
smtp.port.foreach(x => props.setProperty(SmtpPort, x.toString))
smtp.user.foreach(props.setProperty(SmtpUser, _))
smtp.password.foreach(props.setProperty(SmtpPassword, _))
smtp.ssl.foreach(x => props.setProperty(SmtpSsl, x.toString))
smtp.fromAddress.foreach(props.setProperty(SmtpFromAddress, _))
smtp.fromName.foreach(props.setProperty(SmtpFromName, _))
}
}
props.setProperty(LdapAuthentication, settings.ldapAuthentication.toString)
if(settings.ldapAuthentication){
        settings.ldap.foreach { ldap =>
props.setProperty(LdapHost, ldap.host)
ldap.port.foreach(x => props.setProperty(LdapPort, x.toString))
ldap.bindDN.foreach(x => props.setProperty(LdapBindDN, x))
ldap.bindPassword.foreach(x => props.setProperty(LdapBindPassword, x))
props.setProperty(LdapBaseDN, ldap.baseDN)
props.setProperty(LdapUserNameAttribute, ldap.userNameAttribute)
ldap.additionalFilterCondition.foreach(x => props.setProperty(LdapAdditionalFilterCondition, x))
ldap.fullNameAttribute.foreach(x => props.setProperty(LdapFullNameAttribute, x))
ldap.mailAttribute.foreach(x => props.setProperty(LdapMailAddressAttribute, x.toString))
ldap.tls.foreach(x => props.setProperty(LdapTls, x.toString))
ldap.keystore.foreach(x => props.setProperty(LdapKeystore, x))
ldap.controlledFilterCondition.foreach(x => props.setProperty(LdapControlledFilterCondition, x))
}
}
using(new java.io.FileOutputStream(GitBucketConf)){ out =>
props.store(out, null)
}
}
}
def loadSystemSettings(): SystemSettings = {
defining(new java.util.Properties()){ props =>
if(GitBucketConf.exists){
using(new java.io.FileInputStream(GitBucketConf)){ in =>
props.load(in)
}
}
SystemSettings(
getOptionValue[String](props, BaseURL, None).map(x => x.replaceFirst("/\\Z", "")),
getValue(props, AllowAccountRegistration, false),
getValue(props, Gravatar, true),
getValue(props, Notification, false),
getValue(props, Ssh, false),
getOptionValue(props, SshPort, Some(DefaultSshPort)),
if(getValue(props, Notification, false)){
Some(Smtp(
getValue(props, SmtpHost, ""),
getOptionValue(props, SmtpPort, Some(DefaultSmtpPort)),
getOptionValue(props, SmtpUser, None),
getOptionValue(props, SmtpPassword, None),
getOptionValue[Boolean](props, SmtpSsl, None),
getOptionValue(props, SmtpFromAddress, None),
getOptionValue(props, SmtpFromName, None)))
} else {
None
},
getValue(props, LdapAuthentication, false),
if(getValue(props, LdapAuthentication, false)){
Some(Ldap(
getValue(props, LdapHost, ""),
getOptionValue(props, LdapPort, Some(DefaultLdapPort)),
getOptionValue(props, LdapBindDN, None),
getOptionValue(props, LdapBindPassword, None),
getValue(props, LdapBaseDN, ""),
getValue(props, LdapUserNameAttribute, ""),
getOptionValue(props, LdapAdditionalFilterCondition, None),
getOptionValue(props, LdapFullNameAttribute, None),
getOptionValue(props, LdapMailAddressAttribute, None),
getOptionValue[Boolean](props, LdapTls, None),
getOptionValue(props, LdapKeystore, None),
getOptionValue(props, LdapControlledFilterCondition, None)))
} else {
None
}
)
}
}
}
object SystemSettingsService {
import scala.reflect.ClassTag
case class SystemSettings(
baseUrl: Option[String],
allowAccountRegistration: Boolean,
gravatar: Boolean,
notification: Boolean,
ssh: Boolean,
sshPort: Option[Int],
smtp: Option[Smtp],
ldapAuthentication: Boolean,
ldap: Option[Ldap]){
def baseUrl(request: HttpServletRequest): String = baseUrl.getOrElse {
defining(request.getRequestURL.toString){ url =>
url.substring(0, url.length - (request.getRequestURI.length - request.getContextPath.length))
}
}.stripSuffix("/")
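
    // Worked example with hypothetical values (only the servlet API calls above are assumed):
    // getRequestURL = "http://example.com:8080/gitbucket/root/repo/wiki",
    // getRequestURI = "/gitbucket/root/repo/wiki", getContextPath = "/gitbucket";
    // the part of the URI beyond the context path ("/root/repo/wiki") is cut off,
    // yielding "http://example.com:8080/gitbucket".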
}
case class Ldap(
host: String,
port: Option[Int],
bindDN: Option[String],
bindPassword: Option[String],
baseDN: String,
userNameAttribute: String,
additionalFilterCondition: Option[String],
fullNameAttribute: Option[String],
mailAttribute: Option[String],
tls: Option[Boolean],
keystore: Option[String],
controlledFilterCondition: Option[String])
case class Smtp(
host: String,
port: Option[Int],
user: Option[String],
password: Option[String],
ssl: Option[Boolean],
fromAddress: Option[String],
fromName: Option[String])
val DefaultSshPort = 29418
val DefaultSmtpPort = 25
val DefaultLdapPort = 389
private val BaseURL = "base_url"
private val AllowAccountRegistration = "allow_account_registration"
private val Gravatar = "gravatar"
private val Notification = "notification"
private val Ssh = "ssh"
private val SshPort = "ssh.port"
private val SmtpHost = "smtp.host"
private val SmtpPort = "smtp.port"
private val SmtpUser = "smtp.user"
private val SmtpPassword = "smtp.password"
private val SmtpSsl = "smtp.ssl"
private val SmtpFromAddress = "smtp.from_address"
private val SmtpFromName = "smtp.from_name"
private val LdapAuthentication = "ldap_authentication"
private val LdapHost = "ldap.host"
private val LdapPort = "ldap.port"
private val LdapBindDN = "ldap.bindDN"
private val LdapBindPassword = "ldap.bind_password"
private val LdapBaseDN = "ldap.baseDN"
private val LdapUserNameAttribute = "ldap.username_attribute"
private val LdapAdditionalFilterCondition = "ldap.additional_filter_condition"
private val LdapFullNameAttribute = "ldap.fullname_attribute"
private val LdapMailAddressAttribute = "ldap.mail_attribute"
private val LdapTls = "ldap.tls"
private val LdapKeystore = "ldap.keystore"
private val LdapControlledFilterCondition = "ldap.controlled_filter_condition"
private def getValue[A: ClassTag](props: java.util.Properties, key: String, default: A): A =
defining(props.getProperty(key)){ value =>
if(value == null || value.isEmpty) default
else convertType(value).asInstanceOf[A]
}
private def getOptionValue[A: ClassTag](props: java.util.Properties, key: String, default: Option[A]): Option[A] =
defining(props.getProperty(key)){ value =>
if(value == null || value.isEmpty) default
else Some(convertType(value)).asInstanceOf[Option[A]]
}
private def convertType[A: ClassTag](value: String) =
defining(implicitly[ClassTag[A]].runtimeClass){ c =>
if(c == classOf[Boolean]) value.toBoolean
else if(c == classOf[Int]) value.toInt
else value
}
// TODO temporary flag
val enablePluginSystem = Option(System.getProperty("enable.plugin")).getOrElse("false").toBoolean
}
| tily/gitbucket2 | src/main/scala/service/SystemSettingsService.scala | Scala | apache-2.0 | 8,051 |
package org.litis.relief
import scala.math._
import scala.io.BufferedSource
import org.sofa.math.{Point3, Triangle, ConstTriangle, Rgba}
import java.io.{File, InputStream, FileInputStream, FileOutputStream, PrintStream, IOException}
import javax.imageio.ImageIO
/** Techniques for resizing the heightmap. */
object ResizeMethod extends Enumeration {
val Lanczos2 = Value
val Lanczos3 = Value
val Lanczos4 = Value
val Hq3x = Value
val Hq4x = Value
val Unknown = Value
type ResizeMethod = Value
def fromString(s:String):ResizeMethod = s.toLowerCase match {
case "lanczos2" => Lanczos2
case "lanczos3" => Lanczos3
case "lanczos4" => Lanczos4
case "hq3x" => Hq3x
case "hq4x" => Hq4x
case _ => Unknown
}
}
/** HeightMap companion object allowing to create height maps from CSV and PNG files. */
object HeightMap {
// Parsing
final val NCols = """ncols;([0-9]+);+""".r
	final val NColsSp = """ncols\s+([0-9]+)\s*""".r
	final val NRows = """nrows(;|\s+)([0-9]+)(;+|\s*)""".r
	final val Yll = """yll(corner|center)(;|\s+)([0-9]+[\.,]?[0-9]*)(;+|\s*)""".r
	final val Xll = """xll(corner|center)(;|\s+)([0-9]+[\.,]?[0-9]*)(;+|\s*)""".r
	final val CellSize = """cellsize(;|\s+)([0-9]+[\.,]?[0-9]*)(;+|\s*)""".r
	final val NoData = """(NODATA|nodata)_value(;|\s+)(-?[0-9]+)(;+|\s*)""".r
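	// For reference, the headers these patterns accept look like the following
	// (hypothetical values), either semicolon separated or whitespace separated:
	//   ncols;400;            ncols        400
	//   nrows;300;            nrows        300
	//   xllcorner;425000.5;   xllcorner    425000.5
	//   yllcorner;5443000.5;  yllcorner    5443000.5
	//   cellsize;1.0;         cellsize     1.0
	//   NODATA_value;-9999;   NODATA_value -9999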
// HeightMap Creation from files
def apply(fileName:String, startx:Int, endx:Int, starty:Int, endy:Int, scaleFactor:Double, yFactor:Double, iMin:Double, iMax:Double, cellSize:Double, greyData:Boolean, clamp:(Double,Double)):HeightMap = {
if(fileName.endsWith(".csv") || fileName.endsWith(".asc")) {
readFileCSV(fileName, startx, endx, starty, endy, scaleFactor, yFactor, cellSize, clamp)
} else if(fileName.endsWith(".png")) {
readFileImage(fileName, startx, endx, starty, endy, scaleFactor, yFactor, iMin, iMax, cellSize, greyData, clamp)
} else {
throw new RuntimeException("only '.csv', '.asc' and '.png' files are accepted")
}
}
	/** Creates a [[HeightMap]] from a CSV or ASC file. */
def readFileCSV(fileName:String, startx:Int, endx:Int, starty:Int, endy:Int, scaleFactor:Double, yFactor:Double, cellSize:Double, clamp:(Double,Double)):HeightMap = {
var heightMap:HeightMap = null
val src = new BufferedSource(new FileInputStream(fileName))
var ncols = 0
var nrows = 0
var nodata = 0.0
var cellsize = cellSize
var curRow = 0
var sx = startx
var ex = endx
var sy = starty
var ey = endy
var spaceSeparated = false
var latm = 0.0
var lonm = 0.0
src.getLines.foreach { _ match {
case NCols(cols) => { ncols = cols.toInt; if(sx < 0) sx = 0; if(ex < 0 || ex > ncols) ex = ncols; spaceSeparated = false }
case NColsSp(cols) => { ncols = cols.toInt; if(sx < 0) sx = 0; if(ex < 0 || ex > ncols) ex = ncols; spaceSeparated = true }
case NRows(_,rows,_) => { nrows = rows.toInt; if(sy < 0) sy = 0; if(ey < 0 || ey > nrows) ey = nrows }
case Xll(c,_,xll,_) => { latm = xll.toDouble /* easting coordinate. */ }
case Yll(c,_,yll,_) => { lonm = yll.toDouble /* northing coordinate. */ }
case CellSize(_,size,_) => { if(cellSize == 1.0) cellsize = size.toDouble }
case NoData(a,b,value,c) => { nodata = value.toDouble }
case line => {
				// The format ensures this information will have been read before?
if(heightMap eq null) {
latm = 49.4899
lonm = 0.1099
printf("[(%f %f) -> %f lat %f lon -> %f lat %f lon]%n", latm, lonm, y2lat_m(latm), x2lon_m(lonm), lat2y_m(latm), lon2x_m(lonm))
heightMap = new HeightMap(ex-sx, ey-sy, nodata, cellsize, scaleFactor, yFactor, clamp)
print("[%d x %d -> %d x %d (spaces=%b)]".format(ncols, nrows, ex-sx, ey-sy, spaceSeparated))
heightMap.translate(sx, sy)
//printf("sx=%d ex=%d sy=%d ey=%d ncols=%d nrows=%d size=%f nodata=%f%n", sx, ex, sy, ey, ncols, nrows, cellSize, nodata)
}
if(curRow % 100 == 0) print("[row %d]".format(curRow))
if(curRow >= sy && curRow < ey) {
val values = if(spaceSeparated)
						line.trim.split("\s+").slice(sx,ex).map { _.replace(",", ".").toDouble }
else line.trim.split(";").slice(sx,ex).map { _.replace(",", ".").toDouble }
heightMap.setLine(curRow-sy, values)
}
curRow += 1
}
}}
heightMap
}
def deg2rad(d:Double) = (((d)*Pi)/180.0)
def rad2deg(d:Double) = (((d)*180.0)/Pi)
val earth_radius = 6378137
	// /* The following functions take or return their results in degrees */
// double y2lat_d(double y) { return rad2deg(2 * atan(exp( deg2rad(y) ) ) - M_PI/2); }
// double x2lon_d(double x) { return x; }
// double lat2y_d(double lat) { return rad2deg(log(tan(M_PI/4+ deg2rad(lat)/2))); }
// double lon2x_d(double lon) { return lon; }
	/* The following functions take or return their results in something close to meters, along the equator */
def y2lat_m(y:Double) = rad2deg(2 * atan(exp( (y / earth_radius ) )) - Pi/2)
def x2lon_m(x:Double) = rad2deg(x / earth_radius)
def lat2y_m(lat:Double) = earth_radius * log(tan(Pi/4+ deg2rad(lat)/2))
def lon2x_m(lon:Double) = deg2rad(lon) * earth_radius
//def y2lat(aY:Double) = Math.toDegrees(2* Math.atan(Math.exp(Math.toRadians(aY))) - Math.PI/2)
//def lat2y(aLat:Double) = Math.toDegrees(Math.log(Math.tan(Math.PI/4+Math.toRadians(aLat)/2)))
	/** Create a [[HeightMap]] from a PNG image. */
def readFileImage(fileName:String, startx:Int, endx:Int, starty:Int, endy:Int, scaleFactor:Double, yFactor:Double, iMin:Double, iMax:Double, cellSize:Double, greyData:Boolean, clamp:(Double,Double)):HeightMap = {
val image = ImageIO.read(new File(fileName))
var sx = startx
var ex = endx
var sy = starty
var ey = endy
print("[%d x %d -> %d x %d]".format(image.getWidth, image.getHeight, ex-sx, ey-sy))
if(sx < 0) sx = 0; if(ex < 0 || ex > image.getWidth) ex = image.getWidth
if(sy < 0) sy = 0; if(ey < 0 || ey > image.getHeight) ey = image.getHeight
var heightMap = new HeightMap(ex-sx, ey-sy, 0, cellSize, scaleFactor, yFactor, clamp)
var row = sy
while(row < ey) {
var col = sx
while(col < ex) {
if(greyData)
heightMap.setCell(col, row, pixelToValueFromGrey(image.getRGB(col, row), iMin, iMax))
else heightMap.setCell(col, row, pixelToValueFromHue(image.getRGB(col, row), iMin, iMax))
col += 1
}
if(row % 100 == 0) print("[row %d]".format(row))
row += 1
}
heightMap
}
	/** Convert an `rgb` value considered as grey into an elevation using the red component only.
* The resulting value is scaled between `iMin` and `iMax`. */
protected def pixelToValueFromGrey(rgb:Int, iMin:Double, iMax:Double):Double = {
val g = ((rgb >> 16) & 0xFF) // The red component...
val c = g / 255.0
iMin + (c * (iMax - iMin))
}
	/** Convert an `rgb` pixel into an elevation using the hue only (not the saturation, nor the value).
* The resulting hue is scaled between `iMin` and `iMax`. */
protected def pixelToValueFromHue(rgb:Int, iMin:Double, iMax:Double):Double = {
val r = ((rgb >> 16) & 0xFF)
val g = ((rgb >> 8) & 0xFF)
val b = ((rgb ) & 0xFF)
val (hue, saturation, value) = Rgba(r/255.0, g/255.0, b/255.0, 1).toHSV
var res = (1.0-(hue/(2*Pi)))
if(res == 1.0) res = 0.0 // Special case of white pixels = nodata
res = iMin + (res * (iMax - iMin))
res
}
}
/** A height map in the form of a cloud of points aligned as a grid.
  *
  * This makes it possible to:
* - read the point cloud from CSV (see companion object),
* - normalize it,
* - triangulate it,
* - save it to STL.
*/
class HeightMap(val cols:Int, val rows:Int, val nodata:Double, val cellSize:Double, val scaleFactor:Double=1.0, val yFactor:Double=1.0, val clamp:(Double, Double)) {
import ResizeMethod._
	/** When creating a volume during triangulation, adds a base of this height.
	  * This is set through `setVolume()`. */
protected var baseDepth = 1.0
/** The point cloud representing the height map. */
protected val data = Array.ofDim[Point3](rows, cols)
/** The output for the heightmap during triangulation. */
protected var output:HeightMapOutput = null
/** The X position of this heightmap in a global file, this is used as a translation. */
protected var startx = 0.0
/** The Y position of this heightmap in a global file, this is used as a translation. */
protected var starty = 0.0
/** Computed minimum value in the heightmap. */
protected var minValue = Double.MaxValue
/** Computed maximum value in the heightmap. */
protected var maxValue = Double.MinValue
if(clamp ne null) {
minValue = clamp._1
maxValue = clamp._2
}
/** Number of stored points. */
def pointCount:Int = data.size
/** Number of triangles that will be generated. Use `setVolume()` before calling
	  * this if you want the base triangles to be counted. */
def triangleCount:Int = surfaceTriangleCount + (if(baseDepth <= 0) 0 else baseTriangleCount)
/** Number of triangles per surface. */
def surfaceTriangleCount():Int = (rows-1) * (cols-1) * 2
/** Number of triangles on the base. */
def baseTriangleCount():Int = ((cols-1) * 6) + ((rows-1) * 6)
	/** Offset of the whole surface. This allows the whole generated model to be translated,
* if the model is a part of a larger one, so that the sub-model is at the correct
* coordinates inside the larger one. */
def offset:(Double,Double) = (startx, starty)
/** Scale factor for the whole generated model. */
def scale:Double = scaleFactor
/** If the heightmap represents a part of a larger map, the offset
	  * (`startx`, `starty`) allows the generated model to be placed inside the larger one. */
def translate(startx:Int, starty:Int) {
this.startx = startx
this.starty = starty
}
/** Add a base to the surface. If set to <= 0, do not produce a base. The base
	  * creates a volume instead of only a surface when triangulating. The height of the
	  * base is added to the full height of the surface.
	  * @param baseDepth the height of the base added to make a volume; the total height
	  *		is thus this base depth plus the max height of the point cloud.
	  *		If zero or negative, only the surface is created. */
def setVolume(baseDepth:Double) {
this.baseDepth = baseDepth
}
/** Fill a complete line of the heightmap at `row` with `values`. The
* values are scaled by `scaleFactor` and `yFactor`. */
def setLine(row:Int, line:Array[Double]) {
val n = min(cols, line.length)
var i = 0
while(i < n) {
setCell(i, row, line(i))
i += 1
}
}
/** Value of the cell at (`col`, `row`). */
def cell(col:Int, row:Int):Point3 = data(row)(col)
/** Height at (`col`, `row`). */
def height(col:Int, row:Int):Double = cell(col, row).y
/** Lowest height. */
def minHeight:Double = minValue
/** Highest height. */
def maxHeight:Double = maxValue
/** Set a cell at (`col`, `row`) in the heightmap with `value`. The `value` is
* scaled by `scaleFactor` and `yFactor`. */
def setCell(col:Int, row:Int, value:Double) {
var v = value * scaleFactor * yFactor
if(clamp ne null) {
if(value < clamp._1) v = clamp._1
if(value > clamp._2) v = clamp._2
}
if(value != nodata) {
if(v < minValue) minValue = v
if(v > maxValue) maxValue = v
}
data(row)(col) = Point3(/*X*/ (this.starty + row * cellSize) * scaleFactor,
/*Y*/ v,
/*Z*/ (this.startx + col * cellSize) * scaleFactor)
}
/** Normalize the point cloud by aligning nodata points to the minimum point. */
def normalize() {
var y = 0
// var avg = 0.0
while(y < rows) {
var x = 0
while(x < cols) {
val d = (data(y)(x)).y
if(d == nodata * scaleFactor * yFactor && d < minValue) {
(data(y)(x)).y = minValue
// print("[%f]".format(d))
}
// avg += (data(y)(x)).y
x += 1
}
y += 1
}
// avg /= (cols*rows)
// printf("<normalize min=%f max=%f avg=%f>%n", minValue, maxValue, avg)
}
/** Interpolate a new height-map with a different resolution. */
def resize(factor:Double, resizeMethod:ResizeMethod = Lanczos2):HeightMap = {
val colsTo = round(cols * factor).toInt
val rowsTo = round(rows * factor).toInt
val hmap = new HeightMap(colsTo, rowsTo, nodata, cellSize, scaleFactor, yFactor, clamp)
var row = 0
var col = 0
val interpolator = chooseInterpolator(resizeMethod)
while(row < rowsTo) {
col = 0
while(col < colsTo) {
hmap.setCell(col, row, interpolator(col, row, colsTo, rowsTo))
col += 1
}
row += 1
if(row % 100 == 0)
printf("[row %d]", row)
}
hmap
}
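
	// Usage sketch (hypothetical variable names): halve the resolution of a loaded map
	// with a Lanczos-3 kernel.
	//   val smaller = heightMap.resize(0.5, ResizeMethod.Lanczos3)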
private def chooseInterpolator(resizeMethod:ResizeMethod):(Int,Int,Int,Int)=>Double = resizeMethod match {
case Lanczos2 => interpolateLanczos(_, _, _, _, 2)
case Lanczos3 => interpolateLanczos(_, _, _, _, 3)
case Lanczos4 => interpolateLanczos(_, _, _, _, 4)
case Hq3x => throw new RuntimeException("HQ3x TODO")
case Hq4x => throw new RuntimeException("HQ4x TODO")
case _ => throw new RuntimeException("Unknown resize method")
}
private def interpolateLanczos(xTo:Int, yTo:Int, colsTo:Int, rowsTo:Int, a:Int = 2):Double = {
val xratio = cols.toDouble / colsTo
val yratio = rows.toDouble / rowsTo
val x = xTo * xratio
val y = yTo * yratio
val x_ = floor(x).toInt
val y_ = floor(y).toInt
var acc = 0.0
var i = 0 // along X
var j = y_ - a + 1 // along Y
var w = 0.0
while(j <= y_ + a) {
i = x_ - a + 1
while(i <= x_ + a) {
if(i >= 0 && i < cols && j >= 0 && j < rows) {
val l = lanczos(x - i, a) * lanczos(y - j, a)
acc += ((height(i, j) / scaleFactor) / yFactor) * l
w += l
}
i += 1
}
j += 1
}
if(w != 0)
acc / w // The lanczos coefficients do not always add to 1.
else acc
}
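
	// For reference, the kernel implemented below is the standard Lanczos window:
	// L(0) = 1, L(x) = a*sin(pi*x)*sin(pi*x/a) / (pi^2 * x^2) for 0 < |x| < a, and
	// L(x) = 0 for |x| >= a. The division by `w` in interpolateLanczos compensates
	// for the truncated window weights not summing to exactly 1.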
private def lanczos(x:Double, a:Double):Double = {
//private def lanczos(x:Double, a:Double):Double = if(x != 0) (a * sin(Pi * x) * sin(Pi * x / a) / (Pi*Pi * x*x)) else 1
if(x == 0) 1.0
		else if(x >= a || x <= -a) 0.0
else {
val pi_x = x * Pi
a * sin(pi_x) * sin(pi_x / a) / (pi_x * pi_x)
}
}
/** Triangulate the heightmap.
*
	  * This triangulates a surface from the point cloud, and adds a closed base
	  * with sides and a back so that the result is a volume, if `setVolume()` has
	  * been set. */
def triangulate() {
triangulateSurface
if(baseDepth > 0) {
triangulateSides
triangulateBack
}
}
/** Triangulate the surface of the heightmap. */
def triangulateSurface() {
// p0 p2
// +----+ CW
// | /|
// | / |
// | / |
// |/ |
// +----+
// p1 p3
var y = 0
while(y < rows-1) {
var x = 0
while(x < cols-1) {
val p0 = data(y)(x)
val p1 = data(y)(x+1)
val p2 = data(y+1)(x)
val p3 = data(y+1)(x+1)
triangle(ConstTriangle(p0, p2, p1))
triangle(ConstTriangle(p1, p2, p3))
x += 1
}
if((y % 100) == 0)
print("[row %d]".format(y))
y += 1
}
}
/** Triangulate the sides of the base. */
def triangulateSides() {
val base = minValue - (baseDepth * scaleFactor * yFactor)
var x = 0
print("[sides]")
// Front and back.
while(x < cols-1) {
var p0 = data(0)(x)
var p1 = Point3(p0.x, base, p0.z)
var p2 = data(0)(x+1)
var p3 = Point3(p2.x, base, p2.z)
triangle(ConstTriangle(p0, p2, p1))
triangle(ConstTriangle(p1, p2, p3))
p0 = data(rows-1)(x)
p2 = Point3(p0.x, base, p0.z)
p1 = data(rows-1)(x+1)
p3 = Point3(p1.x, base, p1.z)
triangle(ConstTriangle(p0, p2, p1))
triangle(ConstTriangle(p1, p2, p3))
x += 1
//if(triangles.size % 100 == 0) print("[%d]".format(triangles.size))
}
// Left and Right.
var y = 0
while(y < rows-1) {
var p0 = data(y)(0)
var p2 = Point3(p0.x, base, p0.z)
var p1 = data(y+1)(0)
var p3 = Point3(p1.x, base, p1.z)
triangle(ConstTriangle(p0, p2, p1))
triangle(ConstTriangle(p1, p2, p3))
p0 = data(y)(cols-1)
p1 = Point3(p0.x, base, p0.z)
p2 = data(y+1)(cols-1)
p3 = Point3(p2.x, base, p2.z)
triangle(ConstTriangle(p0, p2, p1))
triangle(ConstTriangle(p1, p2, p3))
y += 1
//if(triangles.size % 100 == 0) print("[%d]".format(triangles.size))
}
}
/** Triangulate the back of the base. */
def triangulateBack() {
val base = minValue - (baseDepth * scaleFactor * yFactor)
val center = Point3((starty + (rows/2))*scaleFactor, base, (startx + (cols/2))*scaleFactor)
var x = 0
print("[back]")
// Center toward front and back.
while(x < cols-1) {
var p0 = data(0)(x)
var p1 = data(0)(x+1)
triangle(ConstTriangle(Point3(p0.x, base, p0.z), Point3(p1.x, base, p1.z), center))
p0 = data(rows-1)(x)
p1 = data(rows-1)(x+1)
triangle(ConstTriangle(Point3(p1.x, base, p1.z), Point3(p0.x, base, p0.z), center))
x += 1
//if(triangles.size % 100 == 0) print("[%d]".format(triangles.size))
}
// Center toward left and right.
var y = 0
while(y < rows-1) {
var p0 = data(y)(0)
var p1 = data(y+1)(0)
triangle(ConstTriangle(Point3(p1.x, base, p1.z), Point3(p0.x, base, p0.z), center))
p0 = data(y)(cols-1)
p1 = data(y+1)(cols-1)
triangle(ConstTriangle(Point3(p0.x, base, p0.z), Point3(p1.x, base, p1.z), center))
y += 1
//if(triangles.size % 100 == 0) print("[%d]".format(triangles.size))
}
}
	/** Start the output of triangles generated during the triangulation phase to an STL file.
* Follow this call by several calls to `triangle()` or call `triangulate()`. Finish the
* output using `endSTL()`.
* @param name The name of the mesh.
	  * @param fileName The output file name; if null and the output is binary, the result is sent to the standard output.
	  * @param binary If true (the default), output a more compact binary file, else an ASCII file. */
def beginSTL(name:String, fileName:String, binary:Boolean = true) {
if(output eq null) {
if(binary) {
output = new BinarySTLOutput(name, fileName, triangleCount)
} else {
output = new AsciiSTLOutput(name, fileName)
}
output.begin
}
}
	/** Output a triangle to the current STL file; `beginSTL()` must have been called first. */
def triangle(t:Triangle) {
if(output ne null) output.triangle(t)
}
/** End the output to the current STL file, started by `beginSTL()`. */
def endSTL() {
if(output ne null) {
output.end
output = null
}
}
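	// Typical output sequence, as a hedged sketch (mesh and file names are
	// illustrative only): open the STL stream, let triangulate() emit the
	// triangles through triangle(), then close the stream.
	//   heightMap.beginSTL("relief", "relief.stl")   // binary by default
	//   heightMap.triangulate()
	//   heightMap.endSTL()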
/** Convert the heightmap surface to a PNG image with the given `fileName`.
	  * If `grey` is true, heights are encoded as grey values between 0 and 255,
	  * else a chromatic circle is used to encode them as colors. */
def toPNG(fileName:String, grey:Boolean = false) {
val converter = if(grey) new GreyPNGConverter(fileName) else new ColorPNGConverter(fileName)
converter.begin(cols, rows, minValue, maxValue)
var x = 0
var y = 0
while(y < rows) {
x = 0
while(x < cols) {
converter.point(x, y, cell(x, y))
x += 1
}
y += 1
}
converter.end
}
}
| Ant01n3/ReliefExtruder | src/main/scala/org/litis/relief/HeightMap.scala | Scala | gpl-2.0 | 18,937 |
import com.typesafe.tools.mima.core._
import com.typesafe.tools.mima.core.ProblemFilters._
object BinaryIncompatibilities {
val IR = Seq(
)
val Tools = Seq(
)
val JSEnvs = Seq(
)
val SbtPlugin = Seq(
)
val TestAdapter = Seq(
)
val CLI = Seq(
)
}
| CapeSepias/scala-js | project/BinaryIncompatibilities.scala | Scala | bsd-3-clause | 276 |
class A
class B extends A
class C extends A
var a: A = new B()
a = new C()
// It works, because a is declared as A explicitly.
var b = new B()
b = new C()
// error: type mismatch;
// found : this.C
// required: this.B
// Because variable b's type is inferred as B from its initializer.
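// A hedged sketch of the fix (not in the original snippet): annotate the variable
// with the common supertype A explicitly, exactly as in the first example, so
// reassignment to a C compiles.
var d: A = new B()
d = new C()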
| sgkim126/snippets | scala/InferenceVariableType.scala | Scala | bsd-2-clause | 266 |
package cz.kamenitxan.jakon.utils.mail
import javax.mail.Message
trait EmailTypeHandler {
def handle(emailType: String): (Message, Map[String, Any]) => Unit
def afterSend(emailType: String): Unit
}
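// A minimal sketch of an implementation (illustrative only: the email type
// string and the subject handling are assumptions, not part of this trait's
// contract):
//
// object LoggingEmailTypeHandler extends EmailTypeHandler {
//   override def handle(emailType: String): (Message, Map[String, Any]) => Unit =
//     (message, params) => message.setSubject(s"[$emailType] " + params.getOrElse("subject", ""))
//   override def afterSend(emailType: String): Unit = println(s"email of type $emailType sent")
// }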
| kamenitxan/Jakon | modules/backend/src/main/scala/cz/kamenitxan/jakon/utils/mail/EmailTypeHandler.scala | Scala | bsd-3-clause | 205 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package tools.nsc
package transform
import scala.annotation.{nowarn, tailrec}
import scala.collection.mutable
import scala.tools.nsc.symtab.Flags
import scala.tools.nsc.Reporting.WarningCategory
import scala.util.chaining._
/** Specialize code on types.
*
* Make sure you've read the thesis:
*
* Iulian Dragos: Compiling Scala for Performance (chapter 4)
*
* There are some things worth noting, (possibly) not mentioned there:
* 0) Make sure you understand the meaning of various `SpecializedInfo` descriptors
* defined below.
*
* 1) Specializing traits by introducing bridges in specialized methods
* of the specialized trait may introduce problems during mixin composition.
* Concretely, it may cause cyclic calls and result in a stack overflow.
* See ticket #4351.
* This was solved by introducing an `Abstract` specialized info descriptor.
* Instead of generating a bridge in the trait, an abstract method is generated.
*
* 2) Specialized private members sometimes have to be switched to protected.
* In some cases, even this is not enough. Example:
*
* {{{
* class A[@specialized T](protected val d: T) {
* def foo(that: A[T]) = that.d
* }
* }}}
*
* Specialization will generate a specialized class and a specialized method:
*
* {{{
* class A\\$mcI\\$sp(protected val d: Int) extends A[Int] {
* def foo(that: A[Int]) = foo\\$mcI\\$sp(that)
* def foo(that: A[Int]) = that.d
* }
* }}}
*
* Above, `A\\$mcI\\$sp` cannot access `d`, so the method cannot be typechecked.
*/
abstract class SpecializeTypes extends InfoTransform with TypingTransformers {
import global._
import definitions._
import Flags._
private val inlineFunctionExpansion = settings.Ydelambdafy.value == "inline"
/** the name of the phase: */
val phaseName: String = "specialize"
/** The following flags may be set by this phase: */
override def phaseNewFlags: Long = notPRIVATE
/** This phase changes base classes. */
override def changesBaseClasses = true
override def keepsTypeParams = true
type TypeEnv = Map[Symbol, Type]
def emptyEnv: TypeEnv = Map[Symbol, Type]()
private implicit val typeOrdering: Ordering[Type] = Ordering[String] on ("" + _.typeSymbol.name)
/** TODO - this is a lot of maps.
*/
/** For a given class and concrete type arguments, give its specialized class */
val specializedClass = perRunCaches.newAnyRefMap[Symbol, mutable.AnyRefMap[TypeEnv, Symbol]]()
/** Map a method symbol to a list of its specialized overloads in the same class. */
private val overloads = perRunCaches.newMap[Symbol, List[Overload]]() withDefaultValue Nil
/** Map a symbol to additional information on specialization. */
private val info = perRunCaches.newMap[Symbol, SpecializedInfo]()
/** Map class symbols to the type environments where they were created. */
private val typeEnv = perRunCaches.newMap[Symbol, TypeEnv]() withDefaultValue emptyEnv
// Key: a specialized class or method
// Value: a map from tparams in the original class to tparams in the specialized class.
private val anyrefSpecCache = perRunCaches.newMap[Symbol, mutable.Map[Symbol, Symbol]]()
// holds mappings from members to the type variables in the class
// that they were already specialized for, so that they don't get
// specialized twice (this is for AnyRef specializations)
private val wasSpecializedForTypeVars = perRunCaches.newMap[Symbol, Set[Symbol]]() withDefaultValue Set()
/** Concrete methods that use a specialized type, or override such methods. */
private val concreteSpecMethods = perRunCaches.newWeakSet[Symbol]()
private def specializedOn(sym: Symbol): List[Symbol] = {
val GroupOfSpecializable = currentRun.runDefinitions.GroupOfSpecializable
def expandGroup(tp: Type): List[Symbol] =
tp.baseType(GroupOfSpecializable) match {
case TypeRef(_, GroupOfSpecializable, arg :: Nil) => arg.typeArgs.map(_.typeSymbol)
case _ => tp.typeSymbol :: Nil
}
sym.getAnnotation(SpecializedClass) match {
case Some(AnnotationInfo(_, Nil, _)) => specializableTypes.map(_.typeSymbol)
case Some(AnnotationInfo(_, args, _)) => args.map(_.tpe).flatMap(expandGroup)
case _ => Nil
}
}
@annotation.tailrec private def findSymbol[T](candidates: List[T], f: T => Symbol): Symbol = {
if (candidates.isEmpty) NoSymbol
else f(candidates.head) match {
case NoSymbol => findSymbol(candidates.tail, f)
case sym => sym
}
}
private def hasNewParents(tree: Tree) = {
val parents = tree.symbol.info.parents
val prev = enteringPrevPhase(tree.symbol.info.parents)
(parents != prev) && {
debuglog(s"$tree parents changed from: $prev to: $parents")
true
}
}
// If we replace `isBoundedGeneric` with (tp <:< AnyRefTpe),
// then pos/spec-List.scala fails - why? Does this kind of check fail
// for similar reasons? Does `sym.isAbstractType` make a difference?
private def isSpecializedAnyRefSubtype(tp: Type, sym: Symbol) = {
specializedOn(sym).exists(s => !isPrimitiveValueClass(s)) &&
!isPrimitiveValueClass(tp.typeSymbol) &&
isBoundedGeneric(tp)
//(tp <:< AnyRefTpe)
}
object TypeEnv {
/** Return a new type environment binding specialized type parameters of sym to
* the given args. Expects the lists to have the same length.
*/
def fromSpecialization(sym: Symbol, args: List[Type]): TypeEnv = {
ifDebug(assert(sym.info.typeParams.sizeCompare(args) == 0, "" + sym + " args: " + args))
emptyEnv ++ collectMap2(sym.info.typeParams, args)((k, v) => k.isSpecialized)
}
/** Does typeenv `t1` include `t2`? All type variables in `t1`
* are defined in `t2` and:
* - are bound to the same type, or
* - are an AnyRef specialization and `t2` is bound to a subtype of AnyRef
*/
def includes(t1: TypeEnv, t2: TypeEnv) = t1 forall {
case (sym, tpe) =>
t2 get sym exists { t2tp =>
(tpe == t2tp) || !(isPrimitiveValueType(tpe) || isPrimitiveValueType(t2tp)) // u.t.b. (t2tp <:< AnyRefTpe)
}
}
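    // For intuition (a sketch, with T standing for a specialized type parameter):
    //   includes(Map(T -> Int), Map(T -> Int))             == true
    //   includes(Map(T -> Int), Map(T -> Double))          == false // both primitive, different
    //   includes(Map(T -> AnyRef), Map(T -> SomeRefType))  == true  // neither binding is primitive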
/** Reduce the given environment to contain mappings only for type variables in tps. */
def restrict(env: TypeEnv, tps: Set[Symbol]): TypeEnv =
env.view.filterKeys(tps).toMap
/** Is the given environment a valid specialization for sym?
* It is valid if each binding is from a @specialized type parameter in sym (or its owner)
* to a type for which `sym` is specialized.
*/
def isValid(env: TypeEnv, sym: Symbol): Boolean = {
env forall { case (tvar, tpe) =>
tvar.isSpecialized && (concreteTypes(tvar) contains tpe) && {
(sym.typeParams contains tvar) ||
(sym.owner != rootMirror.RootClass && (sym.owner.typeParams contains tvar))
}
}
}
}
case class Overload(sym: Symbol, env: TypeEnv) {
override def toString = "specialized overload " + sym + " in " + env
def matchesSym(sym1: Symbol) = sym.info =:= sym1.info
def matchesEnv(env1: TypeEnv) = TypeEnv.includes(env, env1)
}
private def newOverload(method: Symbol, specializedMethod: Symbol, env: TypeEnv) = {
assert(!specializedMethod.isOverloaded, specializedMethod.defString)
val om = Overload(specializedMethod, env)
overloads(method) ::= om
om
}
/** Just to mark uncheckable */
override def newPhase(prev: scala.tools.nsc.Phase): StdPhase = new SpecializationPhase(prev)
class SpecializationPhase(prev: scala.tools.nsc.Phase) extends InfoPhase(prev) {
override def checkable = false
override def run(): Unit = {
super.run()
exitingSpecialize {
FunctionClass.seq.take(MaxFunctionAritySpecialized + 1).foreach(_.info)
TupleClass.seq.take(MaxTupleAritySpecialized).foreach(_.info)
}
// Remove the final modifier and @inline annotation from anything in the
// original class (since it's being overridden in at least one subclass).
//
// We do this here so that the specialized subclasses will correctly copy
// final and @inline.
//
// TODO Try to move this logic back to the info transform.
info.foreach {
case (sym, SpecialOverload(target, _)) =>
sym.resetFlag(FINAL)
target.resetFlag(FINAL)
sym.removeAnnotation(ScalaInlineClass)
target.removeAnnotation(ScalaInlineClass)
case _ =>
}
}
}
protected def newTransformer(unit: CompilationUnit): AstTransformer =
new SpecializationTransformer(unit)
sealed abstract class SpecializedInfo {
def target: Symbol
/** Are type bounds of @specialized type parameters of 'target' now in 'env'? */
def typeBoundsIn(env: TypeEnv) = false
    /** A degenerate method has @specialized type parameters that appear only in
* type bounds of other @specialized type parameters (and not in its result type).
*/
def degenerate = false
}
/** Symbol is a special overloaded method of 'original', in the environment env. */
case class SpecialOverload(original: Symbol, env: TypeEnv) extends SpecializedInfo {
def target = original
}
/** Symbol is a method that should be forwarded to 't' */
case class Forward(t: Symbol) extends SpecializedInfo {
def target = t
}
/** Symbol is a specialized abstract method, either specialized or original. The original `t` is abstract. */
case class Abstract(t: Symbol) extends SpecializedInfo {
def target = t
}
/** Symbol is a special overload of the super accessor. Treated like an abstract method with no specialized overload. */
case class SpecialSuperAccessor(t: Symbol) extends SpecializedInfo {
def target = t
}
/** Symbol is a specialized accessor for the `target` field. */
case class SpecializedAccessor(target: Symbol) extends SpecializedInfo { }
/** Symbol is a specialized method whose body should be the target's method body. */
case class Implementation(target: Symbol) extends SpecializedInfo
/** Symbol is a specialized override paired with `target`. */
case class SpecialOverride(target: Symbol) extends SpecializedInfo
/** A specialized inner class that specializes original inner class `target` on a type parameter of the enclosing class, in the typeenv `env`. */
case class SpecializedInnerClass(target: Symbol, env: TypeEnv) extends SpecializedInfo
/** Symbol is a normalized member obtained by specializing 'target'. */
case class NormalizedMember(target: Symbol) extends SpecializedInfo {
/** Type bounds of a @specialized type var are now in the environment. */
override def typeBoundsIn(env: TypeEnv): Boolean = {
target.info.typeParams exists { tvar =>
tvar.isSpecialized && (specializedTypeVars(tvar.info.bounds) exists env.isDefinedAt)
}
}
override lazy val degenerate = {
val stvTypeParams = specializedTypeVars(target.info.typeParams map (_.info))
val stvResult = specializedTypeVars(target.info.resultType)
debuglog("degenerate: " + target + " stv tparams: " + stvTypeParams + " stv info: " + stvResult)
(stvTypeParams diff stvResult).nonEmpty
}
}
/** Has `clazz` any type parameters that need be specialized? */
def hasSpecializedParams(clazz: Symbol) =
clazz.info.typeParams exists (_.isSpecialized)
/** Return specialized type parameters. */
def specializedParams(sym: Symbol): List[Symbol] =
sym.info.typeParams filter (_.isSpecialized)
/** Given an original class symbol and a list of types its type parameters are instantiated at
* returns a list of type parameters that should remain in the TypeRef when instantiating a
* specialized type.
*/
def survivingArgs(sym: Symbol, args: List[Type]): List[Type] =
for ((tvar, tpe) <- sym.info.typeParams.zip(args) if !tvar.isSpecialized || !isPrimitiveValueType(tpe))
yield tpe
  /** Is `sym` potentially affected by specialization? This is a gross overapproximation,
* but it should be okay for use outside of specialization.
*/
def possiblySpecialized(sym: Symbol) = specializedTypeVars(sym).nonEmpty
/** Refines possiblySpecialized taking into account the instantiation of the specialized type variables at `site` */
def isSpecializedIn(sym: Symbol, site: Type) =
specializedTypeVars(sym) exists { tvar =>
val concretes = concreteTypes(tvar)
(concretes contains AnyRefTpe) || (concretes contains site.memberType(tvar))
}
val specializedType = new TypeMap {
override def apply(tp: Type): Type = tp match {
case TypeRef(pre, sym, args) if args.nonEmpty =>
val pre1 = this(pre)
// when searching for a specialized class, take care to map all
// type parameters that are subtypes of AnyRef to AnyRef
val args1 = map2(args, sym.info.typeParams)((tp, orig) =>
if (isSpecializedAnyRefSubtype(tp, orig)) AnyRefTpe
else tp
)
specializedClass.getOrElse(sym, Map.empty[TypeEnv, Symbol]).get(TypeEnv.fromSpecialization(sym, args1)) match {
case Some(sym1) => typeRef(pre1, sym1, survivingArgs(sym, args))
case None => typeRef(pre1, sym, args)
}
case _ => tp
}
}
def specializedFunctionName(sym: Symbol, args: List[Type]) = exitingSpecialize {
require(isFunctionSymbol(sym), sym)
val env: TypeEnv = TypeEnv.fromSpecialization(sym, args)
specializedClass.getOrElse(sym, Map.empty[TypeEnv, Symbol]).get(env) match {
case Some(x) =>
x.name
case None =>
sym.name
}
}
/** Return the specialized name of 'sym' in the given environment. It
* guarantees the same result regardless of the map order by sorting
* type variables alphabetically.
*
* !!! Is this safe in the face of the following?
* scala> trait T { def foo[A] = 0}; object O extends T { override def foo[B] = 0 }
*/
private def specializedName(sym: Symbol, env: TypeEnv): TermName = {
val tvars = (
if (sym.isClass) env.keySet
else specializedTypeVars(sym).intersect(env.keySet)
)
specializedName(sym.name, tvars, env)
}
private def specializedName(name: Name, tvars: Set[Symbol], env: TypeEnv): TermName = {
val (methparams, others) = tvars.toList sortBy ("" + _.name) partition (_.owner.isMethod)
// debuglog("specName(" + sym + ") env: " + env + " tvars: " + tvars)
specializedName(name, methparams map env, others map env)
}
  /** Specialized name for the two lists of types. The first one denotes
* specialization on method type parameters, the second on outer environment.
*/
private def specializedName(name: Name, types1: List[Type], types2: List[Type]): TermName = (
if (name == nme.CONSTRUCTOR || (types1.isEmpty && types2.isEmpty))
name.toTermName
else if (nme.isSetterName(name))
specializedName(name.getterName, types1, types2).setterName
else if (nme.isLocalName(name))
specializedName(name.getterName, types1, types2).localName
else {
val (base, cs, ms) = nme.splitSpecializedName(name)
newTermName(base.toString + "$"
+ "m" + ms + types1.map(t => abbrvTag(t.typeSymbol)).mkString("", "", "")
+ "c" + cs + types2.map(t => abbrvTag(t.typeSymbol)).mkString("", "", "$sp"))
}
)
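  // Illustration of the resulting mangling (a sketch that follows the scheme
  // above, not an exhaustive specification): a method `apply` specialized on
  // two Int class type parameters and no method type parameters becomes
  // `apply$mcII$sp`; with an additional method type parameter specialized on
  // Double it would be `apply$mDcII$sp`.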
private lazy val specializableTypes = ScalaValueClasses.map(_.tpe).sorted
/** If the symbol is the companion of a value class, the value class.
* Otherwise, AnyRef.
*/
def specializesClass(sym: Symbol): Symbol = {
val c = sym.companionClass
if (isPrimitiveValueClass(c)) c else AnyRefClass
}
/** Return the types `sym` should be specialized at. This may be some of the primitive types
* or AnyRef. AnyRef means that a new type parameter T will be generated later, known to be a
* subtype of AnyRef (T <: AnyRef).
* These are in a meaningful order for stability purposes.
*/
def concreteTypes(sym: Symbol): List[Type] = {
val types = if (!sym.isSpecialized)
Nil // no @specialized Annotation
else
specializedOn(sym).map(s => specializesClass(s).tpe).sorted
if (isBoundedGeneric(sym.tpe) && (types contains AnyRefTpe))
runReporting.warning(sym.pos, s"$sym is always a subtype of $AnyRefTpe.", WarningCategory.Other, sym)
types
}
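  // For example (a sketch): a parameter declared `@specialized(Int, Double) T`
  // yields the Int and Double types here (in stable, name-sorted order), while
  // a bare `@specialized T` yields all primitive value types.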
/** Return a list of all type environments for all specializations
* of @specialized types in `tps`.
*/
private def specializations(tps: List[Symbol]): List[TypeEnv] = {
// the keys in each TypeEnv
val keys: List[Symbol] = tps filter (_.isSpecialized)
// creating each permutation of concrete types
def loop(ctypes: List[List[Type]]): List[List[Type]] = ctypes match {
case Nil => Nil
case set :: Nil => set map (_ :: Nil)
case set :: sets => for (x <- set ; xs <- loop(sets)) yield x :: xs
}
// zip the keys with each permutation to create a TypeEnv.
// If we don't exclude the "all AnyRef" specialization, we will
// incur duplicate members and crash during mixin.
loop(keys map concreteTypes) filterNot (_ forall (_ <:< AnyRefTpe)) map (xss => Map(keys zip xss: _*))
}
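  // Illustrative expansion (a sketch): for
  //   class Pair[@specialized(Int, Double) A, @specialized(Int, Double) B]
  // this returns the cross product of the concrete types,
  //   Map(A -> Int, B -> Int), Map(A -> Int, B -> Double),
  //   Map(A -> Double, B -> Int), Map(A -> Double, B -> Double)
  // and an all-AnyRef combination, had AnyRef been requested, would be dropped
  // by the filterNot above.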
/** Does the given 'sym' need to be specialized in the environment 'env'?
* Specialization is needed for
* - members with specialized type parameters found in the given environment
* - constructors of specialized classes
* - normalized members whose type bounds appear in the environment
* But suppressed for:
* - any member with the @unspecialized annotation, or which has an
* enclosing member with the annotation.
*/
private def needsSpecialization(env: TypeEnv, sym: Symbol): Boolean = (
!hasUnspecializableAnnotation(sym) && (
specializedTypeVars(sym).intersect(env.keySet).diff(wasSpecializedForTypeVars(sym)).nonEmpty
|| sym.isClassConstructor && sym.enclClass.typeParams.exists(_.isSpecialized)
|| isNormalizedMember(sym) && info(sym).typeBoundsIn(env)
)
)
private def hasUnspecializableAnnotation(sym: Symbol): Boolean =
sym.ownersIterator.exists(_.hasAnnotation(UnspecializedClass))
def isNormalizedMember(m: Symbol) = m.isSpecialized && info.get(m).exists {
case NormalizedMember(_) => true
case _ => false
}
def specializedTypeVars(tpes: List[Type]): Set[Symbol] = {
val result = mutable.ListBuffer.empty[Symbol]
tpes.foreach(specializedTypeVarsBuffer(_, result))
result.toSet
}
def specializedTypeVars(sym: Symbol): Set[Symbol] = {
val result = mutable.ListBuffer.empty[Symbol]
specializedTypeVarsBuffer(sym, result)
result.toSet
}
/** Return the set of @specialized type variables mentioned by the given type.
* It only counts type variables that appear:
* - naked
* - as arguments to type constructors in @specialized positions
* (arrays are considered as Array[@specialized T])
*/
def specializedTypeVars(tpe: Type): Set[Symbol] = {
val result = new mutable.ListBuffer[Symbol]()
specializedTypeVarsBuffer(tpe, result)
result.toSet
}
def specializedTypeVarsBuffer(sym: Symbol, result: mutable.Buffer[Symbol]): Unit =
if (!neverHasTypeParameters(sym))
enteringTyper(specializedTypeVarsBuffer(sym.info, result))
/** Return the set of @specialized type variables mentioned by the given type.
* It only counts type variables that appear:
* - naked
* - as arguments to type constructors in @specialized positions
* (arrays are considered as Array[@specialized T])
*/
def specializedTypeVarsBuffer(tpe: Type, result: mutable.Buffer[Symbol]): Unit = tpe match {
case TypeRef(pre, sym, args) =>
if (sym.isAliasType)
specializedTypeVarsBuffer(tpe.dealiasWiden, result)
else if (sym.isTypeParameter && sym.isSpecialized || (sym.isTypeSkolem && sym.deSkolemize.isSpecialized))
result += sym
else if (sym == ArrayClass)
args.foreach(tp => specializedTypeVarsBuffer(tp, result))
else if (!args.isEmpty)
enteringTyper {
foreach2(sym.typeParams, args) { (tp, arg) =>
if (tp.isSpecialized)
specializedTypeVarsBuffer(arg, result)
}
}
case PolyType(tparams, resTpe) => specializedTypeVarsBuffer(resTpe, result); tparams.foreach(sym => specializedTypeVarsBuffer(sym.info, result))
// since this method may be run at phase typer (before uncurry, where NMTs are eliminated)
case NullaryMethodType(resTpe) => specializedTypeVarsBuffer(resTpe, result)
case MethodType(argSyms, resTpe) => specializedTypeVarsBuffer(resTpe, result); argSyms.foreach(sym => specializedTypeVarsBuffer(sym.tpe, result))
case ExistentialType(_, res) => specializedTypeVarsBuffer(res, result)
case AnnotatedType(_, tp) => specializedTypeVarsBuffer(tp, result)
case TypeBounds(lo, hi) => specializedTypeVarsBuffer(lo, result); specializedTypeVarsBuffer(hi, result)
case RefinedType(parents, _) => parents.foreach(p => specializedTypeVarsBuffer(p, result))
case _ => ()
}
/** Returns the type parameter in the specialized class `sClass` that corresponds to type parameter
* `tparam` in the original class. It will create it if needed or use the one from the cache.
*/
private def typeParamSubAnyRef(tparam: Symbol, sClass: Symbol): Type = {
val sClassMap = anyrefSpecCache.getOrElseUpdate(sClass, mutable.Map[Symbol, Symbol]())
sClassMap.getOrElseUpdate(tparam,
tparam.cloneSymbol(sClass, tparam.flags, tparam.name append tpnme.SPECIALIZED_SUFFIX)
modifyInfo (info => TypeBounds(info.lowerBound, AnyRefTpe))
).tpe
}
/** Cleans the anyrefSpecCache of all type parameter symbols of a class.
*/
private def cleanAnyRefSpecCache(clazz: Symbol, decls: List[Symbol]): Unit = {
// remove class type parameters and those of normalized members.
clazz :: decls foreach (anyrefSpecCache remove _)
}
/** Type parameters that survive when specializing in the specified environment. */
def survivingParams(params: List[Symbol], env: TypeEnv) =
params filter {
p =>
!p.isSpecialized ||
!env.contains(p) ||
!isPrimitiveValueType(env(p))
}
/** Produces the symbols from type parameters `syms` of the original owner,
* in the given type environment `env`. The new owner is `nowner`.
*
* Non-specialized type parameters are cloned into new ones.
* Type parameters specialized on AnyRef have preexisting symbols.
*
* For instance, a @specialized(AnyRef) T, will become T\\$sp <: AnyRef.
*/
def produceTypeParameters(syms: List[Symbol], nowner: Symbol, env: TypeEnv) = {
val cloned = for (s <- syms) yield if (!env.contains(s)) s.cloneSymbol(nowner) else env(s).typeSymbol
// log("producing type params: " + cloned.map(t => (t, t.tpe.upperBound)))
foreach2(syms, cloned) { (orig, cln) =>
cln.removeAnnotation(SpecializedClass)
if (env.contains(orig))
cln modifyInfo (info => TypeBounds(info.lowerBound, AnyRefTpe))
}
cloned.foreach(_.substInfo(syms, cloned))
cloned
}
/** Maps AnyRef bindings from a raw environment (holding AnyRefs) into type parameters from
* the specialized symbol (class (specialization) or member (normalization)), leaves everything else as-is.
*/
private def mapAnyRefsInSpecSym(env: TypeEnv, origsym: Symbol, specsym: Symbol): TypeEnv = env transform {
case (sym, AnyRefTpe) if sym.owner == origsym => typeParamSubAnyRef(sym, specsym)
case (k, v) => v
}
/** Maps AnyRef bindings from a raw environment (holding AnyRefs) into type parameters from
* the original class, leaves everything else as-is.
*/
private def mapAnyRefsInOrigCls(env: TypeEnv, origcls: Symbol): TypeEnv = env transform {
case (sym, AnyRefTpe) if sym.owner == origcls => sym.tpe
case (k, v) => v
}
/** Specialize 'clazz', in the environment `outerEnv`. The outer
* environment contains bindings for specialized types of enclosing
* classes.
*
   * A class C is specialized w.r.t. its own specialized type params
* `stps`, by specializing its members, and creating a new class for
* each combination of `stps`.
*/
def specializeClass(clazz: Symbol, outerEnv: TypeEnv): List[Symbol] = {
def specializedClass(env0: TypeEnv, normMembers: List[Symbol]): Symbol = {
/* It gets hard to follow all the clazz and cls, and specializedClass
* was both already used for a map and mucho long. So "sClass" is the
* specialized subclass of "clazz" throughout this file.
*/
val clazzName = specializedName(clazz, env0).toTypeName
// scala/bug#5545: Eliminate classes with the same name loaded from the bytecode already present - all we need to do is
// to force .info on them, as their lazy type will be evaluated and the symbols will be eliminated. Unfortunately
      // evaluating the info after creating the specialized class will mess up the specialized class signature, so we'd
      // better unlink the class-file backed symbol before creating the new class symbol
val bytecodeClazz = clazz.owner.info.decl(clazzName)
// debuglog("Specializing " + clazz + ", but found " + bytecodeClazz + " already there")
def unlink(sym: Symbol): Unit = if (sym != NoSymbol) {
devWarningIf(sym.hasCompleteInfo)("Stale specialized symbol has been accessed: " + sym)
sym.setInfo(NoType)
sym.owner.info.decls.unlink(sym)
}
unlink(bytecodeClazz)
val companionModule = bytecodeClazz.companionModule
unlink(companionModule.moduleClass)
unlink(companionModule)
val sClass = {
val sc = clazz.owner.newClass(clazzName, clazz.pos, (clazz.flags | SPECIALIZED) & ~CASE)
sc.setAnnotations(clazz.annotations)
sc
}
def cloneInSpecializedClass(member: Symbol, flagFn: Long => Long, newName: Name = null) =
member.cloneSymbol(sClass, flagFn(member.flags | SPECIALIZED), newName)
clazz.sourceFile match {
case null =>
case file =>
sClass.associatedFile = file
currentRun.symSource(sClass) = file // needed later on by mixin
}
val env = mapAnyRefsInSpecSym(env0, clazz, sClass)
typeEnv(sClass) = env
this.specializedClass.getOrElseUpdate(clazz, new mutable.AnyRefMap()).update(env0, sClass)
val decls1 = newScope // declarations of the newly specialized class 'sClass'
var oldClassTParams: List[Symbol] = Nil // original unspecialized type parameters
var newClassTParams: List[Symbol] = Nil // unspecialized type parameters of 'specializedClass' (cloned)
// has to be a val in order to be computed early. It is later called
// within 'enteringPhase(next)', which would lead to an infinite cycle otherwise
val specializedInfoType: Type = {
oldClassTParams = survivingParams(clazz.info.typeParams, env)
newClassTParams = produceTypeParameters(oldClassTParams, sClass, env) map subst(env)
// log("new tparams " + newClassTParams.zip(newClassTParams map {s => (s.tpe, s.tpe.upperBound)}) + ", in env: " + env)
def applyContext(tpe: Type) =
subst(env, tpe).instantiateTypeParams(oldClassTParams, newClassTParams.map(_.tpeHK))
/* Return a list of specialized parents to be re-mixed in a specialized subclass.
* Assuming env = [T -> Int] and
* class Integral[@specialized T] extends Numeric[T]
* and Numeric[U] is specialized on U, this produces List(Numeric$mcI).
*
* so that class Integral$mci extends Integral[Int] with Numeric$mcI.
*/
def specializedParents(parents: List[Type]): List[Type] = {
var res: List[Type] = Nil
// log(specializedClass + ": seeking specialized parents of class with parents: " + parents.map(_.typeSymbol))
for (p <- parents) {
val stp = exitingSpecialize(specializedType(p))
if (stp != p)
if (p.typeSymbol.isTrait) res ::= stp
else if (currentRun.compiles(clazz))
// TODO change to error
runReporting.warning(clazz.pos,
s"${p.typeSymbol} must be a trait. Specialized version of $clazz will inherit generic $p",
WarningCategory.Other,
clazz)
}
res
}
var parents = List(applyContext(enteringTyper(clazz.tpe_*)))
// log("!!! Parents: " + parents + ", sym: " + parents.map(_.typeSymbol))
if (parents.head.typeSymbol.isTrait)
parents = parents.head.parents.head :: parents
val extraSpecializedMixins = specializedParents(clazz.info.parents map applyContext)
if (extraSpecializedMixins.nonEmpty)
debuglog("extra specialized mixins for %s: %s".format(clazz.name.decode, extraSpecializedMixins.mkString(", ")))
// If the class being specialized has a self-type, the self type may
// require specialization. First exclude classes whose self types have
// the same type constructor as the class itself, since they will
// already be covered. Then apply the current context to the self-type
// as with the parents and assign it to typeOfThis.
if (clazz.typeOfThis.typeConstructor ne clazz.typeConstructor) {
sClass.typeOfThis = applyContext(clazz.typeOfThis)
debuglog("Rewriting self-type for specialized class:\\n" +
" " + clazz.defStringSeenAs(clazz.typeOfThis) + "\\n" +
" => " + sClass.defStringSeenAs(sClass.typeOfThis)
)
}
GenPolyType(newClassTParams, ClassInfoType(parents ::: extraSpecializedMixins, decls1, sClass))
}
exitingSpecialize(sClass setInfo specializedInfoType)
val fullEnv = outerEnv ++ env
/* Enter 'sym' in the scope of the current specialized class. Its type is
* mapped through the active environment, binding type variables to concrete
* types. The existing typeEnv for `sym` is composed with the current active
* environment
*/
def enterMember(sym: Symbol): Symbol = {
typeEnv(sym) = fullEnv ++ typeEnv(sym) // append the full environment
sym.modifyInfo(_.substThis(clazz, sClass).instantiateTypeParams(oldClassTParams, newClassTParams.map(_.tpeHK)))
// we remove any default parameters. At this point, they have been all
// resolved by the type checker. Later on, erasure re-typechecks everything and
// chokes if it finds default parameters for specialized members, even though
// they are never needed.
foreachParamss(sym)(_ resetFlag DEFAULTPARAM)
decls1 enter subst(fullEnv)(sym)
}
/* Create and enter in scope an overridden symbol m1 for `m` that forwards
* to `om`. `om` is a fresh, special overload of m1 that is an implementation
* of `m`. For example, for a
*
* class Foo[@specialized A] {
* def m(x: A) = <body> // m
* }
* , for class Foo$I extends Foo[Int], this method enters two new symbols in
* the scope of Foo$I:
*
* def m(x: Int) = m$I(x) // m1
* def m$I(x: Int) = <body>/adapted to env {A -> Int} // om
*/
def forwardToOverload(m: Symbol): Symbol = {
val specMember = enterMember(cloneInSpecializedClass(m, f => (f | OVERRIDE) & ~(DEFERRED | CASEACCESSOR)))
val om = specializedOverload(sClass, m, env).setFlag(OVERRIDE)
val original = info.get(m) match {
case Some(NormalizedMember(tg)) => tg
case _ => m
}
info(specMember) = Forward(om)
info(om) = if (original.isDeferred) Forward(original) else Implementation(original)
typeEnv(om) = env ++ typeEnv(m) // add the environment for any method tparams
newOverload(specMember, om, typeEnv(om))
enterMember(om)
}
@tailrec def isTraitValSetter(sym: Symbol): Boolean =
sym.isSetter && sym.getterIn(sym.owner).isStable &&
(sym.hasFlag(SYNTHESIZE_IMPL_IN_SUBCLASS) || isTraitValSetter(sym.nextOverriddenSymbol))
for (m <- normMembers) {
if (!needsSpecialization(fullEnv, m)) {
if (m.isValue && !m.isMutable && !m.isMethod && !m.isDeferred && !m.isLazy && !m.isParamAccessor) {
// non-specialized `val` fields are made mutable (in Constructors) and assigned from the
// constructors of specialized subclasses. See PR scala/scala#9704.
clazz.primaryConstructor.updateAttachment(ConstructorNeedsFence)
sClass.primaryConstructor.updateAttachment(ConstructorNeedsFence)
}
} else if (satisfiable(fullEnv)) {
if (!m.isDeferred)
addConcreteSpecMethod(m)
// specialized members have to be overridable.
if (m.isPrivate)
m.resetFlag(PRIVATE).setFlag(PROTECTED)
if (m.isConstructor) {
val specCtor = enterMember(cloneInSpecializedClass(m, x => x))
info(specCtor) = Forward(m)
}
else if (isNormalizedMember(m)) { // methods added by normalization
val NormalizedMember(original) = info(m): @unchecked
if (nonConflicting(env ++ typeEnv(m))) {
if (info(m).degenerate) {
debuglog("degenerate normalized member " + m.defString)
val specMember = enterMember(cloneInSpecializedClass(m, _ & ~DEFERRED))
info(specMember) = Implementation(original)
typeEnv(specMember) = env ++ typeEnv(m)
} else {
val om = forwardToOverload(m)
debuglog("normalizedMember " + m + " om: " + om + " " + pp(typeEnv(om)))
}
}
else
debuglog("conflicting env for " + m + " env: " + env)
}
else if (m.isDeferred && m.isSpecialized) { // abstract methods
val specMember = enterMember(cloneInSpecializedClass(m, _ | DEFERRED))
// debuglog("deferred " + specMember.fullName + " remains abstract")
info(specMember) = Abstract(specMember)
// was: new Forward(specMember) {
// override def target = m.owner.info.member(specializedName(m, env))
// }
} else if (m.hasFlag(SUPERACCESSOR)) { // basically same as abstract case
// we don't emit a specialized overload for the super accessor because we can't jump back and forth
// between specialized and non-specialized methods during an invokespecial for the super call,
// so, we must jump immediately into the non-specialized world to find our super
val specMember = enterMember(cloneInSpecializedClass(m, f => f))
// rebindSuper in mixins knows how to rejigger this
// (basically it skips this specialized class in the base class seq, and then also never rebinds to a specialized method)
specMember.asInstanceOf[TermSymbol].referenced = m.alias
info(specMember) = SpecialSuperAccessor(specMember)
} else if (m.isMethod && !m.isDeferred && (!m.isAccessor || m.isLazy || isTraitValSetter(m))) { // other concrete methods
forwardToOverload(m)
} else if (m.isValue && !m.isMethod) { // concrete value definition
def mkAccessor(field: Symbol, name: Name) = {
val newFlags = (SPECIALIZED | m.getterIn(clazz).flags) & ~(LOCAL | CASEACCESSOR | PARAMACCESSOR)
// we rely on the super class to initialize param accessors
val sym = sClass.newMethod(name.toTermName, field.pos, newFlags)
info(sym) = SpecializedAccessor(field)
sym
}
def overrideIn(clazz: Symbol, sym: Symbol) = {
val newFlags = (sym.flags | OVERRIDE | SPECIALIZED) & ~(DEFERRED | CASEACCESSOR | PARAMACCESSOR)
val sym1 = sym.cloneSymbol(clazz, newFlags)
sym1.modifyInfo(_.asSeenFrom(clazz.tpe, sym1.owner))
}
val specVal = specializedOverload(sClass, m, env)
addConcreteSpecMethod(m)
specVal.asInstanceOf[TermSymbol].setAlias(m)
enterMember(specVal)
// create accessors
if (m.isLazy) {
// no getters needed (we'll specialize the compute method and accessor separately), can stay private
// m.setFlag(PRIVATE) -- TODO: figure out how to leave the non-specialized lazy var private
// (the implementation needs it to be visible while duplicating and retypechecking,
// but it really could be private in bytecode)
specVal.setFlag(PRIVATE)
}
else if (nme.isLocalName(m.name)) {
val specGetter = mkAccessor(specVal, specVal.getterName) setInfo MethodType(Nil, specVal.info)
val origGetter = overrideIn(sClass, m.getterIn(clazz))
info(origGetter) = Forward(specGetter)
enterMember(specGetter)
enterMember(origGetter)
debuglog("specialize accessor in %s: %s -> %s".format(sClass.name.decode, origGetter.name.decode, specGetter.name.decode))
clazz.caseFieldAccessors.find(_.name.startsWith(m.name)) foreach { cfa =>
val cfaGetter = overrideIn(sClass, cfa)
info(cfaGetter) = SpecializedAccessor(specVal)
enterMember(cfaGetter)
debuglog("override case field accessor %s -> %s".format(m.name.decode, cfaGetter.name.decode))
}
if (specVal.isVariable && m.setterIn(clazz) != NoSymbol) {
val specSetter = mkAccessor(specVal, specGetter.setterName)
.resetFlag(STABLE)
specSetter.setInfo(MethodType(specSetter.newSyntheticValueParams(List(specVal.info)),
UnitTpe))
val origSetter = overrideIn(sClass, m.setterIn(clazz))
info(origSetter) = Forward(specSetter)
enterMember(specSetter)
enterMember(origSetter)
}
}
else { // if there are no accessors, specialized methods will need to access this field in specialized subclasses
m.resetFlag(PRIVATE)
specVal.resetFlag(PRIVATE)
debuglog("no accessors for %s/%s, specialized methods must access field in subclass".format(
m.name.decode, specVal.name.decode))
}
}
else if (m.isClass) {
val specClass: Symbol = cloneInSpecializedClass(m, x => x)
typeEnv(specClass) = fullEnv
specClass setName specializedName(specClass, fullEnv).toTypeName
enterMember(specClass)
debuglog("entered specialized class " + specClass.fullName)
info(specClass) = SpecializedInnerClass(m, fullEnv)
}
}
}
sClass
}
val decls1 = clazz.info.decls.toList flatMap { m: Symbol =>
if (m.isAnonymousClass) List(m) else {
normalizeMember(m.owner, m, outerEnv) flatMap { normalizedMember =>
val ms = specializeMember(m.owner, normalizedMember, outerEnv, clazz.info.typeParams)
// interface traits have concrete members now
if (ms.nonEmpty && clazz.isTrait && clazz.isInterface)
clazz.resetFlag(INTERFACE)
if (normalizedMember.isMethod) {
val newTpe = subst(outerEnv, normalizedMember.info)
// only do it when necessary, otherwise the method type might be at a later phase already
if (newTpe != normalizedMember.info) {
normalizedMember updateInfo newTpe
}
}
normalizedMember :: ms
}
}
}
val subclasses = specializations(clazz.info.typeParams) filter satisfiable
subclasses foreach {
env =>
val spc = specializedClass(env, decls1)
val existing = clazz.owner.info.decl(spc.name)
// a symbol for the specialized class already exists if there's a classfile for it.
// keeping both crashes the compiler on test/files/pos/spec-Function1.scala
if (existing != NoSymbol)
clazz.owner.info.decls.unlink(existing)
exitingSpecialize(clazz.owner.info.decls enter spc) //!!! assumes fully specialized classes
}
if (subclasses.nonEmpty) clazz.resetFlag(FINAL)
cleanAnyRefSpecCache(clazz, decls1)
decls1
}
/** Expand member `sym` to a set of normalized members. Normalized members
* are monomorphic or polymorphic only in non-specialized types.
*
* Given method m[@specialized T, U](x: T, y: U) it returns
* m[T, U](x: T, y: U),
* m\\$I[ U](x: Int, y: U),
* m\\$D[ U](x: Double, y: U)
* // etc.
*/
private def normalizeMember(owner: Symbol, sym: Symbol, outerEnv: TypeEnv): List[Symbol] = {
sym :: (
if (!sym.isMethod || enteringTyper(sym.typeParams.isEmpty)) Nil
else if (sym.hasDefault) {
/* Specializing default getters is useless, also see scala/bug#7329 . */
sym.resetFlag(SPECIALIZED)
Nil
} else {
// debuglog("normalizeMember: " + sym.fullNameAsName('.').decode)
var specializingOn = specializedParams(sym)
val unusedStvars = specializingOn filterNot specializedTypeVars(sym.info)
// I think the last condition should be !sym.isArtifact, but that made the
// compiler start warning about Tuple1.scala and Tuple2.scala claiming
// their type parameters are used in non-specializable positions. Why is
// unusedStvars.nonEmpty for these classes???
if (unusedStvars.nonEmpty && currentRun.compiles(sym) && !sym.isSynthetic) {
runReporting.warning(sym.pos,
"%s %s unused or used in non-specializable positions.".format(
unusedStvars.mkString("", ", ", ""),
if (unusedStvars.lengthIs == 1) "is" else "are"),
WarningCategory.Other,
sym)
unusedStvars foreach (_ removeAnnotation SpecializedClass)
specializingOn = specializingOn filterNot (unusedStvars contains _)
}
for (env0 <- specializations(specializingOn) if needsSpecialization(env0, sym)) yield {
// !!! Can't this logic be structured so that the new symbol's name is
// known when the symbol is cloned? It is much cleaner not to be mutating
// names after the fact. And it adds about a billion lines of
// "Renaming value _1 in class Tuple2 to _1$mcZ$sp" to obscure the small
// number of other (important) actual symbol renamings.
val tps = survivingParams(sym.info.typeParams, env0)
val specMember = sym.cloneSymbol(owner, (sym.flags | SPECIALIZED) & ~DEFERRED) // <-- this needs newName = ...
val env = mapAnyRefsInSpecSym(env0, sym, specMember)
val (keys, vals) = env.toList.unzip
specMember setName specializedName(sym, env) // <-- but the name is calculated based on the cloned symbol
// debuglog("%s normalizes to %s%s".format(sym, specMember,
// if (tps.isEmpty) "" else " with params " + tps.mkString(", ")))
typeEnv(specMember) = outerEnv ++ env
val tps1 = produceTypeParameters(tps, specMember, env)
tps1 foreach (_ modifyInfo (_.instantiateTypeParams(keys, vals)))
// the cloneInfo is necessary so that method parameter symbols are cloned at the new owner
val methodType = sym.info.resultType.instantiateTypeParams(keys ++ tps, vals ++ tps1.map(_.tpeHK)).cloneInfo(specMember)
specMember setInfo GenPolyType(tps1, methodType)
debuglog("%s expands to %s in %s".format(sym, specMember.name.decode, pp(env)))
info(specMember) = NormalizedMember(sym)
newOverload(sym, specMember, env)
specMember
}
}
)
}
// concise printing of type env
private def pp(env: TypeEnv): String = {
env.toList.sortBy(_._1.name).map {
case (k, v) =>
val vsym = v.typeSymbol
if (k == vsym) "" + k.name
else "" + k.name + ":" + vsym.name
}.mkString("env(", ", ", ")")
}
  /** Specialize member `m` w.r.t. the outer environment and the type
* parameters of the innermost enclosing class.
*
* Turns 'private' into 'protected' for members that need specialization.
*
* Return a list of symbols that are specializations of 'sym', owned by 'owner'.
*/
private def specializeMember(owner: Symbol, sym: Symbol, outerEnv: TypeEnv, tps: List[Symbol]): List[Symbol] = {
def specializeOn(tparams: List[Symbol]): List[Symbol] = specializations(tparams) map { spec0 =>
val spec = mapAnyRefsInOrigCls(spec0, owner)
if (sym.isPrivate) {
sym.resetFlag(PRIVATE).setFlag(PROTECTED)
debuglog("Set %s to private[%s]".format(sym, sym.enclosingPackage))
}
val specMember = subst(outerEnv)(specializedOverload(owner, sym, spec))
typeEnv(specMember) = typeEnv(sym) ++ outerEnv ++ spec
wasSpecializedForTypeVars(specMember) ++= spec collect { case (s, tp) if s.tpe == tp => s }
val wasSpec = wasSpecializedForTypeVars(specMember)
if (wasSpec.nonEmpty)
debuglog("specialized overload for %s in %s".format(specMember, pp(typeEnv(specMember))))
newOverload(sym, specMember, spec)
info(specMember) = SpecialOverload(sym, typeEnv(specMember))
specMember
}
if (!sym.isMethod || sym.isConstructor || hasUnspecializableAnnotation(sym) || sym.isSuperAccessor) {
Nil
} else {
val stvars = specializedTypeVars(sym)
if (stvars.nonEmpty)
debuglog("specialized %s on %s".format(sym.fullLocationString, stvars.map(_.name).mkString(", ")))
if (!sym.isDeferred)
addConcreteSpecMethod(sym)
specializeOn(tps filter stvars)
}
}
/** Return the specialized overload of `m`, in the given environment. */
private def specializedOverload(owner: Symbol, sym: Symbol, env: TypeEnv, nameSymbol: Symbol = NoSymbol): Symbol = {
val newFlags = (sym.flags | SPECIALIZED) & ~(DEFERRED | CASEACCESSOR | LAZY)
// this method properly duplicates the symbol's info
val specname = specializedName(nameSymbol orElse sym, env)
( sym.cloneSymbol(owner, newFlags, newName = specname)
modifyInfo (info => subst(env, info.asSeenFrom(owner.thisType, sym.owner)))
)
}
/** For each method m that overrides an inherited method m', add a special
* overload method `om` that overrides the corresponding overload in the
* superclass. For the following example:
*
* class IntFun extends Function1[Int, Int] {
* def apply(x: Int): Int = ..
* }
*
* this method will return List('apply\\$mcII\\$sp')
*/
private def specialOverrides(clazz: Symbol) = logResultIf[List[Symbol]]("specialized overrides in " + clazz, _.nonEmpty) {
/* Return the overridden symbol in syms that needs a specialized overriding symbol,
* together with its specialization environment. The overridden symbol may not be
* the closest to 'overriding', in a given hierarchy.
*
     * A method m needs a special override if
* * m overrides a method whose type contains specialized type variables
* * there is a valid specialization environment that maps the overridden method type to m's type.
*/
@nowarn("cat=lint-nonlocal-return")
def needsSpecialOverride(overriding: Symbol): (Symbol, TypeEnv) = {
def checkOverriddenTParams(overridden: Symbol): Unit = {
foreach2(overridden.info.typeParams, overriding.info.typeParams) { (baseTvar, derivedTvar) =>
val missing = concreteTypes(baseTvar).toSet diff concreteTypes(derivedTvar).toSet
if (missing.nonEmpty) {
reporter.error(derivedTvar.pos,
"Type parameter has to be specialized at least for the same types as in the overridden method. Missing "
+ "types: " + missing.mkString("", ", ", "")
)
}
}
}
if (!overriding.isParamAccessor) {
for (overridden <- overriding.allOverriddenSymbols) {
val stvars = specializedTypeVars(overridden.info)
if (stvars.nonEmpty) {
debuglog("specialized override of %s by %s%s".format(overridden.fullLocationString, overriding.fullLocationString,
if (stvars.isEmpty) "" else stvars.map(_.name).mkString("(", ", ", ")")))
if (currentRun compiles overriding)
checkOverriddenTParams(overridden)
val env = unify(overridden.info, overriding.info, emptyEnv, false, true)
def atNext = exitingSpecialize(overridden.owner.info.decl(specializedName(overridden, env)))
if (TypeEnv.restrict(env, stvars).nonEmpty && TypeEnv.isValid(env, overridden) && atNext != NoSymbol) {
debuglog(" " + pp(env) + " found " + atNext)
return (overridden, env)
}
}
}
}
(NoSymbol, emptyEnv)
}
(clazz.info.decls flatMap { overriding =>
needsSpecialOverride(overriding) match {
case (NoSymbol, _) =>
// run/t4996.scala, see the amazing commit message in 9733f56
if (overriding.isSuperAccessor) {
val alias = overriding.alias
debuglog(s"checking special overload for super accessor: ${overriding.fullName}, alias for ${alias.fullName}")
needsSpecialOverride(alias) match {
case nope @ (NoSymbol, _) => None
case (overridden, env) =>
val om = specializedOverload(clazz, overriding, env, overridden)
om.setName(nme.superName(om.name))
om.asInstanceOf[TermSymbol].setAlias(info(alias).target)
om.owner.info.decls.enter(om)
info(om) = SpecialSuperAccessor(om)
om.makeNotPrivate(om.owner)
newOverload(overriding, om, env)
Some(om)
}
} else None
case (overridden, env) =>
val om = specializedOverload(clazz, overridden, env)
clazz.info.decls.enter(om)
foreachWithIndex(om.paramss) { (params, i) =>
foreachWithIndex(params) { (param, j) =>
param.name = overriding.paramss(i)(j).name // scala/bug#6555 Retain the parameter names from the subclass.
}
}
debuglog(s"specialized overload $om for ${overriding.name.decode} in ${pp(env)}: ${om.info}")
om.setFlag(overriding.flags & (ABSOVERRIDE | SYNCHRONIZED))
om.withAnnotations(overriding.annotations.filter(_.symbol == ScalaStrictFPAttr))
typeEnv(om) = env
addConcreteSpecMethod(overriding)
if (overriding.isDeferred) { // abstract override
debuglog("abstract override " + overriding.fullName + " with specialized " + om.fullName)
info(om) = Forward(overriding)
}
else {
// if the override is a normalized member, 'om' gets the
// implementation from its original target, and adds the
// environment of the normalized member (that is, any
// specialized /method/ type parameter bindings)
info get overriding match {
case Some(NormalizedMember(target)) =>
typeEnv(om) = env ++ typeEnv(overriding)
info(om) = Forward(target)
case _ =>
info(om) = SpecialOverride(overriding)
}
info(overriding) = Forward(om setPos overriding.pos)
}
newOverload(overriding, om, env)
ifDebug(exitingSpecialize(assert(
overridden.owner.info.decl(om.name) != NoSymbol,
"Could not find " + om.name + " in " + overridden.owner.info.decls))
)
Some(om)
}
}).toList
}
case object UnifyError extends scala.util.control.ControlThrowable
private[this] def unifyError(tp1: Any, tp2: Any): Nothing = {
log("unifyError" + ((tp1, tp2)))
throw UnifyError
}
/** Return the most general type environment that specializes tp1 to tp2.
* It only allows binding of type parameters annotated with @specialized.
* Fails if such an environment cannot be found.
*
* If `strict` is true, a UnifyError is thrown if unification is impossible.
*
   * If `tparams` is true, then the method tries to unify over type params in polytypes as well.
*/
private def unify(tp1: Type, tp2: Type, env: TypeEnv, strict: Boolean, tparams: Boolean): TypeEnv = (tp1, tp2) match {
case (TypeRef(_, sym1, _), _) if sym1.isSpecialized =>
debuglog(s"Unify $tp1, $tp2")
if (isPrimitiveValueClass(tp2.typeSymbol) || isSpecializedAnyRefSubtype(tp2, sym1))
env + ((sym1, tp2))
else if (isSpecializedAnyRefSubtype(tp2, sym1))
env + ((sym1, tp2))
else if (strict)
unifyError(tp1, tp2)
else
env
case (TypeRef(_, sym1, args1), TypeRef(_, sym2, args2)) =>
if (args1.nonEmpty || args2.nonEmpty)
debuglog(s"Unify types $tp1 and $tp2")
if (strict && args1.length != args2.length) unifyError(tp1, tp2)
val e = unify(args1, args2, env, strict)
if (e.nonEmpty) debuglog(s"unified to: $e")
e
case (TypeRef(_, sym1, _), _) if sym1.isTypeParameterOrSkolem =>
env
case (MethodType(params1, res1), MethodType(params2, res2)) =>
if (strict && params1.length != params2.length) unifyError(tp1, tp2)
debuglog(s"Unify methods $tp1 and $tp2")
val env1 = unifyAux(res1, res2, env, strict)
if (params1.isEmpty) env1
else
foldLeft2(params1, params2)(env1){ (e, p1, p2) => unifyAux(p1.tpe, p2.tpe, e, strict) }
case (PolyType(tparams1, res1), PolyType(tparams2, res2)) =>
debuglog(s"Unify polytypes $tp1 and $tp2")
if (strict && tparams1.length != tparams2.length)
unifyError(tp1, tp2)
else if (tparams && tparams1.sizeCompare(tparams2) == 0) {
val env1 = unifyAux(res1, res2, env, strict)
if (tparams1.isEmpty) env1
else
foldLeft2(tparams1, tparams2)(env1){ (e, tp1, tp2) => unifyAux(tp1.info, tp2.info, e, strict) }
}
else
unify(res1, res2, env, strict = strict, tparams = false)
case (TypeBounds(lo1, hi1), TypeBounds(lo2, hi2)) =>
val env1 = unifyAux(lo1, lo2, env, strict)
unifyAux(hi1, hi2, env1, strict)
case (PolyType(_, res), other) => unify(res, other, env, strict, tparams = false)
case (ThisType(_), ThisType(_)) => env
case (_, SingleType(_, _)) => unify(tp1, tp2.underlying, env, strict, tparams = false)
case (SingleType(_, _), _) => unify(tp1.underlying, tp2, env, strict, tparams = false)
case (ThisType(_), _) => unify(tp1.widen, tp2, env, strict, tparams = false)
case (_, ThisType(_)) => unify(tp1, tp2.widen, env, strict, tparams = false)
case (RefinedType(_, _), RefinedType(_, _)) => env
case (AnnotatedType(_, tp1), tp2) => unify(tp2, tp1, env, strict, tparams = false)
case (ExistentialType(_, res1), _) => unify(tp2, res1, env, strict, tparams = false)
case _ =>
debuglog(s"don't know how to unify $tp1 [${tp1.getClass}] with $tp2 [${tp2.getClass}]")
env
}
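  // Unification sketch for intuition (hypothetical method types): unifying the
  // declared type of `def id[@specialized T](x: T): T`, seen as (T)T, against
  // the concrete (Int)Int binds env(T -> Int); with strict = true the same call
  // throws UnifyError instead when arities differ or bindings conflict.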
private def unify(tp1: List[Type], tp2: List[Type], env: TypeEnv, strict: Boolean): TypeEnv =
if (tp1.isEmpty || tp2.isEmpty) env
else foldLeft2(tp1, tp2)(env) { (env, arg1, arg2) =>
unifyAux(arg1, arg2, env, strict)
}
private def unifyAux(arg1: Type, arg2: Type, env: TypeEnv, strict: Boolean): TypeEnv =
if (!strict) unify(arg1, arg2, env, strict, tparams = false)
else {
val nenv = unify(arg1, arg2, emptyEnv, strict, tparams = false)
if (env.keySet.intersect(nenv.keySet).isEmpty) env ++ nenv
else {
debuglog(s"could not unify: u($arg1, $arg2) yields $nenv, env: $env")
unifyError(arg1, arg2)
}
}
/** Apply the type environment 'env' to the given type. All type
* bindings are supposed to be to primitive types. A type variable
* that is annotated with 'uncheckedVariance' is mapped to the corresponding
* primitive type losing the annotation.
*/
private def subst(env: TypeEnv, tpe: Type): Type = {
class FullTypeMap(from: List[Symbol], to: List[Type]) extends SubstTypeMap(from, to) with AnnotationFilter {
def keepAnnotation(annot: AnnotationInfo) = !(annot matches uncheckedVarianceClass)
override def mapOver(tp: Type): Type = tp match {
case ClassInfoType(parents, decls, clazz) =>
val parents1 = parents mapConserve this
val decls1 = mapOver(decls)
if ((parents1 eq parents) && (decls1 eq decls)) tp
else ClassInfoType(parents1, decls1, clazz)
case _ =>
super.mapOver(tp)
}
}
val (keys, values) = env.toList.unzip
(new FullTypeMap(keys, values))(tpe)
}
private def subst(env: TypeEnv)(decl: Symbol): Symbol =
decl modifyInfo (info =>
if (decl.isConstructor) MethodType(subst(env, info).params, decl.owner.tpe_*)
else subst(env, info)
)
private def unspecializableClass(tp: Type) = (
isRepeatedParamType(tp) // ???
|| tp.typeSymbol.isJavaDefined
|| tp.typeSymbol.isPackageClass
)
/** Type transformation. It is applied to all symbols, compiled or loaded.
* If it is a 'no-specialization' run, it is applied only to loaded symbols.
*/
override def transformInfo(sym: Symbol, tpe: Type): Type = {
if (settings.nospecialization && currentRun.compiles(sym)) {
tpe
} else tpe.resultType match {
case cinfo @ ClassInfoType(parents, decls, clazz) if !unspecializableClass(cinfo) =>
if (!currentRun.compiles(sym) && isPast(ownPhase)) {
// Skip specialization info transform for third party classes that aren't referenced directly
// from the tree or by the specialization info transform itself that are run up to the end of
// the specialization phase.
//
// As a special case, we unconditionally specialize Function and Tuple classes above in `Phase#apply`
// as the backend needs to know about these for code it inlines to enable box- and null-check elimination.
tpe
} else {
val tparams = tpe.typeParams
if (tparams.isEmpty)
exitingSpecialize(parents map (_.typeSymbol.info))
val parents1 = parents mapConserve specializedType
if (parents ne parents1) {
debuglog("specialization transforms %s%s parents to %s".format(
if (tparams.nonEmpty) "(poly) " else "", clazz, parents1)
)
}
val newScope = newScopeWith(specializeClass(clazz, typeEnv(clazz)) ++ specialOverrides(clazz): _*)
// If tparams.isEmpty, this is just the ClassInfoType.
GenPolyType(tparams, ClassInfoType(parents1, newScope, clazz))
}
case _ =>
tpe
}
}
  /** Is any type variable in `env` conflicting with any of its type bounds, when
* type bindings in `env` are taken into account?
*
* A conflicting type environment could still be satisfiable.
*/
def nonConflicting(env: TypeEnv) = env forall { case (tvar, tpe) =>
(subst(env, tvar.info.lowerBound) <:< tpe) && (tpe <:< subst(env, tvar.info.upperBound))
}
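  // Example (illustrative): for a type parameter T <: AnyRef, the binding T -> Int is a
  // conflict (Int does not conform to AnyRef), while for an unbounded T any primitive
  // binding satisfies both bounds and is non-conflicting.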
  /** The type environment is sound w.r.t. all type bounds, or only soft
* conflicts appear. An environment is sound if all bindings are within
* the bounds of the given type variable. A soft conflict is a binding
* that does not fall within the bounds, but whose bounds contain
   *  type variables that are @specialized (and could thus become satisfiable).
*/
def satisfiable(env: TypeEnv): Boolean = satisfiable(env, false)
def satisfiable(env: TypeEnv, warnings: Boolean): Boolean = {
def matches(tpe1: Type, tpe2: Type): Boolean = (tpe2 == AnyTpe) || { // opt for common case of unbounded type parameter
val t1 = subst(env, tpe1)
val t2 = subst(env, tpe2)
((t1 <:< t2)
|| specializedTypeVars(t1).nonEmpty
|| specializedTypeVars(t2).nonEmpty)
}
env forall { case (tvar, tpe) =>
matches(tvar.info.lowerBound, tpe) && matches(tpe, tvar.info.upperBound) || {
if (warnings)
runReporting.warning(tvar.pos, s"Bounds prevent specialization of $tvar", WarningCategory.Other, tvar)
debuglog("specvars: " +
tvar.info.lowerBound + ": " +
specializedTypeVars(tvar.info.lowerBound) + " " +
subst(env, tvar.info.upperBound) + ": " +
specializedTypeVars(subst(env, tvar.info.upperBound))
)
false
}
}
}
def satisfiabilityConstraints(env: TypeEnv): Option[TypeEnv] = {
val noconstraints = Some(emptyEnv)
def matches(tpe1: Type, tpe2: Type): Option[TypeEnv] = {
val t1 = subst(env, tpe1)
val t2 = subst(env, tpe2)
// log("---------> " + tpe1 + " matches " + tpe2)
// log(t1 + ", " + specializedTypeVars(t1))
// log(t2 + ", " + specializedTypeVars(t2))
// log("unify: " + unify(t1, t2, env, false, false) + " in " + env)
if (t1 <:< t2) noconstraints
else if (specializedTypeVars(t1).nonEmpty) Some(unify(t1, t2, env, false, false) -- env.keys)
else if (specializedTypeVars(t2).nonEmpty) Some(unify(t2, t1, env, false, false) -- env.keys)
else None
}
env.foldLeft[Option[TypeEnv]](noconstraints) {
case (constraints, (tvar, tpe)) =>
val loconstraints = matches(tvar.info.lowerBound, tpe)
val hiconstraints = matches(tpe, tvar.info.upperBound)
val allconstraints = for (c <- constraints; l <- loconstraints; h <- hiconstraints) yield c ++ l ++ h
allconstraints
}
}
/** This duplicator additionally performs casts of expressions if that is allowed by the `casts` map. */
class Duplicator(casts: Map[Symbol, Type]) extends {
val global: SpecializeTypes.this.global.type = SpecializeTypes.this.global
} with typechecker.Duplicators {
private val (castfrom, castto) = casts.unzip
private object CastMap extends SubstTypeMap(castfrom.toList, castto.toList)
@nowarn("""cat=deprecation&origin=scala\\.tools\\.nsc\\.transform\\.SpecializeTypes\\.Duplicator\\.BodyDuplicator""")
final type SpecializeBodyDuplicator = BodyDuplicator
@nowarn("msg=shadowing a nested class of a parent is deprecated")
@deprecated("use SpecializeBodyDuplicator instead", since = "2.13.4")
class BodyDuplicator(_context: Context) extends super.BodyDuplicator(_context) {
override def castType(tree: Tree, pt: Type): Tree = {
tree modifyType fixType
// log(" tree type: " + tree.tpe)
val ntree = if (tree.tpe != null && !(tree.tpe <:< pt)) {
val casttpe = CastMap(tree.tpe)
if (casttpe <:< pt) gen.mkCast(tree, casttpe)
else if (casttpe <:< CastMap(pt)) gen.mkCast(tree, pt)
else tree
} else tree
ntree.clearType()
}
}
private class SpecializeNamer(context: Context) extends Namer(context) {
// Avoid entering synthetic trees during specialization because the duplicated trees already contain them.
override def enterSyntheticSym(tree: Tree): Symbol = tree.symbol
}
protected override def newBodyDuplicator(context: Context): SpecializeBodyDuplicator =
new SpecializeBodyDuplicator(context)
override def newNamer(context: Context): Namer =
new SpecializeNamer(context)
}
/** Introduced to fix scala/bug#7343: Phase ordering problem between Duplicators and Specialization.
* brief explanation: specialization rewires class parents during info transformation, and
* the new info then guides the tree changes. But if a symbol is created during duplication,
* which runs after specialization, its info is not visited and thus the corresponding tree
* is not specialized. One manifestation is the following:
* {{{
* object Test {
* class Parent[@specialized(Int) T]
*
* def spec_method[@specialized(Int) T](t: T, expectedXSuper: String) = {
* class X extends Parent[T]()
* // even in the specialized variant, the local X class
* // doesn't extend Parent\\$mcI\\$sp, since its symbol has
* // been created after specialization and was not seen
* // by specialization's info transformer.
* ...
* }
* }
* }}}
* We fix this by forcing duplication to take place before specialization.
*
* Note: The constructors phase (which also uses duplication) comes after erasure and uses the
* post-erasure typer => we must protect it from the beforeSpecialization phase shifting.
*/
class SpecializationDuplicator(casts: Map[Symbol, Type]) extends Duplicator(casts) {
override def retyped(context: Context, tree: Tree, oldThis: Symbol, newThis: Symbol, env: scala.collection.Map[Symbol, Type]): Tree =
enteringSpecialize(super.retyped(context, tree, oldThis, newThis, env))
}
/** A tree symbol substituter that substitutes on type skolems.
* If a type parameter is a skolem, it looks for the original
* symbol in the 'from' and maps it to the corresponding new
* symbol. The new symbol should probably be a type skolem as
* well (not enforced).
*
* All private members are made protected in order to be accessible from
* specialized classes.
*/
class ImplementationAdapter(from: List[Symbol],
to: List[Symbol],
targetClass: Symbol,
addressFields: Boolean) extends TreeSymSubstituter(from, to) {
override val symSubst = new SubstSymMap(from, to) {
override def matches(sym1: Symbol, sym2: Symbol) =
if (sym2.isTypeSkolem) sym2.deSkolemize eq sym1
else sym1 eq sym2
}
private def isAccessible(sym: Symbol): Boolean =
if (currentOwner.isAnonymousFunction) {
if (inlineFunctionExpansion) devWarning("anonymous function made it to specialization even though inline expansion is set.")
false
}
else (currentClass == sym.owner.enclClass) && (currentClass != targetClass)
private def shouldMakePublic(sym: Symbol): Boolean =
sym.hasFlag(PRIVATE | PROTECTED) && (addressFields || !nme.isLocalName(sym.name))
/** All private members that are referenced are made protected,
* in order to be accessible from specialized subclasses.
*/
override def transform(tree: Tree): Tree = tree match {
case Select(_, _) =>
val sym = tree.symbol
if (sym.isPrivate) debuglog(
"seeing private member %s, currentClass: %s, owner: %s, isAccessible: %b, isLocalName: %b".format(
sym, currentClass, sym.owner.enclClass, isAccessible(sym), nme.isLocalName(sym.name))
)
if (shouldMakePublic(sym) && !isAccessible(sym)) {
debuglog(s"changing private flag of $sym")
sym.makeNotPrivate(sym.owner)
}
super.transform(tree)
case _ =>
super.transform(tree)
}
}
/** Return the generic class corresponding to this specialized class. */
def originalClass(clazz: Symbol): Symbol =
if (clazz.isSpecialized) {
val (originalName, _, _) = nme.splitSpecializedName(clazz.name)
clazz.owner.info.decl(originalName).suchThat(_.isClass)
} else NoSymbol
def illegalSpecializedInheritance(clazz: Symbol): Boolean = (
clazz.isSpecialized
&& originalClass(clazz).parentSymbolsIterator.exists(p => hasSpecializedParams(p) && !p.isTrait)
)
class SpecializationTransformer(unit: CompilationUnit) extends TypingTransformer(unit) {
override def transformUnit(unit: CompilationUnit): Unit = if (!settings.nospecialization) {
informProgress("specializing " + unit)
try {
exitingSpecialize(super.transformUnit(unit))
} catch {
case te: TypeError =>
reporter.error(te.pos, te.msg)
}
}
/** Map a specializable method to its rhs, when not deferred. */
val body = new mutable.AnyRefMap[Symbol, Tree]()
/** Map a specializable method to its value parameter symbols. */
val parameters = new mutable.AnyRefMap[Symbol, List[Symbol]]()
/** Collect method bodies that are concrete specialized methods.
*/
class CollectMethodBodies extends InternalTraverser {
override def traverse(tree: Tree) = tree match {
case DefDef(_, _, _, vparams :: Nil, _, rhs) =>
if (concreteSpecMethods(tree.symbol) || tree.symbol.isConstructor) {
// debuglog("!!! adding body of a defdef %s, symbol %s: %s".format(tree, tree.symbol, rhs))
body(tree.symbol) = rhs
// body(tree.symbol) = tree // whole method
parameters(tree.symbol) = vparams.map(_.symbol)
concreteSpecMethods -= tree.symbol
} // no need to descend further down inside method bodies
case ValDef(mods, name, tpt, rhs) if concreteSpecMethods(tree.symbol) =>
body(tree.symbol) = rhs
// log("!!! adding body of a valdef " + tree.symbol + ": " + rhs)
//super.traverse(tree)
case _ =>
tree.traverse(this)
}
}
def doesConform(origSymbol: Symbol, treeType: Type, memberType: Type, env: TypeEnv) = {
(treeType =:= memberType) || { // anyref specialization
memberType match {
case PolyType(_, resTpe) =>
debuglog(s"Conformance for anyref - polytype with result type: $resTpe and $treeType\\nOrig. sym.: $origSymbol")
try {
val e = unify(origSymbol.tpe, memberType, emptyEnv, strict = true, tparams = false)
debuglog(s"obtained env: $e")
e.keySet == env.keySet
} catch {
case _: Throwable =>
debuglog("Could not unify.")
false
}
case _ => false
}
}
}
override def transform(tree: Tree): Tree = {
val symbol = tree.symbol
/* The specialized symbol of 'tree.symbol' for tree.tpe, if there is one */
def specSym(qual: Tree): Symbol = {
val env = unify(symbol.tpe, tree.tpe, emptyEnv, strict = false, tparams = false)
def isMatch(member: Symbol) = {
val memberType = qual.tpe memberType member
val residualTreeType = tree match {
case TypeApply(fun, targs) if fun.symbol == symbol =>
// scala/bug#6308 Handle methods with only some type parameters specialized.
// drop the specialized type parameters from the PolyType, and
// substitute in the type environment.
val GenPolyType(tparams, tpe) = fun.tpe
val (from, to) = env.toList.unzip
val residualTParams = tparams.filterNot(env.contains)
GenPolyType(residualTParams, tpe).substituteTypes(from, to)
case _ => tree.tpe
}
(
doesConform(symbol, residualTreeType, memberType, env)
&& TypeEnv.includes(typeEnv(member), env)
)
}
if (env.isEmpty) NoSymbol
else qual.tpe member specializedName(symbol, env) suchThat isMatch
}
def matchingSymbolInPrefix(pre: Type, member: Symbol, env: TypeEnv): Symbol = {
pre member specializedName(member, env) suchThat (_.tpe matches subst(env, member.tpe))
}
def transformSelect(sel: Select) = {
val Select(qual, name) = sel
debuglog(s"specializing Select(sym=${symbol.defString}, tree.tpe=${tree.tpe})")
val qual1 = transform(qual)
def copySelect = treeCopy.Select(tree, qual1, name)
def newSelect(member: Symbol) = atPos(tree.pos)(Select(qual1, member))
def typedOp(member: Symbol) = localTyper typedOperator newSelect(member)
def typedTree(member: Symbol) = localTyper typed newSelect(member)
val ignoreEnv = specializedTypeVars(symbol.info).isEmpty || name == nme.CONSTRUCTOR
if (ignoreEnv) overloads(symbol) find (_ matchesSym symbol) match {
case Some(Overload(member, _)) => typedOp(member)
case _ => copySelect
}
else {
val env = unify(symbol.tpe, tree.tpe, emptyEnv, strict = false, tparams = false)
overloads(symbol) find (_ matchesEnv env) match {
case Some(Overload(member, _)) => typedOp(member)
case _ =>
matchingSymbolInPrefix(qual1.tpe, symbol, env) match {
case NoSymbol => copySelect
case member if member.isMethod => typedOp(member)
case member => typedTree(member)
}
}
}
}
/* Computes residual type parameters after rewiring, like "String" in the following example:
* {{{
* def specMe[@specialized T, U](t: T, u: U) = ???
* specMe[Int, String](1, "2") => specMe\\$mIc\\$sp[String](1, "2")
* }}}
*/
def computeResidualTypeVars(baseTree: Tree, specMember: Symbol, specTree: Tree, baseTargs: List[Tree], env: TypeEnv): Tree = {
val residualTargs = symbol.info.typeParams zip baseTargs collect {
case (tvar, targ) if !env.contains(tvar) || !isPrimitiveValueClass(env(tvar).typeSymbol) => targ
}
ifDebug(assert(residualTargs.sizeCompare(specMember.info.typeParams) == 0,
"residual: %s, tparams: %s, env: %s".format(residualTargs, specMember.info.typeParams, env))
)
val tree1 = gen.mkTypeApply(specTree, residualTargs)
debuglog(s"rewrote $tree to $tree1")
localTyper.typedOperator(atPos(tree.pos)(tree1)) // being polymorphic, it must be a method
}
curTree = tree
tree match {
case Apply(Select(New(tpt), nme.CONSTRUCTOR), args) =>
// OPT: avoid ObjectRef due to capture of patmat var in by-name expression
val tpt1 = tpt
val args1 = args
debuglog(s"Attempting to specialize new $tpt1(${args1.mkString(", ")})")
val found = specializedType(tpt.tpe)
if (found.typeSymbol ne tpt.tpe.typeSymbol) { // the ctor can be specialized
val inst = New(found, transformTrees(args): _*)
localTyper.typedPos(tree.pos)(inst)
}
else
super.transform(tree)
case Apply(sel @ Select(sup @ Super(qual, name), name1), args) if hasNewParents(sup) =>
val sup1 = Super(qual, name) setPos sup.pos
val tree1 = Apply(Select(sup1, name1) setPos sel.pos, transformTrees(args))
val res = localTyper.typedPos(tree.pos)(tree1)
debuglog(s"retyping call to super, from: $symbol to ${res.symbol}")
res
// This rewires calls to specialized methods defined in a class (which have a receiver)
// class C {
// def foo[@specialized T](t: T): T = t
// C.this.foo(3) // TypeApply(Select(This(C), foo), List(Int)) => C.this.foo$mIc$sp(3)
// }
case TypeApply(sel @ Select(qual, name), targs)
if (specializedTypeVars(symbol.info).nonEmpty && name != nme.CONSTRUCTOR) =>
debuglog("checking typeapp for rerouting: " + tree + " with sym.tpe: " + symbol.tpe + " tree.tpe: " + tree.tpe)
val qual1 = transform(qual)
log(">>> TypeApply: " + tree + ", qual1: " + qual1)
specSym(qual1) match {
case NoSymbol =>
// See pos/exponential-spec.scala - can't call transform on the whole tree again.
treeCopy.TypeApply(tree, treeCopy.Select(sel, qual1, name), transformTrees(targs))
case specMember =>
debuglog("found " + specMember.fullName)
val targs1 = targs // OPT: avoid ObjectRef due to capture of patmat var in by-name expression
ifDebug(assert(symbol.info.typeParams.sizeCompare(targs1) == 0, "" + symbol.info.typeParams + " / " + targs))
val env = typeEnv(specMember)
computeResidualTypeVars(tree, specMember, gen.mkAttributedSelect(qual1, specMember), targs, env)
}
// This rewires calls to specialized methods defined in the local scope. For example:
// def outerMethod = {
// def foo[@specialized T](t: T): T = t
// foo(3) // TypeApply(Ident(foo), List(Int)) => foo$mIc$sp(3)
// }
case TypeApply(sel @ Ident(name), targs) if name != nme.CONSTRUCTOR =>
val env = unify(symbol.tpe, tree.tpe, emptyEnv, strict = false, tparams = false)
if (env.isEmpty) super.transform(tree)
else {
overloads(symbol) find (_ matchesEnv env) match {
case Some(Overload(specMember, _)) => computeResidualTypeVars(tree, specMember, Ident(specMember), targs, env)
case _ => super.transform(tree)
}
}
case Select(Super(_, _), _) if illegalSpecializedInheritance(currentClass) =>
val pos = tree.pos
debuglog(pos.source.file.name+":"+pos.line+": not specializing call to super inside illegal specialized inheritance class.\\n" + pos.lineContent)
tree
case sel @ Select(_, _) =>
transformSelect(sel)
case PackageDef(pid, stats) =>
tree.symbol.info // make sure specializations have been performed
atOwner(tree, symbol) {
val specMembers = implSpecClasses(stats) map localTyper.typed
treeCopy.PackageDef(tree, pid, transformStats(stats ::: specMembers, symbol.moduleClass))
}
case Template(parents, self, body) =>
def transformTemplate = {
val specMembers = makeSpecializedMembers(tree.symbol.enclClass) ::: (implSpecClasses(body) map localTyper.typed)
if (!symbol.isPackageClass)
new CollectMethodBodies()(tree)
// currentOwner.info.parents.map(tpe => TypeTree(tpe) setPos parents.head.pos)
val parents1 = map2Conserve(parents, currentOwner.info.parents)((parent, tpe) =>
parent match {
case tt @ TypeTree() if tpe eq tt.tpe => tt
case _ => TypeTree(tpe) setPos parent.pos
}
)
treeCopy.Template(tree, parents1, self, atOwner(currentOwner)(transformTrees(body ::: specMembers)))
}
transformTemplate
case ddef @ DefDef(_, _, _, _, _, _) if info.isDefinedAt(symbol) =>
def transformDefDef(ddef: DefDef) = {
val vparamss = ddef.vparamss
if (symbol.isConstructor) {
val t = atOwner(symbol)(forwardCtorCall(tree.pos, gen.mkSuperInitCall, vparamss, symbol.owner))
def check(fwd: Tree): Unit = if (settings.unitSpecialization) {
val Apply(_, args) = fwd: @unchecked
args.zip(vparamss.flatten).find {
case (arg, param) if (arg.tpe =:= UnitTpe) && param.symbol.name.endsWith(nme.SPECIALIZED_SUFFIX) =>
val msg = "Class parameter is specialized for type Unit. Consider using `@specialized(Specializable.Arg)` instead."
runReporting.warning(arg.pos, msg, WarningCategory.LintUnitSpecialization, param.symbol.owner)
true
case _ => false
}: Unit
}
if (symbol.isPrimaryConstructor)
localTyper.typedPos(symbol.pos)(deriveDefDef(tree)(_ => Block(List(t), Literal(Constant(()))))).tap(_ => check(t))
else // duplicate the original constructor
duplicateBody(ddef, info(symbol).target)
}
else info(symbol) match {
case Implementation(target) =>
assert(body.isDefinedAt(target), "sym: " + symbol.fullName + " target: " + target.fullName)
// we have an rhs, specialize it
val tree1 = duplicateBody(ddef, target)
debuglog("implementation: " + tree1)
deriveDefDef(tree1)(transform)
case NormalizedMember(target) =>
logResult("constraints")(satisfiabilityConstraints(typeEnv(symbol))) match {
case Some(constraint) if !target.isDeferred =>
// we have an rhs, specialize it
val tree1 = duplicateBody(ddef, target, constraint)
debuglog("implementation: " + tree1)
deriveDefDef(tree1)(transform)
case _ =>
deriveDefDef(tree)(_ => localTyper typed gen.mkThrowNewRuntimeException("Fatal error in code generation: this should never be called."))
}
case SpecialOverride(target) =>
assert(body.isDefinedAt(target), "sym: " + symbol.fullName + " target: " + target.fullName)
//debuglog("moving implementation, body of target " + target + ": " + body(target))
log("%s is param accessor? %b".format(ddef.symbol, ddef.symbol.isParamAccessor))
// we have an rhs, specialize it
val tree1 = addBody(ddef, target)
(new ChangeOwnerTraverser(target, tree1.symbol))(tree1.rhs)
debuglog("changed owners, now: " + tree1)
deriveDefDef(tree1)(transform)
case SpecialOverload(original, env) =>
debuglog("completing specialized " + symbol.fullName + " calling " + original)
debuglog("special overload " + original + " -> " + env)
val t = DefDef(symbol, { vparamss: List[List[Symbol]] =>
val fun = Apply(Select(This(symbol.owner), original),
makeArguments(original, vparamss.head))
debuglog("inside defdef: " + symbol + "; type: " + symbol.tpe + "; owner: " + symbol.owner)
gen.maybeMkAsInstanceOf(fun,
symbol.owner.thisType.memberType(symbol).finalResultType,
symbol.owner.thisType.memberType(original).finalResultType)
})
debuglog("created special overload tree " + t)
debuglog("created " + t)
localTyper.typed(t)
case fwd @ Forward(_) =>
debuglog("forward: " + fwd + ", " + ddef)
val rhs1 = forwardCall(tree.pos, gen.mkAttributedRef(symbol.owner.thisType, fwd.target), vparamss)
debuglog("-->d completed forwarder to specialized overload: " + fwd.target + ": " + rhs1)
localTyper.typed(deriveDefDef(tree)(_ => rhs1))
case SpecializedAccessor(target) =>
val rhs1 = if (symbol.isGetter)
gen.mkAttributedRef(target)
else
Assign(gen.mkAttributedRef(target), Ident(vparamss.head.head.symbol))
debuglog("specialized accessor: " + target + " -> " + rhs1)
localTyper.typed(deriveDefDef(tree)(_ => rhs1))
case Abstract(targ) =>
debuglog("abstract: " + targ)
localTyper.typed(deriveDefDef(tree)(rhs => rhs))
case SpecialSuperAccessor(_) => // same as abstract method
debuglog(s"special super accessor: $tree with $symbol -> ${symbol.alias} in ${symbol.alias.owner} (in $currentClass)")
localTyper.typed(deriveDefDef(tree)(rhs => rhs))
case x @ SpecializedInnerClass(_, _) => throw new MatchError(x) // ?!?
}
} // end transformDefDef
expandInnerNormalizedMembers(transformDefDef(ddef))
case ddef @ DefDef(_, _, _, _, _, _) =>
val tree1 = expandInnerNormalizedMembers(tree)
super.transform(tree1)
case ValDef(_, _, _, _) if symbol.hasFlag(SPECIALIZED) && !symbol.isParamAccessor =>
def transformValDef = {
assert(body.isDefinedAt(symbol.alias), body)
val tree1 = deriveValDef(tree)(_ => body(symbol.alias).duplicate)
debuglog("now typing: " + tree1 + " in " + tree.symbol.owner.fullName)
val d = new SpecializationDuplicator(emptyEnv)
val newValDef = d.retyped(
localTyper.context1.asInstanceOf[d.Context],
tree1,
symbol.alias.enclClass,
symbol.enclClass,
typeEnv(symbol.alias) ++ typeEnv(tree.symbol)
)
deriveValDef(newValDef)(transform)
}
transformValDef
case _ =>
super.transform(tree)
}
}
/**
* This performs method specialization inside a scope other than a {class, trait, object}: could be another method
* or a value. This specialization is much simpler, since there is no need to record the new members in the class
     * signature; their signatures are only visible locally. It works according to the usual logic:
* - we use normalizeMember to create the specialized symbols
* - we leave DefDef stubs in the tree that are later filled in by tree duplication and adaptation
* @see duplicateBody
*/
private def expandInnerNormalizedMembers(tree: Tree) = tree match {
case ddef @ DefDef(_, _, _, vparams :: Nil, _, rhs)
if ddef.symbol.owner.isMethod &&
specializedTypeVars(ddef.symbol.info).nonEmpty &&
!ddef.symbol.hasFlag(SPECIALIZED) =>
val sym = ddef.symbol
val owner = sym.owner
val norm = normalizeMember(owner, sym, emptyEnv)
if (norm.lengthIs > 1) {
// record the body for duplication
body(sym) = rhs
parameters(sym) = vparams.map(_.symbol)
// to avoid revisiting the member, we can set the SPECIALIZED
// flag. nobody has to see this anyway :)
sym.setFlag(SPECIALIZED)
// create empty bodies for specializations
localTyper.typed(Block(norm.tail.map(sym => DefDef(sym, { vparamss: List[List[Symbol]] => EmptyTree })), ddef))
} else
tree
case _ =>
tree
}
    /** Duplicate the body of the method symbol `source` into the given method definition `tree`.
*
* Knowing that the method can be invoked only in the `castmap` type environment,
     *  this method will insert casts for all the expressions of types mapped in the
* `castmap`.
*/
private def duplicateBody(tree: DefDef, source: Symbol, castmap: TypeEnv = emptyEnv) = {
val symbol = tree.symbol
val meth = addBody(tree, source)
val d = new SpecializationDuplicator(castmap)
debuglog("-->d DUPLICATING: " + meth)
d.retyped(
localTyper.context1.asInstanceOf[d.Context],
meth,
source.enclClass,
symbol.enclClass,
typeEnv(source) ++ typeEnv(symbol)
)
}
/** Put the body of 'source' as the right hand side of the method 'tree'.
* The destination method gets fresh symbols for type and value parameters,
* and the body is updated to the new symbols, and owners adjusted accordingly.
* However, if the same source tree is used in more than one place, full re-typing
* is necessary. @see method duplicateBody
*/
private def addBody(tree: DefDef, source: Symbol): DefDef = {
val symbol = tree.symbol
debuglog("specializing body of" + symbol.defString)
val DefDef(_, _, tparams, vparams :: Nil, tpt, _) = tree: @unchecked
val env = typeEnv(symbol)
val srcVparams = parameters(source)
val srcTparams = source.typeParams.filter(tparam => !env.contains(tparam) || !isPrimitiveValueType(env(tparam)))
if (settings.isDebug && (srcTparams.nonEmpty || symbol.typeParams.nonEmpty))
debuglog("substituting " + srcTparams + " for " + symbol.typeParams)
// skolemize type parameters
val oldTparams = tparams.map(_.symbol)
val newTparams = deriveFreshSkolems(oldTparams)
map2(tparams, newTparams)(_ setSymbol _)
// create fresh symbols for value parameters to hold the skolem types
val oldVparams = vparams.map(_.symbol)
val newVparams = cloneSymbolsAtOwnerAndModify(oldVparams, symbol, _.substSym(oldTparams, newTparams))
val srcParams = srcVparams ::: srcTparams
val oldParams = oldVparams ::: oldTparams
val newParams = newVparams ::: newTparams
// replace value and type parameters of the old method with the new ones
// log("Adding body for " + tree.symbol + " - origtparams: " + origtparams + "; tparams: " + tparams)
// log("Type vars of: " + source + ": " + source.typeParams)
// log("Type env of: " + tree.symbol + ": " + boundTvars)
// log("newtparams: " + newtparams)
// don't make private fields public
val substituter = new ImplementationAdapter(srcParams, newParams, source.enclClass, false)
val newRhs = substituter(body(source).duplicate)
tpt.modifyType(_.substSym(oldParams, newParams))
copyDefDef(tree)(vparamss = newVparams.map(ValDef.apply) :: Nil, rhs = newRhs)
}
/** Create trees for specialized members of 'sClass', based on the
* symbols that are already there.
*/
private def makeSpecializedMembers(sClass: Symbol): List[Tree] = {
// add special overrides first
// if (!specializedClass.hasFlag(SPECIALIZED))
// for (m <- specialOverrides(specializedClass)) specializedClass.info.decls.enter(m)
val mbrs = new mutable.ListBuffer[Tree]
var hasSpecializedFields = false
for (m <- sClass.info.decls
if m.hasFlag(SPECIALIZED)
&& (m.sourceFile ne null)
&& satisfiable(typeEnv(m), !sClass.hasFlag(SPECIALIZED))) {
debuglog("creating tree for " + m.fullName)
if (m.isMethod) {
if (info(m).target.hasAccessorFlag) hasSpecializedFields = true
if (m.isClassConstructor) {
val origParams = parameters(info(m).target)
val vparams = (
map2(m.info.paramTypes, origParams)((tp, sym) =>
m.newValue(specializedName(sym, typeEnv(sClass)), sym.pos, sym.flags) setInfo tp
)
)
// param accessors for private members (the others are inherited from the generic class)
if (m.isPrimaryConstructor) {
for (param <- vparams ; if sClass.info.nonPrivateMember(param.name) == NoSymbol) {
val acc = param.cloneSymbol(sClass, param.flags | PARAMACCESSOR | PRIVATE)
sClass.info.decls.enter(acc)
mbrs += ValDef(acc, EmptyTree).setType(NoType).setPos(m.pos)
}
}
// ctor
mbrs += DefDef(m, Modifiers(m.flags), mmap(List(vparams))(ValDef.apply), EmptyTree)
} else {
mbrs += DefDef(m, { paramss: List[List[Symbol]] => EmptyTree })
}
} else if (m.isValue) {
mbrs += ValDef(m).setType(NoType)
} else if (m.isClass) {
// mbrs +=
// ClassDef(m, Template(m.info.parents map TypeTree, noSelfType, List())
// .setSymbol(m.newLocalDummy(m.pos)))
// log("created synthetic class: " + m.fullName)
}
}
if (hasSpecializedFields) {
val isSpecializedInstance = (sClass hasFlag SPECIALIZED) || sClass.parentSymbolsIterator.exists(_ hasFlag SPECIALIZED)
val sym = sClass.newMethod(nme.SPECIALIZED_INSTANCE, sClass.pos) setInfoAndEnter MethodType(Nil, BooleanTpe)
mbrs += DefDef(sym, Literal(Constant(isSpecializedInstance)).setType(BooleanTpe)).setType(NoType)
}
mbrs.toList
}
/** Create specialized class definitions */
def implSpecClasses(trees: List[Tree]): List[Tree] = {
trees flatMap {
case tree @ ClassDef(_, _, _, impl) =>
tree.symbol.info // force specialization
specializedClass.getOrNull(tree.symbol) match {
case null => Nil
case map =>
val sym1 = tree.symbol
map.iterator.map {
case (env, specCls) =>
debuglog("created synthetic class: " + specCls + " of " + sym1 + " in " + pp(env))
val parents = specCls.info.parents.map(TypeTree)
ClassDef(specCls, atPos(impl.pos)(Template(parents, noSelfType, List()))
.setSymbol(specCls.newLocalDummy(sym1.pos))) setPos tree.pos
}.toList
}
case _ => Nil
} sortBy (_.name.decoded)
}
}
private def forwardCall(pos: scala.reflect.internal.util.Position, receiver: Tree, paramss: List[List[ValDef]]): Tree = {
val argss = mmap(paramss)(x => Ident(x.symbol))
atPos(pos) { argss.foldLeft(receiver)(Apply.apply) }
}
/** Forward to the generic class constructor. If the current class initializes
* specialized fields corresponding to parameters, it passes null to the superclass
* constructor.
*
* For example:
* {{{
* case class Tuple2[T, U](x: T, y: U)
*
* class Tuple2\\$II {
* val _x\\$I: Int = ..
* def x = _x\\$I
* // same for y
* def this(x: Int, y: Int) {
* super.this(null.asInstanceOf[Int], null.asInstanceOf[Int])
* }
* }
* }}}
*
* Note that erasure first transforms `null.asInstanceOf[Int]` to `unbox(null)`, which is 0.
* Then it adapts the argument `unbox(null)` of type Int to the erased parameter type of Tuple2,
* which is Object, so it inserts a `box` call and we get `box(unbox(null))`, which is
* `new Integer(0)` (not `null`).
*
* However it does not make sense to create an Integer instance to be stored in the generic field
* of the superclass: that field is never used. Therefore we mark the `null` tree with the
* [[SpecializedSuperConstructorCallArgument]] attachment and special-case erasure to replace
* `box(unbox(null))` by `null` in this case.
*/
private def forwardCtorCall(pos: scala.reflect.internal.util.Position, receiver: Tree, paramss: List[List[ValDef]], clazz: Symbol): Tree = {
log(s"forwardCtorCall($pos, $receiver, $paramss, $clazz)")
/* A constructor parameter `f` initializes a specialized field
* iff:
* - it is specialized itself
* - there is a getter for the original (non-specialized) field in the same class
* - there is a getter for the specialized field in the same class
*/
def initializesSpecializedField(f: Symbol) = (
f.name.endsWith(nme.SPECIALIZED_SUFFIX)
&& clazz.info.member(f.unexpandedName).isPublic
&& clazz.info.decl(f.name).suchThat(_.isGetter) != NoSymbol
)
val argss = mmap(paramss)(x =>
if (initializesSpecializedField(x.symbol))
gen.mkAsInstanceOf(Literal(Constant(null)).updateAttachment(SpecializedSuperConstructorCallArgument), x.symbol.tpe)
else
Ident(x.symbol)
)
atPos(pos) { argss.foldLeft(receiver)(Apply.apply) }
}
/** Add method m to the set of symbols for which we need an implementation tree
* in the tree transformer.
*
* @note This field is part of the specializeTypes subcomponent, so any symbols
   *        added here are not garbage collected at the end of a compiler run!
*/
def addConcreteSpecMethod(m: Symbol): Unit = {
if (currentRun.compiles(m)) concreteSpecMethods += m
}
private def makeArguments(fun: Symbol, vparams: List[Symbol]): List[Tree] = (
//! TODO: make sure the param types are seen from the right prefix
map2(fun.info.paramTypes, vparams)((tp, arg) => gen.maybeMkAsInstanceOf(Ident(arg), tp, arg.tpe))
)
object SpecializedSuperConstructorCallArgument
}
| scala/scala | src/compiler/scala/tools/nsc/transform/SpecializeTypes.scala | Scala | apache-2.0 | 96,939 |
/*
* Copyright 2014 Intelix Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package agent.flavors.files
import java.io.File
import com.typesafe.scalalogging.LazyLogging
import scala.slick.jdbc.meta.MTable
trait ResourceCatalog {
def update(entities: List[IndexedEntity]): Boolean
def indexByResourceId(identificator: FileResourceIdentificator): Option[IndexedEntity]
def resourceIdByIdx(index: ResourceIndex): Option[IndexedEntity]
def last(): Option[ResourceIndex]
def head(): Option[ResourceIndex]
def nextAfter(index: ResourceIndex): Option[ResourceIndex]
def close(): Unit
}
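// Usage sketch (illustrative values only; relies on the IndexedEntity / ResourceIndex /
// FileResourceIdentificator constructors used further below in this file):
//   val catalog: ResourceCatalog = new InMemoryResourceCatalog
//   catalog.update(List(IndexedEntity(
//     ResourceIndex(1L, 1L),
//     FileResourceIdentificator("/var/log", "app.log", 0L, 1024L))))
//   catalog.head()                          // Some(ResourceIndex(1, 1))
//   catalog.nextAfter(ResourceIndex(1, 1))  // None - no later resource is known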
class H2ResourceCatalog(tapId: Long, dir: File, fileName: String = ".idx") extends InMemoryResourceCatalog {
import scala.slick.driver.H2Driver.simple._
private def dbURL(file: File): String = "jdbc:h2:" + file.getAbsolutePath + "/" + fileName
private lazy val db = {
require(dir.exists() && dir.isDirectory)
    val db = Database.forURL(dbURL(dir), driver = "org.h2.Driver")
logger.info(s"Initialised db connection: $db at $dir")
db withSession {
implicit session =>
if (MTable.getTables("findex").list.isEmpty) {
logger.info("Creating findex table")
findexes.ddl.create
}
}
db
}
class FileIndex(tag: Tag) extends Table[(Long, Long, Long, String, String, Long, Long)](tag, "findex") {
def * = (tapId, seed, resourceId, dir, name, createdTimestamp, sizeNow)
def tapId = column[Long]("tapId")
def seed = column[Long]("seed")
def resourceId = column[Long]("resourceId")
def dir = column[String]("dir")
def name = column[String]("name")
def createdTimestamp = column[Long]("createdTimestamp")
def sizeNow = column[Long]("sizeNow")
}
private val findexes = TableQuery[FileIndex]
load()
private def load(): Unit = {
val list = db withSession {
implicit session =>
(for {
entity <- findexes if entity.tapId === tapId
} yield entity).list
}
super.update(list.map {
case (_, seed, resourceId, dirName, name, createdTimestamp, sizeNow) =>
IndexedEntity(
ResourceIndex(seed, resourceId),
FileResourceIdentificator(dirName, name, createdTimestamp, sizeNow))
})
}
private def persist(entities: List[IndexedEntity]) = {
db withSession {
implicit session =>
(for {
entity <- findexes if entity.tapId === tapId
} yield entity).delete
entities.foreach { entity =>
findexes += (tapId, entity.idx.seed, entity.idx.resourceId, entity.id.dir, entity.id.name, entity.id.createdTimestamp, entity.id.sizeNow)
}
logger.debug(s"Persisted $entities")
}
}
override def update(entities: List[IndexedEntity]): Boolean = {
if (entities.map(_.idx) != memory.map(_.idx)) persist(entities)
super.update(entities)
}
override def close() = {
persist(memory)
super.close()
}
}
class InMemoryResourceCatalog extends ResourceCatalog with LazyLogging {
var memory = List[IndexedEntity]()
override def update(entities: List[IndexedEntity]): Boolean = {
logger.debug(s"Catalog state: $entities")
if (memory == entities)
false
else {
memory = entities
true
}
}
override def indexByResourceId(identificator: FileResourceIdentificator): Option[IndexedEntity] = {
memory
.find(_.id.same(identificator))
}
override def resourceIdByIdx(index: ResourceIndex): Option[IndexedEntity] = {
memory
.find(_.idx == index) orElse memory.headOption
}
override def last(): Option[ResourceIndex] = {
val result = memory.lastOption.map(_.idx)
logger.debug(s"last resource = $result")
result
}
override def head(): Option[ResourceIndex] = {
val result = memory.headOption.map(_.idx)
logger.debug(s"first resource = $result")
result
}
override def nextAfter(index: ResourceIndex): Option[ResourceIndex] = {
val result = memory.dropWhile(_.idx != index) match {
case current :: next :: _ => Some(next.idx)
case _ => None
}
logger.debug(s"next after $index = $result")
result
}
override def close(): Unit = {}
}
| mglukh/ehub | modules/core/src/main/scala/agent/flavors/files/ResourceCatalog.scala | Scala | apache-2.0 | 4,727 |
package ohnosequences.db.rna16s.test
import org.scalatest.FunSuite
import ohnosequences.files.read
import ohnosequences.faster.{FASTA, FASTAEntry}
import ohnosequences.db.rna16s, rna16s.Version
import org.scalatest.EitherValues._
class Mappings extends FunSuite {
test("There is a valid mapping to taxon ID for each sequence in the database") {
Version.all foreach { version =>
val sequencesFile = data.sequences(version).right.value
val mappingsFile = data.mappings(version).right.value
val maybeWrong = read
.withLines(sequencesFile) { lines =>
val fasta = FASTA.parse(lines.buffered)
val mappings = read
.withLines(mappingsFile)(rna16s.io.deserializeMappings)
.right
.value
fasta.collectFirst {
case FASTAEntry(header, _) if (!mappings.isDefinedAt(header.id)) =>
header.id
}
}
.right
.value
maybeWrong.map { id =>
fail(s"$id has no mapping to taxon ID in version $version")
}
}
}
}
| ohnosequences/db.rna16s | src/test/scala/mappings.scala | Scala | agpl-3.0 | 1,077 |
package com.wallace.demo.rest.sever.demo.services.app.common
import spray.json._
/**
* Created by 10192057 on 2016/6/17.
*/
case class Person(name: List[String], age: List[Int])
case class NewPerson(name: String, age: Int)
object NewPersonProtocol extends DefaultJsonProtocol {
implicit object NewPersonFormat extends RootJsonFormat[NewPerson] {
override def write(person: NewPerson): JsValue = JsObject("name" -> JsString(person.name), "age" -> JsNumber(person.age))
    override def read(json: JsValue): NewPerson = json.asJsObject.getFields("name", "age") match {
      case Seq(JsString(name), JsNumber(age)) => NewPerson(name, age.toInt)
      case _ => deserializationError("NewPerson expected")
    }
}
}
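// Note: for a flat case class such as NewPerson, spray-json's `jsonFormat2(NewPerson)` would
// derive an equivalent format automatically; the explicit RootJsonFormat above spells out the
// read/write pair for illustration.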
object JsonTestDemo {
def main(args: Array[String]): Unit = {
import NewPersonProtocol._
val newP = NewPerson("wallace", 23)
val json = newP.toJson
println(json.prettyPrint)
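    // Illustrative round-trip: parse the JSON back into a NewPerson via the same implicit format.
    println(json.convertTo[NewPerson])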
}
}
| LeagueForHacker/Rest-Sever-Demo | src/main/scala/com/wallace/demo/rest/sever/demo/services/app/common/Person.scala | Scala | mit | 904 |
package com.nielsen.ecom.wordseg
import akka.actor.Actor
import org.ansj.splitWord.analysis._
//#worker
class SegWorker extends Actor {
def receive = {
case word: String =>
val result = ToAnalysis.parse(word).toString()
sender() ! result
}
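  // Illustrative usage (assumes an ActorSystem, Props and the ask pattern are in scope):
  //   val worker = system.actorOf(Props[SegWorker])
  //   worker ? "text to segment"  // replies with ToAnalysis.parse(...).toString()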
}
| adrianwkj/web_akka | src/main/scala/com/nielsen/ecom/wordseg/Worker.scala | Scala | cc0-1.0 | 263
// lchannels - session programming in Scala
// Copyright (c) 2016, Alceste Scalas and Imperial College London
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
/** @author Alceste Scalas <[email protected]> */
package lchannels.examples.game.b
import lchannels._
import lchannels.examples.game.protocol.binary
import lchannels.examples.game.protocol.b._
import scala.concurrent.duration._
import com.typesafe.scalalogging.StrictLogging
class Client(name: String, s: In[binary.PlayB], wait: Duration)
(implicit timeout: Duration)
extends Runnable with StrictLogging {
private def logTrace(msg: String) = logger.trace(f"${name}: ${msg}")
private def logDebug(msg: String) = logger.debug(f"${name}: ${msg}")
private def logInfo(msg: String) = logger.info(f"${name}: ${msg}")
private def logWarn(msg: String) = logger.warn(f"${name}: ${msg}")
private def logError(msg: String) = logger.error(f"${name}: ${msg}")
// Own thread
private val thread = { val t = new Thread(this); t.start(); t }
def join() = thread.join()
override def run() = {
val c = MPPlayB(s) // Wrap the channel in a multiparty session obj
logInfo("Started. Waiting for multiparty session...")
val game = c.receive.p
logInfo("...done. Sending name to C, and waiting for A's info...")
val info = game.send(InfoBC(name)).receive
logInfo(f"...got InfoCA(${info.p}). Starting game loop.")
loop(info.cont)
}
@scala.annotation.tailrec
private def loop(g: MPMov1ABOrMov2AB): Unit = {
logInfo(f"Delay: ${wait}")
Thread.sleep(wait.toMillis)
logInfo("Waiting for A's move...")
g.receive match {
case Mov1AB(p, cont) => {
logInfo(f"Got Mov1AB(${p}), sending Mov1BC(${p}) and looping")
val g2 = cont.send(Mov1BC(p))
loop(g2)
}
case Mov2AB(p, cont) => {
logInfo(f"Got Mov2AB(${p}), sending Mov2BC(${p}) and looping")
val g2 = cont.send(Mov2BC(p))
loop(g2)
}
}
}
}
object Actor extends App {
// Helper method to ease external invocation
def run() = main(Array())
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import com.typesafe.config.ConfigFactory
import akka.actor.ActorSystem
import binary.actor.{ConnectB => Connect}
val config = ConfigFactory.load() // Loads resources/application.conf
implicit val as = ActorSystem("GameClientBSys",
config = Some(config.getConfig("GameClientBSys")),
defaultExecutionContext = Some(global))
ActorChannel.setDefaultEC(global)
ActorChannel.setDefaultAS(as)
implicit val timeout = 60.seconds
val serverPath = "akka.tcp://[email protected]:31340/user/b"
println(f"[*] Connecting to ${serverPath}...")
val c: Out[Connect] = ActorOut[Connect](serverPath)
val c2 = c !! Connect()_
val client = new Client("Bob", c2, 2.seconds)(30.seconds)
client.join()
as.terminate()
}
| scribble/scribble.github.io | src/main/jbake/assets/docs/lchannels/examples/src/main/scala/lchannels/examples/game/ClientB.scala | Scala | apache-2.0 | 4,296 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.magic
import java.io.OutputStream
import akka.actor.{ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import com.ibm.spark.interpreter.{ExecuteOutput, ExecuteError}
import com.ibm.spark.magic.{MagicTemplate, MagicLoader}
import com.typesafe.config.ConfigFactory
import org.mockito.Matchers.{eq => mockEq, _}
import org.mockito.Mockito._
import org.scalatest.mock.MockitoSugar
import org.scalatest.{FunSpecLike, Matchers}
import scala.concurrent.duration._
object MagicManagerSpec {
val config = """
akka {
loglevel = "WARNING"
}"""
}
class MagicManagerSpec extends TestKit(
ActorSystem(
"MagicManagerSpec",
ConfigFactory.parseString(MagicManagerSpec.config)
)
) with ImplicitSender with FunSpecLike with Matchers with MockitoSugar {
describe("MagicManager") {
describe("#receive") {
describe("with message type ValidateMagicMessage") {
it("should return false if the code does not parse as magic") {
val mockMagicLoader = mock[MagicLoader]
val magicManager =
system.actorOf(Props(classOf[MagicManager], mockMagicLoader))
magicManager ! ValidateMagicMessage("notAMagic")
expectMsg(200.milliseconds, false)
}
it("should return true if code parses as line magic") {
val mockMagicLoader = mock[MagicLoader]
val magicManager =
system.actorOf(Props(classOf[MagicManager], mockMagicLoader))
magicManager ! ValidateMagicMessage("%lineMagic asdfasdf")
expectMsg(200.milliseconds, true)
}
it("should return true if code parses as cell magic") {
val mockMagicLoader = mock[MagicLoader]
val magicManager =
system.actorOf(Props(classOf[MagicManager], mockMagicLoader))
magicManager ! ValidateMagicMessage("%%cellMagic asdflj\\nasdf\\n")
expectMsg(200.milliseconds, true)
}
}
describe("with message type (ExecuteMagicMessage, OutputStream)") {
it("should return an error if the magic requested is not defined") {
val fakeMagicName = "myMagic"
val mockMagicLoader = mock[MagicLoader]
doReturn(false).when(mockMagicLoader).hasMagic(anyString())
val magicManager =
system.actorOf(Props(classOf[MagicManager], mockMagicLoader))
magicManager ! ((
ExecuteMagicMessage("%%" + fakeMagicName),
mock[OutputStream]
))
// Expect magic to not exist
expectMsg(200.milliseconds, Right(ExecuteError(
"Missing Magic",
s"Magic $fakeMagicName does not exist!",
List()
)))
}
it("should evaluate the magic if it exists and return the error if it fails") {
val fakeMagicName = "myBadMagic"
val fakeMagicReturn = new RuntimeException("EXPLOSION")
val mockMagic = mock[MagicTemplate]
doThrow(fakeMagicReturn).when(mockMagic).executeCell(any[Seq[String]])
val myMagicLoader = new MagicLoader() {
override protected def createMagicInstance(name: String) =
mockMagic
}
val magicManager =
system.actorOf(Props(classOf[MagicManager], myMagicLoader))
magicManager ! ((
ExecuteMagicMessage("%%" + fakeMagicName),
mock[OutputStream]
))
val result =
receiveOne(5.seconds)
.asInstanceOf[Either[ExecuteOutput, ExecuteError]]
result.right.get shouldBe an [ExecuteError]
}
it("should evaluate the magic if it exists and return the output if it succeeds") {
val fakeMagicName = "myMagic"
val fakeMagicReturn = Map()
val mockMagic = mock[MagicTemplate]
doReturn(fakeMagicReturn)
.when(mockMagic).executeCell(any[Seq[String]])
val myMagicLoader = new MagicLoader() {
override def hasMagic(name: String): Boolean = true
override protected def createMagicInstance(name: String) =
mockMagic
}
val magicManager =
system.actorOf(Props(classOf[MagicManager], myMagicLoader))
magicManager ! ((
ExecuteMagicMessage("%%" + fakeMagicName),
mock[OutputStream]
))
// TODO: Refactor timeout-based test to avoid incremental adjustment
expectMsg(3000.milliseconds, Left(fakeMagicReturn))
}
}
}
}
}
| bpburns/spark-kernel | kernel/src/test/scala/com/ibm/spark/kernel/protocol/v5/magic/MagicManagerSpec.scala | Scala | apache-2.0 | 5,178
package springtastrophe.trees
import org.scalatest.{Matchers, FlatSpec}
class JavaClassTest extends FlatSpec with Matchers {
"An annotation finder" should "not find any annotations on a class ... with no annotations" in {
(pending)
}
it should "find the annotations on a class ... with annotations" in {
(pending)
}
}
}
| hamishdickson/springtastrophe | src/test/scala/springtastrophe/trees/JavaClassTest.scala | Scala | mit | 337
package org.odfi.indesign.core.module.ui.www.external
import com.idyria.osi.wsb.webapp.localweb.LocalWebHTMLVIew
import com.idyria.osi.vui.html.Html
import com.idyria.osi.wsb.webapp.localweb.DefaultLocalWebHTMLBuilder
import com.idyria.osi.vui.html.Head
import com.idyria.osi.vui.html.HTMLNode
import org.w3c.dom.html.HTMLElement
trait ExternalBuilder extends LocalWebHTMLVIew with DefaultLocalWebHTMLBuilder {
def externalAdd(targetNode:HTMLNode[HTMLElement, Any]) : Unit = {
}
override def render: HTMLNode[HTMLElement, HTMLNode[HTMLElement, _]] = {
    // Let the main rendering chain happen
var result = super.render
// Add Scripts/Stylesheet depending on result
var targetNode = result match {
      // HTML: Look for Head; if none, add to result node
case h: Html[_, _] =>
h.children.find {
case n if (classOf[Head[HTMLElement, _]].isInstance(n)) => true
case _ => false
} match {
case Some(head) => head.asInstanceOf[Head[HTMLElement, _]]
case None => result
}
// Others: Add to result node
case _ => result
}
    // Add scripts
externalAdd(targetNode)
/*switchToNode(targetNode, {
stylesheet(new URI(s"${viewPath}/resources/semantic/semantic.min.css".noDoubleSlash)) {
}
script(new URI(s"${viewPath}/resources/semantic/semantic.min.js".noDoubleSlash)) {
}
})*/
// Return
result
}
}
| opendesignflow/indesign | indesign-wwwui/src/main/scala/org/odfi/indesign/core/module/ui/www/external/ExternalBuilder.scala | Scala | gpl-3.0 | 1,529
package au.com.feedbacker.controllers
import au.com.feedbacker.AllFixtures
import au.com.feedbacker.model.CredentialStatus.CredentialStatus
import au.com.feedbacker.model.{CredentialStatus, Person}
import org.scalatest.prop.PropertyChecks
import org.scalatestplus.play.PlaySpec
import play.api.mvc.Result
import play.api.test.FakeRequest
import org.scalacheck.Arbitrary.arbitrary
/**
* Created by lachlang on 25/02/2017.
*/
class SessionManagerSpec extends PlaySpec with AllFixtures with PropertyChecks {
val arbInvalidStatus = arbitrary[CredentialStatus].suchThat(_ != CredentialStatus.Active)
"SessionManager#hash" should {
"never return the value it is given" in {
forAll (minSuccessful(10)) { arbString:String =>
whenever (arbString.length >= 1 && arbString.length <= 100) {
val sessionManager = new SessionManager
sessionManager.hash(arbString) must not equal arbString
}
}
}
"never return two values from the same seed" in {
forAll (minSuccessful(10)) { arbString:String =>
whenever (arbString.length >= 1 && arbString.length <= 100) {
val sessionManager = new SessionManager
val hash1 = sessionManager.hash(arbString)
val hash2 = sessionManager.hash(arbString)
hash1 must not equal hash2
}
}
}
}
"SessionManager#validatePassword" should {
"always validate the correct password" in {
forAll (minSuccessful(3)) { (arbString:String) =>
whenever (arbString.length >= 8) {
val sessionManager = new SessionManager
val hash = sessionManager.hash(arbString)
sessionManager.validatePassword(arbString, hash) mustBe true
}
}
}
"never validate the incorrect password" in {
forAll (minSuccessful(3)) { (arbString1:String, arbString2:String) =>
whenever (arbString1 != arbString2) {
val sessionManager = new SessionManager
val hash = sessionManager.hash(arbString1)
sessionManager.validatePassword(arbString2, hash) mustBe false
}
}
}
}
"SessionManager#extractToken" should {
"return a valid session token when it exists" in {
forAll() { (sessionToken:SessionToken, result: Result) =>
val sessionManager = new SessionManager
sessionManager.signIn(sessionToken, result)
val sessionTokenResponse = sessionManager.extractToken(FakeRequest().withCookies(SessionManager.createSessionCookie(sessionToken.token)))
sessionTokenResponse mustEqual Some(sessionToken)
}
}
"return None for invalid session" in {
forAll() { (sessionToken: SessionToken) =>
val sessionManager = new SessionManager
val sessionTokenResponse = sessionManager.extractToken(FakeRequest().withCookies(SessionManager.createSessionCookie(sessionToken.token)))
sessionTokenResponse mustBe None
}
}
}
"SessionManager#initialiseToken" should {
"return None when the credentials are not active" in {
forAll(arbInvalidStatus, arbitrary[Person], arbitrary[String]) { (invalidStatus, example, password) =>
val person = example.copy(credentials = example.credentials.copy(status = invalidStatus))
whenever(person.credentials.status != CredentialStatus.Active && password.length > 0) {
val sessionManager = new SessionManager
val result = sessionManager.initialiseToken(person.setNewHash(sessionManager.hash(password)), password)
result mustBe None
}
}
}
"return None when the password is incorrect" in {
forAll() { (example:Person, password:String) =>
val person = example.copy(credentials = example.credentials.copy(status = CredentialStatus.Active))
val sessionManager = new SessionManager
val result = sessionManager.initialiseToken(person.setNewHash(sessionManager.hash(person.credentials.hash)),password)
result mustBe None
}
}
"create a session token" in {
forAll() { example:Person =>
val person = example.copy(credentials = example.credentials.copy(status = CredentialStatus.Active))
val sessionManager = new SessionManager
val result = sessionManager.initialiseToken(person.setNewHash(sessionManager.hash(person.credentials.hash)),person.credentials.hash)
result must not be None
}
}
}
"SessionManager#generateToken" should {
"generate a string of length 86" in {
val sessionManager = new SessionManager
val result = sessionManager.generateToken
result.length mustBe 86
}
"generate different strings from consecutive calls" in {
val sessionManager = new SessionManager
val result1 = sessionManager.generateToken
val result2 = sessionManager.generateToken
val result3 = sessionManager.generateToken
val result4 = sessionManager.generateToken
val result5 = sessionManager.generateToken
result1 must not equal result2
result1 must not equal result3
result1 must not equal result4
result1 must not equal result5
result2 must not equal result3
result2 must not equal result4
result2 must not equal result5
result3 must not equal result4
result3 must not equal result5
result4 must not equal result5
}
}
"SessionManager#signIn" should {
// cache is already checked by signout and extract token
"add the session cookie to the response" in {
forAll { (sessionToken: SessionToken, arbResult: Result) =>
val sessionManager = new SessionManager
val result = sessionManager.signIn(sessionToken, arbResult)
result.header.headers.getOrElse("Set-Cookie","") contains(sessionToken.token) mustBe true
}
}
}
"SessionManager#signOut" should {
"remove the session from the cache" in {
forAll { (sessionToken:SessionToken, arbResult:Result) =>
val sessionManager = new SessionManager
sessionManager.signIn(sessionToken, arbResult)
val signInResponse = sessionManager.extractToken(FakeRequest().withCookies(SessionManager.createSessionCookie(sessionToken.token)))
signInResponse mustEqual Some(sessionToken)
sessionManager.signOut(sessionToken, arbResult)
val signOutResponse = sessionManager.extractToken(FakeRequest().withCookies(SessionManager.createSessionCookie(sessionToken.token)))
signOutResponse mustBe None
}
}
"remove the session cookie from the response" in {
forAll { (sessionToken: SessionToken, arbResult: Result) =>
val sessionManager = new SessionManager
val resultIn = sessionManager.signIn(sessionToken, arbResult)
resultIn.header.headers.getOrElse("Set-Cookie","") contains(sessionToken.token) mustBe true
val resultOut = sessionManager.signOut(sessionToken, arbResult)
resultOut.header.headers.getOrElse("Set-Cookie","") contains(sessionToken.token) mustBe false
}
}
}
}
| lachlang/feedbacker | test/au/com/feedbacker/controllers/SessionManagerSpec.scala | Scala | apache-2.0 | 7,017 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.catalog
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.functions.sql.ScalarSqlFunctions
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.sql._
import org.apache.calcite.sql.`type`.{OperandTypes, ReturnTypes, SqlReturnTypeInference, SqlTypeTransforms}
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.calcite.sql.util.ReflectiveSqlOperatorTable
import org.apache.calcite.sql.validate.{SqlNameMatcher, SqlNameMatchers}
import _root_.java.util.{List => JList}
import java.util
import _root_.scala.collection.JavaConversions._
class BasicOperatorTable extends ReflectiveSqlOperatorTable {
/**
* List of supported SQL operators / functions.
*
* This list should be kept in sync with [[SqlStdOperatorTable]].
*/
private val builtInSqlOperators: Seq[SqlOperator] = Seq(
// SET OPERATORS
SqlStdOperatorTable.UNION,
SqlStdOperatorTable.UNION_ALL,
SqlStdOperatorTable.EXCEPT,
SqlStdOperatorTable.EXCEPT_ALL,
SqlStdOperatorTable.INTERSECT,
SqlStdOperatorTable.INTERSECT_ALL,
// BINARY OPERATORS
SqlStdOperatorTable.AND,
SqlStdOperatorTable.AS,
SqlStdOperatorTable.CONCAT,
SqlStdOperatorTable.DIVIDE,
SqlStdOperatorTable.DIVIDE_INTEGER,
SqlStdOperatorTable.DOT,
SqlStdOperatorTable.EQUALS,
SqlStdOperatorTable.GREATER_THAN,
SqlStdOperatorTable.IS_DISTINCT_FROM,
SqlStdOperatorTable.IS_NOT_DISTINCT_FROM,
SqlStdOperatorTable.GREATER_THAN_OR_EQUAL,
SqlStdOperatorTable.LESS_THAN,
SqlStdOperatorTable.LESS_THAN_OR_EQUAL,
SqlStdOperatorTable.MINUS,
SqlStdOperatorTable.MULTIPLY,
SqlStdOperatorTable.NOT_EQUALS,
SqlStdOperatorTable.OR,
SqlStdOperatorTable.PLUS,
SqlStdOperatorTable.DATETIME_PLUS,
// POSTFIX OPERATORS
SqlStdOperatorTable.DESC,
SqlStdOperatorTable.NULLS_FIRST,
SqlStdOperatorTable.IS_NOT_NULL,
SqlStdOperatorTable.IS_NULL,
SqlStdOperatorTable.IS_NOT_TRUE,
SqlStdOperatorTable.IS_TRUE,
SqlStdOperatorTable.IS_NOT_FALSE,
SqlStdOperatorTable.IS_FALSE,
SqlStdOperatorTable.IS_NOT_UNKNOWN,
SqlStdOperatorTable.IS_UNKNOWN,
// PREFIX OPERATORS
SqlStdOperatorTable.NOT,
SqlStdOperatorTable.UNARY_MINUS,
SqlStdOperatorTable.UNARY_PLUS,
// GROUPING FUNCTIONS
SqlStdOperatorTable.GROUP_ID,
SqlStdOperatorTable.GROUPING,
SqlStdOperatorTable.GROUPING_ID,
// AGGREGATE OPERATORS
SqlStdOperatorTable.SUM,
SqlStdOperatorTable.SUM0,
SqlStdOperatorTable.COUNT,
SqlStdOperatorTable.COLLECT,
SqlStdOperatorTable.MIN,
SqlStdOperatorTable.MAX,
SqlStdOperatorTable.AVG,
SqlStdOperatorTable.STDDEV_POP,
SqlStdOperatorTable.STDDEV_SAMP,
SqlStdOperatorTable.VAR_POP,
SqlStdOperatorTable.VAR_SAMP,
// ARRAY OPERATORS
SqlStdOperatorTable.ARRAY_VALUE_CONSTRUCTOR,
SqlStdOperatorTable.ELEMENT,
// MAP OPERATORS
SqlStdOperatorTable.MAP_VALUE_CONSTRUCTOR,
// ARRAY MAP SHARED OPERATORS
SqlStdOperatorTable.ITEM,
SqlStdOperatorTable.CARDINALITY,
// SPECIAL OPERATORS
SqlStdOperatorTable.ROW,
SqlStdOperatorTable.OVERLAPS,
SqlStdOperatorTable.LITERAL_CHAIN,
SqlStdOperatorTable.BETWEEN,
SqlStdOperatorTable.SYMMETRIC_BETWEEN,
SqlStdOperatorTable.NOT_BETWEEN,
SqlStdOperatorTable.SYMMETRIC_NOT_BETWEEN,
SqlStdOperatorTable.NOT_LIKE,
SqlStdOperatorTable.LIKE,
SqlStdOperatorTable.NOT_SIMILAR_TO,
SqlStdOperatorTable.SIMILAR_TO,
SqlStdOperatorTable.CASE,
SqlStdOperatorTable.REINTERPRET,
SqlStdOperatorTable.EXTRACT,
SqlStdOperatorTable.IN,
// FUNCTIONS
SqlStdOperatorTable.SUBSTRING,
SqlStdOperatorTable.OVERLAY,
SqlStdOperatorTable.TRIM,
SqlStdOperatorTable.POSITION,
SqlStdOperatorTable.CHAR_LENGTH,
SqlStdOperatorTable.CHARACTER_LENGTH,
SqlStdOperatorTable.UPPER,
SqlStdOperatorTable.LOWER,
SqlStdOperatorTable.INITCAP,
SqlStdOperatorTable.POWER,
SqlStdOperatorTable.SQRT,
SqlStdOperatorTable.MOD,
SqlStdOperatorTable.LN,
SqlStdOperatorTable.LOG10,
ScalarSqlFunctions.LOG2,
SqlStdOperatorTable.ABS,
SqlStdOperatorTable.EXP,
SqlStdOperatorTable.NULLIF,
SqlStdOperatorTable.COALESCE,
SqlStdOperatorTable.FLOOR,
SqlStdOperatorTable.CEIL,
SqlStdOperatorTable.LOCALTIME,
SqlStdOperatorTable.LOCALTIMESTAMP,
SqlStdOperatorTable.CURRENT_TIME,
SqlStdOperatorTable.CURRENT_TIMESTAMP,
SqlStdOperatorTable.CURRENT_DATE,
ScalarSqlFunctions.DATE_FORMAT,
SqlStdOperatorTable.CAST,
SqlStdOperatorTable.SCALAR_QUERY,
SqlStdOperatorTable.EXISTS,
SqlStdOperatorTable.SIN,
SqlStdOperatorTable.COS,
SqlStdOperatorTable.TAN,
ScalarSqlFunctions.TANH,
SqlStdOperatorTable.COT,
SqlStdOperatorTable.ASIN,
SqlStdOperatorTable.ACOS,
SqlStdOperatorTable.ATAN,
SqlStdOperatorTable.ATAN2,
ScalarSqlFunctions.COSH,
SqlStdOperatorTable.DEGREES,
SqlStdOperatorTable.RADIANS,
SqlStdOperatorTable.SIGN,
SqlStdOperatorTable.ROUND,
SqlStdOperatorTable.PI,
ScalarSqlFunctions.E,
SqlStdOperatorTable.RAND,
SqlStdOperatorTable.RAND_INTEGER,
ScalarSqlFunctions.CONCAT,
ScalarSqlFunctions.CONCAT_WS,
SqlStdOperatorTable.REPLACE,
ScalarSqlFunctions.BIN,
ScalarSqlFunctions.HEX,
ScalarSqlFunctions.LOG,
ScalarSqlFunctions.LPAD,
ScalarSqlFunctions.RPAD,
ScalarSqlFunctions.MD5,
ScalarSqlFunctions.SHA1,
ScalarSqlFunctions.SINH,
ScalarSqlFunctions.SHA224,
ScalarSqlFunctions.SHA256,
ScalarSqlFunctions.SHA384,
ScalarSqlFunctions.SHA512,
ScalarSqlFunctions.SHA2,
ScalarSqlFunctions.REGEXP_EXTRACT,
ScalarSqlFunctions.FROM_BASE64,
ScalarSqlFunctions.TO_BASE64,
ScalarSqlFunctions.UUID,
ScalarSqlFunctions.LTRIM,
ScalarSqlFunctions.RTRIM,
ScalarSqlFunctions.REPEAT,
ScalarSqlFunctions.REGEXP_REPLACE,
SqlStdOperatorTable.TRUNCATE,
// TIME FUNCTIONS
SqlStdOperatorTable.YEAR,
SqlStdOperatorTable.QUARTER,
SqlStdOperatorTable.MONTH,
SqlStdOperatorTable.WEEK,
SqlStdOperatorTable.HOUR,
SqlStdOperatorTable.MINUTE,
SqlStdOperatorTable.SECOND,
SqlStdOperatorTable.DAYOFYEAR,
SqlStdOperatorTable.DAYOFMONTH,
SqlStdOperatorTable.DAYOFWEEK,
SqlStdOperatorTable.TIMESTAMP_ADD,
SqlStdOperatorTable.TIMESTAMP_DIFF,
// MATCH_RECOGNIZE
SqlStdOperatorTable.FIRST,
SqlStdOperatorTable.LAST,
SqlStdOperatorTable.PREV,
SqlStdOperatorTable.FINAL,
SqlStdOperatorTable.RUNNING,
BasicOperatorTable.MATCH_PROCTIME,
BasicOperatorTable.MATCH_ROWTIME,
// EXTENSIONS
BasicOperatorTable.TUMBLE,
BasicOperatorTable.HOP,
BasicOperatorTable.SESSION,
BasicOperatorTable.TUMBLE_START,
BasicOperatorTable.TUMBLE_END,
BasicOperatorTable.HOP_START,
BasicOperatorTable.HOP_END,
BasicOperatorTable.SESSION_START,
BasicOperatorTable.SESSION_END,
BasicOperatorTable.TUMBLE_PROCTIME,
BasicOperatorTable.TUMBLE_ROWTIME,
BasicOperatorTable.HOP_PROCTIME,
BasicOperatorTable.HOP_ROWTIME,
BasicOperatorTable.SESSION_PROCTIME,
BasicOperatorTable.SESSION_ROWTIME
)
builtInSqlOperators.foreach(register)
override def lookupOperatorOverloads(
opName: SqlIdentifier,
category: SqlFunctionCategory,
syntax: SqlSyntax,
operatorList: util.List[SqlOperator],
nameMatcher: SqlNameMatcher): Unit = {
    // set caseSensitive=false to make sure the behavior is the same as before.
super.lookupOperatorOverloads(
opName, category, syntax, operatorList, SqlNameMatchers.withCaseSensitive(false))
}
}
object BasicOperatorTable {
/**
* We need custom group auxiliary functions in order to support nested windows.
*/
val TUMBLE: SqlGroupedWindowFunction = new SqlGroupedWindowFunction(
    // The TUMBLE group function was hard-coded to "$TUMBLE" in CALCITE-3382.
"$TUMBLE",
SqlKind.TUMBLE,
null,
OperandTypes.or(OperandTypes.DATETIME_INTERVAL, OperandTypes.DATETIME_INTERVAL_TIME)) {
override def getAuxiliaryFunctions: JList[SqlGroupedWindowFunction] =
Seq(
TUMBLE_START,
TUMBLE_END,
TUMBLE_ROWTIME,
TUMBLE_PROCTIME)
}
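  // Illustration (added, not from the original file): in SQL these group functions appear as e.g.
  //   GROUP BY TUMBLE(rowtime, INTERVAL '10' MINUTE)
  // with TUMBLE_START / TUMBLE_END / TUMBLE_ROWTIME / TUMBLE_PROCTIME usable in the SELECT list
  // to access the window bounds and time attributes.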
val TUMBLE_START: SqlGroupedWindowFunction = TUMBLE.auxiliary(SqlKind.TUMBLE_START)
val TUMBLE_END: SqlGroupedWindowFunction = TUMBLE.auxiliary(SqlKind.TUMBLE_END)
val TUMBLE_ROWTIME: SqlGroupedWindowFunction =
new SqlGroupedWindowFunction(
"TUMBLE_ROWTIME",
SqlKind.OTHER_FUNCTION,
TUMBLE,
// ensure that returned rowtime is always NOT_NULLABLE
ReturnTypes.cascade(ReturnTypes.ARG0, SqlTypeTransforms.TO_NOT_NULLABLE),
null,
TUMBLE.getOperandTypeChecker,
SqlFunctionCategory.SYSTEM)
val TUMBLE_PROCTIME: SqlGroupedWindowFunction =
TUMBLE.auxiliary("TUMBLE_PROCTIME", SqlKind.OTHER_FUNCTION)
val HOP: SqlGroupedWindowFunction = new SqlGroupedWindowFunction(
SqlKind.HOP,
null,
OperandTypes.or(
OperandTypes.DATETIME_INTERVAL_INTERVAL,
OperandTypes.DATETIME_INTERVAL_INTERVAL_TIME)) {
override def getAuxiliaryFunctions: _root_.java.util.List[SqlGroupedWindowFunction] =
Seq(
HOP_START,
HOP_END,
HOP_ROWTIME,
HOP_PROCTIME)
}
val HOP_START: SqlGroupedWindowFunction = HOP.auxiliary(SqlKind.HOP_START)
val HOP_END: SqlGroupedWindowFunction = HOP.auxiliary(SqlKind.HOP_END)
val HOP_ROWTIME: SqlGroupedWindowFunction =
new SqlGroupedWindowFunction(
"HOP_ROWTIME",
SqlKind.OTHER_FUNCTION,
HOP,
// ensure that returned rowtime is always NOT_NULLABLE
ReturnTypes.cascade(ReturnTypes.ARG0, SqlTypeTransforms.TO_NOT_NULLABLE),
null,
HOP.getOperandTypeChecker,
SqlFunctionCategory.SYSTEM)
val HOP_PROCTIME: SqlGroupedWindowFunction = HOP.auxiliary("HOP_PROCTIME", SqlKind.OTHER_FUNCTION)
val SESSION: SqlGroupedWindowFunction = new SqlGroupedWindowFunction(
SqlKind.SESSION,
null,
OperandTypes.or(OperandTypes.DATETIME_INTERVAL, OperandTypes.DATETIME_INTERVAL_TIME)) {
override def getAuxiliaryFunctions: _root_.java.util.List[SqlGroupedWindowFunction] =
Seq(
SESSION_START,
SESSION_END,
SESSION_ROWTIME,
SESSION_PROCTIME)
}
val SESSION_START: SqlGroupedWindowFunction = SESSION.auxiliary(SqlKind.SESSION_START)
val SESSION_END: SqlGroupedWindowFunction = SESSION.auxiliary(SqlKind.SESSION_END)
val SESSION_ROWTIME: SqlGroupedWindowFunction =
new SqlGroupedWindowFunction(
"SESSION_ROWTIME",
SqlKind.OTHER_FUNCTION,
SESSION,
// ensure that returned rowtime is always NOT_NULLABLE
ReturnTypes.cascade(ReturnTypes.ARG0, SqlTypeTransforms.TO_NOT_NULLABLE),
null,
SESSION.getOperandTypeChecker,
SqlFunctionCategory.SYSTEM)
val SESSION_PROCTIME: SqlGroupedWindowFunction =
SESSION.auxiliary("SESSION_PROCTIME", SqlKind.OTHER_FUNCTION)
private val RowTimeTypeInference = new TimeIndicatorReturnType(true)
private val ProcTimeTypeInference = new TimeIndicatorReturnType(false)
private class TimeIndicatorReturnType(isRowTime: Boolean) extends SqlReturnTypeInference {
override def inferReturnType(opBinding: SqlOperatorBinding): RelDataType = {
val flinkTypeFactory = opBinding.getTypeFactory.asInstanceOf[FlinkTypeFactory]
if (isRowTime) {
flinkTypeFactory.createRowtimeIndicatorType()
} else {
flinkTypeFactory.createProctimeIndicatorType()
}
}
}
val MATCH_ROWTIME: SqlFunction =
new SqlFunction(
"MATCH_ROWTIME",
SqlKind.OTHER_FUNCTION,
RowTimeTypeInference,
null,
OperandTypes.NILADIC,
SqlFunctionCategory.MATCH_RECOGNIZE
) {
override def isDeterministic: Boolean = true
}
val MATCH_PROCTIME: SqlFunction =
new SqlFunction(
"MATCH_PROCTIME",
SqlKind.OTHER_FUNCTION,
ProcTimeTypeInference,
null,
OperandTypes.NILADIC,
SqlFunctionCategory.MATCH_RECOGNIZE
) {
override def isDeterministic: Boolean = false
}
}
| hequn8128/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/catalog/BasicOperatorTable.scala | Scala | apache-2.0 | 13,006 |
object Monoids3 extends App {
trait Monoid[A] {
def mappend(a1: A, a2: A): A
def mzero: A
}
object Monoid {
implicit val IntMonoid: Monoid[Int] = new Monoid[Int] {
def mappend(a: Int, b: Int): Int = a + b
def mzero: Int = 0
}
implicit val StringMonoid: Monoid[String] = new Monoid[String] {
def mappend(a: String, b: String): String = a + b
def mzero: String = ""
}
}
def sum[A: Monoid](xs: List[A]): A = {
val m = implicitly[Monoid[A]]
xs.foldLeft(m.mzero)(m.mappend)
}
val r = sum(List("a", "b", "c"))
println(r)
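  // Illustration (added): the same polymorphic sum works for Ints via the implicit IntMonoid,
  // e.g. sum(List(1, 2, 3)) evaluates to 6.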
} | diegopacheco/scala-playground | scalaz/src/main/scala/Monoids3.scala | Scala | unlicense | 602 |
package functionalops.systemz
import scalaz._
import Scalaz._
trait ManagementInstances extends ManagementClasses {
// TODO Define default/package level typeclass instances here
}
| functionalops/systemz | management/src/main/scala/functionalops/instances.scala | Scala | bsd-3-clause | 184 |
package code
package snippet
import net.liftweb._
import http.js._
import http.js.JsCmds._
import http.js.JE._
import net.liftmodules.extras.Bootstrap3Screen
/*
* Base all LiftScreens off this. Currently configured to use bootstrap 3.
*/
abstract class BaseScreen extends Bootstrap3Screen {
override def defaultToAjax_? = true
}
| eltimn/lift-poly-example | src/main/scala/code/snippet/BaseScreen.scala | Scala | apache-2.0 | 336 |
package blog
import skinny.orm._, feature._
import scalikejdbc._, SQLInterpolation._
import org.joda.time._
case class Post(
id: Long,
title: String,
body: String,
viewCount: BigDecimal,
tags: Seq[Tag] = Nil,
createdAt: DateTime,
updatedAt: Option[DateTime] = None)
object Post extends SkinnyCRUDMapper[Post] with TimestampsFeature[Post] {
override val connectionPoolName = 'blog
override val tableName = "posts"
override val defaultAlias = createAlias("p")
val tagsRef = hasManyThrough[Tag](
through = PostTag,
many = Tag,
merge = (p, t) => p.copy(tags = t)) // .byDefault
override def extract(rs: WrappedResultSet, rn: ResultName[Post]): Post = new Post(
id = rs.get(rn.id),
title = rs.get(rn.title),
body = rs.get(rn.body),
viewCount = rs.get(rn.viewCount),
createdAt = rs.get(rn.createdAt),
updatedAt = rs.get(rn.updatedAt)
)
} | BlackPrincess/skinny-framework | orm/src/test/scala/blog/Post.scala | Scala | mit | 898 |
package com.rasterfoundry.api.config
import com.rasterfoundry.api.utils.Config
import com.rasterfoundry.database.FeatureFlagDao
import com.rasterfoundry.datamodel.FeatureFlag
import doobie.free.connection.ConnectionIO
import io.circe.generic.JsonCodec
@JsonCodec
final case class AngularConfig(
clientId: String,
clientEnvironment: String,
auth0Domain: String,
rollbarClientToken: String,
intercomAppId: String,
featureFlags: Seq[FeatureFlag],
tileServerLocation: String,
dropboxClientId: String
)
object AngularConfigService extends Config {
def getConfig(): ConnectionIO[AngularConfig] =
for {
features <- FeatureFlagDao.query.list
} yield
AngularConfig(
auth0ClientId,
clientEnvironment,
auth0Domain,
rollbarClientToken,
intercomAppId,
features,
tileServerLocation,
dropboxClientId
)
}
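// Usage sketch (added; assumes a doobie Transactor[IO] `xa` is built elsewhere in the application):
//   import doobie.implicits._
//   val config: IO[AngularConfig] = AngularConfigService.getConfig().transact(xa)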
| aaronxsu/raster-foundry | app-backend/api/src/main/scala/config/Config.scala | Scala | apache-2.0 | 916 |
package com.sidesna.iicfiltering.managedata
import org.apache.spark.sql.{SQLContext, DataFrame}
/**
* Created by Miguel A. Sotomayor
* Date: 18/10/15
*
*
*/
trait ExtractionBase {
def getCustomers(sqlContext: SQLContext): DataFrame
def getProducts(sqlContext: SQLContext): DataFrame
def getPurchases(sqlContext: SQLContext): DataFrame
}
| masfworld/IICFiltering | src/main/scala/com/sidesna/iicfiltering/managedata/ExtractionBase.scala | Scala | mit | 350 |
/*
* VideoUpdateProgressJob.scala
*
* Copyright (c) 2014 Ronald Kurniawan. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301 USA
*/
package net.fluxo.dd
import org.quartz.{JobExecutionException, JobExecutionContext, Job}
import java.io.{File, IOException}
import org.apache.log4j.Level
import scala.util.control.Breaks._
import org.json.simple.JSONObject
import org.apache.commons.io.FileUtils
/**
* This class monitors and updates the progress of youtube-dl downloads on database, so it can be queried by clients.
*
* @author Ronald Kurniawan (viper)
* @version 0.4.5, 29/05/14.
* @see org.quartz.Job
*/
class VideoUpdateProgressJob extends Job {
/**
	 * Monitors the progress of downloads and updates the records in the database. It restarts any unfinished downloads, then
	 * checks the progress of any active downloads. Lastly, it checks for finished downloads, updates their status
	 * in the database and moves the files into the target directory.
*
* @param context a <code>org.quartz.JobExecutionContext</code> object
* @throws org.quartz.JobExecutionException JobExecutionException
*/
@throws(classOf[JobExecutionException])
override def execute(context: JobExecutionContext) {
try {
OVideoP restartDownload()
val videoIterator = (OVideoP ActiveProcesses) iterator()
while (videoIterator.hasNext) {
val vidObj = videoIterator.next
val tGID = vidObj.VideoTaskGid.orNull
// if there's no extension defined in the video tracker object, try to obtain one from the json file
val infoObject = new File(tGID + ".info.json")
if (!((vidObj VideoExt) isDefined) && infoObject.exists) {
val bestFormat = OUtils extractValueFromJSONFile(infoObject, "format_id")
val formatArray = OUtils extractArrayFromJSONObject(infoObject, "formats")
val formatIterator = formatArray.iterator
breakable {
while (formatIterator.hasNext) {
val f = formatIterator.next.asInstanceOf[JSONObject]
if ((f get "format_id").asInstanceOf[String].equals(bestFormat)) {
OVideoP updateVideoExtension(tGID, (f get "ext").asInstanceOf[String])
break()
}
}
}
}
if (!((vidObj VideoTitle) isDefined) && infoObject.exists) {
val title = OUtils extractValueFromJSONFile(infoObject, "stitle")
OVideoP updateVideoTitle(tGID, title)
}
// look for the ".part" file
val partFile = new File(tGID + "." + vidObj.VideoExt.getOrElse("") + ".part")
val fullFile = new File(tGID + "." + vidObj.VideoExt.getOrElse(""))
// if "part" file exists, calculate the download progress
if (partFile.exists) {
val downloaded = FileUtils sizeOf partFile
if ((vidObj LastDownloadedBytes) == downloaded) {
vidObj.StallCount_=((vidObj StallCount) + 1)
if ((vidObj StallCount) > 3) {
// the process has stalled for more than 30 seconds, kill it!
OVideoP killProcess ((vidObj VideoTaskGid) getOrElse "")
}
} else {
vidObj.StallCount_=(0)
val totalFileLength = vidObj.VideoTotalLength
					vidObj.LastDownloadedBytes_=(downloaded)
val fileName = ((vidObj VideoTitle) getOrElse "") + "." + ((vidObj VideoExt) getOrElse "")
DbControl updateVideoTask(tGID, OVideoP getOwner tGID, fileName, "active", totalFileLength, downloaded)
}
} else if (fullFile.exists) {
// if full file exists, that means the download has finished. Rename and move the file to target dir, then cleanup
val targetVideoFile = new File(OUtils.readConfig.DownloadDir.getOrElse("") + "/" + ((vidObj VideoTitle) getOrElse "") +
"." + ((vidObj VideoExt) getOrElse ""))
FileUtils copyFile(fullFile, targetVideoFile)
FileUtils forceDelete fullFile
FileUtils forceDelete infoObject
DbControl finishVideoTask(FileUtils sizeOf targetVideoFile, tGID)
OVideoP removeFromList tGID
}
}
} catch {
case ioe: IOException =>
LogWriter writeLog("Error performing I/O operation: " + (ioe getMessage), Level.ERROR)
}
}
}
| fluxodesign/DownloadDaemon | src/main/scala/net/fluxo/dd/VideoUpdateProgressJob.scala | Scala | gpl-2.0 | 4,737 |
package sbtfmppresolver
case class Args(underlying: Seq[(String, String)]) {
def get(key: String): Option[(String, String)] = {
underlying.find(_._1 == key)
}
def valueOf(key: String): String = {
get(key).map(_._2) match {
case None | Some("") => throw new IllegalArgumentException(s"Please provide a value for $key")
case Some(value) => value
}
}
def contains(key: String): Boolean = {
underlying.map(_._1).contains(key)
}
def containsAny(keys: String*): Boolean = {
val targetKeys = keys.toSet
val currentKeys = underlying.map(_._1).toSet
(targetKeys & currentKeys).nonEmpty
}
def replace(key: String, value: String): Args = {
Args(
underlying.map {
case (currentKey, _) if key == currentKey => (key, value)
case keyValue => keyValue
}
)
}
def toArray: Array[String] = {
underlying.flatMap { case (key, value) => Array(key, value) }.toArray
}
}
object Args {
def apply(args: Array[String]): Args = {
fromString(args.mkString(" "))
}
def fromString(args: String): Args = {
ArgsParser(args)
}
}
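// Usage sketch (added; the option names are illustrative and ArgsParser's exact tokenisation is defined elsewhere):
//   val args = Args(Array("--SourceRoot", "src/fmpp", "--TargetRoot", "target/fmpp"))
//   args.contains("--SourceRoot") // true
//   args.valueOf("--TargetRoot") // "target/fmpp"
//   args.replace("--TargetRoot", "out").toArray // Array("--SourceRoot", "src/fmpp", "--TargetRoot", "out")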
| jeffreyolchovy/sbt-fmpp-resolver | resolver/src/main/scala/sbtfmppresolver/Args.scala | Scala | apache-2.0 | 1,125 |
package cromwell.server
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory
import cromwell.engine.backend.{Backend, CromwellBackend}
import cromwell.engine.workflow.WorkflowManagerActor
trait WorkflowManagerSystem {
protected def systemName = "cromwell-system"
protected def newActorSystem(): ActorSystem = ActorSystem(systemName)
implicit final lazy val actorSystem = newActorSystem()
def shutdownActorSystem(): Unit = {
actorSystem.shutdown()
}
def backendType: String = ConfigFactory.load.getConfig("backend").getString("backend")
lazy val backend: Backend = CromwellBackend.initBackend(backendType, actorSystem)
// For now there's only one WorkflowManagerActor so no need to dynamically name it
lazy val workflowManagerActor = actorSystem.actorOf(WorkflowManagerActor.props(backend), "WorkflowManagerActor")
}
| dgtester/cromwell | src/main/scala/cromwell/server/WorkflowManagerSystem.scala | Scala | bsd-3-clause | 863 |
/*
* Contributions:
* Jean-Francois GUENA: implement "suffixed collection name" feature (issue #39 partially fulfilled)
* ...
*/
package akka.contrib.persistence.mongodb
import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import reactivemongo.api._
import reactivemongo.api.bson.collection.{BSONCollection, BSONSerializationPack}
import reactivemongo.api.bson.{BSONDocument, _}
import reactivemongo.api.commands.{CommandError, WriteConcern}
import reactivemongo.api.indexes.{Index, IndexType}
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.util.{Failure, Success}
object RxMongoPersistenceDriver {
import MongoPersistenceDriver._
def toWriteConcern(writeSafety: WriteSafety, wtimeout: Duration, fsync: Boolean): WriteConcern = (writeSafety, wtimeout.toMillis.toInt, fsync) match {
case (Unacknowledged, wt, f) =>
WriteConcern.Unacknowledged.copy(fsync = f, wtimeout = Option(wt))
case (Acknowledged, wt, f) =>
WriteConcern.Acknowledged.copy(fsync = f, wtimeout = Option(wt))
case (Journaled, wt, _) =>
WriteConcern.Journaled.copy(wtimeout = Option(wt))
case (ReplicaAcknowledged, wt, f) =>
WriteConcern.ReplicaAcknowledged(2, wt, !f)
}
}
class RxMongoDriverProvider(actorSystem: ActorSystem) {
val driver: AsyncDriver = {
val md = AsyncDriver()
actorSystem.registerOnTermination(driver.close()(actorSystem.dispatcher))
md
}
}
class RxMongoDriver(system: ActorSystem, config: Config, driverProvider: RxMongoDriverProvider) extends MongoPersistenceDriver(system, config) {
import RxMongoPersistenceDriver._
val RxMongoSerializers: RxMongoSerializers = RxMongoSerializersExtension(system)
// Collection type
override type C = BSONCollection
override type D = BSONDocument
private def rxSettings = RxMongoDriverSettings(system.settings)
private[mongodb] val driver = driverProvider.driver
private[this] lazy val parsedMongoUri = MongoConnection.parseURI(mongoUri) match {
case Success(parsed) => parsed
case Failure(throwable) => throw throwable
}
implicit val waitFor: FiniteDuration = 10.seconds
lazy val connection: Future[MongoConnection] =
driver.connect(parsedMongoUri)
def closeConnections(): Unit = {
driver.close(5.seconds)
()
}
def dbName: String = databaseName.getOrElse(parsedMongoUri.db.getOrElse(DEFAULT_DB_NAME))
def failoverStrategy: FailoverStrategy = {
val rxMSettings = rxSettings
FailoverStrategy(
initialDelay = rxMSettings.InitialDelay,
retries = rxMSettings.Retries,
delayFactor = rxMSettings.GrowthFunction)
}
def db: Future[DefaultDB] =
for {
conn <- connection
db <- conn.database(name = dbName, failoverStrategy = failoverStrategy)
} yield db
override def collection(name: String): Future[BSONCollection] =
db.map(_[BSONCollection](name))
override def ensureCollection(name: String): Future[BSONCollection] =
ensureCollection(name, _.create())
private[this] def ensureCollection(name: String, collectionCreator: BSONCollection => Future[Unit]): Future[BSONCollection] =
for {
coll <- collection(name)
_ <- collectionCreator(coll).recover { case CommandError.Code(MongoErrors.NamespaceExists.code) => coll }
} yield coll
def journalWriteConcern: WriteConcern = toWriteConcern(journalWriteSafety, journalWTimeout, journalFsync)
def snapsWriteConcern: WriteConcern = toWriteConcern(snapsWriteSafety, snapsWTimeout, snapsFsync)
def metadataWriteConcern: WriteConcern = toWriteConcern(journalWriteSafety, journalWTimeout, journalFsync)
override def ensureIndex(indexName: String, unique: Boolean, sparse: Boolean, keys: (String, Int)*): C => Future[C] = {
collection =>
val ky = keys.toSeq.map { case (f, o) => f -> (if (o > 0) IndexType.Ascending else IndexType.Descending) }
collection.indexesManager.ensure(Index(BSONSerializationPack)(
key = ky,
background = true,
unique = unique,
sparse = sparse,
name = Some(indexName),
dropDups = true,
version = None,
partialFilter = None,
options = BSONDocument.empty
)).map(_ => collection)
}
def collectionNames: Future[List[String]] =
for {
database <- db
names <- database.collectionNames
} yield names
override def cappedCollection(name: String): Future[C] =
for {
cc <- ensureCollection(name, _.createCapped(realtimeCollectionSize, None))
s <- cc.stats
_ <- if (s.capped) Future.successful(()) else cc.convertToCapped(realtimeCollectionSize, None)
} yield cc
def getMongoVersionFromBuildInfo: Future[String] =
db.flatMap { database =>
database.runCommand(BSONDocument("buildInfo" -> 1), FailoverStrategy())
.one[BSONDocument](ReadPreference.Primary)
.map(_.getAsOpt[BSONString]("version").getOrElse(BSONString("")).value)
}
def removeEmptyCollection(collection: C, indexName: String): Future[Unit] = {
for {
// first count, may be inaccurate in cluster environment
firstCount <- collection.count(None, None, 0, None, ReadConcern.Local)
// just to be sure: second count, always accurate and should be fast as we are pretty sure the result is zero
secondCount <- if (firstCount == 0L) {
for {
version <- mongoVersion
count <- if (version.atLeast(ServerVersion.`3.6.0`)) {
collection.count(None, None, 0, Some(collection.hint(indexName)), ReadConcern.Majority)
} else {
collection.count(None, None, 0, None, ReadConcern.Majority)
}
} yield count
} else Future.successful(firstCount)
if secondCount == 0L
_ <- collection.drop(failIfNotFound = false)
} yield ()
}
}
class RxMongoPersistenceExtension(actorSystem: ActorSystem) extends MongoPersistenceExtension(actorSystem) {
val driverProvider: RxMongoDriverProvider = new RxMongoDriverProvider(actorSystem)
override def configured(config: Config): Configured = Configured(config)
case class Configured(config: Config) extends ConfiguredExtension {
lazy val driver = new RxMongoDriver(actorSystem, config, driverProvider)
override lazy val journaler: MongoPersistenceJournallingApi = new RxMongoJournaller(driver) with MongoPersistenceJournalMetrics {
override def driverName = "rxmongo"
}
override lazy val snapshotter = new RxMongoSnapshotter(driver)
override lazy val readJournal = new RxMongoReadJournaller(driver)
}
}
object RxMongoDriverSettings {
def apply(systemSettings: ActorSystem.Settings): RxMongoDriverSettings = {
val fullName = s"${getClass.getPackage.getName}.rxmongo"
val systemConfig = systemSettings.config
systemConfig.checkValid(ConfigFactory.defaultReference(), fullName)
new RxMongoDriverSettings(systemConfig.getConfig(fullName))
}
}
class RxMongoDriverSettings(val config: Config) {
config.checkValid(config, "failover")
private val failover = config.getConfig("failover")
def InitialDelay: FiniteDuration = failover.getFiniteDuration("initialDelay")
def Retries: Int = failover.getInt("retries")
def Growth: String = failover.getString("growth")
def ConstantGrowth: Boolean = Growth == "con"
def LinearGrowth: Boolean = Growth == "lin"
def ExponentialGrowth: Boolean = Growth == "exp"
def Factor: Double = failover.getDouble("factor")
def GrowthFunction: Int => Double = Growth match {
case "con" => (_: Int) => Factor
case "lin" => (i: Int) => i.toDouble
case "exp" => (i: Int) => math.pow(i.toDouble, Factor)
}
}
| JeanFrancoisGuena/akka-persistence-mongo | rxmongo/src/main/scala/akka/contrib/persistence/mongodb/RxMongoPersistenceExtension.scala | Scala | apache-2.0 | 7,741 |
package net.fwbrasil.activate.storage.relational
import java.util.regex.Pattern
import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer
import net.fwbrasil.activate.storage.marshalling.StorageValue
import com.google.common.collect.MapMaker
import com.google.common.cache.CacheBuilder
trait QlStatement extends Serializable {
val statement: String
val restrictionQuery: Option[(String, Int)]
val bindsList: List[Map[String, StorageValue]]
val entityClass: Class[_]
def expectedNumbersOfAffectedRowsOption: List[Option[Int]]
lazy val (indexedStatement, columns) = {
val pattern = Pattern.compile("(:[a-zA-Z0-9_]*)")
var matcher = pattern.matcher(statement)
var result = statement
val columns = ListBuffer[String]()
while (matcher.find) {
val group = matcher.group
result = matcher.replaceFirst("?")
matcher = pattern.matcher(result)
columns += group.substring(1)
}
(result, columns.toList)
}
def valuesList(columns: List[String]) =
for (binds <- bindsList)
yield columns.map(binds(_))
def valuesList: List[List[StorageValue]] =
valuesList(columns)
override def toString = statement + restrictionQuery.map(" restriction " + _).getOrElse("")
}
class NormalQlStatement(
val statement: String,
val entityClass: Class[_],
val binds: Map[String, StorageValue] = Map(),
val restrictionQuery: Option[(String, Int)] = None,
val expectedNumberOfAffectedRowsOption: Option[Int] = None)
extends QlStatement {
override def expectedNumbersOfAffectedRowsOption =
List(expectedNumberOfAffectedRowsOption)
val bindsList = List(binds)
def isCompatible(other: NormalQlStatement) =
statement == other.statement &&
restrictionQuery == other.restrictionQuery
}
class BatchQlStatement(
val statement: String,
val entityClass: Class[_],
val bindsList: List[Map[String, StorageValue]],
val restrictionQuery: Option[(String, Int)],
override val expectedNumbersOfAffectedRowsOption: List[Option[Int]])
extends QlStatement
object BatchQlStatement {
@tailrec def group(qlStatements: List[NormalQlStatement], batchLimit: Int, grouped: List[QlStatement] = List()): List[QlStatement] = {
if (batchLimit <= 1)
qlStatements
else if (qlStatements.isEmpty)
grouped
else {
val (head :: tail) = qlStatements
var batchSize = 0
val (tailToGroup, others) = tail.span(each => {
batchSize += 1
each.isCompatible(head) &&
batchSize <= batchLimit
})
if (tailToGroup.isEmpty)
group(others, batchLimit, grouped ++ List(head))
else {
val toGroup = List(head) ++ tailToGroup
val expectedNumberOfAffectedRows = toGroup.map(_.expectedNumbersOfAffectedRowsOption).flatten
val batch =
new BatchQlStatement(
head.statement,
head.entityClass,
toGroup.map(_.binds),
head.restrictionQuery,
expectedNumberOfAffectedRows)
group(others, batchLimit, grouped ++ List(batch))
}
}
}
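  // Behaviour sketch (added, illustrative): only *consecutive* compatible statements are merged, so a sequence
  // A A B A (with a sufficiently large batchLimit) becomes Batch(A, A), B, A rather than Batch(A, A, A), B.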
}
| xdevelsistemas/activate | activate-core/src/main/scala/net/fwbrasil/activate/storage/relational/QlStatement.scala | Scala | lgpl-2.1 | 3,440 |
//: ----------------------------------------------------------------------------
//: Copyright (C) 2015 Verizon. All Rights Reserved.
//:
//: Licensed under the Apache License, Version 2.0 (the "License");
//: you may not use this file except in compliance with the License.
//: You may obtain a copy of the License at
//:
//: http://www.apache.org/licenses/LICENSE-2.0
//:
//: Unless required by applicable law or agreed to in writing, software
//: distributed under the License is distributed on an "AS IS" BASIS,
//: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//: See the License for the specific language governing permissions and
//: limitations under the License.
//:
//: ----------------------------------------------------------------------------
package knobs
sealed trait Interpolation
case class Literal(text: String) extends Interpolation
case class Interpolate(text: String) extends Interpolation
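// Illustration (added, only a sketch): a config value such as "$(HOME)/data" would typically parse into
// List(Interpolate("HOME"), Literal("/data")); the actual interpolation syntax and parser live elsewhere in knobs.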
| rintcius/knobs | core/src/main/scala/knobs/Interpolation.scala | Scala | apache-2.0 | 965 |
package drt.client.services
import utest._
object StaffQueueAssignmentTests extends TestSuite {
def tests = TestSuite {
'DeskRecHandler - {
}
}
}
| somanythings/drt-scalajs-spa-exploration | client/src/test/scala/spatutorial/client/services/StaffQueueAssignmentTests.scala | Scala | apache-2.0 | 161 |
package aecor.schedule
import java.time._
import java.time.format.DateTimeFormatter
import aecor.schedule.CassandraScheduleEntryRepository.{ Queries, TimeBucket }
import aecor.schedule.ScheduleEntryRepository.ScheduleEntry
import aecor.util.effect._
import akka.NotUsed
import akka.persistence.cassandra._
import akka.persistence.cassandra.session.scaladsl.CassandraSession
import akka.stream.Materializer
import akka.stream.scaladsl.{ Sink, Source }
import cats.Monad
import cats.data.Kleisli
import cats.effect.Effect
import cats.implicits._
import com.datastax.driver.core.Row
import com.datastax.driver.extras.codecs.jdk8.InstantCodec
import org.slf4j.LoggerFactory
class CassandraScheduleEntryRepository[F[_]](cassandraSession: CassandraSession, queries: Queries)(
implicit materializer: Materializer,
F: Effect[F]
) extends ScheduleEntryRepository[F] {
private val log = LoggerFactory.getLogger(classOf[CassandraScheduleEntryRepository[F]])
private val preparedInsertEntry = cassandraSession.prepare(queries.insertEntry)
private val preparedSelectEntries = cassandraSession.prepare(queries.selectEntries)
private val preparedSelectEntry = cassandraSession.prepare(queries.selectEntry)
override def insertScheduleEntry(id: ScheduleBucketId,
entryId: String,
dueDate: LocalDateTime): F[Unit] =
F.fromFuture(preparedInsertEntry)
.map(
_.bind()
.setString("schedule_name", id.scheduleName)
.setString("schedule_bucket", id.scheduleBucket)
.setString("entry_id", entryId)
.setString("time_bucket", TimeBucket(dueDate.toLocalDate).key)
.set("due_date", dueDate.toInstant(ZoneOffset.UTC), classOf[Instant])
.setBool("fired", false)
)
.flatMap(x => F.fromFuture(cassandraSession.executeWrite(x)))
.void
override def markScheduleEntryAsFired(id: ScheduleBucketId, entryId: String): F[Unit] =
F.fromFuture(preparedSelectEntry)
.map(
_.bind()
.setString("schedule_name", id.scheduleName)
.setString("schedule_bucket", id.scheduleBucket)
.setString("entry_id", entryId)
)
.flatMap(x => F.fromFuture(cassandraSession.selectOne(x)))
.flatMap {
case Some(row) =>
F.fromFuture(preparedInsertEntry)
.map(
_.bind()
.setString("schedule_name", id.scheduleName)
.setString("schedule_bucket", id.scheduleBucket)
.setString("entry_id", entryId)
.setString("time_bucket", row.getString("time_bucket"))
.setTimestamp("due_date", row.getTimestamp("due_date"))
.setBool("fired", true)
)
.flatMap(x => F.fromFuture(cassandraSession.executeWrite(x)))
.void
case None =>
().pure[F]
}
private def getBucket(timeBucket: TimeBucket,
from: LocalDateTime,
to: LocalDateTime): Source[ScheduleEntry, NotUsed] =
Source
.single(())
.mapAsync(1) { _ =>
preparedSelectEntries
}
.map(
_.bind()
.setString("time_bucket", timeBucket.key)
.set("from_due_date", from.atOffset(ZoneOffset.UTC).toInstant, classOf[Instant])
.set("to_due_date", to.atOffset(ZoneOffset.UTC).toInstant, classOf[Instant])
)
.flatMapConcat(cassandraSession.select)
.map(fromRow)
.mapMaterializedValue(_ => NotUsed)
.named(s"getBucket($timeBucket, $from, $to)")
private def getEntries(
from: LocalDateTime,
to: LocalDateTime
): Source[ScheduleEntryRepository.ScheduleEntry, NotUsed] =
if (to isBefore from) {
getEntries(to, from)
} else {
def rec(bucket: TimeBucket): Source[ScheduleEntry, NotUsed] = {
log.debug("Querying bucket [{}] from [{}] to [{}]", bucket, from, to)
getBucket(bucket, from, to).concat {
Source.lazily { () =>
val nextBucket = bucket.next
if (nextBucket.isAfter(to.toLocalDate)) {
Source.empty
} else {
rec(nextBucket)
}
}
}
}
rec(TimeBucket(from.toLocalDate)).named(s"getEntries($from, $to)")
}
override def processEntries(from: LocalDateTime, to: LocalDateTime, parallelism: Int)(
f: (ScheduleEntry) => F[Unit]
): F[Option[ScheduleEntry]] =
F.fromFuture {
getEntries(from, to)
.mapAsync(parallelism)(x => f(x).as(x).unsafeToFuture())
.runWith(Sink.lastOption)
}
private def fromRow(row: Row): ScheduleEntry =
ScheduleEntry(
ScheduleBucketId(row.getString("schedule_name"), row.getString("schedule_bucket")),
row.getString("entry_id"),
LocalDateTime.ofInstant(row.get("due_date", classOf[Instant]), ZoneOffset.UTC),
row.getString("time_bucket"),
row.getBool("fired")
)
}
object CassandraScheduleEntryRepository {
def apply[F[_]: Effect](cassandraSession: CassandraSession, queries: Queries)(
implicit materializer: Materializer
): CassandraScheduleEntryRepository[F] =
new CassandraScheduleEntryRepository(cassandraSession, queries)
final case class TimeBucket(day: LocalDate, key: String) {
def next: TimeBucket =
TimeBucket(day.plusDays(1))
def isAfter(other: LocalDate): Boolean =
day.isAfter(other)
def isBefore(other: LocalDate): Boolean =
day.isBefore(other)
override def toString: String = key
}
object TimeBucket {
private val timeBucketFormatter: DateTimeFormatter = DateTimeFormatter.ofPattern("yyyyMMdd")
def apply(epochTimestamp: Long): TimeBucket = {
val time = LocalDateTime.ofInstant(Instant.ofEpochMilli(epochTimestamp), ZoneOffset.UTC)
apply(time.toLocalDate)
}
def apply(day: LocalDate): TimeBucket = {
val key = day.format(timeBucketFormatter)
apply(day, key)
}
}
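  // Illustration (added): TimeBucket(LocalDate.of(2020, 1, 31)).key is "20200131", and its `next` bucket's key is "20200201".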
final case class Queries(keyspace: String, tableName: String) {
val materializedViewName = s"${tableName}_by_time_bucket"
val createTable: String =
s"""
CREATE TABLE IF NOT EXISTS $keyspace.$tableName (
schedule_name text,
schedule_bucket text,
entry_id text,
time_bucket text,
due_date timestamp,
fired boolean,
PRIMARY KEY ((schedule_name, schedule_bucket), entry_id, time_bucket, due_date)
)
"""
val createMaterializedView: String =
s"""
CREATE MATERIALIZED VIEW IF NOT EXISTS $keyspace.$materializedViewName
AS SELECT time_bucket, schedule_name, schedule_bucket, entry_id, due_date, fired FROM schedule_entries
WHERE
time_bucket IS NOT NULL
AND due_date IS NOT NULL
AND schedule_name IS NOT NULL
AND schedule_bucket IS NOT NULL
AND entry_id IS NOT NULL
PRIMARY KEY (time_bucket, due_date, schedule_name, schedule_bucket, entry_id)
WITH CLUSTERING ORDER BY (due_date ASC);
"""
def insertEntry: String =
s"""
INSERT INTO $keyspace.$tableName (schedule_name, schedule_bucket, entry_id, time_bucket, due_date, fired)
VALUES (:schedule_name, :schedule_bucket, :entry_id, :time_bucket, :due_date, :fired);
"""
def selectEntry: String =
s"""
SELECT * FROM $keyspace.$tableName
WHERE schedule_name = :schedule_name
AND schedule_bucket = :schedule_bucket
AND entry_id = :entry_id
"""
def selectEntries: String =
s"""
SELECT * FROM $keyspace.$materializedViewName
WHERE
time_bucket = :time_bucket
AND due_date > :from_due_date
AND due_date <= :to_due_date
ORDER BY due_date ASC;
"""
}
def init[F[_]](queries: Queries)(implicit F: Monad[F]): Session.Init[F] = Kleisli { session =>
for {
_ <- session.execute(queries.createTable)
_ <- session.execute(queries.createMaterializedView)
_ <- session.registerCodec(InstantCodec.instance)
} yield ()
}
}
| notxcain/aecor | modules/schedule/src/main/scala/aecor/schedule/CassandraScheduleEntryRepository.scala | Scala | mit | 8,187 |
package picasso.math
import picasso.graph.Trace
import scala.collection.GenSeq
/** abstract view of a WSTS.
* The domain is embedded into the type, and the ordering implicit.
* The user should only have to specify the transitions.
 * TODO put S and T as type parameters? Then WSTS could extend Traceable. Seems a bad idea: it makes a lot of types explicit
*/
abstract class WSTS extends TransitionSystem {
implicit val ordering: WellPartialOrdering[S]
def isTraceValid(t: Trace[S,T]): Boolean = {
t.triples forall ( t => post(t._1, t._2) exists (ordering.equiv(_, t._3)))
}
}
//put different trait for each kind of analysis:
trait PredBasis {
self: WSTS =>
//for instance the pre of a reset net is easy may returns an infinite set.
//but computing the minimal element is easy.
//therefore it is better to have directly the pred-basis function
def predBasis(s: UpwardClosedSet[S], t: T): UpwardClosedSet[S]
def predBasis(s: UpwardClosedSet[S]): UpwardClosedSet[S] =
(UpwardClosedSet.empty[S] /: transitions)(_ ++ predBasis(s, _))
}
| dzufferey/picasso | core/src/main/scala/picasso/math/WSTS.scala | Scala | bsd-2-clause | 1,077 |
package repository
import akka.stream.Materializer
import com.google.inject.{AbstractModule, Provides}
import com.mohiva.play.silhouette.api.util.PasswordInfo
import com.mohiva.play.silhouette.impl.providers.OAuth2Info
import com.mohiva.play.silhouette.persistence.daos.DelegableAuthInfoDAO
import play.api.Configuration
import repository.mongodb.party._
import repository.mongodb.silhouette.{
MongoDBOAuth2Repository,
MongoDBPasswordAuthRepository
}
class MongoDBModule extends AbstractModule {
override def configure(): Unit = {}
@Provides
def userRepositoryProvider(configuration: Configuration): UserRepository = {
new MongoDBUserRepository(configuration)
}
@Provides
def avatarRepositoryProvider(
configuration: Configuration,
materializer: Materializer
): AvatarRepository = {
new MongoDBAvatarRepository()(configuration, materializer)
}
@Provides
def oauth2RepositoryProvider(
configuration: Configuration
): DelegableAuthInfoDAO[OAuth2Info] = {
new MongoDBOAuth2Repository(configuration)
}
@Provides
def passAuthRepositoryProvider(
configuration: Configuration
): DelegableAuthInfoDAO[PasswordInfo] = {
new MongoDBPasswordAuthRepository(configuration)
}
}
| kpmeen/symbiotic | examples/symbiotic-server/app/repository/MongoDBModule.scala | Scala | apache-2.0 | 1,248 |
package models.other
import models.db.DeckShip
import models.join.ShipWithName
import scalikejdbc.{AutoSession, DBSession}
import scala.concurrent.duration._
/**
*
* @author ponkotuy
* Date: 15/03/04.
*/
case class ShipWithCondition(id: Int, name: String, stype: String, cond: Int, rest: Int, deckId: Option[Int])
object ShipWithCondition {
def fromShip(ship: ShipWithName)(implicit session: DBSession = AutoSession): ShipWithCondition = {
val now = System.currentTimeMillis()
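    // Added note: this models natural condition recovery of 3 points per full 3 minutes elapsed since `created`.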
val cond = ship.cond + ((now - ship.created) / 3.minutes.toMillis * 3).toInt
val deckId = DeckShip.findByShipId(ship.memberId, ship.id).map(_.deckId)
ShipWithCondition(ship.id, ship.name, ship.stName, cond, 49 - cond, deckId)
}
}
| ttdoda/MyFleetGirls | server/app/models/other/ShipWithCondition.scala | Scala | mit | 734 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.partials
import org.mockito.{ArgumentMatchersSugar, MockitoSugar}
import org.scalatest.BeforeAndAfterEach
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.test.{FakeHeaders, FakeRequest, WithApplication}
import play.api.test.CSRFTokenHelper._
import play.filters.csrf.CSRF
import play.twirl.api.Html
import uk.gov.hmrc.http.{CoreGet, HeaderCarrier, HttpReads}
import scala.concurrent.{ExecutionContext, Future}
class FormPartialSpec
extends AnyWordSpecLike
with Matchers
with MockitoSugar
with ArgumentMatchersSugar
with BeforeAndAfterEach
with ScalaFutures {
import scala.concurrent.ExecutionContext.Implicits.global
val fakeApplication =
new GuiceApplicationBuilder()
.configure("csrf.sign.tokens" -> false)
.build()
val mockHttpGet = mock[CoreGet]
val partialProvider = new FormPartialRetriever {
override val httpGet: CoreGet =
mockHttpGet
override val headerCarrierForPartialsConverter: HeaderCarrierForPartialsConverter =
fakeApplication.injector.instanceOf[HeaderCarrierForPartialsConverter]
}
override protected def beforeEach() = {
super.beforeEach()
reset(mockHttpGet)
}
"get" should {
"retrieve HTML from the given URL" in new WithApplication(fakeApplication) {
implicit val request = FakeRequest("GET", "/getform", FakeHeaders(), "").withCSRFToken
val csrfValue = CSRF.getToken(request).get.value
when(mockHttpGet.GET[HtmlPartial](any[String], any, any)(any[HttpReads[HtmlPartial]], any[HeaderCarrier], any[ExecutionContext]))
.thenReturn(
Future.successful(HtmlPartial.Success(title = None, content = Html("some content A"))),
Future.successful(HtmlPartial.Success(title = None, content = Html("some content B")))
)
partialProvider.getPartial("foo").futureValue shouldBe HtmlPartial.Success(title = None, content = Html("some content A"))
partialProvider.getPartial("foo").futureValue shouldBe HtmlPartial.Success(title = None, content = Html("some content B"))
verify(mockHttpGet, times(2))
.GET(eqTo(s"foo?csrfToken=${csrfValue}"), any, any)(any[HttpReads[HtmlPartial]], any[HeaderCarrier], any[ExecutionContext])
}
"retrieve HTML from the given URL, which includes query string" in new WithApplication(fakeApplication) {
implicit val request = FakeRequest("GET", "/getform", FakeHeaders(), "").withCSRFToken
val csrfValue = CSRF.getToken(request).get.value
when(mockHttpGet.GET[HtmlPartial](any[String], any, any)(any[HttpReads[HtmlPartial]], any[HeaderCarrier], any[ExecutionContext]))
.thenReturn(Future.successful(HtmlPartial.Success(title = None, content = Html("some content C"))))
partialProvider.getPartial("foo?attrA=valA&attrB=valB").futureValue shouldBe HtmlPartial.Success(title = None, content = Html("some content C"))
verify(mockHttpGet)
.GET(eqTo(s"foo?attrA=valA&attrB=valB&csrfToken=${csrfValue}"), any, any)(any[HttpReads[HtmlPartial]], any[HeaderCarrier], any[ExecutionContext])
}
"return HtmlPartial.Failure when there is an exception retrieving the partial from the URL" in new WithApplication(fakeApplication) {
implicit val request = FakeRequest("GET", "/getform", FakeHeaders(), "").withCSRFToken
val csrfValue = CSRF.getToken(request).get.value
when(mockHttpGet.GET[HtmlPartial](any[String], any, any)(any[HttpReads[HtmlPartial]], any[HeaderCarrier], any[ExecutionContext]))
.thenReturn(Future.successful(HtmlPartial.Failure()))
partialProvider.getPartial("foo").futureValue should be (HtmlPartial.Failure())
verify(mockHttpGet)
.GET(eqTo(s"foo?csrfToken=${csrfValue}"), any, any)(any[HttpReads[HtmlPartial]], any[HeaderCarrier], any[ExecutionContext])
}
"return provided Html when there is an exception retrieving the partial from the URL" in new WithApplication(fakeApplication) {
implicit val request = FakeRequest("GET", "/getform", FakeHeaders(), "").withCSRFToken
val csrfValue = CSRF.getToken(request).get.value
when(mockHttpGet.GET[HtmlPartial](any[String], any, any)(any[HttpReads[HtmlPartial]], any[HeaderCarrier], any[ExecutionContext]))
.thenReturn(Future.successful(HtmlPartial.Failure()))
partialProvider.getPartialContentAsync(url = "foo", errorMessage = Html("something went wrong")).futureValue.body should be("something went wrong")
verify(mockHttpGet)
.GET(eqTo(s"foo?csrfToken=${csrfValue}"), any, any)(any[HttpReads[HtmlPartial]], any[HeaderCarrier], any[ExecutionContext])
}
}
"processTemplate" should {
"return the original template if there is no csrf token in it" in new WithApplication(fakeApplication) {
implicit val request = FakeRequest("GET", "/getform", FakeHeaders(), "").withCSRFToken
val template =
"""
|<div>hello {{param}}</div>
""".stripMargin
val expectedTemplate =
"""
|<div>hello world</div>
""".stripMargin
partialProvider.processTemplate(Html(template), Map("param" -> "world")).body shouldBe expectedTemplate
}
"use empty string for csrf token if there is no csrf token in the request" in new WithApplication(fakeApplication) {
implicit val request = FakeRequest()
val template =
"""
|<div>hello {{param}} {{csrfToken}}</div>
""".stripMargin
val expectedTemplate =
"""
|<div>hello world </div>
""".stripMargin
partialProvider.processTemplate(Html(template), Map("param" -> "world")).body shouldBe expectedTemplate
}
"return the template with the CSRF token placeholder replaced with the actual value" in new WithApplication(fakeApplication) {
implicit val request = FakeRequest("GET", "/getform", FakeHeaders(), "").withCSRFToken
val csrfValue = CSRF.getToken(request).get.value
val template =
"""
|<div>hello {{param}} {{csrfToken}}</div>
""".stripMargin
val expectedTemplate =
s"""
|<div>hello world ${csrfValue}</div>
""".stripMargin
partialProvider.processTemplate(Html(template), Map("param" -> "world")).body shouldBe expectedTemplate
}
}
"urlWithCsrfToken" should {
implicit val request = FakeRequest("GET", "/getform", FakeHeaders(), "").withCSRFToken
val csrfValue = CSRF.getToken(request).get.value
"add a query string" in new WithApplication(fakeApplication) {
val url = partialProvider.urlWithCsrfToken("/contact/problem_reports")
url shouldBe s"/contact/problem_reports?csrfToken=${csrfValue}"
}
"append to the existing query string with 1 value" in new WithApplication(fakeApplication) {
val url = partialProvider.urlWithCsrfToken("/contact/problem_reports?service=yta")
url shouldBe s"/contact/problem_reports?service=yta&csrfToken=${csrfValue}"
}
"append to the existing query string with 2 values" in new WithApplication(fakeApplication) {
val url = partialProvider.urlWithCsrfToken("/contact/problem_reports?service=yta&secure=true")
url shouldBe s"/contact/problem_reports?service=yta&secure=true&csrfToken=${csrfValue}"
}
}
}
| hmrc/play-partials | src/test/scala/uk/gov/hmrc/play/partials/FormPartialSpec.scala | Scala | apache-2.0 | 8,042 |
package io.atal.butterfly.action
import io.atal.butterfly.{Editor, Clipboard}
/** Implement the action MoveCursorsToBottom
* Move the cursors to the bottom
*
* @constructor Create the action
*/
class MoveCursorsToBottom extends Action {
/** Execute the action
*
* @param editor The editor onto the action is executed
* @param clipboard The clipboard onto the action is executed
*/
def execute(editor: Editor, clipboard: Clipboard): Unit = editor.moveCursorsToBottom
}
| Matthieu-Riou/Butterfly | src/main/scala/io/atal/butterfly/action/MoveCursorsToBottom.scala | Scala | mit | 499 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperables.multicolumn
import io.deepsense.deeplang.ExecutionContext
import io.deepsense.deeplang.doperables.dataframe.{DataFrame, DataFrameColumnsGetter}
trait SingleColumnTransformerUtils {
def transformSingleColumnInPlace(
inputColumn: String,
dataFrame: DataFrame,
executionContext: ExecutionContext,
transform: (String) => DataFrame
): DataFrame = {
val temporaryColumnName =
DataFrameColumnsGetter.uniqueSuffixedColumnName(inputColumn)
val temporaryDataFrame = transform(temporaryColumnName)
val allColumnNames = temporaryDataFrame.sparkDataFrame.schema.map(_.name)
val filteredColumns = allColumnNames.collect {
case columnName if columnName == inputColumn =>
temporaryDataFrame.sparkDataFrame(temporaryColumnName).as(inputColumn)
case columnName if columnName != temporaryColumnName =>
temporaryDataFrame.sparkDataFrame(columnName)
}
val filteredDataFrame = temporaryDataFrame.sparkDataFrame.select(filteredColumns: _*)
DataFrame.fromSparkDataFrame(filteredDataFrame)
}
}
object SingleColumnTransformerUtils extends SingleColumnTransformerUtils
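// Usage sketch (added; the column name and transform body are hypothetical):
//   SingleColumnTransformerUtils.transformSingleColumnInPlace(
//     "age", dataFrame, executionContext,
//     tmpColumn => transformAgeInto(tmpColumn) // some transform that adds its output under tmpColumn
//   )
// The helper then substitutes tmpColumn back in place of "age", preserving the original column order.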
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/doperables/multicolumn/SingleColumnTransformerUtils.scala | Scala | apache-2.0 | 1,771 |
package tetris
import java.io.{DataOutputStream, File, FileInputStream, FileOutputStream}
import java.nio.channels.FileChannel.MapMode._
import org.apache.commons.io.IOUtils
import org.apache.spark.{SparkConf, SparkContext}
import scala.collection.JavaConversions._
import scala.collection.mutable.HashMap
/**
* Created by papacharlie on 10/31/15.
*/
object Utils {
implicit val workers : Option[Int] = None
def executeInSpark[T](fun: SparkContext => T)(implicit workers: Option[Int] = None): T = {
val conf = workers match {
case Some(i) => new SparkConf().setMaster(s"local[$i]").setAppName("tetris")
case _ => new SparkConf().setMaster("local[*]").setAppName("tetris")
}
val sc = new SparkContext(conf)
val res = fun(sc)
sc.stop()
res
}
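  // Illustration (added): executeInSpark(_.parallelize(1 to 10).sum) spins up a local context, runs the job, and returns 55.0.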
  def clearScreen() = print("\u001b[H\u001b[2J")
def waitToPrint() = Thread.sleep(100)
def printStacks(stacks: Iterable[Stack]) = {
// Assumes foreach runs in order (not actually guaranteed)
stacks.foreach { stack =>
clearScreen()
println(stack)
waitToPrint()
}
}
implicit class Pipe[T](val t: T) extends AnyVal {
def |->(fun: T => Unit): T = {
fun(t)
t
}
def |>[U](fun: T => U): U = fun(t)
}
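  // Illustration (added): 5 |> (_ + 1) evaluates to 6, while 5 |-> println prints 5 and returns 5 unchanged.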
def mkdirp(dirname: String) = {
if (!new File(dirname).isDirectory) {
new File(dirname).mkdir()
}
}
val arrays = "ranks"
val maps = "maps"
val rankArrayFilename = "ranks/rank_array.arr"
val rankMapFilename = "maps/rank_map.map"
def biggestPart(filename: String): Option[String] = {
val f = filename.split(File.separatorChar)
f.take(f.length - 1).mkString(File.separator) match {
case "" => new File(".").list.filter(_.contains(f.last)).sorted.lastOption
case dir => new File(dir).list.filter(_.contains(f.last)).sorted.lastOption.map(dir + File.separator + _)
}
}
def saveArrayInt(filename: String, arr: Array[Int], iteration: Option[Int] = None): Unit = {
def save(filename: String) = {
val file = new DataOutputStream(new FileOutputStream(filename))
for (n <- arr.indices) {
        file.writeInt(arr(n))
if (n % 128 == 0) { // per block size
file.flush()
}
}
file.flush()
file.close()
}
iteration match {
case Some(i) => save(s"$filename.$i")
case _ => save(filename)
}
}
def saveArrayDouble(filename: String, arr: Array[Float], iteration: Option[Int] = None): Unit = {
def save(filename: String) = {
val file = new DataOutputStream(new FileOutputStream(filename))
for (n <- arr.indices) {
file.writeFloat(arr(n))
if (n % 128 == 0) { // per block size
file.flush()
}
}
file.flush()
file.close()
}
iteration match {
case Some(i) => save(s"$filename.$i")
case _ => save(filename)
}
}
def loadArrayInt(filename: String, iteration: Option[Int] = None, size: Int = ContourRank.contours): Option[Array[Int]] = {
def load(filename: String): Option[Array[Int]] = {
if (new File(filename).exists()) {
val arr = Array.fill[Int](size)(0)
val file = new File(filename)
val fileSize = file.length
val stream = new FileInputStream(file)
val buffer = stream.getChannel.map(READ_ONLY, 0, fileSize)
for (n <- arr.indices) {
arr(n) = buffer.getInt()
}
Some(arr)
} else {
None
}
}
(iteration, biggestPart(filename)) match {
case (Some(i), _) => load(s"$filename.$i")
case (_, Some(s)) => load(s)
case _ => load(filename)
}
}
def loadArrayDouble(filename: String, iteration: Option[Int] = None, size: Int = ContourRank.contours): Option[Array[Float]] = {
def load(filename: String): Option[Array[Float]] = {
if (new File(filename).exists()) {
val arr = Array.fill[Float](size)(0.0.toFloat)
val file = new File(filename)
val fileSize = file.length
val stream = new FileInputStream(file)
val buffer = stream.getChannel.map(READ_ONLY, 0, fileSize)
for (n <- arr.indices) {
arr(n) = buffer.getFloat()
}
Some(arr)
} else {
None
}
}
(iteration, biggestPart(filename)) match {
case (Some(i), _) => load(s"$filename.$i")
case (_, Some(s)) => load(s)
case _ => load(filename)
}
}
def iterationExists(filename: String, iteration: Int) = new File(s"$filename.$iteration").exists()
def partExists(filename: String, part: Int) = new File(s"$filename.$part").exists()
def savePartedHashMapInt(filename: String, map: HashMap[Int, Seq[Int]], part: Int): Unit = {
val file = new FileOutputStream(s"$filename.$part")
map.foreach { case (c, seq) =>
      IOUtils.write(s"$c,${seq.mkString(",")}\n", file)
}
file.close()
}
def savePartedHashMapIntByte(filename: String, map: HashMap[(Int, Byte), Seq[Int]], part: Int): Unit = {
val file = new FileOutputStream(s"$filename.$part")
map.foreach { case ((c, b), seq) =>
      IOUtils.write(s"$c,$b,${seq.mkString(",")}\n", file)
}
file.close()
}
def loadHashMapInt(filename: String, part: Option[Int]): Option[HashMap[Int, Seq[Int]]] = {
def load(filename: String): Option[HashMap[Int, Seq[Int]]] = {
val map: HashMap[Int, Seq[Int]] = new HashMap()
if (new File(filename).exists()) {
val lines = IOUtils.readLines(new FileInputStream(filename))
lines.map { line =>
val nums = line.split(",")
map += Integer.parseInt(nums.head) -> nums.tail.map(Integer.parseInt)
}
}
if (map.isEmpty) None else Some(map)
}
part match {
case Some(p) => load(s"$filename.$p")
case _ => load(filename)
}
}
def loadHashMapIntByte(filename: String, part: Option[Int]): Option[HashMap[(Int, Byte), Seq[Int]]] = {
def load(filename: String): Option[HashMap[(Int, Byte), Seq[Int]]] = {
val map: HashMap[(Int, Byte), Seq[Int]] = new HashMap()
if (new File(filename).exists()) {
val lines = IOUtils.readLines(new FileInputStream(filename))
lines.map { line =>
val nums = line.split(",")
map += (Integer.parseInt(nums.head), Integer.parseInt(nums.tail.head).toByte) -> nums.tail.tail.map(Integer.parseInt)
}
}
if (map.isEmpty) None else Some(map)
}
part match {
case Some(p) => load(s"$filename.$p")
case _ => load(filename)
}
}
}
| PapaCharlie/TetrisBot | tetris/src/main/scala/tetris/Utils.scala | Scala | mit | 6,536 |
package troy
package cql.parser.dml
import java.util.UUID
import org.scalatest.{ FlatSpec, Matchers }
import troy.cql.ast.dml.SimpleSelection._
import troy.cql.ast.dml.{ IfExist, IfCondition }
import troy.cql.ast._
import troy.cql.ast.dml._
import troy.cql.ast.dml.WhereClause.Relation.Simple
import troy.cql.parser.ParserTestUtils.parseQuery
class DeleteStatementParserTest extends FlatSpec with Matchers {
"Delete Parser" should "parse simple delete statement" in {
val statement = parseQuery("DELETE FROM NerdMovies WHERE movie = 'Serenity';").asInstanceOf[DeleteStatement]
statement.simpleSelection.isEmpty shouldBe true
statement.from.table shouldBe "NerdMovies"
statement.using.isEmpty shouldBe true
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "movie"
simpleRelation.operator shouldBe Operator.Equals
simpleRelation.term.asInstanceOf[StringConstant].value shouldBe "Serenity"
}
it should "parse simple delete statement with UUID value" in {
val statement = parseQuery("DELETE FROM Users WHERE userid = 01234567-0123-0123-0123-0123456789ab;")
.asInstanceOf[DeleteStatement]
statement.simpleSelection.isEmpty shouldBe true
statement.from.table shouldBe "Users"
statement.using.isEmpty shouldBe true
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "userid"
simpleRelation.operator shouldBe Operator.Equals
simpleRelation.term shouldBe UuidConstant(UUID.fromString("01234567-0123-0123-0123-0123456789ab"))
}
it should "parse delete specific column statement" in {
val statement = parseQuery("DELETE phone FROM Users WHERE userid IN (123, 222);").asInstanceOf[DeleteStatement]
statement.simpleSelection.nonEmpty shouldBe true
statement.simpleSelection.size shouldBe 1
statement.simpleSelection(0).asInstanceOf[ColumnName].columnName shouldBe "phone"
statement.from.table shouldBe "Users"
statement.using.isEmpty shouldBe true
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "userid"
simpleRelation.operator shouldBe Operator.In
val literal: TupleLiteral = simpleRelation.term.asInstanceOf[TupleLiteral]
literal.values.size shouldBe 2
literal.values(0) shouldBe IntegerConstant(123)
literal.values(1) shouldBe IntegerConstant(222)
}
it should "parse delete specific columns statement " in {
val statement = parseQuery("DELETE phone, name FROM Users WHERE userid = 123;").asInstanceOf[DeleteStatement]
statement.simpleSelection.nonEmpty shouldBe true
statement.simpleSelection.size shouldBe 2
statement.simpleSelection(0).asInstanceOf[ColumnName].columnName shouldBe "phone"
statement.simpleSelection(1).asInstanceOf[ColumnName].columnName shouldBe "name"
statement.from.table shouldBe "Users"
statement.using.isEmpty shouldBe true
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "userid"
simpleRelation.operator shouldBe Operator.Equals
simpleRelation.term.asInstanceOf[IntegerConstant].value shouldBe 123
}
it should "parse delete specific column with field name statement " in {
val statement = parseQuery("DELETE address.postcode FROM Users WHERE userid = 123;").asInstanceOf[DeleteStatement]
statement.simpleSelection.nonEmpty shouldBe true
statement.simpleSelection.size shouldBe 1
val simpleSelection = statement.simpleSelection(0).asInstanceOf[ColumnNameDot]
simpleSelection.columnName shouldBe "address"
simpleSelection.fieldName shouldBe "postcode"
statement.from.table shouldBe "Users"
statement.using.isEmpty shouldBe true
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "userid"
simpleRelation.operator shouldBe Operator.Equals
simpleRelation.term shouldBe IntegerConstant(123)
}
it should "parse delete specific column with term statement " in {
val statement = parseQuery("DELETE address['postcode'] FROM Users WHERE userid = 123;").asInstanceOf[DeleteStatement]
statement.simpleSelection.nonEmpty shouldBe true
statement.simpleSelection.size shouldBe 1
val simpleSelection = statement.simpleSelection(0).asInstanceOf[ColumnNameOf]
simpleSelection.columnName shouldBe "address"
simpleSelection.term.asInstanceOf[StringConstant].value shouldBe "postcode"
statement.from.table shouldBe "Users"
statement.using.isEmpty shouldBe true
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "userid"
simpleRelation.operator shouldBe Operator.Equals
simpleRelation.term.asInstanceOf[IntegerConstant].value shouldBe 123
}
it should "parse delete specific column if exists statement " in {
val statement = parseQuery("DELETE phone FROM Users WHERE userid = 123 IF EXISTS;").asInstanceOf[DeleteStatement]
statement.simpleSelection.nonEmpty shouldBe true
statement.simpleSelection.size shouldBe 1
statement.simpleSelection(0).asInstanceOf[ColumnName].columnName shouldBe "phone"
statement.from.table shouldBe "Users"
statement.using.isEmpty shouldBe true
statement.ifCondition.isDefined shouldBe true
statement.ifCondition.get shouldBe IfExist
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "userid"
simpleRelation.operator shouldBe Operator.Equals
simpleRelation.term.asInstanceOf[IntegerConstant].value shouldBe 123
}
it should "parse delete specific column with simple if condition " in {
val statement = parseQuery("DELETE phone FROM Users WHERE userid = 123 IF postcode = 'M1' ;").asInstanceOf[DeleteStatement]
statement.simpleSelection.nonEmpty shouldBe true
statement.simpleSelection.size shouldBe 1
statement.simpleSelection(0).asInstanceOf[ColumnName].columnName shouldBe "phone"
statement.from.table shouldBe "Users"
statement.using.isEmpty shouldBe true
statement.ifCondition.isDefined shouldBe true
val conditions = statement.ifCondition.get.asInstanceOf[IfCondition].conditions
conditions.size shouldBe 1
val condition = conditions(0).asInstanceOf[Condition]
condition.simpleSelection.asInstanceOf[ColumnName].columnName shouldBe "postcode"
condition.operator shouldBe Operator.Equals
condition.term.asInstanceOf[StringConstant].value shouldBe "M1"
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "userid"
simpleRelation.operator shouldBe Operator.Equals
simpleRelation.term.asInstanceOf[IntegerConstant].value shouldBe 123
}
it should "parse IN tuple of UUID" ignore {
val statement = parseQuery("DELETE phone FROM Users WHERE userid IN (C73DE1D3, B70DE1D0);").asInstanceOf[DeleteStatement]
statement.simpleSelection.nonEmpty shouldBe true
statement.simpleSelection.size shouldBe 1
statement.simpleSelection(0).asInstanceOf[ColumnName].columnName shouldBe "phone"
statement.from.table shouldBe "Users"
statement.using.isEmpty shouldBe true
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "userid"
simpleRelation.operator shouldBe Operator.In
val literal: TupleLiteral = simpleRelation.term.asInstanceOf[TupleLiteral]
literal.values.size shouldBe 2
literal.values(0) shouldBe StringConstant("C73DE1D3")
literal.values(1) shouldBe StringConstant("B70DE1D0")
}
it should "parse simple delete statement with NULL term" in {
val statement = parseQuery("DELETE FROM Users WHERE phone = null;")
.asInstanceOf[DeleteStatement]
statement.simpleSelection.isEmpty shouldBe true
statement.from.table shouldBe "Users"
statement.using.isEmpty shouldBe true
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "phone"
simpleRelation.operator shouldBe Operator.Equals
simpleRelation.term shouldBe NullConstant
}
it should "parse simple delete statement with Boolean term" in {
val statement = parseQuery("DELETE FROM posts WHERE published = false;")
.asInstanceOf[DeleteStatement]
statement.simpleSelection.isEmpty shouldBe true
statement.from.table shouldBe "posts"
statement.using.isEmpty shouldBe true
val relations = statement.where.relations
relations.size shouldBe 1
val simpleRelation = relations(0).asInstanceOf[Simple]
simpleRelation.columnName shouldBe "published"
simpleRelation.operator shouldBe Operator.Equals
simpleRelation.term shouldBe BooleanConstant(false)
}
} | schemasafe/troy | cql-parser/src/test/scala/troy/cql/parser/dml/DeleteStatementParserTest.scala | Scala | apache-2.0 | 9,455 |
/***********************************************************************
* Copyright (c) 2015-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.geomesa.nifi.processors.hbase
import org.apache.nifi.annotation.documentation.{DeprecationNotice, Tags}
import org.geomesa.nifi.datastore.processor.{ConverterIngestProcessor, PutGeoMesa}
@Tags(Array("geomesa", "geo", "ingest", "convert", "hbase", "geotools"))
@DeprecationNotice(
alternatives = Array(classOf[HBaseDataStoreService], classOf[PutGeoMesa]),
reason = "Replaced with controller service for data store connections")
class PutGeoMesaHBase extends HBaseProcessor with ConverterIngestProcessor
| geomesa/geomesa-nifi | geomesa-hbase-bundle/geomesa-hbase-processors/src/main/scala/org/geomesa/nifi/processors/hbase/PutGeoMesaHBase.scala | Scala | apache-2.0 | 985 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.uuid
import java.util.{Date, UUID}
import com.google.common.primitives.{Bytes, Longs, Shorts}
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.jts.geom.{Geometry, Point}
import org.locationtech.geomesa.curve.TimePeriod.TimePeriod
import org.locationtech.geomesa.curve.{BinnedTime, Z3SFC}
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.util.hashing.MurmurHash3
/**
* Creates feature id based on the z3 index.
*/
class Z3FeatureIdGenerator extends FeatureIdGenerator {
override def createId(sft: SimpleFeatureType, sf: SimpleFeature): String = {
if (sft.getGeometryDescriptor == null) {
// no geometry in this feature type - just use a random UUID
UUID.randomUUID().toString
} else {
Z3UuidGenerator.createUuid(sft, sf).toString
}
}
}
/**
* UUID generator that creates UUIDs that sort by z3 index.
* UUIDs will be prefixed with a shard number, which will ensure some distribution of values as well
* as allow pre-splitting of tables based on hex values.
*
* Uses variant 2 (IETF) and version 4 (for random UUIDs, although it's not totally random).
* See https://en.wikipedia.org/wiki/Universally_unique_identifier#Variants_and_versions
*
* Format is:
*
* 4 bits for a shard - enough for a single hex digit
* 44 bits of the z3 index value
* 4 bits for the UUID version
* 12 more bits of the z3 index value
* 2 bits for the UUID variant
* 62 bits of randomness
*/
object Z3UuidGenerator extends RandomLsbUuidGenerator with LazyLogging {
private val NullGeom = "Cannot meaningfully index a feature with a NULL geometry"
/**
* Creates a UUID where the first 8 bytes are based on the z3 index of the feature and
* the second 8 bytes are based on a random number.
*
* This provides uniqueness along with locality.
*
* @param sft simple feature type
* @param sf feature
* @return
*/
def createUuid(sft: SimpleFeatureType, sf: SimpleFeature): UUID = {
val time = sft.getDtgIndex.flatMap(i => Option(sf.getAttribute(i)).map(_.asInstanceOf[Date].getTime))
.getOrElse(System.currentTimeMillis())
val pt = sf.getAttribute(sft.getGeomIndex)
if (pt == null) {
throw new IllegalArgumentException(NullGeom)
}
if (sft.isPoints) {
createUuid(pt.asInstanceOf[Point], time, sft.getZ3Interval)
} else {
import org.locationtech.geomesa.utils.geotools.Conversions.RichGeometry
createUuid(pt.asInstanceOf[Geometry].safeCentroid(), time, sft.getZ3Interval)
}
}
/**
* Create a UUID based on the raw values that make up the z3
*
* @param geom geometry
* @param time millis since java epoch
* @param period z3 time period
* @return
*/
def createUuid(geom: Geometry, time: Long, period: TimePeriod): UUID = {
import org.locationtech.geomesa.utils.geotools.Conversions.RichGeometry
if (geom == null) {
throw new IllegalArgumentException(NullGeom)
}
createUuid(geom.safeCentroid(), time, period)
}
/**
* Create a UUID based on the raw values that make up the z3, optimized for point geometries
*
* @param pt point
* @param time millis since java epoch
* @param period z3 time period
* @return
*/
def createUuid(pt: Point, time: Long, period: TimePeriod): UUID = {
if (pt == null) {
throw new IllegalArgumentException(NullGeom)
}
// create the random part
// this uses the same temp array we use later, so be careful with the order this gets called
val leastSigBits = createRandomLsb()
val z3 = {
val BinnedTime(b, t) = BinnedTime.timeToBinnedTime(period)(time)
val z = Z3SFC(period).index(pt.getX, pt.getY, t).z
Bytes.concat(Shorts.toByteArray(b), Longs.toByteArray(z))
}
// shard is first 4 bits of our uuid (e.g. 1 hex char) - this allows nice pre-splitting
val shard = math.abs(MurmurHash3.bytesHash(z3) % 16).toByte
val msb = getTempByteArray
// set the shard bits, then the z3 bits
msb(0) = lohi(shard, z3(0))
msb(1) = lohi(z3(0), z3(1))
msb(2) = lohi(z3(1), z3(2))
msb(3) = lohi(z3(2), z3(3))
msb(4) = lohi(z3(3), z3(4))
msb(5) = lohi(z3(4), z3(5))
msb(6) = lohi(0, (z3(5) << 4).asInstanceOf[Byte]) // leave 4 bits for the version
msb(7) = z3(6)
// we drop the last 4 bytes of the z3 to ensure some randomness
// that leaves us 62 bits of randomness, and still gives us ~10 bits per dimension for locality
// set the UUID version - we skipped those bits when writing
setVersion(msb)
// create the long
val mostSigBits = Longs.fromByteArray(msb)
new UUID(mostSigBits, leastSigBits)
}
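  // Illustrative usage sketch only; the point, factory and time values below are hypothetical
  // and not part of this file. It builds a z3-sorted id and recovers the shard, which per the
  // layout documented above occupies the top 4 bits (the first hex digit) of the UUID:
  //
  //   val pt    = geometryFactory.createPoint(new Coordinate(-75.0, 38.0))
  //   val uuid  = Z3UuidGenerator.createUuid(pt, System.currentTimeMillis(), TimePeriod.Week)
  //   val shard = (uuid.getMostSignificantBits >>> 60).toInt // 0-15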
/**
* Gets the z3 time period bin based on a z3 uuid
*
* @param uuid uuid, as bytes
* @return
*/
def timeBin(uuid: Array[Byte], offset: Int = 0): Short = timeBin(uuid(offset), uuid(offset + 1), uuid(offset + 2))
/**
* Gets the z3 time period bin based on a z3 uuid
*
* @param b0 first byte of the uuid
* @param b1 second byte of the uuid
* @param b2 third byte of the uuid
* @return
*/
def timeBin(b0: Byte, b1: Byte, b2: Byte): Short = {
// undo the lo-hi byte merging to get the two bytes for the time period
Shorts.fromBytes(lohi(b0, b1), lohi(b1, b2))
}
// takes 4 low bits from b1 as the new hi bits, and 4 high bits of b2 as the new low bits, of a new byte
private def lohi(b1: Byte, b2: Byte): Byte =
((java.lang.Byte.toUnsignedInt(b1) << 4) | (java.lang.Byte.toUnsignedInt(b2) >>> 4)).asInstanceOf[Byte]
}
| elahrvivaz/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/uuid/Z3FeatureIdGenerator.scala | Scala | apache-2.0 | 6,201 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.testdata
import javax.inject.{ Inject, Singleton }
import model.persisted.{ QuestionnaireAnswer, QuestionnaireQuestion }
import model.testdata.candidate.CreateCandidateData
import play.api.mvc.RequestHeader
import repositories._
import repositories.application.GeneralApplicationRepository
import services.testdata.candidate.{ ConstructiveGenerator, InProgressAssistanceDetailsStatusGenerator }
import services.testdata.faker.DataFaker
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.ExecutionContext.Implicits.global
//object InProgressQuestionnaireStatusGenerator extends InProgressQuestionnaireStatusGenerator {
// override val previousStatusGenerator = InProgressAssistanceDetailsStatusGenerator
// override val appRepository = applicationRepository
// override val qRepository = questionnaireRepository
//}
@Singleton
class InProgressQuestionnaireStatusGenerator @Inject() (val previousStatusGenerator: InProgressAssistanceDetailsStatusGenerator,
appRepository: GeneralApplicationRepository,
qRepository: QuestionnaireRepository,
dataFaker: DataFaker
) extends ConstructiveGenerator {
// val appRepository: GeneralApplicationRepository
// val qRepository: QuestionnaireRepository
private val didYouLiveInUkBetween14and18Answer = dataFaker.yesNo
private def getWhatWasYourHomePostCodeWhenYouWere14 = {
if (didYouLiveInUkBetween14and18Answer == "Yes") {
Some(QuestionnaireQuestion("What was your home postcode when you were 14?",
QuestionnaireAnswer(Some(dataFaker.homePostcode), None, None)))
} else {
None
}
}
private def getSchoolName14to16Answer = {
if (didYouLiveInUkBetween14and18Answer == "Yes") {
Some(QuestionnaireQuestion("Aged 14 to 16 what was the name of your school?",
QuestionnaireAnswer(Some(dataFaker.age14to16School), None, None)))
} else {
None
}
}
private def getSchoolName16to18Answer = {
if (didYouLiveInUkBetween14and18Answer == "Yes") {
Some(QuestionnaireQuestion("Aged 16 to 18 what was the name of your school or college?",
QuestionnaireAnswer(Some(dataFaker.age16to18School), None, None)))
} else {
None
}
}
private def getFreeSchoolMealsAnswer = {
if (didYouLiveInUkBetween14and18Answer == "Yes") {
Some(QuestionnaireQuestion("Were you at any time eligible for free school meals?",
QuestionnaireAnswer(Some(dataFaker.yesNoPreferNotToSay), None, None)))
} else {
None
}
}
private def getHaveDegreeAnswer(generatorConfig: CreateCandidateData.CreateCandidateData) = {
if (generatorConfig.isCivilServant) {
Some(QuestionnaireQuestion("Do you have a degree?",
QuestionnaireAnswer(Some(if (generatorConfig.hasDegree) { "Yes" } else { "No" }), None, None))
)
} else { None }
}
private def getUniversityAnswer(generatorConfig: CreateCandidateData.CreateCandidateData) = {
if (generatorConfig.hasDegree) {
Some(QuestionnaireQuestion("What is the name of the university you received your degree from?",
QuestionnaireAnswer(Some(dataFaker.university._2), None, None)))
} else {
None
}
}
private def getUniversityDegreeCategoryAnswer(generatorConfig: CreateCandidateData.CreateCandidateData) = {
if (generatorConfig.hasDegree) {
Some(QuestionnaireQuestion("Which category best describes your degree?",
QuestionnaireAnswer(Some(dataFaker.degreeCategory._2), None, None)))
} else {
None
}
}
private def getParentsOccupation = dataFaker.parentsOccupation
private def getParentsOccupationDetail(parentsOccupation: String) = {
if (parentsOccupation == "Employed") {
Some(QuestionnaireQuestion("When you were 14, what kind of work did your highest-earning parent or guardian do?",
QuestionnaireAnswer(Some(dataFaker.parentsOccupationDetails), None, None)))
} else {
Some(QuestionnaireQuestion("When you were 14, what kind of work did your highest-earning parent or guardian do?",
QuestionnaireAnswer(Some(parentsOccupation), None, None)))
}
}
private def getEmployeedOrSelfEmployeed(parentsOccupation: String) = {
if (parentsOccupation == "Employed") {
Some(QuestionnaireQuestion("Did they work as an employee or were they self-employed?",
QuestionnaireAnswer(Some(dataFaker.employeeOrSelf), None, None)))
} else {
None
}
}
private def getSizeParentsEmployeer(parentsOccupation: String) = {
if (parentsOccupation == "Employed") {
Some(QuestionnaireQuestion("Which size would best describe their place of work?",
QuestionnaireAnswer(Some(dataFaker.sizeParentsEmployeer), None, None)))
} else {
None
}
}
private def getSuperviseEmployees(parentsOccupation: String) = {
if (parentsOccupation == "Employed") {
Some(QuestionnaireQuestion("Did they supervise employees?",
QuestionnaireAnswer(Some(dataFaker.yesNoPreferNotToSay), None, None)))
} else {
None
}
}
private def getAllQuestionnaireQuestions(parentsOccupation: String, generatorConfig: CreateCandidateData.CreateCandidateData) = List(
Some(QuestionnaireQuestion("I understand this won't affect my application", QuestionnaireAnswer(Some(dataFaker.yesNo), None, None))),
Some(QuestionnaireQuestion("What is your gender identity?", QuestionnaireAnswer(Some(dataFaker.gender), None, None))),
Some(QuestionnaireQuestion("What is your sexual orientation?", QuestionnaireAnswer(Some(dataFaker.sexualOrientation), None, None))),
Some(QuestionnaireQuestion("What is your ethnic group?", QuestionnaireAnswer(Some(dataFaker.ethnicGroup), None, None))),
Some(QuestionnaireQuestion("Did you live in the UK between the ages of 14 and 18?", QuestionnaireAnswer(
Some(didYouLiveInUkBetween14and18Answer), None, None))
),
getWhatWasYourHomePostCodeWhenYouWere14,
getSchoolName14to16Answer,
getSchoolName16to18Answer,
getFreeSchoolMealsAnswer,
getHaveDegreeAnswer(generatorConfig),
getUniversityAnswer(generatorConfig),
getUniversityDegreeCategoryAnswer(generatorConfig),
Some(QuestionnaireQuestion("Do you consider yourself to come from a lower socio-economic background?",
QuestionnaireAnswer(Some(dataFaker.yesNoPreferNotToSay), None, None))
),
Some(QuestionnaireQuestion("Do you have a parent or guardian that completed a university degree course, or qualifications " +
"below degree level, by the time you were 18?",
QuestionnaireAnswer(Some(dataFaker.parentsDegree), None, None))
),
getParentsOccupationDetail(parentsOccupation),
getEmployeedOrSelfEmployeed(parentsOccupation),
getSizeParentsEmployeer(parentsOccupation),
getSuperviseEmployees(parentsOccupation)
  ).flatten
def generate(generationId: Int, generatorConfig: CreateCandidateData.CreateCandidateData)(implicit hc: HeaderCarrier, rh: RequestHeader) = {
for {
candidateInPreviousStatus <- previousStatusGenerator.generate(generationId, generatorConfig)
_ <- qRepository.addQuestions(
candidateInPreviousStatus.applicationId.get,
getAllQuestionnaireQuestions(getParentsOccupation, generatorConfig)
)
_ <- appRepository.updateQuestionnaireStatus(candidateInPreviousStatus.applicationId.get, "start_questionnaire")
_ <- appRepository.updateQuestionnaireStatus(candidateInPreviousStatus.applicationId.get, "education_questionnaire")
_ <- appRepository.updateQuestionnaireStatus(candidateInPreviousStatus.applicationId.get, "diversity_questionnaire")
_ <- appRepository.updateQuestionnaireStatus(candidateInPreviousStatus.applicationId.get, "occupation_questionnaire")
} yield {
candidateInPreviousStatus
}
}
}
| hmrc/fset-faststream | app/services/testdata/InProgressQuestionnaireStatusGenerator.scala | Scala | apache-2.0 | 8,646 |
/*
* Copyright 2016 Coral realtime streaming analytics (http://coral-streaming.github.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.coral.api
import java.util.UUID
import io.coral.utils.Utils
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
/**
* Represents a runtime. This is a read-only, cached version of the
* "truth" present in the runtimes table in Cassandra.
 * @param name The name of the runtime (uniqueName combines it with the owner as "owner-runtimeName")
* @param status The status of the runtime.
* 0 = created
* 1 = running
* 2 = stopped
* 3 = invalid
* 4 = deleted
* @param projectId The projectId this runtime belongs to
* @param startTime The time this runtime was started.
*/
case class Runtime(// The unique id of the runtime
id: UUID,
// The owner of the runtime
owner: UUID,
// The name of the runtime (without owner name)
name: String,
// The unique name of the owner ("owner-runtimeName")
uniqueName: String,
// The full path of the runtime admin actor that manages this runtime
adminPath: String,
// The current status of the runtime
status: Int,
// The corresponding projectId of the runtime
projectId: Option[UUID],
// The JSON definition of the runtime
jsonDef: JObject,
// Runtime statistics for the runtime
runtimeStats: Option[RuntimeStatistics],
// The timestamp the runtime started on
startTime: Long) {
def toJson(): JObject = {
("id" -> JString(id.toString)) ~
("owner" -> JString(owner.toString)) ~
("name" -> JString(name)) ~
("uniqueName" -> JString(uniqueName)) ~
("status" -> friendly(status)) ~
("project" -> JString(projectId.toString)) ~
("json" -> jsonDef) ~
("starttime" -> Utils.friendlyTime(startTime))
}
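  // For illustration only (field values are made up), toJson renders something like:
  //   {"id":"<uuid>","owner":"<uuid>","name":"runtime1","uniqueName":"alice-runtime1",
  //    "status":"running","project":"<uuid>","json":{...},"starttime":"<formatted time>"}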
def friendly(status: Int): String = {
status match {
case 0 => "created"
case 1 => "running"
case 2 => "stopped"
case 3 => "invalid"
case 4 => "deleted"
case _ => "unrecognized status"
}
}
} | coral-streaming/coral | src/main/scala/io/coral/api/Runtime.scala | Scala | apache-2.0 | 2,615 |
package glaux.interfaces.api
package domain
import glaux.neuralnetwork.trainers.SGD.SGDSettings
import glaux.reinforcementlearning.DeepMindQLearner.ConvolutionBased
case class SessionId(agentName: AgentName, profileId: ProfileId)
sealed trait AgentSettings {
def name: AgentName
}
case class AdvancedAgentSettings(
name: AgentName,
numOfActions: Int,
learnerSettings: ConvolutionBased.Settings,
trainerSettings: SGDSettings
) extends AgentSettings
| A-Noctua/glaux | interface-api/src/main/scala/glaux/interfaces/api/domain/models.scala | Scala | mit | 476 |
package slaq.ql
import slaq.Fail
import slaq.session.{PositionedResult, PositionedParameters}
import slaq.util.{Node, UnaryNode}
import core.{Profile, ColumnOption, ColumnOptions}
abstract class Table[T](
val schemaName: Option[String],
val tableName: String
) extends ColumnBase[T] {
final type TableType = T
def this(_tableName: String) = this(None, _tableName)
def nodeChildren = Nil
override def isNamedTable = true
override def toString = s"Table $tableName"
final def maybeJoin: Option[Join] = nodeDelegate match {
case j: Join => Some(j)
case _ => None
}
type ProfileType = Profile
val O: ColumnOptions = ColumnOptions
def * : ColumnBase[T]
final def column[C: TypeMapper](name: String, options: ColumnOption[C, ProfileType]*) = {
val node = nodeDelegate match {
case j: Join => j.extractNode(tableName, forTableAlias = false)
case delegate => delegate
}
new NamedColumn[C](node, name, options: _*)
}
def create_* : Iterable[NamedColumn[_]] = {
def createTableError(msg: String) = Fail(
s"Cannot use $msg in ${tableName}.* CREATE TABLE statement"
)
Node(*) match {
case p: Projection[_] =>
0 until p.productArity map (n => Node(p.productElement(n)) match {
case c: NamedColumn[_] => c
case c => createTableError(s"column $c")
})
case n: NamedColumn[_] => Iterable(n)
case n => createTableError(s"$n")
}
}
def foreignKey[P, PU, TT <: Table[_], U](name: String, sourceColumns: P, targetTable: TT)(
targetColumns: TT => P,
onUpdate: ForeignKeyAction = ForeignKeyAction.NoAction,
onDelete: ForeignKeyAction = ForeignKeyAction.NoAction
)(using unpackT: Unpack[TT, U], unpackP: Unpack[P, PU]): ForeignKeyQuery[TT, U] = {
val targetUnpackable = Unpackable(
targetTable.mapOp(Table.Alias.apply), unpackT
)
val fk = new ForeignKey(
name, this, targetUnpackable, targetTable, unpackP,
sourceColumns, targetColumns, onUpdate, onDelete
)
new ForeignKeyQuery(List(fk), targetUnpackable)
}
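  // Usage sketch; the table and column names are hypothetical and not part of this trait.
  // Inside a child table definition one would typically declare
  //   def supplier = foreignKey("supplier_fk", supID, Suppliers)(_.id)
  // where `supID` is a column of this table and `Suppliers` is the referenced Table object.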
def primaryKey[TT](name: String, sourceColumns: TT)(using unpack: Unpack[TT, _]): PrimaryKey =
PrimaryKey(name, unpack.linearizer(sourceColumns).getLinearizedNodes)
def tableConstraints: Iterator[Constraint] =
getClass().getMethods.iterator.filter( m =>
m.getParameterTypes.length == 0 &&
classOf[Constraint].isAssignableFrom(m.getReturnType)
).map { m =>
m.setAccessible(true)
m.invoke(this).asInstanceOf[Constraint]
}
final def foreignKeys: Iterable[ForeignKey[_ <: Table[_], _]] =
tableConstraints.collect {
case q: ForeignKeyQuery[_, _] => q.fks
}.toIndexedSeq.flatten
final def primaryKeys: Iterable[PrimaryKey] =
tableConstraints.collect { case k: PrimaryKey => k }.toIndexedSeq
def index[TT](name: String, on: TT, unique: Boolean = false)(using unpack: Unpack[TT, _]) = new Index(
name, this, unpack.linearizer(on).getLinearizedNodes, unique
)
def indexes: Iterable[Index] =
getClass().getMethods.view.filter( m =>
m.getParameterTypes.length == 0 && m.getReturnType == classOf[Index]
).map { m =>
m.setAccessible(true)
m.invoke(this).asInstanceOf[Index]
}
final def getLinearizedNodes = *.getLinearizedNodes
final def getResult(profile: Profile, rs: PositionedResult) =
*.getResult(profile, rs)
final def updateResult(profile: Profile, rs: PositionedResult, value: T) =
*.updateResult(profile, rs, value)
final def setParameter(profile: Profile, ps: PositionedParameters, value: Option[T]) =
*.setParameter(profile, ps, value)
def ddl(using profile: ProfileType): DDL = profile.buildTableDDL(this)
}
object Table {
def unapply[T](t: Table[T]) = Some(t.tableName)
case class Alias(child: Node) extends UnaryNode {
override def toString = s"Table.Alias $child"
override def isNamedTable = true
}
case class Ref(table: Node, tableJoin: Option[Join])
case class Name(alias: String, isFresh: Boolean)
}
| godenji/slaq | src/main/scala/slaq/scalaquery/ql/Table.scala | Scala | bsd-2-clause | 4,093 |
package name.bshelden.arcanefluids.model
/**
* ADT for rotation axes
*
* (c) 2013 Byron Shelden
* See COPYING for details
*/
sealed trait RotAxis
/**
* Rotate the YZ plane CCW around the X axis
*
* 90 Degrees:
* 1 0 0
* 0 0 1
* 0 -1 0
**/
case object RotX extends RotAxis
/**
* Rotate the XZ plane CCW around the Y axis
*
* 90 Degrees:
* 0 0 -1
* 0 1 0
* 1 0 0
**/
case object RotY extends RotAxis
/**
* Rotate the XY plane CCW around the Z axis
*
* 90 Degrees:
* 0 1 0
* -1 0 0
* 0 0 1
**/
case object RotZ extends RotAxis
| bshelden/ArcaneFluids | src/main/scala/name/bshelden/arcanefluids/model/RotAxis.scala | Scala | bsd-2-clause | 578 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.execution.Ack
import monix.execution.Ack.Continue
import monix.execution.exceptions.DummyException
import monix.reactive.{Observable, Observer}
import scala.concurrent.duration.Duration.Zero
import scala.concurrent.{Future, Promise}
import scala.concurrent.duration._
object MapSuite extends BaseOperatorSuite {
def sum(sourceCount: Int): Long = sourceCount.toLong * (sourceCount + 1)
def count(sourceCount: Int) = sourceCount
def createObservable(sourceCount: Int) = {
require(sourceCount > 0, "sourceCount should be strictly positive")
Some {
val o =
if (sourceCount == 1)
Observable.now(1L).map(_ * 2)
else
Observable.range(1, sourceCount.toLong + 1, 1).map(_ * 2)
Sample(o, count(sourceCount), sum(sourceCount), Zero, Zero)
}
}
def observableInError(sourceCount: Int, ex: Throwable) = {
require(sourceCount > 0, "sourceCount should be strictly positive")
Some {
val ex = DummyException("dummy")
val o =
if (sourceCount == 1)
createObservableEndingInError(Observable.now(1L), ex)
.map(_ * 2)
else
createObservableEndingInError(Observable.range(1, sourceCount.toLong + 1, 1), ex)
.map(_ * 2)
Sample(o, count(sourceCount), sum(sourceCount), Zero, Zero)
}
}
def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = {
require(sourceCount > 0, "sourceCount should be strictly positive")
Some {
val o =
if (sourceCount == 1)
Observable.now(1).map(_ => throw ex)
else
Observable.range(1, sourceCount.toLong + 1, 1).map { x =>
if (x == sourceCount)
throw ex
else
x * 2
}
Sample(o, count(sourceCount - 1), sum(sourceCount - 1), Zero, Zero)
}
}
override def cancelableObservables(): Seq[Sample] = {
val obs = Observable.range(0, 1000).delayOnNext(1.second).map(_ + 1)
Seq(Sample(obs, 0, 0, 0.seconds, 0.seconds))
}
test("should not do back-pressure for onComplete, for 1 element") { implicit s =>
val p = Promise[Continue.type]()
var wasCompleted = false
createObservable(1) match {
case Some(Sample(obs, _, _, waitForFirst, waitForNext)) =>
var onNextReceived = false
obs.unsafeSubscribeFn(new Observer[Long] {
def onNext(elem: Long): Future[Ack] = { onNextReceived = true; p.future }
def onError(ex: Throwable): Unit = throw new IllegalStateException()
def onComplete(): Unit = wasCompleted = true
})
assert(wasCompleted)
s.tick(waitForFirst)
assert(onNextReceived)
p.success(Continue)
s.tick(waitForNext)
case _ =>
fail()
}
}
}
| monifu/monifu | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/MapSuite.scala | Scala | apache-2.0 | 3,510 |
/*
* Copyright (c) 2015 Alexandros Pappas p_alx hotmail com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package gr.gnostix.freeswitch.actors
import java.sql.Timestamp
import akka.actor.ActorRef
import scala.collection.JavaConverters._
import org.freeswitch.esl.client.transport.event.EslEvent
import scala.collection.SortedMap
/**
* Created by rebel on 23/8/15.
*/
object ActorsProtocol {
sealed trait RouterProtocol
sealed trait RouterRequest extends RouterProtocol
sealed trait RouterResponse extends RouterProtocol
case class Event(headers: scala.collection.Map[String, String]) extends RouterRequest
case object InitializeDashboardHeartBeat extends RouterRequest
case object InitializeDashboardBasicStats extends RouterRequest
case object GetConcurrentCalls extends RouterRequest
case object GetCompletedCalls extends RouterRequest
case object GetTotalConcurrentCalls extends RouterRequest
case class ConcurrentCallsNum(calls: Int) extends RouterResponse
case class ConcurrentCallsChannels(calls: List[CallNew]) extends RouterResponse
case object GetTotalFailedCalls extends RouterRequest
case object GetFailedCalls extends RouterRequest
case class GetFailedCallsAnalysis(fromNumberOfDigits: Int, toNumberOfDigits: Int) extends RouterRequest
case class GetFailedCallsByDate(from: Timestamp, to: Timestamp) extends RouterRequest
case class GetCallsResponse(totalCalls: Int, activeCallsUUID: List[String]) extends RouterResponse
case class GetCallInfo(uuid: String) extends RouterRequest
case class GetChannelInfo(callUuid: String, channelUuid: String) extends RouterRequest
case object GetConcurrentCallsChannel extends RouterRequest
case class GetConcurrentCallsChannel(uuid: String) extends RouterRequest
case class GetConcurrentCallsChannelByIpPrefix(ip: Option[String], prefix: Option[String]) extends RouterRequest
case object GetFailedCallsChannel extends RouterRequest
case class GetFailedCallsChannelByTime(time: Timestamp) extends RouterRequest
case class GetFailedCallsChannelByIp(ip: String) extends RouterRequest
case object GetCompletedCallsChannel extends RouterRequest
case object GetLastHeartBeat extends RouterRequest
case object GetAllHeartBeat extends RouterRequest
//case object GetFailedCallsTimeSeries extends RouterRequest
case object GetBasicStatsTimeSeries extends RouterRequest
//case object GetConcurrentCallsTimeSeries extends RouterRequest
//case object GetBasicAcdTimeSeries extends RouterRequest
case object GetCompletedCallMinutes extends RouterRequest
case object GetEslConnections extends RouterRequest
case class EslConnectionData(ip: String, port: Int, password: String) extends RouterRequest
case class DelEslConnection(ip: String) extends RouterRequest
case class ShutdownEslConnection(ip: String) extends RouterRequest
case class CompletedCall(uuid: String, hangupTime: Timestamp, callActor: ActorRef) extends RouterProtocol
case class CallTerminated(callEnd: CallEnd) extends RouterProtocol
case object GetACDAndRTP extends RouterRequest
case object GetBillSecAndRTPByCountry extends RouterRequest
case class GetACDAndRTPByTime(lastCheck: Timestamp) extends RouterRequest
case class AcdData(acd: Double) extends RouterResponse
case class GetNumberDialCode(number: String) extends RouterRequest
case class NumberDialCodeCountry(toNumber: String, prefix: Option[String], country: Option[String]) extends RouterResponse
case class AddDialCodeList(fileName: String, dialCodes: SortedMap[String, String]) extends RouterRequest
case class DelDialCodeList(fileName: String) extends RouterRequest
case class GetDialCodeList(fileName: String) extends RouterRequest
case object GetAllDialCodeList extends RouterRequest
case class AllDialCodes(fileName: String, totalCodes: Int)
case class AddAtmoClientUuid(uuid: String)
case class RemoveAtmoClientUuid(uuid: String)
object Event {
def apply(event: EslEvent): Event = Event(event.getEventHeaders.asScala)
def apply(): Event = Event(scala.collection.Map.empty[String, String])
}
def mkEvent(event: EslEvent): Event = Event(event)
}
object ServletProtocol {
sealed trait ApiProtocol
sealed trait ApiRequest extends ApiProtocol
sealed trait ApiResponse extends ApiProtocol
case class ApiReply(status: Int, message: String) extends ApiResponse
case class ApiReplyError(status: Int, message: String) extends ApiResponse
case class ApiReplyData(status: Int, message: String, payload: Any) extends ApiResponse
} | gnostix/freeswitch-monitoring | src/main/scala/gr/gnostix/freeswitch/actors/ActorsProtocol.scala | Scala | apache-2.0 | 5,138 |
package com.twitter.finagle.context
import com.twitter.finagle.util.ByteArrays
import com.twitter.io.Buf
import com.twitter.util.{Duration, Return, Throw, Time, Try}
/**
* A deadline is the time by which some action (e.g., a request) must
* complete. A deadline has a timestamp in addition to the deadline.
* This timestamp denotes the time at which the deadline was enacted.
*
* This is done so that they may be reconciled over process boundaries;
* e.g., to account for variable latencies in message deliveries.
*
* @param timestamp the time at which the deadline was enacted.
*
* @param deadline the time by which the action must complete.
*/
case class Deadline(timestamp: Time, deadline: Time) extends Ordered[Deadline] {
def compare(that: Deadline): Int = this.deadline.compare(that.deadline)
def expired: Boolean = Time.now > deadline
def remaining: Duration = deadline - Time.now
}
/**
* A broadcast context for deadlines.
*/
object Deadline extends Contexts.broadcast.Key[Deadline]("com.twitter.finagle.Deadline") {
/**
* Returns the current request's deadline, if set.
*/
def current: Option[Deadline] =
Contexts.broadcast.get(Deadline)
/**
* Construct a deadline from a timeout.
*/
def ofTimeout(timeout: Duration): Deadline = {
val now = Time.now
Deadline(now, now + timeout)
}
/**
* Construct a new deadline, representing the combined deadline
* `d1` and `d2`. Specifically, the returned deadline has the
* earliest deadline but the latest timestamp. This represents the
* strictest deadline and the latest observation.
*/
def combined(d1: Deadline, d2: Deadline): Deadline =
Deadline(d1.timestamp max d2.timestamp, d1.deadline min d2.deadline)
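  // Worked example with illustrative times: combining Deadline(timestamp = 1s, deadline = 5s)
  // with Deadline(timestamp = 2s, deadline = 4s) yields Deadline(timestamp = 2s, deadline = 4s),
  // i.e. the latest observation paired with the strictest (earliest) deadline.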
/**
* Marshal deadline to byte buffer, deadline.timestamp and deadline.deadline
* must not be Time.Top, Time.Bottom or Time.Undefined
*/
def marshal(deadline: Deadline): Buf = {
val bytes = new Array[Byte](16)
ByteArrays.put64be(bytes, 0, deadline.timestamp.inNanoseconds)
ByteArrays.put64be(bytes, 8, deadline.deadline.inNanoseconds)
Buf.ByteArray.Owned(bytes)
}
private[this] def readBigEndianLong(b: Buf, offset: Int): Long = {
((b.get(offset) & 0xff).toLong << 56) |
((b.get(offset + 1) & 0xff).toLong << 48) |
((b.get(offset + 2) & 0xff).toLong << 40) |
((b.get(offset + 3) & 0xff).toLong << 32) |
((b.get(offset + 4) & 0xff).toLong << 24) |
((b.get(offset + 5) & 0xff).toLong << 16) |
((b.get(offset + 6) & 0xff).toLong << 8) |
(b.get(offset + 7) & 0xff).toLong
}
def tryUnmarshal(body: Buf): Try[Deadline] = {
if (body.length != 16)
return Throw(
new IllegalArgumentException(s"Invalid body. Length ${body.length} but required 16")
)
val timestamp = readBigEndianLong(body, 0)
val deadline = readBigEndianLong(body, 8)
Return(Deadline(Time.fromNanoseconds(timestamp), Time.fromNanoseconds(deadline)))
}
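  // Round-trip sketch (illustrative): the wire format is 16 bytes holding two big-endian longs
  // of nanoseconds (timestamp, then deadline), so for any finite deadline the following holds:
  //
  //   val d = Deadline.ofTimeout(Duration.fromMilliseconds(200))
  //   Deadline.tryUnmarshal(Deadline.marshal(d)) == Return(d)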
}
| mkhq/finagle | finagle-core/src/main/scala/com/twitter/finagle/context/Deadline.scala | Scala | apache-2.0 | 2,978 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.sql
import org.apache.flink.api.scala._
import org.apache.flink.table.api.SqlParserException
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.Test
class LimitTest extends TableTestBase {
private val util = batchTestUtil()
util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
@Test
def testLimitWithoutOffset(): Unit = {
util.verifyPlan("SELECT * FROM MyTable LIMIT 5")
}
@Test
def testLimit0WithoutOffset(): Unit = {
util.verifyPlan("SELECT * FROM MyTable LIMIT 0")
}
@Test(expected = classOf[SqlParserException])
def testNegativeLimitWithoutOffset(): Unit = {
util.verifyPlan("SELECT * FROM MyTable LIMIT -1")
}
@Test
def testLimitWithOffset(): Unit = {
util.verifyPlan("SELECT a, c FROM MyTable LIMIT 10 OFFSET 1")
}
@Test
def testLimitWithOffset0(): Unit = {
util.verifyPlan("SELECT a, c FROM MyTable LIMIT 10 OFFSET 0")
}
@Test
def testLimit0WithOffset0(): Unit = {
util.verifyPlan("SELECT a, c FROM MyTable LIMIT 0 OFFSET 0")
}
@Test
def testLimit0WithOffset(): Unit = {
util.verifyPlan("SELECT a, c FROM MyTable LIMIT 0 OFFSET 10")
}
@Test(expected = classOf[SqlParserException])
def testLimitWithNegativeOffset(): Unit = {
util.verifyPlan("SELECT a, c FROM MyTable LIMIT 10 OFFSET -1")
}
@Test
def testFetchWithOffset(): Unit = {
util.verifyPlan("SELECT a, c FROM MyTable OFFSET 10 ROWS FETCH NEXT 10 ROWS ONLY")
}
@Test
def testFetchWithoutOffset(): Unit = {
util.verifyPlan("SELECT a, c FROM MyTable FETCH FIRST 10 ROWS ONLY")
}
@Test
def testFetch0WithoutOffset(): Unit = {
util.verifyPlan("SELECT a, c FROM MyTable FETCH FIRST 0 ROWS ONLY")
}
@Test
def testOnlyOffset(): Unit = {
util.verifyPlan("SELECT a, c FROM MyTable OFFSET 10 ROWS")
}
}
| fhueske/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/batch/sql/LimitTest.scala | Scala | apache-2.0 | 2,731 |