| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses 1 value | stringclasses 15 values | int64 5-1M |
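Each row below pairs a source file (the `code` column, printed in full) with its metadata columns. As a minimal sketch of how one such row could be modeled (field names come from the header above; the Scala types are assumptions):

```scala
// Illustrative model of one dataset row; the numeric ranges in the comments
// come from the schema header, the types themselves are assumed.
case class CodeRow(
  code: String,     // file contents, 5 characters to 1M characters
  repoName: String, // 5 to 109 characters
  path: String,     // 6 to 208 characters
  language: String, // a single class ("Scala")
  license: String,  // one of 15 license identifiers
  size: Long        // int64 file size, 5 to 1M
)
```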
package info.hargrave.composer.backend.manager
import java.io.{FileInputStream, FileOutputStream, File}
import info.hargrave.composer._
import info.hargrave.composer.backend.manager.ProjectController.ProjectFactory
import info.hargrave.composer.backend.manager.projects.CUEProject
import info.hargrave.composer.backend.manager.ui.ProjectUserInterface
import info.hargrave.composer.ui.PromptInterface
import info.hargrave.composer.ui.PromptInterface.{PromptType, PromptResponse}
import info.hargrave.composer.util.Localization
import scala.collection.mutable.{Map => MutableMap}
/**
* Manages a collection of projects, working with a TabPane
*/
class ProjectController(implicit val interface: ProjectUserInterface,
implicit val prompts: PromptInterface) extends AnyRef with Localization /* explicit import to fix compiler bug */ {
import ProjectController.ProjectDataAccess
private implicit val projectStorage = MutableMap[Project, File]()
/**
* Get the active project from the interface
*
* @return active project
*/
def activeProject: Option[Project] = interface.activeProject
/**
* Procedure for closing a project.
* Checks whether the project has been saved in its current state and prompts the user to save it if it has not been
*
* @param project project
*/
def closeProject(project: Project): Unit = {
logger.info(tf"log.project.close"(project))
prompts.displayConfirmationPrompt(t"dialog.save_project", t"dialog.save_project.banner",
t"dialog.save_project.body", cancelable = true) match {
case PromptResponse.CONFIRM =>
logger.info(t"log.info.project.close_confirm_save")
try {
saveProject(project)
} catch {
case anyThrowable: Throwable =>
prompts.displayNotificationPrompt(t"dialog.error", t"dialog.error_while_saving",
t"dialog.error_while_saving.body", promptType = PromptType.ERROR) match {
case PromptResponse.ACKNOWLEDGE => // Do nothing
case PromptResponse.DENY => return
}
logger.error(t"error.project.saving", anyThrowable)
}
try {
interface.closeProject(project)
} catch {
case noSuchProject: NoSuchElementException =>
logger.error(t"error.project.close", noSuchProject)
}
case PromptResponse.DENY => try {
interface.closeProject(project)
} catch {
case noSuchProject: NoSuchElementException =>
logger.error(t"error.project.close", noSuchProject)
}
case PromptResponse.CLOSED | PromptResponse.CANCEL =>
logger.info(t"log.info.project.close_confirm_dismissed")
}
}
/**
* Save a project.
*
* If promptForLocation is true, the user will be prompted for a location to save the file to. If a location different
* from the project's current location is chosen, the project's stored location will be updated to that location.
*
* If promptForLocation is false, the project location will be looked up via [[ProjectDataAccess]]. If the project
* has no location, it will be set to ''user.home/project.default_file_name'' (e.g. /home/user/untitled.cue)
*
* @param project project to save
* @param promptForLocation whether to prompt for the location to save the project to or not
*/
def saveProject(project: Project, promptForLocation: Boolean = false): Unit = {
logger.info(tf"log.project.saving"(project))
val storageLocation = project.storageLocation match {
case some: Some[File] => some.get
case None =>
if(!promptForLocation)
logger.warn(t"warn.project.location_always_default")
new File(p"user.home", t"project.default_file_name")
}
lazy val saveLocation = promptForLocation match {
case false => storageLocation
case true => askForLocation()
}
verifyAndSaveFile(saveLocation)
// End of function
// Nested functions follow
//--------------------------------------------------------------------------------------------------------------
def verifyAndSaveFile(saveFile: File): Unit = saveFile.exists() match {
case true =>
prompts.displayNotificationPrompt(t"dialog.save_file.already_exists", t"dialog.save_file.already_exists.banner",
tf"dialog.save_file.already_exists.body"(saveFile.getAbsolutePath), PromptType.WARNING) match {
case PromptResponse.ACKNOWLEDGE =>
writeProjectToFile(saveFile)
project.storageLocation = Some(saveFile)
case PromptResponse.DENY =>
val alternateSaveFile = askForLocation()
verifyAndSaveFile(alternateSaveFile)
case PromptResponse.CLOSED =>
// Do nothing
}
case false =>
writeProjectToFile(saveFile)
project.storageLocation = Some(saveFile)
}
def askForLocation(): File = {
prompts.displayFileSelectionPrompt(initialFile = Some(storageLocation), title = Some(t"dialog.save_file"),
filter = Some(project.extensionFilter), saveFile = true,
validator = fOpt => fOpt.isDefined && !fOpt.get(0).isDirectory) match {
case files: Some[Seq[File]] => files.get(0)
case None =>
logger.warn("The user failed to select a save file when prompted (somehow?)")
throw new IllegalStateException("Unreachable branch: None returned by file prompt with Some-only validator")
}
}
def writeProjectToFile(file: File): Unit = {
logger.trace(s"Blindly attempting to create file at ${file.getAbsolutePath}")
file.createNewFile()
logger.debug(s"Opening an output stream to ${file.getAbsolutePath}")
val output = new FileOutputStream(file)
logger.trace(s"Calling project#writeProject on $output")
try {
project.writeProject(output)
output.flush()
}
finally output.close()
logger.debug("Save complete")
}
}
/**
* Attempt to find a project factory based on the file extension and then construct the project.
* Once instantiated, readProject is called on an input stream constructed from the provided file.
*
* Assuming that the project does not throw an exception (see [[Project.readProject()]] for the reasons it might),
* the project will be returned; otherwise the exception will be passed up.
*
* @param file file to read from
* @return project instance
*/
@throws(classOf[NoSuchElementException])
def createProjectFromFile(file: File): Project = {
logger.info(tf"log.opening_file"(file))
val projectInstance = ProjectController.ProjectExtensionAssociations(file.getName.split("""\.""").last.toLowerCase)()
val inputStream = new FileInputStream(file)
logger.debug(s"instantiated project ($projectInstance) based on filetype")
logger.trace(s"opened input stream $inputStream on file $file")
try projectInstance.readProject(inputStream)
finally inputStream.close()
projectInstance.storageLocation = Some(file)
projectInstance
}
/**
* Creates a new project from a [[ProjectFactory]].
* Project factories can be obtained from ProjectExtensionAssociations.
*
* @param factory factory used to instantiate the new project
* @return the newly created project instance
*/
def createNewProject(factory: ProjectFactory): Project = {
logger.debug("Creating new project")
val projectInstance = factory()
logger.trace(s"Instantiated $projectInstance")
addProject(projectInstance)
projectInstance
}
/**
* Call [[Project.readProject()]] on a preexisting project and its storage location
*
* @param project project to reload
*/
def reloadProject(project: Project): Unit = project.storageLocation match {
case someFile: Some[File] =>
val fileInputStream = new FileInputStream(someFile.get)
try project.readProject(fileInputStream) finally fileInputStream.close()
case None => throw new IllegalArgumentException(t"project.no_storage_location")
}
/**
* Open a file selection dialog that allows for selection of multiple files, then process the selected files
* and open the resulting projects
*
* @return opened projects
*/
def openProjectsInteractively(): Seq[Project] = {
prompts.displayFileSelectionPrompt(title = Some(t"dialog.open_files"), filter = Some(ProjectController.FatExtensionFilter),
multipleFiles = true, validator = fOpt => fOpt.isDefined && fOpt.get.forall(_.isFile)) match {
case someFiles: Some[Seq[File]] =>
val files = someFiles.get
val projects = files.map(createProjectFromFile)
projects.foreach(addProject)
projects
case None => Seq() // Dialog was closed
}
}
/**
* Perform the necessary action to add a preexisting project to the controller
*
* @param project project
*/
def addProject(project: Project): Unit = {
logger.debug(s"adding project $project, storage: ${project.storageLocation}")
interface.addProject(project)
}
}
object ProjectController {
type ProjectFactory = () => Project
type ProjectType = Class[_<:Project]
type Extensions = Seq[String]
type ExtensionFilter = Map[String, Seq[String]]
// TODO 1-1 when real CUE files could be N-1? (.bin)
val ProjectExtensionAssociations: Map[String, ProjectFactory] = Map("cue" -> (()=> new CUEProject))
val ProjectExtensionFilters: Map[ProjectType, ExtensionFilter] =
Map(classOf[CUEProject] -> Map(t"project.type.cue" -> Seq("*.cue")))
lazy val FatExtensionFilter =
ProjectExtensionFilters.values.foldRight(Map[String, Seq[String]]()) {case(filter: ExtensionFilter, fatMap: ExtensionFilter) =>
var updatedFatMap = fatMap
filter.foreach{case(desc, exts) =>
updatedFatMap = updatedFatMap.updated(desc, updatedFatMap.get(desc) match {
case someSeq: Some[Extensions] => someSeq.get ++ exts
case None => exts
})
}
updatedFatMap
}
implicit class ProjectDataAccess (project: Project)(implicit val locationMap: MutableMap[Project, File]) {
def storageLocation: Option[File] = locationMap.get(project)
def storageLocation_=(file: Option[File]): Option[File] = file match {
case someFile: Some[File] => locationMap.put(project, file.get)
case None =>
locationMap.remove(project)
None
}
def extensionFilter: Map[String, Seq[String]] =
ProjectController.ProjectExtensionFilters.getOrElse(project.getClass, Map(t"project.type.unknown" -> Seq("*")))
}
}
| RomanHargrave/CUEComposer | src/main/scala/info/hargrave/composer/backend/manager/ProjectController.scala | Scala | gpl-3.0 | 12,614 |
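The controller above resolves a project type from the file extension (`ProjectExtensionAssociations`) before reading the file. A standalone sketch of that lookup pattern, with illustrative names that are not part of the real project:

```scala
// Minimal sketch of the extension-to-factory lookup used by
// ProjectController.createProjectFromFile; all names here are illustrative.
object ExtensionLookupSketch {
  trait Project
  class CueProject extends Project

  type ProjectFactory = () => Project

  // Mirrors ProjectExtensionAssociations: one factory per known extension.
  val associations: Map[String, ProjectFactory] = Map("cue" -> (() => new CueProject))

  // Mirrors `file.getName.split("""\.""").last.toLowerCase`, but returns an
  // Option instead of throwing NoSuchElementException for unknown extensions.
  def factoryFor(fileName: String): Option[ProjectFactory] =
    associations.get(fileName.split("""\.""").last.toLowerCase)

  def main(args: Array[String]): Unit = {
    println(factoryFor("album.CUE").map(_()))  // Some(CueProject@...)
    println(factoryFor("album.wav"))           // None
  }
}
```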
package model
import play.api.libs.json._
/**
* Represents the Swagger definition for SwapSpaceMonitorMemoryUsage2.
* @param additionalProperties Any additional properties this model may have.
*/
@javax.annotation.Generated(value = Array("org.openapitools.codegen.languages.ScalaPlayFrameworkServerCodegen"), date = "2022-02-13T02:38:35.589632Z[Etc/UTC]")
case class SwapSpaceMonitorMemoryUsage2(
`class`: Option[String],
availablePhysicalMemory: Option[Int],
availableSwapSpace: Option[Int],
totalPhysicalMemory: Option[Int],
totalSwapSpace: Option[Int],
additionalProperties: JsObject
)
object SwapSpaceMonitorMemoryUsage2 {
implicit lazy val swapSpaceMonitorMemoryUsage2JsonFormat: Format[SwapSpaceMonitorMemoryUsage2] = {
val realJsonFormat = Json.format[SwapSpaceMonitorMemoryUsage2]
val declaredPropNames = Set("`class`", "availablePhysicalMemory", "availableSwapSpace", "totalPhysicalMemory", "totalSwapSpace")
Format(
Reads {
case JsObject(xs) =>
val declaredProps = xs.filterKeys(declaredPropNames)
val additionalProps = JsObject(xs -- declaredPropNames)
val restructuredProps = declaredProps + ("additionalProperties" -> additionalProps)
val newObj = JsObject(restructuredProps)
realJsonFormat.reads(newObj)
case _ =>
JsError("error.expected.jsobject")
},
Writes { swapSpaceMonitorMemoryUsage2 =>
val jsObj = realJsonFormat.writes(swapSpaceMonitorMemoryUsage2)
val additionalProps = jsObj.value("additionalProperties").as[JsObject]
val declaredProps = jsObj - "additionalProperties"
val newObj = declaredProps ++ additionalProps
newObj
}
)
}
}
| cliffano/swaggy-jenkins | clients/scala-play-server/generated/app/model/SwapSpaceMonitorMemoryUsage2.scala | Scala | mit | 1,732 |
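The hand-written `Format` above folds undeclared JSON keys into `additionalProperties` on read and flattens them back on write. A hedged round-trip sketch, assuming play-json on the classpath and the `additionalProperties: JsObject` field as reconstructed above; the key names are illustrative:

```scala
import play.api.libs.json._
import model.SwapSpaceMonitorMemoryUsage2

object AdditionalPropsSketch {
  def main(args: Array[String]): Unit = {
    val raw = Json.parse("""{"availableSwapSpace": 42, "vendorExtension": "x"}""")

    // "vendorExtension" is not a declared property, so the Reads above moves it
    // into the additionalProperties JsObject before the macro-generated Reads runs.
    val parsed = raw.validate[SwapSpaceMonitorMemoryUsage2]
    println(parsed)

    // The Writes flattens additionalProperties back to the top level, so a
    // write after a successful read should reproduce the original shape.
    parsed.asOpt.foreach(value => println(Json.toJson(value)))
  }
}
```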
/*
* Copyright (c) 2013-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
import sbt._
import Keys._
object BuildSettings {
// Basic settings for our app
lazy val basicSettings = Seq[Setting[_]](
organization := "Snowplow Analytics Ltd",
version := "0.0.2",
description := "An example event consumer for Amazon Kinesis",
scalaVersion := "2.10.1",
scalacOptions := Seq("-deprecation", "-encoding", "utf8",
"-feature"),
scalacOptions in Test := Seq("-Yrangepos"),
resolvers ++= Dependencies.resolutionRepos
)
// Makes our SBT app settings available from within the app
lazy val scalifySettings = Seq(sourceGenerators in Compile <+= (sourceManaged in Compile, version, name, organization) map { (d, v, n, o) =>
val file = d / "settings.scala"
IO.write(file, """package com.snowplowanalytics.kinesis.consumer.generated
|object Settings {
| val organization = "%s"
| val version = "%s"
| val name = "%s"
|}
|""".stripMargin.format(o, v, n))
Seq(file)
})
// sbt-assembly settings for building a fat jar
import sbtassembly.Plugin._
import AssemblyKeys._
lazy val sbtAssemblySettings = assemblySettings ++ Seq(
// Slightly cleaner jar name
jarName in assembly := {
name.value + "-" + version.value + ".jar"
}
)
import com.github.bigtoast.sbtthrift.ThriftPlugin
lazy val buildSettings = basicSettings ++ scalifySettings ++
sbtAssemblySettings ++ ThriftPlugin.thriftSettings
}
| snowplow/kinesis-example-scala-consumer | project/BuildSettings.scala | Scala | apache-2.0 | 2,233 |
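The `scalifySettings` block generates a `Settings` object at compile time so the sbt metadata is visible to the application. A small consumption sketch (the generated package and fields come from the template above; the printed values depend on the build):

```scala
// Hedged sketch: the generated object is used like any other constant holder.
import com.snowplowanalytics.kinesis.consumer.generated.Settings

object SettingsUsageSketch {
  def main(args: Array[String]): Unit = {
    // Prints something like "kinesis-example-scala-consumer 0.0.2 by Snowplow Analytics Ltd"
    println(s"${Settings.name} ${Settings.version} by ${Settings.organization}")
  }
}
```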
/*
Copyright 2013 Stephen K Samuel
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.sksamuel.scrimage.filter
import com.sksamuel.scrimage.BufferedOpFilter
import com.sksamuel.scrimage.filter.PointillizeGridType._
/** @author Stephen Samuel */
class PointillizeFilter(angle: Float = 0.0f,
scale: Int,
edgeThickness: Float,
edgeColor: Int = 0xff000000,
fadeEdges: Boolean,
fuzziness: Float,
gridType: PointillizeGridType)
extends BufferedOpFilter {
val op = new thirdparty.jhlabs.image.PointillizeFilter
op.setAngle(angle)
op.setScale(scale)
op.setEdgeThickness(edgeThickness)
op.setEdgeColor(edgeColor)
op.setFadeEdges(fadeEdges)
op.setFuzziness(fuzziness)
gridType match {
case Random => op.setGridType(thirdparty.jhlabs.image.CellularFilter.RANDOM)
case Square => op.setGridType(thirdparty.jhlabs.image.CellularFilter.SQUARE)
case Hexagonal => op.setGridType(thirdparty.jhlabs.image.CellularFilter.HEXAGONAL)
case Octangal => op.setGridType(thirdparty.jhlabs.image.CellularFilter.OCTAGONAL)
case Triangular => op.setGridType(thirdparty.jhlabs.image.CellularFilter.TRIANGULAR)
}
}
object PointillizeFilter {
def apply(gridType: PointillizeGridType): PointillizeFilter = apply(angle = 0.0f, gridType = gridType)
def apply(angle: Float = 0.0f,
scale: Int = 6,
edgeThickness: Float = 0.4f,
edgeColor: Int = 0xff000000,
fadeEdges: Boolean = false,
fuzziness: Float = 0.1f,
gridType: PointillizeGridType = Hexagonal): PointillizeFilter = new PointillizeFilter(angle, scale, edgeThickness, edgeColor, fadeEdges, fuzziness, gridType)
}
sealed trait PointillizeGridType
object PointillizeGridType {
case object Random extends PointillizeGridType
case object Square extends PointillizeGridType
case object Hexagonal extends PointillizeGridType
case object Octangal extends PointillizeGridType
case object Triangular extends PointillizeGridType
}
| carlosFattor/scrimage | scrimage-filters/src/main/scala/com/sksamuel/scrimage/filter/PointillizeFilter.scala | Scala | apache-2.0 | 2,635 |
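A hedged construction sketch for the filter above. The two `apply` calls use only the companion object shown; actually applying the filter to an image (e.g. `image.filter(pointillize)`) depends on scrimage's `Image` API, which is assumed rather than shown here:

```scala
import com.sksamuel.scrimage.filter.{PointillizeFilter, PointillizeGridType}

object PointillizeSketch {
  // Companion defaults with an explicit hexagonal grid.
  val defaultFilter: PointillizeFilter = PointillizeFilter(PointillizeGridType.Hexagonal)

  // Fully specified construction using named arguments; values are illustrative.
  val customFilter: PointillizeFilter =
    PointillizeFilter(angle = 0.2f, scale = 10, fuzziness = 0.3f,
                      gridType = PointillizeGridType.Triangular)
}
```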
/*
* Copyright (c) 2015 Uncharted Software Inc.
* http://www.oculusinfo.com/
*
* Released under the MIT License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.oculusinfo.tilegen.tiling
import scala.collection.mutable.{Map => MutableMap}
import scala.collection.mutable
import scala.util.Try
import org.scalatest.FunSuite
import com.oculusinfo.binning.BinIndex
import com.oculusinfo.binning.TileIndex
import com.oculusinfo.binning.TileAndBinIndices
import com.oculusinfo.tilegen.util.ExtendedNumeric
import com.oculusinfo.binning.TilePyramid
import com.oculusinfo.binning.impl.AOITilePyramid
import org.apache.spark.SharedSparkContext
import org.apache.spark.rdd.RDD
/**
* @author nkronenfeld
*/
class StandardBinningFunctionsTestSuite extends FunSuite {
import StandardBinningFunctions._
test("Guassian tiling") {
val pyramid: TilePyramid = new AOITilePyramid(0.0, 0.0, 8.0, 8.0)
val index = new CartesianSchemaIndexScheme
// Use an arbitrary assymetric kernel for testing
val kernel = Array(
Array(0.02, 0.12, 0.26, 0.36, 0.46),
Array(0.13, 0.13, 0.35, 0.25, 0.15),
Array(0.24, 0.24, 0.44, 0.24, 0.24),
Array(0.35, 0.25, 0.33, 0.53, 0.13),
Array(0.46, 0.16, 0.22, 0.12, 0.00)
)
val levelsTested = List(0, 1)
// Create our locate function
val locateFcn: Seq[Any] => Traversable[(TileIndex, Array[BinIndex])] =
locateIndexOverLevelsWithKernel[Seq[Any]](kernel, index, pyramid, 8, 8)(levelsTested)
// Create our populate function
val populateFcn: (TileIndex, Array[BinIndex], Double) => MutableMap[BinIndex, Double] =
populateTileGaussian[Double](kernel)
// Tests Gaussian blurring for a single data point, given the expected number of bins output for each level
// Uses a data value of 1 and looks for a correctly shifted version of the kernel
def testGaussianTiling (startingPoint : Seq[Any], expectedNumBins: List[Int]) = {
val testPoint = List((startingPoint, 1.0))
// Run our input data through our functions to get individual bin values
val output: Seq[(TileIndex, BinIndex, Double)] = testPoint.flatMap{case (index, value) =>
locateFcn(index).flatMap{case (tile, ubins) =>
populateFcn(tile, ubins, value).map{case (bin, value) => (tile, bin, value)}
}
}
// Group the values by level
val groupedOutput = output.groupBy(f => f._1.getLevel)
// Test the output for each level
groupedOutput.foreach{ case (level, binSequence) => {
// Using data values of 1.0, the output should just be a shifted copies of the kernel
val outputUBinIndex = binSequence.map{case (tile, bin, value) => (TileIndex.tileBinIndexToUniversalBinIndex(tile, bin), value)}
// Find bin coords of input point
val (x,y) = index.toCartesian(testPoint(0)._1)
val tile = pyramid.rootToTile(x, y, level, 8, 8) //x, y, level, xBins, yBins
val bin = TileIndex.tileBinIndexToUniversalBinIndex(tile, pyramid.rootToBin(x, y, tile))
// Find the universal bin index offset of the kernel matrix top left
val (kDimX, kDimY) = (kernel(0).length, kernel.length)
val (kOffsetX, kOffsetY) = (bin.getX - kDimX/2, bin.getY - kDimY/2) // Offset of kernel matrix in tile
// Check to see that we have the expected number of bins
assert(outputUBinIndex.length == expectedNumBins(level))
outputUBinIndex.foreach(binData => {
val binIndex = binData._1
val (binKIndexX,binKIndexY) = (binIndex.getX - kOffsetX, binIndex.getY - kOffsetY)
// All bins that were output should map to somewhere in the kernel space
// and the value should match the corresponding kernel value
assert(binKIndexX >= 0 && binKIndexX < kDimX && binKIndexY >= 0 && binKIndexY < kDimY)
assert(kernel(binKIndexY)(binKIndexX) == binData._2)
})
}}
}
// Create some data with which to test them
// One point firmly in bin (3, 3) of tile (0, 0, 0, 8, 8)
testGaussianTiling(Seq[Any](3.5, 3.5), List(25, 25))
// Test edge cases for starting points on edges and corners
// and where kernel crosses tile boundaries in different directions
// Edges
testGaussianTiling(Seq[Any](0.5, 3.5), List(15, 20))
testGaussianTiling(Seq[Any](3.5, 0.5), List(15, 20))
testGaussianTiling(Seq[Any](7.5, 3.5), List(15, 15))
testGaussianTiling(Seq[Any](3.5, 7.5), List(15, 15))
// Corners
testGaussianTiling(Seq[Any](0.5, 0.5), List(9, 16))
testGaussianTiling(Seq[Any](0.5, 7.5), List(9, 12))
testGaussianTiling(Seq[Any](7.5, 0.5), List(9, 12))
testGaussianTiling(Seq[Any](7.5, 7.5), List(9, 9))
// Inner tile boundary crossings
testGaussianTiling(Seq[Any](3.8, 3.8), List(25, 25))
testGaussianTiling(Seq[Any](4.2, 3.8), List(25, 25))
testGaussianTiling(Seq[Any](3.8, 4.2), List(25, 25))
testGaussianTiling(Seq[Any](4.2, 4.2), List(25, 25))
println("success")
}
test("for vs while") {
def time (f: () => Unit): Double = {
val start = System.nanoTime()
f()
val end = System.nanoTime()
(end-start)/1000000.0
}
val n = 100000000
println("For comprehension: %.4fms".format(time(() => for (x <- 1 to n){})))
println("While loop: %.4fms".format(time(() => {
var x=0
while (x < n) {x += 1}
})))
println("While iterator: %.4fms".format(time(() => {
var x = 0
new WhileIterator(() => (x < n), () => x = x + 1).foreach(x => {})
})))
var x = 0
val wi = new WhileIterator(() => (x < n), () => x = x + 1)
println("While iterator internals: %.4fms".format(time(() => wi.foreach(x => {}))))
}
test("Test various Bresneham line functions against each other") {
val sortTiles: (TileIndex, TileIndex) => Boolean = (a, b) => {
a.getX < b.getX || (a.getX == b.getX && a.getY < b.getY)
}
val sortBins: (BinIndex, BinIndex) => Boolean = (a, b) => {
a.getX < b.getX || (a.getX == b.getX && a.getY < b.getY)
}
// Test a set of endpoints to see if the calculation of tiles through simple
// Bresenham and tiled Bresenham match
def testEndpoints (start: BinIndex, end: BinIndex, sample: TileIndex) = {
val bins = linearUniversalBins(start, end).map(TileIndex.universalBinIndexToTileBinIndex(sample, _))
val binTiles = bins.map(_.getTile).toSet.toList.sortWith(sortTiles)
val tiles = linearTiles(start, end, sample).toSet.toList.sortWith(sortTiles)
assert(binTiles == tiles)
tiles.map{tile =>
val subsetBins = bins.filter(_.getTile == tile).map(_.getBin).toList.sortWith(sortBins)
val tileBins = linearBinsForTile(start, end, tile, bin => 1.0).toList.map(_._1).sortWith(sortBins)
assert(subsetBins == tileBins)
}
}
// level 9: 131072 bins
val sample = new TileIndex(9, 0, 0)
Range(0, 256).foreach{offset =>
// Long lines
testEndpoints(new BinIndex(23309+offset, 55902), new BinIndex(24326+offset, 56447), sample)
testEndpoints(new BinIndex(23309, 55902+offset), new BinIndex(24326, 56447+offset), sample)
// Short, but multi-tile lines
testEndpoints(new BinIndex(23309+offset, 55902), new BinIndex(23701+offset, 55793), sample)
testEndpoints(new BinIndex(23309, 55902+offset), new BinIndex(23701, 55793+offset), sample)
// Very short lines
testEndpoints(new BinIndex(23309+offset, 55902), new BinIndex(23325+offset, 55912), sample)
testEndpoints(new BinIndex(23309, 55902+offset), new BinIndex(23325, 55912+offset), sample)
}
}
test("Test linear tiles with limmitted distance") {
// Make this simple - 4 bins/tile
// level 4 - 64 bins total
val sample = new TileIndex(4, 0, 0, 4, 4)
// Shift a bin at a time over boundaries to make sure tiles match perfectly.
// Test horizontally
assert(Set(new TileIndex(4, 0, 13, 4, 4), new TileIndex(4, 2, 13, 4, 4)) ===
closeLinearTiles(new BinIndex(0, 10), new BinIndex(11, 10), sample, 2).toSet)
assert(Set(new TileIndex(4, 0, 13, 4, 4), new TileIndex(4, 2, 13, 4, 4), new TileIndex(4, 3, 13, 4, 4)) ===
closeLinearTiles(new BinIndex(1, 10), new BinIndex(12, 10), sample, 2).toSet)
assert(Set(new TileIndex(4, 0, 13, 4, 4), new TileIndex(4, 1, 13, 4, 4), new TileIndex(4, 2, 13, 4, 4), new TileIndex(4, 3, 13, 4, 4)) ===
closeLinearTiles(new BinIndex(2, 10), new BinIndex(13, 10), sample, 2).toSet)
assert(Set(new TileIndex(4, 0, 13, 4, 4), new TileIndex(4, 1, 13, 4, 4), new TileIndex(4, 3, 13, 4, 4)) ===
closeLinearTiles(new BinIndex(3, 10), new BinIndex(14, 10), sample, 2).toSet)
// Test vertically
assert(Set(new TileIndex(4, 2, 15, 4, 4), new TileIndex(4, 2, 13, 4, 4)) ===
closeLinearTiles(new BinIndex(10, 0), new BinIndex(10, 11), sample, 2).toSet)
assert(Set(new TileIndex(4, 2, 15, 4, 4), new TileIndex(4, 2, 13, 4, 4), new TileIndex(4, 2, 12, 4, 4)) ===
closeLinearTiles(new BinIndex(10, 1), new BinIndex(10, 12), sample, 2).toSet)
assert(Set(new TileIndex(4, 2, 15, 4, 4), new TileIndex(4, 2, 14, 4, 4), new TileIndex(4, 2, 13, 4, 4), new TileIndex(4, 2, 12, 4, 4)) ===
closeLinearTiles(new BinIndex(10, 2), new BinIndex(10, 13), sample, 2).toSet)
assert(Set(new TileIndex(4, 2, 15, 4, 4), new TileIndex(4, 2, 14, 4, 4), new TileIndex(4, 2, 12, 4, 4)) ===
closeLinearTiles(new BinIndex(10, 3), new BinIndex(10, 14), sample, 2).toSet)
// Test diagonally
assert(Set(new TileIndex(4, 0, 15, 4, 4), new TileIndex(4, 2, 13, 4, 4)) ===
closeLinearTiles(new BinIndex(0, 0), new BinIndex(11, 11), sample, 2).toSet)
assert(Set(new TileIndex(4, 0, 15, 4, 4), new TileIndex(4, 2, 13, 4, 4), new TileIndex(4, 3, 12, 4, 4)) ===
closeLinearTiles(new BinIndex(1, 1), new BinIndex(12, 12), sample, 2).toSet)
assert(Set(new TileIndex(4, 0, 15, 4, 4), new TileIndex(4, 1, 14, 4, 4), new TileIndex(4, 2, 13, 4, 4), new TileIndex(4, 3, 12, 4, 4)) ===
closeLinearTiles(new BinIndex(2, 2), new BinIndex(13, 13), sample, 2).toSet)
assert(Set(new TileIndex(4, 0, 15, 4, 4), new TileIndex(4, 1, 14, 4, 4), new TileIndex(4, 3, 12, 4, 4)) ===
closeLinearTiles(new BinIndex(3, 3), new BinIndex(14, 14), sample, 2).toSet)
}
test("Test linear functions with limitted distance - large gap") {
// level 9: 131072 bins
val sample = new TileIndex(9, 0, 0)
val start = new BinIndex(111437, 76960)
val end = new BinIndex(103773, 81927)
val distance = 1912
val closeBins = closeLinearTiles(start, end, sample, distance).flatMap(tile =>
closeLinearBinsForTile(start, end, tile, distance, bin => 1.0).map(binValue => (binValue._1, tile))
).toList
val allBins = linearTiles(start, end, sample).flatMap(tile =>
linearBinsForTile(start, end, tile, bin => 1.0).map(binValue => (binValue._1, tile))
).toSet
def axialDistance (a: BinIndex, b: BinIndex): Int =
math.max(math.abs(a.getX - b.getX), math.abs(a.getY-b.getY))
assert(1913*2 < allBins.size)
assert(1913*2 === closeBins.size)
closeBins.foreach{case (bin, tile) =>
assert(allBins.contains((bin, tile)))
val uBin = TileIndex.tileBinIndexToUniversalBinIndex(tile, bin)
val binDistance = math.min(axialDistance(uBin, start), axialDistance(uBin, end))
assert(binDistance <= distance,
"Bin "+bin+" in tile "+tile+" is more than "+distance+" from endpoints (distance is "+binDistance+")")
}
}
// Also test:
// No gap (barely)
// Gap of 1 bin in tile
// Gap of 1 bin at edge of tile (both directions)
// Gap of several bins in the same tile
// Gap of several bins in neighboring tiles, but missing bins in only one tile (both directions)
// Gap of several bins in neighboring tiles, missing bins in each tile
// Probably sufficient to test each of these vertically, horizontally, and diagonally both directions
test("Test arc initialization") {
val s2 = math.sqrt(2)
val s3 = math.sqrt(3)
def assertArcInfo (expected: (Double, Double, Double, Double, Double, Seq[Int]),
actual: (Double, Double, Double, Double, Double, Seq[(Int, Boolean, Boolean)])) {
assert(expected._1 === actual._1)
assert(expected._2 === actual._2)
assert(expected._3 === actual._3)
assert(expected._4 === actual._4)
assert(expected._5 === actual._5)
assert(expected._6.toList === actual._6.map(_._1).toList)
}
var arcInfo: (Double, Double, Double, Double, Double, Seq[(Int, Boolean, Boolean)]) = null
// Test the 4 basic axis-crossing chords
arcInfo = initializeArc(new BinIndex(10, 10), new BinIndex(10, -10))
assertArcInfo((10-10*s3, 0.0, 20.0, 1.0/s3, -1.0/s3, List(7, 0)), arcInfo)
arcInfo = initializeArc(new BinIndex(-10, 10), new BinIndex(10, 10))
assertArcInfo((0.0, 10-10*s3, 20.0, -s3, s3, List(1, 2)), arcInfo)
arcInfo = initializeArc(new BinIndex(-10, -10), new BinIndex(-10, 10))
assertArcInfo((10.0*s3-10.0, 0.0, 20.0, 1.0/s3, -1.0/s3, List(3, 4)), arcInfo)
arcInfo = initializeArc(new BinIndex(10, -10), new BinIndex(-10, -10))
assertArcInfo((0.0, 10*s3-10.0, 20.0, -s3, s3, List(5, 6)), arcInfo)
// Same thing, with reversed coordinate order
arcInfo = initializeArc(new BinIndex(10, -10), new BinIndex(10, 10))
assertArcInfo(( 10.0 + 10.0*s3, 0.0, 20.0, 1.0/s3, -1.0/s3, List(3, 4)), arcInfo)
arcInfo = initializeArc(new BinIndex(10, 10), new BinIndex(-10, 10))
assertArcInfo((0.0, 10.0 + 10.0*s3, 20.0, -s3, s3, List(5, 6)), arcInfo)
arcInfo = initializeArc(new BinIndex(-10, 10), new BinIndex(-10, -10))
assertArcInfo((-10.0 - 10.0*s3, 0.0, 20.0, 1.0/s3, -1.0/s3, List(7, 0)), arcInfo)
arcInfo = initializeArc(new BinIndex(-10, -10), new BinIndex(10, -10))
assertArcInfo((0.0, -10.0 - 10.0*s3, 20.0, -s3, s3, List(1, 2)), arcInfo)
// Test the 4 basic diagonals
val cp = 5.0 * s3 + 5.0
val cm = 5.0 * s3 - 5.0
arcInfo = initializeArc(new BinIndex(0, 10), new BinIndex(10, 0))
assertArcInfo((-cm, -cm, 10.0*s2, cp / cm, cm / cp, List(0, 1)), arcInfo)
arcInfo = initializeArc(new BinIndex(-10, 0), new BinIndex(0, 10))
assertArcInfo((cm, -cm, 10.0*s2, - cm / cp, - cp / cm, List(2, 3)), arcInfo)
arcInfo = initializeArc(new BinIndex(0, -10), new BinIndex(-10, 0))
assertArcInfo((cm, cm, 10.0*s2, cp / cm, cm / cp, List(4, 5)), arcInfo)
arcInfo = initializeArc(new BinIndex(10, 0), new BinIndex(0, -10))
assertArcInfo((-cm, cm, 10.0*s2, - cm / cp, - cp / cm, List(6, 7)), arcInfo)
// test all 0-centered arcs in a circle
val slopeEpsilon = 0.1
// Our basic maximum point offset
val epsilon = math.sqrt(2)/2
(0 to 359).foreach{theta2 =>
val theta1 = theta2+60
val t1 = math.toRadians(theta1)
val t2 = math.toRadians(theta2)
val arcInfo = initializeArc(new BinIndex(math.round(100*math.cos(t1)).toInt,
math.round(100*math.sin(t1)).toInt),
new BinIndex(math.round(100*math.cos(t2)).toInt,
math.round(100*math.sin(t2)).toInt))
assert(ApproximateNumber(0.0, epsilon*3) === arcInfo._1, "(X center coordinate differed)")
assert(ApproximateNumber(0.0, epsilon*3) === arcInfo._2, "(Y center coordinate differed)")
assert(ApproximateNumber(100.0, epsilon*2) === arcInfo._3, "(Radius differed)")
// Tiny perturbations in rounding can cause huge perturbations in the slope (like
// changing 1E6 to -1E3), so we really can't test slopes.
val o1 = theta1/45
val o2 = theta2/45
val o1s = if (theta1%45 == 0) List(o1, (o1+1)%8) else List(o1)
val o2s = if (theta2%45 == 0) List(o2, (o2+7)%8) else List(o2)
val possibleOctants = for (oct1 <- o1s; oct2 <- o2s) yield
if (oct2 < oct1) (oct2 to oct1).map(_ % 8).toList
else (oct2 to (oct1 + 8)).map(_ % 8).toList
assert(possibleOctants.map(_ == arcInfo._6.map(_._1).toList).reduce(_ || _),
"Octants differed, got "+arcInfo._6.toList+", expected one of "+possibleOctants)
}
}
private def bi (x: Int, y: Int): BinIndex = new BinIndex(x, y)
private val tileSorter: (TileIndex, TileIndex) => Boolean = (a, b) => {
a.getX < b.getX || (a.getX == b.getX && a.getY < b.getY)
}
private val binSorter: (BinIndex, BinIndex) => Boolean = (a, b) => {
val angleA = math.atan2(a.getY, a.getX)
val angleB = math.atan2(b.getY, b.getX)
angleA < angleB
}
test("Test simple arcs - symetrical across axis") {
val bins = arcUniversalBins(bi(-7, 12), bi(7, 12)).toList.sortWith(binSorter)
// Make sure our arc bounds are correct
assert(12 === bins.map(_.getY).reduce(_ min _))
assert(14 === bins.map(_.getY).reduce(_ max _))
assert(-7 === bins.map(_.getX).reduce(_ min _))
assert(7 === bins.map(_.getX).reduce(_ max _))
// Make sure the results are symmetrical around the X axis
bins.foreach(bin => bins.contains(new BinIndex(-bin.getX, bin.getY)))
// Make sure there are no gaps
bins.sliding(2).foreach{pair =>
assert(math.abs(pair(1).getX-pair(0).getX) < 2, "Gap between "+pair(0)+" and "+pair(1))
assert(math.abs(pair(1).getY-pair(0).getY) < 2, "Gap between "+pair(0)+" and "+pair(1))
}
// Make sure there are no duplicate points
assert(bins.size === bins.toSet.size)
// Make sure the results are all approximately 14 from (0, 12 - 7 sqrt(3)) (i.e., the real center)
val idealY = 12.0 - 7.0 * math.sqrt(3)
bins.foreach{bin =>
val x = bin.getX
val y = bin.getY - idealY
val r = math.sqrt((x * x) + (y * y))
assert(new ApproximateNumber(14.0, 0.75) === r)
}
}
test("Test simple arcs - symetrical across diagonal") {
val bins = arcUniversalBins(bi(7, 27), bi(27, 7)).toList.sortWith(binSorter)
// Make sure our arc bounds are correct
assert(7 === bins.map(_.getY).reduce(_ min _))
assert(27 === bins.map(_.getY).reduce(_ max _))
assert(7 === bins.map(_.getX).reduce(_ min _))
assert(27 === bins.map(_.getX).reduce(_ max _))
// Make sure the results are symmetrical around the diagonal
bins.foreach(bin => bins.contains(new BinIndex(bin.getY, bin.getX)))
// Make sure there are no gaps
bins.sliding(2).foreach{pair =>
assert(math.abs(pair(1).getX-pair(0).getX) < 2, "Gap between "+pair(0)+" and "+pair(1))
assert(math.abs(pair(1).getY-pair(0).getY) < 2, "Gap between "+pair(0)+" and "+pair(1))
}
// Make sure there are no duplicates
assert(bins.size == bins.toSet.size)
// Make sure the results are all the right distance from the true center.
// The chord is 20 sqrt(2) long
// so the distance from the chord to the center is 10 sqrt(6)
// so the distance along each axis from the chord center to the center is 10 sqrt(3)
val idealR = 20 * math.sqrt(2)
val idealC = 17.0 - 10.0 * math.sqrt(3)
bins.foreach{bin =>
val x = bin.getX - idealC
val y = bin.getY - idealC
val r = math.sqrt((x * x) + (y * y))
assert(new ApproximateNumber(idealR, 0.75) === r)
}
}
test("Test arc tiles") {
val startBin = new BinIndex(5, 38)
val endBin = new BinIndex(41, 28)
// level 4, 4 bins per tile = 64 bins
val sample = new TileIndex(4, 0, 0, 4, 4)
val expected = arcUniversalBins(startBin, endBin)
.map(bin => TileIndex.universalBinIndexToTileBinIndex(sample, bin).getTile)
.toSet.toList.sortWith(tileSorter)
val actual = arcTiles(startBin, endBin, sample).toList.sortWith(tileSorter)
assert(expected === actual)
}
test("Test arc bins") {
val startBin = new BinIndex(5, 38)
val endBin = new BinIndex(41, 28)
// level 4, 4 bins per tile = 64 bins
val sample = new TileIndex(4, 0, 0, 4, 4)
val expected = arcUniversalBins(startBin, endBin).toList.sortWith(binSorter)
val actual = arcTiles(startBin, endBin, sample).flatMap(tile =>
arcBinsForTile(startBin, endBin, tile).map(bin => TileIndex.tileBinIndexToUniversalBinIndex(tile, bin))
).toList.sortWith(binSorter)
assert(expected === actual)
}
test("Test arc bins with limitted distance") {
def distance (a: BinIndex, b: BinIndex): Int = {
math.abs(a.getX - b.getX) max math.abs(a.getY - b.getY)
}
val startBin = new BinIndex(5, 38)
val endBin = new BinIndex(41, 28)
// level 4, 4 bins per tile = 64 bins
// Distance is 5 => 41 = 36ish; limiting to 9, should include about half
val limit = 9
val sample = new TileIndex(4, 0, 0, 4, 4)
val expected = arcUniversalBins(startBin, endBin)
.filter(bin => distance(startBin, bin) <= 9 || distance(endBin, bin) <= 9)
.toList.sortWith(binSorter)
val actual = arcTiles(startBin, endBin, sample, Some(limit)).flatMap { tile =>
val bins = arcBinsForTile(startBin, endBin, tile, Some(limit)).toList
// Make sure none of our tiles produce no bins
assert(bins.size > 0)
bins.map(bin => TileIndex.tileBinIndexToUniversalBinIndex(tile, bin))
}.toList.sortWith(binSorter)
assert(expected === actual)
}
}
object ApproximateNumber {
def apply[T: ExtendedNumeric] (target: T, epsilon: T) = new ApproximateNumber[T](target, epsilon)
}
class ApproximateNumber [T: ExtendedNumeric] (val target: T, val epsilon: T) {
override def toString = target+"+/-"+epsilon
override def equals (that: Any): Boolean = {
val numeric = implicitly[ExtendedNumeric[T]]
import numeric.mkNumericOps
import numeric.mkOrderingOps
that match {
case approx: ApproximateNumber[T] =>
if (approx.epsilon > epsilon) approx.equals(target)
else this.equals(approx.target)
case other => {
Try({
val exact = numeric.fromAny(other)
(target-epsilon <= exact && exact <= target+epsilon)
}).getOrElse(false)
}
}
}
}
| unchartedsoftware/aperture-tiles | tile-generation/src/test/scala/com/oculusinfo/tilegen/tiling/StandardBinningFunctionsTestSuite.scala | Scala | mit | 22,571 |
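The `ApproximateNumber` helper at the bottom of the suite gives tolerance-based equality for floating-point assertions. A small sketch, assuming that class (and an implicit `ExtendedNumeric[Double]`) is on the classpath; the values are illustrative:

```scala
import com.oculusinfo.tilegen.tiling.ApproximateNumber

object ApproximateNumberSketch {
  def main(args: Array[String]): Unit = {
    // equals accepts any number within +/- epsilon of the target ...
    assert(ApproximateNumber(100.0, 0.75) == 100.5)
    // ... and rejects values outside the band. Keep the approximate value on the
    // left-hand side, since the tolerance lives in its equals method.
    assert(ApproximateNumber(100.0, 0.75) != 101.0)
  }
}
```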
package almhirt.aggregates
import org.scalatest._
import scalaz._, Scalaz._
import almhirt.common._
class AggregateRootEventHandlerTests extends FlatSpec with Matchers with UserEventHandler {
import aggregatesforthelazyones._
implicit val ccuad = CanCreateUuidsAndDateTimes()
behavior of "AggregateRootEventHandler"
it should "create an aggregate root" in {
val event = UserCreated(EventHeader(), arid("a"), arv(0L), "hans", "meier")
val actual = fromEvent(event)
actual should equal(Vivus(User("a", 1L, "hans", "meier", None)))
}
it should "modify an aggregate root" in {
val event = UserAgeChanged(EventHeader(), "a", 1L, 2)
val actual = applyEvent(User("a", 1L, "hans", "meier", None), event)
actual should equal(Vivus(User("a", 2L, "hans", "meier", Some(2))))
}
it should "create and modify an aggregate root" in {
val event1 = UserCreated(EventHeader(), arid("a"), arv(0L), "hans", "meier")
val event2 = UserAgeChanged(EventHeader(), "a", 1L, 2)
val actual = applyEventPostnatalis(fromEvent(event1), event2)
actual should equal(Vivus(User("a", 2L, "hans", "meier", Some(2))))
}
it should "create and delete an aggregate root" in {
val event1 = UserCreated(EventHeader(), "a", 0L, "hans", "meier")
val event2 = UserDied(EventHeader(), "a", 1L)
val actual = applyEventPostnatalis(fromEvent(event1), event2)
actual should equal(Mortuus("a", 2L))
}
it should "create a dead aggregate root" in {
val event = UserNotAccepted(EventHeader(), arid("a"), arv(0L), "hans", "meier")
val actual = fromEvent(event)
actual should equal(Mortuus("a", 1L))
}
it should "create, modify and delete an aggregate root" in {
val event1 = UserCreated(EventHeader(), arid("a"), arv(0L), "hans", "meier")
val event2 = UserAgeChanged(EventHeader(), "a", 1L, 2)
val event3 = UserLeft(EventHeader(), "a", 2L)
val actual = applyEventsPostnatalis(fromEvent(event1), event2 :: event3 :: Nil)
actual should equal(Mortuus("a", 3L))
}
it should "return the aggregate for applyevents when there are no events" in {
val agg = User("a", 1L, "hans", "meier", Some(2))
applyEvents(User("a", 1L, "hans", "meier", Some(2)), Nil) should equal(Vivus(agg))
}
it should "return Vivus(aggregate) for applyEventsPostnatalis when there are no events" in {
val agg = Vivus(User("a", 1L, "hans", "meier", Some(2)))
applyEventsPostnatalis(agg, Nil) should equal(agg)
}
it should "return Mortuus for applyEventsPostnatalis when there are no events" in {
val agg = Mortuus("a", 1L)
applyEventsPostnatalis(agg, Nil) should equal(agg)
}
it should "throw an exception for applyevents when an event follows dead state " in {
val event1 = UserDied(EventHeader(), "a", 1L)
val event2 = UserCreated(EventHeader(), arid("a"), arv(0L), "hans", "meier")
intercept[Exception] {
applyEvents(User("a", 1L, "hans", "meier", Some(2)), event1 :: event2 :: Nil)
}
}
it should "throw an exception for applyEventPostnatalis when the state is dead" in {
val event = UserCreated(EventHeader(), arid("a"), arv(0L), "hans", "meier")
intercept[Exception] {
applyEventPostnatalis(Mortuus("a", 1L), event)
}
}
it should "throw an exception for applyEventPostnatalis when the state is dead and there are events" in {
val event = UserCreated(EventHeader(), arid("a"), arv(0L), "hans", "meier")
intercept[Exception] {
applyEventsPostnatalis(Mortuus("a", 1L), event :: Nil)
}
}
it should "throw an exception for applyEventLifecycleAgnostic when the state is dead" in {
val event = UserCreated(EventHeader(), arid("a"), arv(0L), "hans", "meier")
intercept[Exception] {
applyEventLifecycleAgnostic(Mortuus("a", 1L), event)
}
}
} | chridou/almhirt | almhirt-common/src/test/scala/almhirt/aggregates/AggregateRootEventHandlerTests.scala | Scala | apache-2.0 | 3,812 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalInteger, Input}
case class CP82(value: Option[Int]) extends CtBoxIdentifier(name = "Additions qualifying for writing down allowance") with CtOptionalInteger with Input
object CP82 {
def apply(value: Int): CP82 = CP82(Some(value))
}
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP82.scala | Scala | apache-2.0 | 933 |
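A one-line usage sketch for the box above, showing the two ways the companion lets it be built (the value is illustrative):

```scala
import uk.gov.hmrc.ct.computations.CP82

object CP82Sketch {
  def main(args: Array[String]): Unit = {
    println(CP82(2500).value) // Some(2500), via the companion's Int overload
    println(CP82(None).value) // None: no additions to report
  }
}
```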
package spark.scheduler
import java.net.URI
import spark._
import spark.storage.BlockManagerId
/**
* A stage is a set of independent tasks all computing the same function that need to run as part
* of a Spark job, where all the tasks have the same shuffle dependencies. Each DAG of tasks run
* by the scheduler is split up into stages at the boundaries where shuffle occurs, and then the
* DAGScheduler runs these stages in topological order.
*
* Each Stage can either be a shuffle map stage, in which case its tasks' results are input for
* another stage, or a result stage, in which case its tasks directly compute the action that
* initiated a job (e.g. count(), save(), etc). For shuffle map stages, we also track the nodes
* that each output partition is on.
*
* Each Stage also has a priority, which is (by default) based on the job it was submitted in.
* This allows Stages from earlier jobs to be computed first or recovered faster on failure.
*/
class Stage(
val id: Int,
val rdd: RDD[_],
val shuffleDep: Option[ShuffleDependency[_,_,_]], // Output shuffle if stage is a map stage
val parents: List[Stage],
val priority: Int)
extends Logging {
val isShuffleMap = shuffleDep != None
val numPartitions = rdd.splits.size
val outputLocs = Array.fill[List[BlockManagerId]](numPartitions)(Nil)
var numAvailableOutputs = 0
private var nextAttemptId = 0
def isAvailable: Boolean = {
if (/*parents.size == 0 &&*/ !isShuffleMap) {
true
} else {
numAvailableOutputs == numPartitions
}
}
def addOutputLoc(partition: Int, bmAddress: BlockManagerId) {
val prevList = outputLocs(partition)
outputLocs(partition) = bmAddress :: prevList
if (prevList == Nil)
numAvailableOutputs += 1
}
def removeOutputLoc(partition: Int, bmAddress: BlockManagerId) {
val prevList = outputLocs(partition)
val newList = prevList.filterNot(_ == bmAddress)
outputLocs(partition) = newList
if (prevList != Nil && newList == Nil) {
numAvailableOutputs -= 1
}
}
def removeOutputsOnHost(host: String) {
var becameUnavailable = false
for (partition <- 0 until numPartitions) {
val prevList = outputLocs(partition)
val newList = prevList.filterNot(_.ip == host)
outputLocs(partition) = newList
if (prevList != Nil && newList == Nil) {
becameUnavailable = true
numAvailableOutputs -= 1
}
}
if (becameUnavailable) {
logInfo("%s is now unavailable on %s (%d/%d, %s)".format(this, host, numAvailableOutputs, numPartitions, isAvailable))
}
}
def newAttemptId(): Int = {
val id = nextAttemptId
nextAttemptId += 1
return id
}
override def toString = "Stage " + id // + ": [RDD = " + rdd.id + ", isShuffle = " + isShuffleMap + "]"
override def hashCode(): Int = id
}
| ankurdave/arthur | core/src/main/scala/spark/scheduler/Stage.scala | Scala | bsd-3-clause | 2,862 |
package org.wartremover
package contrib.test
import org.scalatest.Assertions
import org.wartremover.test.WartTestTraverser
trait ResultAssertions extends Assertions {
def assertEmpty(result: WartTestTraverser.Result) = {
assertResult(List.empty, "result.errors")(result.errors)
assertResult(List.empty, "result.warnings")(result.warnings)
}
def assertError(result: WartTestTraverser.Result)(message: String) = assertErrors(result)(message, 1)
def assertErrors(result: WartTestTraverser.Result)(message: String, times: Int) = {
assertResult(List.fill(times)(message), "result.errors")(result.errors.map(skipTraverserPrefix))
assertResult(List.empty, "result.warnings")(result.warnings.map(skipTraverserPrefix))
}
def assertWarnings(result: WartTestTraverser.Result)(message: String, times: Int) = {
assertResult(List.empty, "result.errors")(result.errors.map(skipTraverserPrefix))
assertResult(List.fill(times)(message), "result.warnings")(result.warnings.map(skipTraverserPrefix))
}
private val messageFormat = """^\[wartremover:\S+\] (.+)$""".r
private def skipTraverserPrefix(msg: String) = msg match {
case messageFormat(rest) => rest
case s => s
}
}
| tim-zh/wartremover-contrib | core/src/test/scala/wartremover/contrib/ResultAssertions.scala | Scala | apache-2.0 | 1,215 |
package model
import skinny.DBSettings
import skinny.test._
import org.scalatest.fixture.FlatSpec
import org.scalatest._
import scalikejdbc._
import scalikejdbc.scalatest._
import org.joda.time._
class SchoolSpec extends FlatSpec with Matchers with DBSettings with AutoRollback {
}
| yoshitakes/skinny-task-example | src/test/scala/model/SchoolSpec.scala | Scala | mit | 284 |
package com.github.blemale.scaffeine
import com.github.benmanes.caffeine.cache.{AsyncLoadingCache => CaffeineAsyncLoadingCache}
import scala.collection.JavaConverters._
import scala.compat.java8.FutureConverters._
import scala.concurrent.{ExecutionContext, Future}
object AsyncLoadingCache {
def apply[K, V](
asyncLoadingCache: CaffeineAsyncLoadingCache[K, V]
): AsyncLoadingCache[K, V] =
new AsyncLoadingCache(asyncLoadingCache)
}
class AsyncLoadingCache[K, V](
override val underlying: CaffeineAsyncLoadingCache[K, V]
) extends AsyncCache[K, V](underlying) {
implicit private[this] val ec: ExecutionContext = DirectExecutionContext
/** Returns the future associated with `key` in this cache, obtaining that
* value from `loader` if necessary. If the asynchronous computation fails,
* the entry will be automatically removed.
*
* @param key
* key with which the specified value is to be associated
* @return
* the current (existing or computed) future value associated with the
* specified key
* @throws java.lang.RuntimeException
* or Error if the `loader` does when constructing the future, in which
* case the mapping is left unestablished
*/
def get(key: K): Future[V] =
underlying.get(key).toScala
/** Returns the future of a map of the values associated with `keys`, creating
* or retrieving those values if necessary. The returned map contains entries
* that were already cached, combined with newly loaded entries. If the any
* of the asynchronous computations fail, those entries will be automatically
* removed.
*
* @param keys
* the keys whose associated values are to be returned
* @return
* the future containing an mapping of keys to values for the specified
* keys in this cache
* @throws java.lang.RuntimeException
* or Error if the `loader` does so
*/
def getAll(keys: Iterable[K]): Future[Map[K, V]] =
underlying.getAll(keys.asJava).toScala.map(_.asScala.toMap)
/** Returns a view of the entries stored in this cache as a synchronous
* [[LoadingCache]]. A mapping is not present if the value is currently being
* loaded. Modifications made to the synchronous cache directly affect the
* asynchronous cache. If a modification is made to a mapping that is
* currently loading, the operation blocks until the computation completes.
*
* @return
* a thread-safe synchronous view of this cache
*/
override def synchronous(): LoadingCache[K, V] =
LoadingCache(underlying.synchronous())
override def toString = s"AsyncLoadingCache($underlying)"
}
| blemale/scaffeine | src/main/scala/com/github/blemale/scaffeine/AsyncLoadingCache.scala | Scala | apache-2.0 | 2,666 |
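A hedged usage sketch for the wrapper above. The builder call (`Scaffeine().buildAsyncFuture`) follows Scaffeine's public builder API as I understand it and should be treated as an assumption; the loader and key values are illustrative:

```scala
import com.github.blemale.scaffeine.{AsyncLoadingCache, Scaffeine}
import scala.concurrent.Future

object AsyncLoadingCacheSketch {
  // The loader runs only for missing keys; failed futures are not cached.
  val cache: AsyncLoadingCache[Int, String] =
    Scaffeine()
      .maximumSize(1000)
      .buildAsyncFuture[Int, String](key => Future.successful(s"value-$key"))

  def main(args: Array[String]): Unit = {
    val one: Future[String] = cache.get(1)                           // loads and caches key 1
    val many: Future[Map[Int, String]] = cache.getAll(Seq(1, 2, 3))  // cached + newly loaded entries
    println(cache.synchronous().getIfPresent(1))                     // synchronous view of the same cache
  }
}
```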
package org.scalamu.plugin.mutators.controllflow
import org.scalamu.plugin.{MutatingTransformer, ScalamuScalacConfig}
import scala.tools.nsc.Global
/**
* Mutation, that replaces conditional operators with their logical counterparts.
* e.g.
* {{{
* if (a > 10) {
* ..
* } else if (c == 10) {
* ..
* }
* }}}
* is replaced with
* {{{
* if (a <= 10) {
* ..
* } else if (c != 10) {
* ..
* }
* }}}
*/
case object NegateConditionals extends AbstractRelationalOperatorMutator {
override val description: String = "Negated conditional operator"
override def mutatingTransformer(global: Global, config: ScalamuScalacConfig): MutatingTransformer =
new RelationalOperatorTransformer(config)(global) {
override protected def operatorNameMapping: Map[String, String] = Map(
">" -> "<=",
"<" -> ">=",
">=" -> "<",
"<=" -> ">",
"==" -> "!=",
"!=" -> "=="
)
}
}
| sugakandrey/scalamu | scalac-plugin/src/main/scala/org/scalamu/plugin/mutators/controllflow/NegateConditionals.scala | Scala | gpl-3.0 | 948 |
package mesosphere
import java.util.concurrent.TimeUnit
import scala.annotation.tailrec
import scala.concurrent.duration.Duration
package object util {
/**
* Truncates the string output of a long list.
*
* This should be used to reduce the size of logging ids etc.
*
* @param it The iterator whose string rendering will be truncated.
* @param showFirst The number of items to show in the string.
* @tparam T element type of the iterator
* @return String representation of the truncated sequence.
*/
def summarize[T](it: Iterator[T], showFirst: Int = 3): String = {
val s = new StringBuilder
s ++= "Seq("
s ++= it.take(showFirst).toSeq.mkString(", ")
if (it.hasNext)
s ++= s", ... ${it.length} more"
s ++= ")"
s.toString
}
implicit class DurationToHumanReadable(val d: Duration) extends AnyVal {
def toHumanReadable: String = {
import TimeUnit._
def appendIfPositive(value: Long, unit: TimeUnit, res: String): String =
if (value > 0) {
s"$res $value ${unit.name().toLowerCase}"
} else res
@tailrec
def loop(unit: TimeUnit, res: String = ""): String = {
unit match {
case DAYS =>
loop(HOURS, appendIfPositive(d.toDays, unit, res))
case HOURS =>
loop(MINUTES, appendIfPositive(d.toHours % 24, unit, res))
case MINUTES =>
loop(SECONDS, appendIfPositive(d.toMinutes % 60, unit, res))
case SECONDS =>
loop(MILLISECONDS, appendIfPositive(d.toSeconds % 60, unit, res))
case MILLISECONDS =>
val value = d.toMillis % 1000
if (res.isEmpty) {
s"$value milliseconds"
} else {
appendIfPositive(value, unit, res)
}
case MICROSECONDS =>
loop(MILLISECONDS, res)
case NANOSECONDS =>
loop(MICROSECONDS, res)
}
}
loop(DAYS).trim
}
}
}
| gsantovena/marathon | src/main/scala/mesosphere/util/package.scala | Scala | apache-2.0 | 1,960 |
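Both helpers above can be exercised directly; a short sketch using only what the package object defines (the iterator contents and duration are illustrative):

```scala
import scala.concurrent.duration._
import mesosphere.util._

object UtilSketch {
  def main(args: Array[String]): Unit = {
    // summarize shows the first items plus a count of what remains in the iterator.
    println(summarize(Iterator.range(0, 100), showFirst = 3))
    // => Seq(0, 1, 2, ... 97 more)

    // toHumanReadable walks from days down to milliseconds, skipping zero-valued units.
    println(90061.seconds.toHumanReadable)
    // => 1 days 1 hours 1 minutes 1 seconds
  }
}
```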
object Test {
import scala.util.NotGiven
class Foo
implicit def foo: Foo = ???
def foo[T](implicit ev: NotGiven[T]) = ???
foo[Foo] // error
}
| lampepfl/dotty | tests/neg/i5234c.scala | Scala | apache-2.0 | 154 |
package lila.user
import org.joda.time.DateTime
case class Trophy(
_id: String, // random
user: String,
kind: Trophy.Kind,
date: DateTime)
object Trophy {
sealed abstract class Kind(
val key: String,
val name: String,
val icon: Option[String],
val url: Option[String],
val klass: Option[String])
object Kind {
object ZugMiracle extends Kind(
key = "zugMiracle",
name = "Zug miracle",
icon = none,
url = "http://lichess.org/qa/259/how-do-you-get-a-zug-miracle-trophy".some,
none)
object WayOfBerserk extends Kind(
key = "wayOfBerserk",
name = "The way of Berserk",
icon = "`".some,
url = "http://lichess.org/qa/340/way-of-berserk-trophy".some,
"fire_trophy".some)
object ExperimentalMarathonWinner extends Kind(
key = "marathonWinner",
name = "Marathon Winner",
icon = "\\\\".some,
url = "http://lichess.org/tournament/maratexp".some,
"fire_trophy".some)
object ExperimentalMarathonTopTen extends Kind(
key = "marathonTopTen",
name = "Marathon Top 10",
icon = "\\\\".some,
url = "http://lichess.org/tournament/maratexp".some,
"fire_trophy".some)
object MarathonWinner extends Kind(
key = "marathonWinner",
name = "Marathon Winner",
icon = "\\\\".some,
url = none,
"fire_trophy".some)
object MarathonTopTen extends Kind(
key = "marathonTopTen",
name = "Marathon Top 10",
icon = "\\\\".some,
url = none,
"fire_trophy".some)
object MarathonTopFifty extends Kind(
key = "marathonTopFifty",
name = "Marathon Top 50",
icon = "\\\\".some,
url = none,
"fire_trophy".some)
object MarathonSurvivor extends Kind(
key = "marathonSurvivor",
name = "Marathon #1 survivor",
icon = ",".some,
url = "http://lichess.org/blog/VXF45yYAAPQgLH4d/chess-marathon-1".some,
"fire_trophy".some)
object ExperimentalMarathon extends Kind(
key = "experimentalMarathon",
name = "Experimental Marathon Top 10",
icon = ",".some,
url = "http://lichess.org/tournament/maratexp".some,
"fire_trophy".some)
object BongcloudWarrior extends Kind(
key = "bongcloudWarrior",
name = "Bongcloud Warrior",
icon = "~".some,
url = "http://lichess.org/forum/lichess-feedback/bongcloud-trophy".some,
"fire_trophy".some)
object Developer extends Kind(
key = "developer",
name = "Lichess developer",
icon = "".some,
url = "https://github.com/ornicar/lila/graphs/contributors".some,
"icon3d".some)
object Moderator extends Kind(
key = "moderator",
name = "Lichess moderator",
icon = "".some,
url = "http://lichess.org/report".some,
"icon3d".some)
object Donor extends Kind(
key = "donor",
name = "Lichess donor",
icon = "".some,
url = "http://lichess.org/donate".some,
"icon3d".some)
object Streamer extends Kind(
key = "streamer",
name = "Lichess streamer",
icon = "".some,
url = "http://lichess.org/how-to-stream-on-lichess".some,
"icon3d".some)
val all = List(
ZugMiracle,
WayOfBerserk,
MarathonSurvivor,
ExperimentalMarathonWinner, ExperimentalMarathonTopTen,
MarathonWinner, MarathonTopTen, MarathonTopFifty,
BongcloudWarrior,
Developer, Moderator,
Streamer)
def byKey(key: String) = all find (_.key == key)
}
def make(userId: String, kind: Trophy.Kind) = Trophy(
_id = ornicar.scalalib.Random nextStringUppercase 8,
user = userId,
kind = kind,
date = DateTime.now)
}
| abougouffa/lila | modules/user/src/main/Trophy.scala | Scala | mit | 3,747 |
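A small sketch using only the definitions above: minting a trophy for a user and resolving a persisted key back to its `Kind` (the user id is illustrative):

```scala
import lila.user.Trophy

object TrophySketch {
  def main(args: Array[String]): Unit = {
    val trophy = Trophy.make("someUserId", Trophy.Kind.Developer)
    println(s"${trophy.user} earned '${trophy.kind.name}' on ${trophy.date}")

    // Kind.byKey resolves a stored key back to its Kind, if known.
    println(Trophy.Kind.byKey("moderator").map(_.name)) // Some(Lichess moderator)
    println(Trophy.Kind.byKey("nope"))                  // None
  }
}
```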
/*
* Copyright 2011-2017 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.yaidom.queryapitests.scalaxml
import eu.cdevreeze.yaidom.queryapitests.AbstractRobustQueryTest
import eu.cdevreeze.yaidom.scalaxml.ScalaXmlElem
import eu.cdevreeze.yaidom.scalaxml.ScalaXmlNode
/**
* Query test case for Scala XML wrapper elements.
*
* @author Chris de Vreeze
*/
class RobustQueryTest extends AbstractRobustQueryTest {
final type E = ScalaXmlElem
protected final val contactsElem: ScalaXmlElem = {
// See http://richard.dallaway.com/2013-02-06
val resolvingXmlLoader = new scala.xml.factory.XMLLoader[scala.xml.Elem] {
override def adapter: scala.xml.parsing.FactoryAdapter = new scala.xml.parsing.NoBindingFactoryAdapter() {
override def resolveEntity(publicId: String, systemId: String): org.xml.sax.InputSource = {
null
}
}
}
val is = classOf[RobustQueryTest].getResourceAsStream("/eu/cdevreeze/yaidom/queryapitests/contacts.xml")
val root: ScalaXmlElem = ScalaXmlNode.wrapElement(resolvingXmlLoader.load(is))
root
}
}
| dvreeze/yaidom | jvm/src/test/scala/eu/cdevreeze/yaidom/queryapitests/scalaxml/RobustQueryTest.scala | Scala | apache-2.0 | 1,638 |
package eu.inn.binders.value.internal
import scala.language.experimental.macros
import scala.language.reflectiveCalls
import scala.reflect.macros.Context
private [value] trait DynamicMacroImpl {
val c: Context
import c.universe._
def fromValue[O: c.WeakTypeTag]: c.Tree = {
val t = fresh("t")
val d = fresh("s")
val block = q"""{
val $t = ${c.prefix.tree}
ValueSerializerFactory.findFactory().withDeserializer[${weakTypeOf[O]}]($t.value) { case($d) => {
$d.unbind[${weakTypeOf[O]}]
}}
}"""
//println(block)
block
}
def toValue[O: c.WeakTypeTag]: c.Tree = {
val t = fresh("t")
val s = fresh("s")
val block = q"""{
val $t = ${c.prefix.tree}
ValueSerializerFactory.findFactory().withSerializer {case ($s) => {
$s.bind[${weakTypeOf[O]}]($t.obj)
}}
}"""
//println(block)
block
}
def fresh(prefix: String): TermName = newTermName(c.fresh(prefix))
}
| InnovaCo/binders | src/main/scala/eu/inn/binders/value/internal/DynamicMacroImpl.scala | Scala | bsd-3-clause | 962 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.lang.{Boolean => JavaBoolean}
import java.lang.{Byte => JavaByte}
import java.lang.{Double => JavaDouble}
import java.lang.{Float => JavaFloat}
import java.lang.{Integer => JavaInteger}
import java.lang.{Long => JavaLong}
import java.lang.{Short => JavaShort}
import java.math.{BigDecimal => JavaBigDecimal}
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import java.time.{Instant, LocalDate}
import java.util
import java.util.Objects
import javax.xml.bind.DatatypeConverter
import scala.math.{BigDecimal, BigInt}
import scala.reflect.runtime.universe.TypeTag
import scala.util.Try
import org.json4s.JsonAST._
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow, ScalaReflection}
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.catalyst.util.DateTimeUtils.instantToMicros
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types._
import org.apache.spark.util.Utils
object Literal {
val TrueLiteral: Literal = Literal(true, BooleanType)
val FalseLiteral: Literal = Literal(false, BooleanType)
def apply(v: Any): Literal = v match {
case i: Int => Literal(i, IntegerType)
case l: Long => Literal(l, LongType)
case d: Double => Literal(d, DoubleType)
case f: Float => Literal(f, FloatType)
case b: Byte => Literal(b, ByteType)
case s: Short => Literal(s, ShortType)
case s: String => Literal(UTF8String.fromString(s), StringType)
case c: Char => Literal(UTF8String.fromString(c.toString), StringType)
case b: Boolean => Literal(b, BooleanType)
case d: BigDecimal => Literal(Decimal(d), DecimalType.fromBigDecimal(d))
case d: JavaBigDecimal =>
Literal(Decimal(d), DecimalType(Math.max(d.precision, d.scale), d.scale()))
case d: Decimal => Literal(d, DecimalType(Math.max(d.precision, d.scale), d.scale))
case i: Instant => Literal(instantToMicros(i), TimestampType)
case t: Timestamp => Literal(DateTimeUtils.fromJavaTimestamp(t), TimestampType)
case ld: LocalDate => Literal(ld.toEpochDay.toInt, DateType)
case d: Date => Literal(DateTimeUtils.fromJavaDate(d), DateType)
case a: Array[Byte] => Literal(a, BinaryType)
case a: collection.mutable.WrappedArray[_] => apply(a.array)
case a: Array[_] =>
val elementType = componentTypeToDataType(a.getClass.getComponentType())
val dataType = ArrayType(elementType)
val convert = CatalystTypeConverters.createToCatalystConverter(dataType)
Literal(convert(a), dataType)
case i: CalendarInterval => Literal(i, CalendarIntervalType)
case null => Literal(null, NullType)
case v: Literal => v
case _ =>
throw new RuntimeException("Unsupported literal type " + v.getClass + " " + v)
}
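  // Illustrative sketch, following directly from the cases above (not part of the original docs):
  //   Literal(1)                    // Literal(1, IntegerType)
  //   Literal("abc")                // Literal(UTF8String.fromString("abc"), StringType)
  //   Literal(Array[Byte](1, 2))    // Literal(Array(1, 2), BinaryType)
  // Composite Scala values (Seq, Map, case classes) should go through `Literal.create`, which
  // converts them via CatalystTypeConverters instead of this pattern match.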
/**
* Returns the Spark SQL DataType for a given class object. Since this type needs to be resolved
   * at runtime, we use match-case idioms for class objects here. However, there are similar
   * functions in other files (e.g., HiveInspectors), so these functions need to be merged into one.
*/
private[this] def componentTypeToDataType(clz: Class[_]): DataType = clz match {
// primitive types
case JavaShort.TYPE => ShortType
case JavaInteger.TYPE => IntegerType
case JavaLong.TYPE => LongType
case JavaDouble.TYPE => DoubleType
case JavaByte.TYPE => ByteType
case JavaFloat.TYPE => FloatType
case JavaBoolean.TYPE => BooleanType
// java classes
case _ if clz == classOf[LocalDate] => DateType
case _ if clz == classOf[Date] => DateType
case _ if clz == classOf[Instant] => TimestampType
case _ if clz == classOf[Timestamp] => TimestampType
case _ if clz == classOf[JavaBigDecimal] => DecimalType.SYSTEM_DEFAULT
case _ if clz == classOf[Array[Byte]] => BinaryType
case _ if clz == classOf[JavaShort] => ShortType
case _ if clz == classOf[JavaInteger] => IntegerType
case _ if clz == classOf[JavaLong] => LongType
case _ if clz == classOf[JavaDouble] => DoubleType
case _ if clz == classOf[JavaByte] => ByteType
case _ if clz == classOf[JavaFloat] => FloatType
case _ if clz == classOf[JavaBoolean] => BooleanType
// other scala classes
case _ if clz == classOf[String] => StringType
case _ if clz == classOf[BigInt] => DecimalType.SYSTEM_DEFAULT
case _ if clz == classOf[BigDecimal] => DecimalType.SYSTEM_DEFAULT
case _ if clz == classOf[CalendarInterval] => CalendarIntervalType
case _ if clz.isArray => ArrayType(componentTypeToDataType(clz.getComponentType))
case _ => throw new AnalysisException(s"Unsupported component type $clz in arrays")
}
/**
* Constructs a [[Literal]] of [[ObjectType]], for example when you need to pass an object
* into code generation.
*/
def fromObject(obj: Any, objType: DataType): Literal = new Literal(obj, objType)
def fromObject(obj: Any): Literal = new Literal(obj, ObjectType(obj.getClass))
def create(v: Any, dataType: DataType): Literal = {
Literal(CatalystTypeConverters.convertToCatalyst(v), dataType)
}
def create[T : TypeTag](v: T): Literal = Try {
val ScalaReflection.Schema(dataType, _) = ScalaReflection.schemaFor[T]
val convert = CatalystTypeConverters.createToCatalystConverter(dataType)
Literal(convert(v), dataType)
}.getOrElse {
Literal(v)
}
/**
* Create a literal with default value for given DataType
*/
def default(dataType: DataType): Literal = dataType match {
case NullType => create(null, NullType)
case BooleanType => Literal(false)
case ByteType => Literal(0.toByte)
case ShortType => Literal(0.toShort)
case IntegerType => Literal(0)
case LongType => Literal(0L)
case FloatType => Literal(0.0f)
case DoubleType => Literal(0.0)
case dt: DecimalType => Literal(Decimal(0, dt.precision, dt.scale))
case DateType => create(0, DateType)
case TimestampType => create(0L, TimestampType)
case StringType => Literal("")
case BinaryType => Literal("".getBytes(StandardCharsets.UTF_8))
case CalendarIntervalType => Literal(new CalendarInterval(0, 0, 0))
case arr: ArrayType => create(Array(), arr)
case map: MapType => create(Map(), map)
case struct: StructType =>
create(InternalRow.fromSeq(struct.fields.map(f => default(f.dataType).value)), struct)
case udt: UserDefinedType[_] => Literal(default(udt.sqlType).value, udt)
case other =>
throw new RuntimeException(s"no default for type $dataType")
}
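  // Illustrative sketch (values follow from the branches above):
  //   Literal.default(IntegerType)              // Literal(0, IntegerType)
  //   Literal.default(StringType)               // Literal(UTF8String.fromString(""), StringType)
  //   Literal.default(ArrayType(IntegerType))   // empty array literal of the given element type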
private[expressions] def validateLiteralValue(value: Any, dataType: DataType): Unit = {
def doValidate(v: Any, dataType: DataType): Boolean = dataType match {
case _ if v == null => true
case BooleanType => v.isInstanceOf[Boolean]
case ByteType => v.isInstanceOf[Byte]
case ShortType => v.isInstanceOf[Short]
case IntegerType | DateType => v.isInstanceOf[Int]
case LongType | TimestampType => v.isInstanceOf[Long]
case FloatType => v.isInstanceOf[Float]
case DoubleType => v.isInstanceOf[Double]
case _: DecimalType => v.isInstanceOf[Decimal]
case CalendarIntervalType => v.isInstanceOf[CalendarInterval]
case BinaryType => v.isInstanceOf[Array[Byte]]
case StringType => v.isInstanceOf[UTF8String]
case st: StructType =>
v.isInstanceOf[InternalRow] && {
val row = v.asInstanceOf[InternalRow]
st.fields.map(_.dataType).zipWithIndex.forall {
case (dt, i) => doValidate(row.get(i, dt), dt)
}
}
case at: ArrayType =>
v.isInstanceOf[ArrayData] && {
val ar = v.asInstanceOf[ArrayData]
ar.numElements() == 0 || doValidate(ar.get(0, at.elementType), at.elementType)
}
case mt: MapType =>
v.isInstanceOf[MapData] && {
val map = v.asInstanceOf[MapData]
doValidate(map.keyArray(), ArrayType(mt.keyType)) &&
doValidate(map.valueArray(), ArrayType(mt.valueType))
}
case ObjectType(cls) => cls.isInstance(v)
case udt: UserDefinedType[_] => doValidate(v, udt.sqlType)
case _ => false
}
require(doValidate(value, dataType),
s"Literal must have a corresponding value to ${dataType.catalogString}, " +
s"but class ${Utils.getSimpleName(value.getClass)} found.")
}
}
/**
* An extractor that matches non-null literal values
*/
object NonNullLiteral {
def unapply(literal: Literal): Option[(Any, DataType)] = {
Option(literal.value).map(_ => (literal.value, literal.dataType))
}
}
/**
* Extractor for retrieving Float literals.
*/
object FloatLiteral {
def unapply(a: Any): Option[Float] = a match {
case Literal(a: Float, FloatType) => Some(a)
case _ => None
}
}
/**
* Extractor for retrieving Double literals.
*/
object DoubleLiteral {
def unapply(a: Any): Option[Double] = a match {
case Literal(a: Double, DoubleType) => Some(a)
case _ => None
}
}
/**
* Extractor for retrieving Int literals.
*/
object IntegerLiteral {
def unapply(a: Any): Option[Int] = a match {
case Literal(a: Int, IntegerType) => Some(a)
case _ => None
}
}
/**
* Extractor for retrieving String literals.
*/
object StringLiteral {
def unapply(a: Any): Option[String] = a match {
case Literal(s: UTF8String, StringType) => Some(s.toString)
case _ => None
}
}
/**
 * Extractor and other utility methods for decimal literals.
*/
object DecimalLiteral {
def apply(v: Long): Literal = Literal(Decimal(v))
def apply(v: Double): Literal = Literal(Decimal(v))
def unapply(e: Expression): Option[Decimal] = e match {
case Literal(v, _: DecimalType) => Some(v.asInstanceOf[Decimal])
case _ => None
}
def largerThanLargestLong(v: Decimal): Boolean = v > Decimal(Long.MaxValue)
def smallerThanSmallestLong(v: Decimal): Boolean = v < Decimal(Long.MinValue)
}
/**
 * In order to do type checking, use Literal.create() instead of the constructor
*/
case class Literal (value: Any, dataType: DataType) extends LeafExpression {
Literal.validateLiteralValue(value, dataType)
override def foldable: Boolean = true
override def nullable: Boolean = value == null
override def toString: String = value match {
case null => "null"
case binary: Array[Byte] => s"0x" + DatatypeConverter.printHexBinary(binary)
case other => other.toString
}
override def hashCode(): Int = {
val valueHashCode = value match {
case null => 0
case binary: Array[Byte] => util.Arrays.hashCode(binary)
case other => other.hashCode()
}
31 * Objects.hashCode(dataType) + valueHashCode
}
override def equals(other: Any): Boolean = other match {
case o: Literal if !dataType.equals(o.dataType) => false
case o: Literal =>
(value, o.value) match {
case (null, null) => true
case (a: Array[Byte], b: Array[Byte]) => util.Arrays.equals(a, b)
case (a, b) => a != null && a.equals(b)
}
case _ => false
}
override protected def jsonFields: List[JField] = {
// Turns all kinds of literal values to string in json field, as the type info is hard to
// retain in json format, e.g. {"a": 123} can be an int, or double, or decimal, etc.
val jsonValue = (value, dataType) match {
case (null, _) => JNull
case (i: Int, DateType) => JString(DateTimeUtils.toJavaDate(i).toString)
case (l: Long, TimestampType) => JString(DateTimeUtils.toJavaTimestamp(l).toString)
case (other, _) => JString(other.toString)
}
("value" -> jsonValue) :: ("dataType" -> dataType.jsonValue) :: Nil
}
override def eval(input: InternalRow): Any = value
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val javaType = CodeGenerator.javaType(dataType)
if (value == null) {
ExprCode.forNullValue(dataType)
} else {
def toExprCode(code: String): ExprCode = {
ExprCode.forNonNullValue(JavaCode.literal(code, dataType))
}
dataType match {
case BooleanType | IntegerType | DateType =>
toExprCode(value.toString)
case FloatType =>
value.asInstanceOf[Float] match {
case v if v.isNaN =>
toExprCode("Float.NaN")
case Float.PositiveInfinity =>
toExprCode("Float.POSITIVE_INFINITY")
case Float.NegativeInfinity =>
toExprCode("Float.NEGATIVE_INFINITY")
case _ =>
toExprCode(s"${value}F")
}
case DoubleType =>
value.asInstanceOf[Double] match {
case v if v.isNaN =>
toExprCode("Double.NaN")
case Double.PositiveInfinity =>
toExprCode("Double.POSITIVE_INFINITY")
case Double.NegativeInfinity =>
toExprCode("Double.NEGATIVE_INFINITY")
case _ =>
toExprCode(s"${value}D")
}
case ByteType | ShortType =>
ExprCode.forNonNullValue(JavaCode.expression(s"($javaType)$value", dataType))
case TimestampType | LongType =>
toExprCode(s"${value}L")
case _ =>
val constRef = ctx.addReferenceObj("literal", value, javaType)
ExprCode.forNonNullValue(JavaCode.global(constRef, dataType))
}
}
}
override def sql: String = (value, dataType) match {
case (_, NullType | _: ArrayType | _: MapType | _: StructType) if value == null => "NULL"
case _ if value == null => s"CAST(NULL AS ${dataType.sql})"
case (v: UTF8String, StringType) =>
// Escapes all backslashes and single quotes.
"'" + v.toString.replace("\\\\", "\\\\\\\\").replace("'", "\\\\'") + "'"
case (v: Byte, ByteType) => v + "Y"
case (v: Short, ShortType) => v + "S"
case (v: Long, LongType) => v + "L"
// Float type doesn't have a suffix
case (v: Float, FloatType) =>
val castedValue = v match {
case _ if v.isNaN => "'NaN'"
case Float.PositiveInfinity => "'Infinity'"
case Float.NegativeInfinity => "'-Infinity'"
case _ => s"'$v'"
}
s"CAST($castedValue AS ${FloatType.sql})"
case (v: Double, DoubleType) =>
v match {
case _ if v.isNaN => s"CAST('NaN' AS ${DoubleType.sql})"
case Double.PositiveInfinity => s"CAST('Infinity' AS ${DoubleType.sql})"
case Double.NegativeInfinity => s"CAST('-Infinity' AS ${DoubleType.sql})"
case _ => v + "D"
}
case (v: Decimal, t: DecimalType) => v + "BD"
case (v: Int, DateType) =>
val formatter = DateFormatter(DateTimeUtils.getZoneId(SQLConf.get.sessionLocalTimeZone))
s"DATE '${formatter.format(v)}'"
case (v: Long, TimestampType) =>
val formatter = TimestampFormatter.getFractionFormatter(
DateTimeUtils.getZoneId(SQLConf.get.sessionLocalTimeZone))
s"TIMESTAMP '${formatter.format(v)}'"
case (i: CalendarInterval, CalendarIntervalType) =>
s"INTERVAL '${IntervalUtils.toMultiUnitsString(i)}'"
case (v: Array[Byte], BinaryType) => s"X'${DatatypeConverter.printHexBinary(v)}'"
case _ => value.toString
}
}
| jkbradley/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/literals.scala | Scala | apache-2.0 | 16,109 |
package jp.mwsoft.wikipedia.categorizer.common
import java.io.Closeable
object Controls {
def managed[T, U <: Closeable](resource: U)(f: U => T): T = {
try f(resource)
finally resource.close()
}
def managed[T, U <: Closeable, V <: Closeable](resource1: U, resource2: V)(f: (U, V) => T): T = {
try f(resource1, resource2)
finally {
resource1.close()
resource2.close()
}
}
def tryOpt[T](f: => T): Option[T] = {
try Some(f) catch { case e: Throwable => None }
}
def retry[T](retryCount: Int)(f: => T): T = {
try f
catch {
case e: Throwable => {
if (retryCount == 0) throw e
else retry(retryCount - 1)(f)
}
}
}
def retryOpt[T](retryCount: Int)(f: => T): Option[T] = {
tryOpt(retry(retryCount)(f))
}
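  // Minimal usage sketch; the file name and URL below are only illustrative:
  //   Controls.managed(new java.io.FileInputStream("data.bin")) { in => in.read() }
  //   Controls.retryOpt(3)(scala.io.Source.fromURL("http://example.org").mkString)
  // `retry(n)` re-runs the block up to n additional times before the last failure propagates;
  // `retryOpt` additionally turns that failure into None.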
} | mwsoft/wikipedia_categorizer | src/main/scala/jp/mwsoft/wikipedia/categorizer/common/Controls.scala | Scala | mit | 804 |
/*
* DARWIN Genetic Algorithms Framework Project.
* Copyright (c) 2003, 2005, 2007, 2009, 2011, 2016, 2017. Phasmid Software
*
* Originally, developed in Java by Rubecula Software, LLC and hosted by SourceForge.
* Converted to Scala by Phasmid Software and hosted by github at https://github.com/rchillyard/Darwin
*
* This file is part of Darwin.
*
* Darwin is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.phasmid.darwin.eco
import com.phasmid.darwin.base.{CaseIdentifiable, Identifiable, Identifying}
import com.phasmid.laScala.{OldRenderableCaseClass, Prefix}
/**
* TODO redefine this: it should be a pair (or collection) of Ecologies, where there are boundaries between pairs.
*
* TODO there should be a type, similar to Genome/Phenome, perhaps called Biome, that defines the characteristics of an eco system, and a another type like Biotype that actually defines those characteristics for a specific environment.
*
* An Environment is where the fitness of phenotypes (or organisms) is evaluated to determine viability.
* An Environment is essentially the intersection of a number of EcoFactors, for each of which an organism
* is evaluated. The fitness of the various eco factors are then combined to generate the overall fitness
* for the environment.
*
* @tparam X underlying type of Environment
*
* Created by scalaprof on 5/5/16.
*/
case class Environment[T, X](name: String, ecology: Ecology[T, X], habitat: Habitat[X]) extends Identifying with Identifiable {
override def render(indent: Int)(implicit tab: (Int) => Prefix): String = CaseIdentifiable.renderAsCaseClass(this.asInstanceOf[Environment[Any, Any]])(indent)
}
trait Environmental[T, X] {
def environment: Environment[T, X]
}
case class EcoFactor[X](factor: Factor, x: X) extends Identifiable {
val name: String = factor.name
override def render(indent: Int = 0)(implicit tab: (Int) => Prefix): String = OldRenderableCaseClass(this.asInstanceOf[EcoFactor[Any]]).render(indent)(tab)
}
| rchillyard/Darwin | src/main/scala/com/phasmid/darwin/eco/Environment.scala | Scala | gpl-3.0 | 2,665 |
package com.sksamuel.avro4s.schema
import com.sksamuel.avro4s.{AvroNoDefault, AvroSchema}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class AvroNoDefaultTest extends AnyFunSuite with Matchers {
test("a field annotated with @AvroNoDefault should ignore a scala default") {
val schema = AvroSchema[NoDefaultTest]
val expected = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/avro_nodefault.json"))
schema.toString(true) shouldBe expected.toString(true)
}
}
case class NoDefaultTest(@AvroNoDefault a: String = "foowoo")
| sksamuel/avro4s | avro4s-core/src/test/scala/com/sksamuel/avro4s/schema/AvroNoDefaultTest.scala | Scala | apache-2.0 | 606 |
import org.workcraft.pluginmanager.Plugin
/*
// Commented because otherwise they get on the default class-path
package org.workcraft.plugins {
class NotAPlugin
class GoodPluginA extends Plugin
class GoodPluginB extends Plugin
class GoodPluginC extends Plugin
abstract class AbstractPlugin extends Plugin
class PluginWithNoDefaultConstructor(val x: Int) extends Plugin
}
package somepackage {
class PluginInAWrongPackage extends Plugin
}
*/ | tuura/workcraft-2.2 | PluginManager/src/test/scala/org/workcraft/pluginmanager/TestPlugins.scala | Scala | gpl-3.0 | 475 |
package org.crudible.core.binding.model
import org.crudible.core.binding.traits.HasLabel
import org.crudible.core.binding.traits.HasPlaceholder
import org.crudible.core.binding.traits.HasPrefix
import org.crudible.core.binding.traits.HasMax
import org.crudible.core.binding.traits.HasReadonly
import org.crudible.core.binding.traits.HasInvalidInputFormatMessage
object TextComponent {
val TEXT = 20
val PASSWORD = 30
}
class TextComponent extends HasLabel with HasPlaceholder with HasPrefix with HasMax with HasReadonly with HasInvalidInputFormatMessage {
case class StyleDecorator(style: Int)
def style(style: Int) = {
this.decorateWith(StyleDecorator(style))
}
protected def style() = {
this.getDecorator[StyleDecorator].map(_.style).getOrElse(TextComponent.TEXT)
}
def isPassword() = {
this.style() == TextComponent.PASSWORD
}
def isText() = {
this.style() == TextComponent.TEXT
}
} | rehei/crudible | crudible-core/src/main/scala/org/crudible/core/binding/model/TextComponent.scala | Scala | apache-2.0 | 932 |
// Copyright (C) 2017 Calin Cruceru <[email protected]>.
//
// See the LICENCE file distributed with this work for additional
// information regarding copyright ownership.
package org.symnet
package models.iptables
package extensions.tcp
// 3rd-party
// -> Symnet
import org.change.v2.analysis.expression.concrete.ConstantValue
import org.change.v2.analysis.processingmodels.instructions.{:&:, :>=:, :<=:, Constrain}
import org.change.v2.util.canonicalnames.{Proto, TcpSrc, TcpDst, TCPProto}
// project
import core._
import types.net.{Ipv4, Port}
case class SourcePortMatch(
lowerPort: Port,
upperPort: Option[Port]) extends Match {
type Self = this.type
override def seflCondition(options: SeflGenOptions): SeflCondition = {
val (lower, upper) = (lowerPort, upperPort getOrElse lowerPort)
SeflCondition.single(Constrain(TcpSrc, :&:(:>=:(ConstantValue(lower)),
:<=:(ConstantValue(upper)))))
}
}
case class DestinationPortMatch(
lowerPort: Port,
upperPort: Option[Port]) extends Match {
type Self = this.type
override def seflCondition(options: SeflGenOptions): SeflCondition = {
val (lower, upper) = (lowerPort, upperPort getOrElse lowerPort)
SeflCondition.single(Constrain(TcpDst, :&:(:>=:(ConstantValue(lower)),
:<=:(ConstantValue(upper)))))
}
}
object SourcePortMatch extends BaseParsers {
import ParserMP.monadPlusSyntax._
def parser: Parser[Match] =
for {
_ <- spacesParser
n1 <- optional(parseChar('!') >> someSpacesParser)
_ <- oneOf(parseString("--source-port"), parseString("--sport"))
n2 <- conditional(optional(someSpacesParser >> parseChar('!')),
!n1.isDefined)
lowerPort <- someSpacesParser >> portParser
maybeUpperPort <- optional(parseChar(':') >> portParser)
} yield Match.maybeNegated(
SourcePortMatch(lowerPort, maybeUpperPort), n1 orElse n2.flatten)
}
object DestinationPortMatch extends BaseParsers {
import ParserMP.monadPlusSyntax._
def parser: Parser[Match] =
for {
_ <- spacesParser
n1 <- optional(parseChar('!') >> someSpacesParser)
_ <- oneOf(parseString("--destination-port"), parseString("--dport"))
n2 <- conditional(optional(someSpacesParser >> parseChar('!')),
!n1.isDefined)
lowerPort <- someSpacesParser >> portParser
maybeUpperPort <- optional(parseChar(':') >> portParser)
} yield Match.maybeNegated(
DestinationPortMatch(lowerPort, maybeUpperPort), n1 orElse n2.flatten)
}
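// Illustrative iptables fragments that the two parsers above are meant to accept (the exact
// surrounding rule context is assumed):
//   "--sport 1024:65535"   -> SourcePortMatch(1024, Some(65535))
//   "! --dport 80"         -> a negated DestinationPortMatch(80, None)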
| calincru/iptables-sefl | src/main/scala/org/symnet/models/iptables/extensions/tcp/PortMatch.scala | Scala | mit | 2,632 |
package openreveal.service
import java.nio.file.Files
import com.hp.hpl.jena.vocabulary.RDF
import openreveal.rdf.RdfTdbModelProvider
import openreveal.schema.OpenRevealSchema
import openreveal.service.impl.JenaFactStorage
import org.apache.commons.io.FileUtils
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfter, FlatSpec}
/**
* Created by Paul Lysak on 03.06.15.
*
*/
class TDBPersistenceSpec extends FlatSpec with BeforeAndAfterAll {
val tmpDir = Files.createTempDirectory("openreveal_tdb_test")
"TDB" should "persist some data" in {
val modelProvider = new RdfTdbModelProvider(tmpDir.toString)
val storage1 = new JenaFactStorage(modelProvider, DefaultClock)
storage1.createUser("user1", "[email protected]")
modelProvider.readWithModel({ model =>
val userRes1 = model.getResource("user1")
userRes1.getRequiredProperty(RDF.`type`).getObject === OpenRevealSchema.User.a
})
}
override def afterAll() {
FileUtils.deleteDirectory(tmpDir.toFile)
}
}
| paul-lysak/OpenReveal | src/test/scala/openreveal/service/TDBPersistenceSpec.scala | Scala | apache-2.0 | 1,005 |
package fs2
package io
package tcp
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import java.net.{InetSocketAddress, SocketAddress, StandardSocketOptions}
import java.nio.ByteBuffer
import java.nio.channels.spi.AsynchronousChannelProvider
import java.nio.channels.{
AsynchronousChannelGroup,
AsynchronousCloseException,
AsynchronousServerSocketChannel,
AsynchronousSocketChannel,
CompletionHandler
}
import java.util.concurrent.TimeUnit
import cats.effect.{Effect, IO}
import cats.implicits._
import fs2.Stream._
/**
* Provides the ability to read/write from a TCP socket in the effect `F`.
*
* To construct a `Socket`, use the methods in the [[fs2.io.tcp]] package object.
*/
trait Socket[F[_]] {
/**
* Reads up to `maxBytes` from the peer.
*
   * Evaluates to None if there are no more bytes to be read in the future, i.e. the stream reached
   * its End-Of-Stream state before returning even a single byte. Otherwise returns Some(bytes) with
   * the bytes that were ready to be read.
   *
   * If `timeout` is specified, the resulting `F` will fail with `java.nio.channels.InterruptedByTimeoutException`
   * if the read was not satisfied within the given timeout. A read is satisfied when at least a
   * single byte was received before `timeout` expires.
   *
   * This may also return None when the end of stream has been reached before the timeout expired
   * and no data has been received.
*/
def read(maxBytes: Int, timeout: Option[FiniteDuration] = None): F[Option[Chunk[Byte]]]
/**
   * Reads a stream of bytes from this socket with `read` semantics. Terminates when eof is received.
* On timeout, this fails with `java.nio.channels.InterruptedByTimeoutException`.
*/
def reads(maxBytes: Int, timeout: Option[FiniteDuration] = None): Stream[F, Byte]
/**
* Reads exactly `numBytes` from the peer in a single chunk.
* If `timeout` is provided and no data arrives within the specified duration, then this results in
* failure with `java.nio.channels.InterruptedByTimeoutException`.
*
   * When the returned chunk contains fewer than `numBytes` bytes, end-of-stream has been reached.
*/
def readN(numBytes: Int, timeout: Option[FiniteDuration] = None): F[Option[Chunk[Byte]]]
  /** Indicates that this channel will not read more data. Causes `End-Of-Stream` to be signalled to `available`. */
def endOfInput: F[Unit]
  /** Indicates to the peer that we are done writing. **/
def endOfOutput: F[Unit]
/** Closes the connection corresponding to this `Socket`. */
def close: F[Unit]
/** Asks for the remote address of the peer. */
def remoteAddress: F[SocketAddress]
/** Asks for the local address of the socket. */
def localAddress: F[SocketAddress]
/**
* Writes `bytes` to the peer. If `timeout` is provided
* and the operation does not complete in the specified duration,
   * the returned `F` fails with a `java.nio.channels.InterruptedByTimeoutException`.
*
* Completes when bytes are written to the socket.
*/
def write(bytes: Chunk[Byte], timeout: Option[FiniteDuration] = None): F[Unit]
/**
* Writes the supplied stream of bytes to this socket via `write` semantics.
*/
def writes(timeout: Option[FiniteDuration] = None): Sink[F, Byte]
}
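/*
 * Rough usage sketch for the trait above. It assumes the `client` constructor from the
 * enclosing `fs2.io.tcp` package object plus an implicit AsynchronousChannelGroup and
 * ExecutionContext in scope; the host, port and payload are purely illustrative:
 *
 *   val roundTrip: Stream[IO, Byte] =
 *     client[IO](new InetSocketAddress("localhost", 5555)).flatMap { socket =>
 *       Stream.chunk(Chunk.bytes("ping".getBytes)).covary[IO].to(socket.writes()).drain ++
 *         socket.reads(4096)
 *     }
 */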
protected[tcp] object Socket {
/** see [[fs2.io.tcp.client]] **/
def client[F[_]](
to: InetSocketAddress,
reuseAddress: Boolean,
sendBufferSize: Int,
receiveBufferSize: Int,
keepAlive: Boolean,
noDelay: Boolean
)(
implicit AG: AsynchronousChannelGroup,
F: Effect[F],
ec: ExecutionContext
): Stream[F, Socket[F]] = Stream.suspend {
def setup: Stream[F, AsynchronousSocketChannel] = Stream.suspend {
val ch =
AsynchronousChannelProvider.provider().openAsynchronousSocketChannel(AG)
ch.setOption[java.lang.Boolean](StandardSocketOptions.SO_REUSEADDR, reuseAddress)
ch.setOption[Integer](StandardSocketOptions.SO_SNDBUF, sendBufferSize)
ch.setOption[Integer](StandardSocketOptions.SO_RCVBUF, receiveBufferSize)
ch.setOption[java.lang.Boolean](StandardSocketOptions.SO_KEEPALIVE, keepAlive)
ch.setOption[java.lang.Boolean](StandardSocketOptions.TCP_NODELAY, noDelay)
Stream.emit(ch)
}
def connect(ch: AsynchronousSocketChannel): F[AsynchronousSocketChannel] =
F.async { cb =>
ch.connect(
to,
null,
new CompletionHandler[Void, Void] {
def completed(result: Void, attachment: Void): Unit =
async.unsafeRunAsync(F.delay(cb(Right(ch))))(_ => IO.pure(()))
def failed(rsn: Throwable, attachment: Void): Unit =
async.unsafeRunAsync(F.delay(cb(Left(rsn))))(_ => IO.pure(()))
}
)
}
def cleanup(ch: AsynchronousSocketChannel): F[Unit] =
F.delay { ch.close() }
setup.flatMap { ch =>
Stream.bracket(connect(ch))({ _ =>
eval(mkSocket(ch))
}, cleanup)
}
}
def server[F[_]](address: InetSocketAddress,
maxQueued: Int,
reuseAddress: Boolean,
receiveBufferSize: Int)(
implicit AG: AsynchronousChannelGroup,
F: Effect[F],
ec: ExecutionContext
): Stream[F, Either[InetSocketAddress, Stream[F, Socket[F]]]] =
Stream.suspend {
def setup: F[AsynchronousServerSocketChannel] = F.delay {
val ch = AsynchronousChannelProvider
.provider()
.openAsynchronousServerSocketChannel(AG)
ch.setOption[java.lang.Boolean](StandardSocketOptions.SO_REUSEADDR, reuseAddress)
ch.setOption[Integer](StandardSocketOptions.SO_RCVBUF, receiveBufferSize)
ch.bind(address)
ch
}
def cleanup(sch: AsynchronousServerSocketChannel): F[Unit] = F.delay {
if (sch.isOpen) sch.close()
}
def acceptIncoming(sch: AsynchronousServerSocketChannel): Stream[F, Stream[F, Socket[F]]] = {
def go: Stream[F, Stream[F, Socket[F]]] = {
def acceptChannel: F[AsynchronousSocketChannel] =
F.async[AsynchronousSocketChannel] { cb =>
sch.accept(
null,
new CompletionHandler[AsynchronousSocketChannel, Void] {
def completed(ch: AsynchronousSocketChannel, attachment: Void): Unit =
async.unsafeRunAsync(F.delay(cb(Right(ch))))(_ => IO.pure(()))
def failed(rsn: Throwable, attachment: Void): Unit =
async.unsafeRunAsync(F.delay(cb(Left(rsn))))(_ => IO.pure(()))
}
)
}
def close(ch: AsynchronousSocketChannel): F[Unit] =
F.delay { if (ch.isOpen) ch.close() }.attempt.as(())
eval(acceptChannel.attempt).map {
case Left(err) => Stream.empty.covary[F]
case Right(accepted) =>
eval(mkSocket(accepted)).onFinalize(close(accepted))
} ++ go
}
go.handleErrorWith {
case err: AsynchronousCloseException =>
if (sch.isOpen) Stream.raiseError(err)
else Stream.empty
case err => Stream.raiseError(err)
}
}
Stream.bracket(setup)(
sch =>
Stream.emit(Left(sch.getLocalAddress.asInstanceOf[InetSocketAddress])) ++ acceptIncoming(
sch)
.map(Right(_)),
cleanup)
}
def mkSocket[F[_]](ch: AsynchronousSocketChannel)(implicit F: Effect[F],
ec: ExecutionContext): F[Socket[F]] = {
async.semaphore(1).flatMap { readSemaphore =>
async.refOf[F, ByteBuffer](ByteBuffer.allocate(0)).map { bufferRef =>
        // Reads data into the remaining capacity of the supplied ByteBuffer.
        // Also measures the time the read took, returning it as a tuple
        // of (bytes_read, read_duration)
def readChunk(buff: ByteBuffer, timeoutMs: Long): F[(Int, Long)] =
F.async { cb =>
val started = System.currentTimeMillis()
ch.read(
buff,
timeoutMs,
TimeUnit.MILLISECONDS,
(),
new CompletionHandler[Integer, Unit] {
def completed(result: Integer, attachment: Unit): Unit = {
val took = System.currentTimeMillis() - started
async.unsafeRunAsync(F.delay(cb(Right((result, took)))))(_ => IO.unit)
}
def failed(err: Throwable, attachment: Unit): Unit =
async.unsafeRunAsync(F.delay(cb(Left(err))))(_ => IO.unit)
}
)
}
        // Gets a buffer of the desired capacity, ready for the first read operation.
        // If the cached buffer does not have the desired capacity it is resized (recreated);
        // the buffer is also reset so it is ready to be written into.
def getBufferOf(sz: Int): F[ByteBuffer] =
bufferRef.get.flatMap { buff =>
if (buff.capacity() < sz)
F.delay(ByteBuffer.allocate(sz)).flatTap(bufferRef.setSync)
else
F.delay {
buff.clear()
buff.limit(sz)
buff
}
}
        // When the read operation is done, this will read up to buffer's position bytes from the buffer;
        // it expects the buffer's position to equal the number of bytes read.
def releaseBuffer(buff: ByteBuffer): F[Chunk[Byte]] = F.delay {
val read = buff.position()
val result =
if (read == 0) Chunk.bytes(Array.empty)
else {
val dest = new Array[Byte](read)
buff.flip()
buff.get(dest)
Chunk.bytes(dest)
}
buff.clear()
result
}
def read0(max: Int, timeout: Option[FiniteDuration]): F[Option[Chunk[Byte]]] =
readSemaphore.decrement *>
F.attempt[Option[Chunk[Byte]]](getBufferOf(max).flatMap { buff =>
readChunk(buff, timeout.map(_.toMillis).getOrElse(0l)).flatMap {
case (read, _) =>
if (read < 0) F.pure(None)
else releaseBuffer(buff).map(Some(_))
}
})
.flatMap { r =>
readSemaphore.increment *> (r match {
case Left(err) => F.raiseError(err)
case Right(maybeChunk) => F.pure(maybeChunk)
})
}
def readN0(max: Int, timeout: Option[FiniteDuration]): F[Option[Chunk[Byte]]] =
(readSemaphore.decrement *>
F.attempt(getBufferOf(max).flatMap { buff =>
def go(timeoutMs: Long): F[Option[Chunk[Byte]]] =
readChunk(buff, timeoutMs).flatMap {
case (readBytes, took) =>
if (readBytes < 0 || buff.position() >= max) {
// read is done
releaseBuffer(buff).map(Some(_))
} else go((timeoutMs - took).max(0))
}
go(timeout.map(_.toMillis).getOrElse(0l))
})).flatMap { r =>
readSemaphore.increment *> (r match {
case Left(err) => F.raiseError(err)
case Right(maybeChunk) => F.pure(maybeChunk)
})
}
def write0(bytes: Chunk[Byte], timeout: Option[FiniteDuration]): F[Unit] = {
def go(buff: ByteBuffer, remains: Long): F[Unit] =
F.async[Option[Long]] { cb =>
val start = System.currentTimeMillis()
ch.write(
buff,
remains,
TimeUnit.MILLISECONDS,
(),
new CompletionHandler[Integer, Unit] {
def completed(result: Integer, attachment: Unit): Unit =
async.unsafeRunAsync(
F.delay(cb(Right(
if (buff.remaining() <= 0) None
else Some(System.currentTimeMillis() - start)
))))(_ => IO.pure(()))
def failed(err: Throwable, attachment: Unit): Unit =
async.unsafeRunAsync(F.delay(cb(Left(err))))(_ => IO.pure(()))
}
)
}
.flatMap {
case None => F.pure(())
case Some(took) => go(buff, (remains - took).max(0))
}
go(bytes.toBytes.toByteBuffer, timeout.map(_.toMillis).getOrElse(0l))
}
///////////////////////////////////
///////////////////////////////////
new Socket[F] {
def readN(numBytes: Int, timeout: Option[FiniteDuration]): F[Option[Chunk[Byte]]] =
readN0(numBytes, timeout)
def read(maxBytes: Int, timeout: Option[FiniteDuration]): F[Option[Chunk[Byte]]] =
read0(maxBytes, timeout)
def reads(maxBytes: Int, timeout: Option[FiniteDuration]): Stream[F, Byte] =
Stream.eval(read(maxBytes, timeout)).flatMap {
case Some(bytes) =>
Stream.chunk(bytes) ++ reads(maxBytes, timeout)
case None => Stream.empty
}
def write(bytes: Chunk[Byte], timeout: Option[FiniteDuration]): F[Unit] =
write0(bytes, timeout)
def writes(timeout: Option[FiniteDuration]): Sink[F, Byte] =
_.chunks.flatMap { bs =>
Stream.eval(write(bs, timeout))
}
def localAddress: F[SocketAddress] = F.delay(ch.getLocalAddress)
def remoteAddress: F[SocketAddress] = F.delay(ch.getRemoteAddress)
def close: F[Unit] = F.delay(ch.close())
def endOfOutput: F[Unit] = F.delay { ch.shutdownOutput(); () }
def endOfInput: F[Unit] = F.delay { ch.shutdownInput(); () }
}
}
}
}
}
| zaneli/fs2 | io/src/main/scala/fs2/io/tcp/Socket.scala | Scala | mit | 13,986 |
package org.scalaide.core.semantichighlighting.classifier
import org.scalaide.core.internal.decorators.semantichighlighting.classifier.SymbolTypes._
import org.scalaide.core.internal.decorators.semantichighlighting.classifier.SymbolInfo
import org.junit._
class DeprecatedMethodTest extends AbstractSymbolClassifierTest {
@Test
def deprecated_method(): Unit = {
checkSymbolInfoClassification("""
object A {
@deprecated def deprecatedMethod() = 12
val a = deprecatedMethod
}""", """
object A {
@deprecated def $ DEP_METH $() = 12
val a = $ DEP_METH $
}""",
Map("DEP_METH" -> SymbolInfo(Method, Nil, deprecated = true, inInterpolatedString = false)))
}
} | Kwestor/scala-ide | org.scala-ide.sdt.core.tests/src/org/scalaide/core/semantichighlighting/classifier/DeprecatedMethodTest.scala | Scala | bsd-3-clause | 735 |
package ch.wsl.box.client.views.components.widget.labels
import ch.wsl.box.client.routes.Routes
import ch.wsl.box.client.views.components.widget.{ComponentWidgetFactory, Widget, WidgetParams}
import ch.wsl.box.model.shared.{JSONField, WidgetsNames}
import ch.wsl.box.shared.utils.JSONUtils.EnhancedJson
import io.udash._
import scalatags.JsDom
import scalatags.JsDom.all._
import yamusca.imports._
import io.circe._
object HtmlWidget extends ComponentWidgetFactory {
override def name: String = WidgetsNames.html
override def create(params: WidgetParams): Widget = HtmlWidgetImpl(params.field,params.allData)
case class HtmlWidgetImpl(field:JSONField,data:ReadableProperty[Json]) extends Widget {
val _text:String = field.label.getOrElse(field.name)
val template = mustache.parse(_text)
override protected def show(): JsDom.all.Modifier = template match {
case Left(_) => raw(_text)
case Right(tmpl) => {
val renderer = mustache.render(tmpl)
val variables = tmpl.els.flatMap{
case Variable(key, _) => Some(key)
case Section(key, _, _) => Some(key)
case _ => None
}
val watchedVariables:ReadableProperty[Context] = data.transform{ js =>
val values = variables.map{v =>
v -> js.js(v).toMustacheValue
} ++ Seq(
"BASE_URI" -> Value.of(Routes.baseUri),
"FULL_URL" -> Value.of(Routes.fullUrl)
)
Context(values:_*)
}
autoRelease(produce(watchedVariables) { context =>
raw(renderer(context)).render
})
}
}
override protected def edit(): JsDom.all.Modifier = show()
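    // Illustrative sketch: a field label such as
    //   "<p>See <a href=\"{{BASE_URI}}/doc\">docs</a> for record {{id}}</p>"
    // is parsed as a mustache template; a variable like `{{id}}` (hypothetical field name) is
    // looked up in the surrounding JSON data, while BASE_URI/FULL_URL are injected above.
    // A label that fails to parse as a template is rendered as raw HTML instead.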
}
} | Insubric/box | client/src/main/scala/ch/wsl/box/client/views/components/widget/labels/HtmlWidget.scala | Scala | apache-2.0 | 1,697 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.util
import java.sql.{Date, Timestamp}
import java.text.{DateFormat, SimpleDateFormat}
import java.util.{Calendar, Locale, TimeZone}
import java.util.concurrent.ConcurrentHashMap
import java.util.function.{Function => JFunction}
import javax.xml.bind.DatatypeConverter
import scala.annotation.tailrec
import org.apache.spark.unsafe.types.UTF8String
/**
* Helper functions for converting between internal and external date and time representations.
* Dates are exposed externally as java.sql.Date and are represented internally as the number of
 * days since the Unix epoch (1970-01-01). Timestamps are exposed externally as java.sql.Timestamp
* and are stored internally as longs, which are capable of storing timestamps with microsecond
* precision.
*/
object DateTimeUtils {
// we use Int and Long internally to represent [[DateType]] and [[TimestampType]]
type SQLDate = Int
type SQLTimestamp = Long
// see http://stackoverflow.com/questions/466321/convert-unix-timestamp-to-julian
  // it's 2440587.5, rounded up to be compatible with Hive
final val JULIAN_DAY_OF_EPOCH = 2440588
final val SECONDS_PER_DAY = 60 * 60 * 24L
final val MICROS_PER_MILLIS = 1000L
final val MICROS_PER_SECOND = MICROS_PER_MILLIS * MILLIS_PER_SECOND
final val MILLIS_PER_SECOND = 1000L
final val NANOS_PER_SECOND = MICROS_PER_SECOND * 1000L
final val MICROS_PER_DAY = MICROS_PER_SECOND * SECONDS_PER_DAY
final val MILLIS_PER_DAY = SECONDS_PER_DAY * 1000L
// number of days in 400 years
final val daysIn400Years: Int = 146097
// number of days between 1.1.1970 and 1.1.2001
final val to2001 = -11323
  // this is year -17999, calculation: 50 * daysIn400Years
final val YearZero = -17999
final val toYearZero = to2001 + 7304850
final val TimeZoneGMT = TimeZone.getTimeZone("GMT")
final val TimeZoneUTC = TimeZone.getTimeZone("UTC")
final val MonthOf31Days = Set(1, 3, 5, 7, 8, 10, 12)
val TIMEZONE_OPTION = "timeZone"
def defaultTimeZone(): TimeZone = TimeZone.getDefault()
// Reuse the Calendar object in each thread as it is expensive to create in each method call.
private val threadLocalGmtCalendar = new ThreadLocal[Calendar] {
override protected def initialValue: Calendar = {
Calendar.getInstance(TimeZoneGMT)
}
}
// `SimpleDateFormat` is not thread-safe.
private val threadLocalTimestampFormat = new ThreadLocal[DateFormat] {
override def initialValue(): SimpleDateFormat = {
new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US)
}
}
def getThreadLocalTimestampFormat(timeZone: TimeZone): DateFormat = {
val sdf = threadLocalTimestampFormat.get()
sdf.setTimeZone(timeZone)
sdf
}
// `SimpleDateFormat` is not thread-safe.
private val threadLocalDateFormat = new ThreadLocal[DateFormat] {
override def initialValue(): SimpleDateFormat = {
new SimpleDateFormat("yyyy-MM-dd", Locale.US)
}
}
def getThreadLocalDateFormat(): DateFormat = {
val sdf = threadLocalDateFormat.get()
sdf.setTimeZone(defaultTimeZone())
sdf
}
private val computedTimeZones = new ConcurrentHashMap[String, TimeZone]
private val computeTimeZone = new JFunction[String, TimeZone] {
override def apply(timeZoneId: String): TimeZone = TimeZone.getTimeZone(timeZoneId)
}
def getTimeZone(timeZoneId: String): TimeZone = {
computedTimeZones.computeIfAbsent(timeZoneId, computeTimeZone)
}
def newDateFormat(formatString: String, timeZone: TimeZone): DateFormat = {
val sdf = new SimpleDateFormat(formatString, Locale.US)
sdf.setTimeZone(timeZone)
    // Enable strict parsing: if the input date/format is invalid, it will throw an exception,
    // e.g. when parsing the invalid date '2016-13-12', or '2016-01-12' with the invalid format
    // 'yyyy-aa-dd', an exception will be thrown.
sdf.setLenient(false)
sdf
}
// we should use the exact day as Int, for example, (year, month, day) -> day
def millisToDays(millisUtc: Long): SQLDate = {
millisToDays(millisUtc, defaultTimeZone())
}
def millisToDays(millisUtc: Long, timeZone: TimeZone): SQLDate = {
// SPARK-6785: use Math.floor so negative number of days (dates before 1970)
// will correctly work as input for function toJavaDate(Int)
val millisLocal = millisUtc + timeZone.getOffset(millisUtc)
Math.floor(millisLocal.toDouble / MILLIS_PER_DAY).toInt
}
// reverse of millisToDays
def daysToMillis(days: SQLDate): Long = {
daysToMillis(days, defaultTimeZone())
}
def daysToMillis(days: SQLDate, timeZone: TimeZone): Long = {
val millisLocal = days.toLong * MILLIS_PER_DAY
millisLocal - getOffsetFromLocalMillis(millisLocal, timeZone)
}
def dateToString(days: SQLDate): String =
getThreadLocalDateFormat.format(toJavaDate(days))
// Converts Timestamp to string according to Hive TimestampWritable convention.
def timestampToString(us: SQLTimestamp): String = {
timestampToString(us, defaultTimeZone())
}
// Converts Timestamp to string according to Hive TimestampWritable convention.
def timestampToString(us: SQLTimestamp, timeZone: TimeZone): String = {
val ts = toJavaTimestamp(us)
val timestampString = ts.toString
val timestampFormat = getThreadLocalTimestampFormat(timeZone)
val formatted = timestampFormat.format(ts)
if (timestampString.length > 19 && timestampString.substring(19) != ".0") {
formatted + timestampString.substring(19)
} else {
formatted
}
}
@tailrec
def stringToTime(s: String): java.util.Date = {
val indexOfGMT = s.indexOf("GMT")
if (indexOfGMT != -1) {
// ISO8601 with a weird time zone specifier (2000-01-01T00:00GMT+01:00)
val s0 = s.substring(0, indexOfGMT)
val s1 = s.substring(indexOfGMT + 3)
// Mapped to 2000-01-01T00:00+01:00
stringToTime(s0 + s1)
} else if (!s.contains('T')) {
// JDBC escape string
if (s.contains(' ')) {
Timestamp.valueOf(s)
} else {
Date.valueOf(s)
}
} else {
DatatypeConverter.parseDateTime(s).getTime()
}
}
/**
* Returns the number of days since epoch from java.sql.Date.
*/
def fromJavaDate(date: Date): SQLDate = {
millisToDays(date.getTime)
}
/**
* Returns a java.sql.Date from number of days since epoch.
*/
def toJavaDate(daysSinceEpoch: SQLDate): Date = {
new Date(daysToMillis(daysSinceEpoch))
}
/**
* Returns a java.sql.Timestamp from number of micros since epoch.
*/
def toJavaTimestamp(us: SQLTimestamp): Timestamp = {
// setNanos() will overwrite the millisecond part, so the milliseconds should be
// cut off at seconds
var seconds = us / MICROS_PER_SECOND
var micros = us % MICROS_PER_SECOND
// setNanos() can not accept negative value
if (micros < 0) {
micros += MICROS_PER_SECOND
seconds -= 1
}
val t = new Timestamp(seconds * 1000)
t.setNanos(micros.toInt * 1000)
t
}
/**
* Returns the number of micros since epoch from java.sql.Timestamp.
*/
def fromJavaTimestamp(t: Timestamp): SQLTimestamp = {
if (t != null) {
t.getTime() * 1000L + (t.getNanos().toLong / 1000) % 1000L
} else {
0L
}
}
/**
* Returns the number of microseconds since epoch from Julian day
* and nanoseconds in a day
*/
def fromJulianDay(day: Int, nanoseconds: Long): SQLTimestamp = {
// use Long to avoid rounding errors
val seconds = (day - JULIAN_DAY_OF_EPOCH).toLong * SECONDS_PER_DAY
seconds * MICROS_PER_SECOND + nanoseconds / 1000L
}
/**
* Returns Julian day and nanoseconds in a day from the number of microseconds
*
   * Note: supports timestamps since 4717 BC (without negative nanoseconds, compatible with Hive).
*/
def toJulianDay(us: SQLTimestamp): (Int, Long) = {
val julian_us = us + JULIAN_DAY_OF_EPOCH * MICROS_PER_DAY
val day = julian_us / MICROS_PER_DAY
val micros = julian_us % MICROS_PER_DAY
(day.toInt, micros * 1000L)
}
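  // Illustrative round trip (follows from the two definitions above):
  //   toJulianDay(0L)              // (2440588, 0L): 1970-01-01T00:00:00 in (Julian day, nanos) form
  //   fromJulianDay(2440588, 0L)   // 0L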
/*
   * Converts the timestamp to milliseconds since epoch. In Spark, timestamp values have microsecond
   * precision, so this conversion is lossy.
*/
def toMillis(us: SQLTimestamp): Long = {
    // When the timestamp is negative, i.e. before 1970, we need to adjust the milliseconds portion.
// Example - 1965-01-01 10:11:12.123456 is represented as (-157700927876544) in micro precision.
// In millis precision the above needs to be represented as (-157700927877).
Math.floor(us.toDouble / MILLIS_PER_SECOND).toLong
}
/*
   * Converts milliseconds since epoch to SQLTimestamp.
*/
def fromMillis(millis: Long): SQLTimestamp = {
millis * 1000L
}
/**
   * Parses a given UTF8 date string to a corresponding [[Long]] value.
* The return type is [[Option]] in order to distinguish between 0L and null. The following
* formats are allowed:
*
* `yyyy`
* `yyyy-[m]m`
* `yyyy-[m]m-[d]d`
* `yyyy-[m]m-[d]d `
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `yyyy-[m]m-[d]d [h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `yyyy-[m]m-[d]dT[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]Z`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]-[h]h:[m]m`
* `T[h]h:[m]m:[s]s.[ms][ms][ms][us][us][us]+[h]h:[m]m`
*/
def stringToTimestamp(s: UTF8String): Option[SQLTimestamp] = {
stringToTimestamp(s, defaultTimeZone(), rejectTzInString = false)
}
def stringToTimestamp(s: UTF8String, timeZone: TimeZone): Option[SQLTimestamp] = {
stringToTimestamp(s, timeZone, rejectTzInString = false)
}
/**
* Converts a timestamp string to microseconds from the unix epoch, w.r.t. the given timezone.
* Returns None if the input string is not a valid timestamp format.
*
* @param s the input timestamp string.
* @param timeZone the timezone of the timestamp string, will be ignored if the timestamp string
* already contains timezone information and `forceTimezone` is false.
* @param rejectTzInString if true, rejects timezone in the input string, i.e., if the
* timestamp string contains timezone, like `2000-10-10 00:00:00+00:00`,
* return None.
*/
def stringToTimestamp(
s: UTF8String,
timeZone: TimeZone,
rejectTzInString: Boolean): Option[SQLTimestamp] = {
if (s == null) {
return None
}
var tz: Option[Byte] = None
val segments: Array[Int] = Array[Int](1, 1, 1, 0, 0, 0, 0, 0, 0)
var i = 0
var currentSegmentValue = 0
val bytes = s.getBytes
var j = 0
var digitsMilli = 0
var justTime = false
while (j < bytes.length) {
val b = bytes(j)
val parsedValue = b - '0'.toByte
if (parsedValue < 0 || parsedValue > 9) {
if (j == 0 && b == 'T') {
justTime = true
i += 3
} else if (i < 2) {
if (b == '-') {
if (i == 0 && j != 4) {
// year should have exact four digits
return None
}
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else if (i == 0 && b == ':') {
justTime = true
segments(3) = currentSegmentValue
currentSegmentValue = 0
i = 4
} else {
return None
}
} else if (i == 2) {
if (b == ' ' || b == 'T') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
} else if (i == 3 || i == 4) {
if (b == ':') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
} else if (i == 5 || i == 6) {
if (b == 'Z') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
tz = Some(43)
} else if (b == '-' || b == '+') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
tz = Some(b)
} else if (b == '.' && i == 5) {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
if (i == 6 && b != '.') {
i += 1
}
} else {
if (b == ':' || b == ' ') {
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
return None
}
}
} else {
if (i == 6) {
digitsMilli += 1
}
currentSegmentValue = currentSegmentValue * 10 + parsedValue
}
j += 1
}
segments(i) = currentSegmentValue
if (!justTime && i == 0 && j != 4) {
// year should have exact four digits
return None
}
while (digitsMilli < 6) {
segments(6) *= 10
digitsMilli += 1
}
// We are truncating the nanosecond part, which results in loss of precision
while (digitsMilli > 6) {
segments(6) /= 10
digitsMilli -= 1
}
if (!justTime && isInvalidDate(segments(0), segments(1), segments(2))) {
return None
}
if (segments(3) < 0 || segments(3) > 23 || segments(4) < 0 || segments(4) > 59 ||
segments(5) < 0 || segments(5) > 59 || segments(6) < 0 || segments(6) > 999999 ||
segments(7) < 0 || segments(7) > 23 || segments(8) < 0 || segments(8) > 59) {
return None
}
if (tz.isDefined && rejectTzInString) return None
val c = if (tz.isEmpty) {
Calendar.getInstance(timeZone)
} else {
Calendar.getInstance(
getTimeZone(f"GMT${tz.get.toChar}${segments(7)}%02d:${segments(8)}%02d"))
}
c.set(Calendar.MILLISECOND, 0)
if (justTime) {
c.set(Calendar.HOUR_OF_DAY, segments(3))
c.set(Calendar.MINUTE, segments(4))
c.set(Calendar.SECOND, segments(5))
} else {
c.set(segments(0), segments(1) - 1, segments(2), segments(3), segments(4), segments(5))
}
Some(c.getTimeInMillis * 1000 + segments(6))
}
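  // Illustrative sketch of the accepted formats listed above:
  //   stringToTimestamp(UTF8String.fromString("2016-12-31 23:59:59.999999Z"))
  //     // Some(micros): the trailing 'Z' forces a GMT interpretation
  //   stringToTimestamp(UTF8String.fromString("2016-12-31"))   // Some(micros): midnight, session time zone
  //   stringToTimestamp(UTF8String.fromString("2016-13-01"))   // None: month out of range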
/**
* Parses a given UTF8 date string to a corresponding [[Int]] value.
* The return type is [[Option]] in order to distinguish between 0 and null. The following
* formats are allowed:
*
* `yyyy`
* `yyyy-[m]m`
* `yyyy-[m]m-[d]d`
* `yyyy-[m]m-[d]d `
* `yyyy-[m]m-[d]d *`
* `yyyy-[m]m-[d]dT*`
*/
def stringToDate(s: UTF8String): Option[SQLDate] = {
if (s == null) {
return None
}
val segments: Array[Int] = Array[Int](1, 1, 1)
var i = 0
var currentSegmentValue = 0
val bytes = s.getBytes
var j = 0
while (j < bytes.length && (i < 3 && !(bytes(j) == ' ' || bytes(j) == 'T'))) {
val b = bytes(j)
if (i < 2 && b == '-') {
if (i == 0 && j != 4) {
// year should have exact four digits
return None
}
segments(i) = currentSegmentValue
currentSegmentValue = 0
i += 1
} else {
val parsedValue = b - '0'.toByte
if (parsedValue < 0 || parsedValue > 9) {
return None
} else {
currentSegmentValue = currentSegmentValue * 10 + parsedValue
}
}
j += 1
}
if (i == 0 && j != 4) {
// year should have exact four digits
return None
}
segments(i) = currentSegmentValue
if (isInvalidDate(segments(0), segments(1), segments(2))) {
return None
}
val c = threadLocalGmtCalendar.get()
c.clear()
c.set(segments(0), segments(1) - 1, segments(2), 0, 0, 0)
c.set(Calendar.MILLISECOND, 0)
Some((c.getTimeInMillis / MILLIS_PER_DAY).toInt)
}
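  // Illustrative sketch of the accepted formats listed above:
  //   stringToDate(UTF8String.fromString("2017-08-01"))        // Some(days since 1970-01-01)
  //   stringToDate(UTF8String.fromString("2017-08-01 12:00"))  // Some(...): anything after ' '/'T' is ignored
  //   stringToDate(UTF8String.fromString("17-08-01"))          // None: the year needs exactly four digits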
/**
* Return true if the date is invalid.
*/
private def isInvalidDate(year: Int, month: Int, day: Int): Boolean = {
if (year < 0 || year > 9999 || month < 1 || month > 12 || day < 1 || day > 31) {
return true
}
if (month == 2) {
if (isLeapYear(year) && day > 29) {
return true
} else if (!isLeapYear(year) && day > 28) {
return true
}
} else if (!MonthOf31Days.contains(month) && day > 30) {
return true
}
false
}
/**
* Returns the microseconds since year zero (-17999) from microseconds since epoch.
*/
private def absoluteMicroSecond(microsec: SQLTimestamp): SQLTimestamp = {
microsec + toYearZero * MICROS_PER_DAY
}
private def localTimestamp(microsec: SQLTimestamp): SQLTimestamp = {
localTimestamp(microsec, defaultTimeZone())
}
private def localTimestamp(microsec: SQLTimestamp, timeZone: TimeZone): SQLTimestamp = {
absoluteMicroSecond(microsec) + timeZone.getOffset(microsec / 1000) * 1000L
}
/**
* Returns the hour value of a given timestamp value. The timestamp is expressed in microseconds.
*/
def getHours(microsec: SQLTimestamp): Int = {
((localTimestamp(microsec) / MICROS_PER_SECOND / 3600) % 24).toInt
}
/**
* Returns the hour value of a given timestamp value. The timestamp is expressed in microseconds.
*/
def getHours(microsec: SQLTimestamp, timeZone: TimeZone): Int = {
((localTimestamp(microsec, timeZone) / MICROS_PER_SECOND / 3600) % 24).toInt
}
/**
* Returns the minute value of a given timestamp value. The timestamp is expressed in
* microseconds.
*/
def getMinutes(microsec: SQLTimestamp): Int = {
((localTimestamp(microsec) / MICROS_PER_SECOND / 60) % 60).toInt
}
/**
* Returns the minute value of a given timestamp value. The timestamp is expressed in
* microseconds.
*/
def getMinutes(microsec: SQLTimestamp, timeZone: TimeZone): Int = {
((localTimestamp(microsec, timeZone) / MICROS_PER_SECOND / 60) % 60).toInt
}
/**
* Returns the second value of a given timestamp value. The timestamp is expressed in
* microseconds.
*/
def getSeconds(microsec: SQLTimestamp): Int = {
((localTimestamp(microsec) / MICROS_PER_SECOND) % 60).toInt
}
/**
* Returns the second value of a given timestamp value. The timestamp is expressed in
* microseconds.
*/
def getSeconds(microsec: SQLTimestamp, timeZone: TimeZone): Int = {
((localTimestamp(microsec, timeZone) / MICROS_PER_SECOND) % 60).toInt
}
private[this] def isLeapYear(year: Int): Boolean = {
(year % 4) == 0 && ((year % 100) != 0 || (year % 400) == 0)
}
/**
   * Returns the number of days since the start of a 400 year period.
* The second year of a 400 year period (year 1) starts on day 365.
*/
private[this] def yearBoundary(year: Int): Int = {
year * 365 + ((year / 4 ) - (year / 100) + (year / 400))
}
/**
* Calculates the number of years for the given number of days. This depends
* on a 400 year period.
* @param days days since the beginning of the 400 year period
* @return (number of year, days in year)
*/
private[this] def numYears(days: Int): (Int, Int) = {
val year = days / 365
val boundary = yearBoundary(year)
if (days > boundary) (year, days - boundary) else (year - 1, days - yearBoundary(year - 1))
}
/**
* Calculates the year and the number of the day in the year for the given
* number of days. The given days is the number of days since 1.1.1970.
*
* The calculation uses the fact that the period 1.1.2001 until 31.12.2400 is
   * equal to the period 1.1.1601 until 31.12.2000.
*/
private[this] def getYearAndDayInYear(daysSince1970: SQLDate): (Int, Int) = {
// add the difference (in days) between 1.1.1970 and the artificial year 0 (-17999)
var daysSince1970Tmp = daysSince1970
    // Since the Julian calendar was replaced with the Gregorian calendar,
    // the 10 days after Oct. 4, 1582 were skipped.
    // (1582-10-04 is day -141428 since 1970-01-01.)
if (daysSince1970 <= -141428) {
daysSince1970Tmp -= 10
}
val daysNormalized = daysSince1970Tmp + toYearZero
val numOfQuarterCenturies = daysNormalized / daysIn400Years
val daysInThis400 = daysNormalized % daysIn400Years + 1
val (years, dayInYear) = numYears(daysInThis400)
val year: Int = (2001 - 20000) + 400 * numOfQuarterCenturies + years
(year, dayInYear)
}
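  // Worked example (added for illustration, not in the original source): for daysSince1970 = 0,
  // i.e. 1970-01-01, no Julian/Gregorian correction applies and the 400-year arithmetic above
  // yields (1970, 1) -- the first day of year 1970.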
/**
* Returns the 'day in year' value for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getDayInYear(date: SQLDate): Int = {
getYearAndDayInYear(date)._2
}
/**
* Returns the year value for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getYear(date: SQLDate): Int = {
getYearAndDayInYear(date)._1
}
/**
* Returns the quarter for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getQuarter(date: SQLDate): Int = {
var (year, dayInYear) = getYearAndDayInYear(date)
if (isLeapYear(year)) {
dayInYear = dayInYear - 1
}
if (dayInYear <= 90) {
1
} else if (dayInYear <= 181) {
2
} else if (dayInYear <= 273) {
3
} else {
4
}
}
/**
* Split date (expressed in days since 1.1.1970) into four fields:
* year, month (Jan is Month 1), dayInMonth, daysToMonthEnd (0 if it's last day of month).
*/
def splitDate(date: SQLDate): (Int, Int, Int, Int) = {
var (year, dayInYear) = getYearAndDayInYear(date)
val isLeap = isLeapYear(year)
if (isLeap && dayInYear == 60) {
(year, 2, 29, 0)
} else {
if (isLeap && dayInYear > 60) dayInYear -= 1
if (dayInYear <= 181) {
if (dayInYear <= 31) {
(year, 1, dayInYear, 31 - dayInYear)
} else if (dayInYear <= 59) {
(year, 2, dayInYear - 31, if (isLeap) 60 - dayInYear else 59 - dayInYear)
} else if (dayInYear <= 90) {
(year, 3, dayInYear - 59, 90 - dayInYear)
} else if (dayInYear <= 120) {
(year, 4, dayInYear - 90, 120 - dayInYear)
} else if (dayInYear <= 151) {
(year, 5, dayInYear - 120, 151 - dayInYear)
} else {
(year, 6, dayInYear - 151, 181 - dayInYear)
}
} else {
if (dayInYear <= 212) {
(year, 7, dayInYear - 181, 212 - dayInYear)
} else if (dayInYear <= 243) {
(year, 8, dayInYear - 212, 243 - dayInYear)
} else if (dayInYear <= 273) {
(year, 9, dayInYear - 243, 273 - dayInYear)
} else if (dayInYear <= 304) {
(year, 10, dayInYear - 273, 304 - dayInYear)
} else if (dayInYear <= 334) {
(year, 11, dayInYear - 304, 334 - dayInYear)
} else {
(year, 12, dayInYear - 334, 365 - dayInYear)
}
}
}
}
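  // Worked example (added for illustration, not in the original source): splitDate(0), i.e.
  // 1970-01-01, returns (1970, 1, 1, 30) -- year 1970, January, day 1 of the month, with
  // 30 days remaining until the end of the month.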
/**
* Returns the month value for the given date. The date is expressed in days
* since 1.1.1970. January is month 1.
*/
def getMonth(date: SQLDate): Int = {
var (year, dayInYear) = getYearAndDayInYear(date)
if (isLeapYear(year)) {
if (dayInYear == 60) {
return 2
} else if (dayInYear > 60) {
dayInYear = dayInYear - 1
}
}
if (dayInYear <= 31) {
1
} else if (dayInYear <= 59) {
2
} else if (dayInYear <= 90) {
3
} else if (dayInYear <= 120) {
4
} else if (dayInYear <= 151) {
5
} else if (dayInYear <= 181) {
6
} else if (dayInYear <= 212) {
7
} else if (dayInYear <= 243) {
8
} else if (dayInYear <= 273) {
9
} else if (dayInYear <= 304) {
10
} else if (dayInYear <= 334) {
11
} else {
12
}
}
/**
* Returns the 'day of month' value for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getDayOfMonth(date: SQLDate): Int = {
var (year, dayInYear) = getYearAndDayInYear(date)
if (isLeapYear(year)) {
if (dayInYear == 60) {
return 29
} else if (dayInYear > 60) {
dayInYear = dayInYear - 1
}
}
if (dayInYear <= 31) {
dayInYear
} else if (dayInYear <= 59) {
dayInYear - 31
} else if (dayInYear <= 90) {
dayInYear - 59
} else if (dayInYear <= 120) {
dayInYear - 90
} else if (dayInYear <= 151) {
dayInYear - 120
} else if (dayInYear <= 181) {
dayInYear - 151
} else if (dayInYear <= 212) {
dayInYear - 181
} else if (dayInYear <= 243) {
dayInYear - 212
} else if (dayInYear <= 273) {
dayInYear - 243
} else if (dayInYear <= 304) {
dayInYear - 273
} else if (dayInYear <= 334) {
dayInYear - 304
} else {
dayInYear - 334
}
}
/**
* The number of days for each month (not leap year)
*/
private val monthDays = Array(31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
/**
* Returns the date value for the first day of the given month.
* The month is expressed in months since year zero (17999 BC), starting from 0.
*/
private def firstDayOfMonth(absoluteMonth: Int): SQLDate = {
val absoluteYear = absoluteMonth / 12
var monthInYear = absoluteMonth - absoluteYear * 12
var date = getDateFromYear(absoluteYear)
if (monthInYear >= 2 && isLeapYear(absoluteYear + YearZero)) {
date += 1
}
while (monthInYear > 0) {
date += monthDays(monthInYear - 1)
monthInYear -= 1
}
date
}
/**
* Returns the date value for January 1 of the given year.
* The year is expressed in years since year zero (17999 BC), starting from 0.
*/
private def getDateFromYear(absoluteYear: Int): SQLDate = {
val absoluteDays = (absoluteYear * 365 + absoluteYear / 400 - absoluteYear / 100
+ absoluteYear / 4)
absoluteDays - toYearZero
}
/**
* Add date and year-month interval.
* Returns a date value, expressed in days since 1.1.1970.
*/
def dateAddMonths(days: SQLDate, months: Int): SQLDate = {
val (year, monthInYear, dayOfMonth, daysToMonthEnd) = splitDate(days)
val absoluteMonth = (year - YearZero) * 12 + monthInYear - 1 + months
val nonNegativeMonth = if (absoluteMonth >= 0) absoluteMonth else 0
val currentMonthInYear = nonNegativeMonth % 12
val currentYear = nonNegativeMonth / 12
val leapDay = if (currentMonthInYear == 1 && isLeapYear(currentYear + YearZero)) 1 else 0
val lastDayOfMonth = monthDays(currentMonthInYear) + leapDay
val currentDayInMonth = if (daysToMonthEnd == 0 || dayOfMonth >= lastDayOfMonth) {
// last day of the month
lastDayOfMonth
} else {
dayOfMonth
}
firstDayOfMonth(nonNegativeMonth) + currentDayInMonth - 1
}
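  // Worked example (added for illustration, not in the original source): adding one month to
  // 2016-01-31 yields 2016-02-29, because daysToMonthEnd is 0 for Jan 31 and the branch above
  // snaps the result to the last day of the target month (February of a leap year).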
/**
* Add timestamp and full interval.
* Returns a timestamp value, expressed in microseconds since 1.1.1970 00:00:00.
*/
def timestampAddInterval(start: SQLTimestamp, months: Int, microseconds: Long): SQLTimestamp = {
timestampAddInterval(start, months, microseconds, defaultTimeZone())
}
/**
* Add timestamp and full interval.
* Returns a timestamp value, expressed in microseconds since 1.1.1970 00:00:00.
*/
def timestampAddInterval(
start: SQLTimestamp,
months: Int,
microseconds: Long,
timeZone: TimeZone): SQLTimestamp = {
val days = millisToDays(start / 1000L, timeZone)
val newDays = dateAddMonths(days, months)
start +
daysToMillis(newDays, timeZone) * 1000L - daysToMillis(days, timeZone) * 1000L +
microseconds
}
/**
   * Returns the number of months between time1 and time2. time1 and time2 are expressed in
   * microseconds since 1.1.1970. If time1 is later than time2, the result is positive.
   *
   * If time1 and time2 are on the same day of month, or both are the last day of month,
   * an integral number of months is returned and the time of day is ignored.
*
* Otherwise, the difference is calculated based on 31 days per month.
* The result is rounded to 8 decimal places if `roundOff` is set to true.
*/
def monthsBetween(
time1: SQLTimestamp,
time2: SQLTimestamp,
roundOff: Boolean,
timeZone: TimeZone): Double = {
val millis1 = time1 / 1000L
val millis2 = time2 / 1000L
val date1 = millisToDays(millis1, timeZone)
val date2 = millisToDays(millis2, timeZone)
val (year1, monthInYear1, dayInMonth1, daysToMonthEnd1) = splitDate(date1)
val (year2, monthInYear2, dayInMonth2, daysToMonthEnd2) = splitDate(date2)
val months1 = year1 * 12 + monthInYear1
val months2 = year2 * 12 + monthInYear2
val monthDiff = (months1 - months2).toDouble
if (dayInMonth1 == dayInMonth2 || ((daysToMonthEnd1 == 0) && (daysToMonthEnd2 == 0))) {
return monthDiff
}
// using milliseconds can cause precision loss with more than 8 digits
// we follow Hive's implementation which uses seconds
val secondsInDay1 = (millis1 - daysToMillis(date1, timeZone)) / 1000L
val secondsInDay2 = (millis2 - daysToMillis(date2, timeZone)) / 1000L
val secondsDiff = (dayInMonth1 - dayInMonth2) * SECONDS_PER_DAY + secondsInDay1 - secondsInDay2
// 2678400D is the number of seconds in 31 days
// every month is considered to be 31 days long in this function
val diff = monthDiff + secondsDiff / 2678400D
if (roundOff) {
// rounding to 8 digits
math.round(diff * 1e8) / 1e8
} else {
diff
}
}
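  // Worked example (added for illustration, not in the original source): monthsBetween of
  // 2017-06-15 12:00:00 and 2017-03-15 23:00:00 is exactly 3.0, because both fall on the same
  // day of month and the time-of-day term is skipped; otherwise the fractional part comes from
  // secondsDiff / 2678400 (a 31-day month), matching Hive's behaviour.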
// Thursday = 0 since 1970/Jan/01 => Thursday
private val SUNDAY = 3
private val MONDAY = 4
private val TUESDAY = 5
private val WEDNESDAY = 6
private val THURSDAY = 0
private val FRIDAY = 1
private val SATURDAY = 2
/*
* Returns day of week from String. Starting from Thursday, marked as 0.
* (Because 1970-01-01 is Thursday).
*/
def getDayOfWeekFromString(string: UTF8String): Int = {
val dowString = string.toString.toUpperCase(Locale.ROOT)
dowString match {
case "SU" | "SUN" | "SUNDAY" => SUNDAY
case "MO" | "MON" | "MONDAY" => MONDAY
case "TU" | "TUE" | "TUESDAY" => TUESDAY
case "WE" | "WED" | "WEDNESDAY" => WEDNESDAY
case "TH" | "THU" | "THURSDAY" => THURSDAY
case "FR" | "FRI" | "FRIDAY" => FRIDAY
case "SA" | "SAT" | "SATURDAY" => SATURDAY
case _ => -1
}
}
/**
* Returns the first date which is later than startDate and is of the given dayOfWeek.
   * dayOfWeek is an integer in the range [0, 6], where 0 is Thu, 1 is Fri, etc.
*/
def getNextDateForDayOfWeek(startDate: SQLDate, dayOfWeek: Int): SQLDate = {
startDate + 1 + ((dayOfWeek - 1 - startDate) % 7 + 7) % 7
}
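  // Worked example (added for illustration, not in the original source): 1970-01-01 (date 0) is
  // a Thursday, so getNextDateForDayOfWeek(0, MONDAY) = 0 + 1 + ((4 - 1 - 0) % 7 + 7) % 7 = 4,
  // which is 1970-01-05, the following Monday.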
/**
* Returns last day of the month for the given date. The date is expressed in days
* since 1.1.1970.
*/
def getLastDayOfMonth(date: SQLDate): SQLDate = {
val (_, _, _, daysToMonthEnd) = splitDate(date)
date + daysToMonthEnd
}
// Visible for testing.
private[sql] val TRUNC_TO_YEAR = 1
private[sql] val TRUNC_TO_MONTH = 2
private[sql] val TRUNC_TO_QUARTER = 3
private[sql] val TRUNC_TO_WEEK = 4
private[sql] val TRUNC_TO_DAY = 5
private[sql] val TRUNC_TO_HOUR = 6
private[sql] val TRUNC_TO_MINUTE = 7
private[sql] val TRUNC_TO_SECOND = 8
private[sql] val TRUNC_INVALID = -1
/**
   * Returns the truncated date from the original date and the trunc level.
   * The trunc level should be generated using `parseTruncLevel()` and must be
   * TRUNC_TO_YEAR (1) or TRUNC_TO_MONTH (2).
*/
def truncDate(d: SQLDate, level: Int): SQLDate = {
if (level == TRUNC_TO_YEAR) {
d - DateTimeUtils.getDayInYear(d) + 1
} else if (level == TRUNC_TO_MONTH) {
d - DateTimeUtils.getDayOfMonth(d) + 1
} else {
      // the caller makes sure that this branch is never reached
sys.error(s"Invalid trunc level: $level")
}
}
/**
   * Returns the truncated date time from the original date time and the trunc level.
   * The trunc level should be generated using `parseTruncLevel()` and must be between 1 and 8.
*/
def truncTimestamp(t: SQLTimestamp, level: Int, timeZone: TimeZone): SQLTimestamp = {
var millis = t / MICROS_PER_MILLIS
val truncated = level match {
case TRUNC_TO_YEAR =>
val dDays = millisToDays(millis, timeZone)
daysToMillis(truncDate(dDays, level), timeZone)
case TRUNC_TO_MONTH =>
val dDays = millisToDays(millis, timeZone)
daysToMillis(truncDate(dDays, level), timeZone)
case TRUNC_TO_DAY =>
val offset = timeZone.getOffset(millis)
millis += offset
millis - millis % (MILLIS_PER_SECOND * SECONDS_PER_DAY) - offset
case TRUNC_TO_HOUR =>
val offset = timeZone.getOffset(millis)
millis += offset
millis - millis % (60 * 60 * MILLIS_PER_SECOND) - offset
case TRUNC_TO_MINUTE =>
millis - millis % (60 * MILLIS_PER_SECOND)
case TRUNC_TO_SECOND =>
millis - millis % MILLIS_PER_SECOND
case TRUNC_TO_WEEK =>
val dDays = millisToDays(millis, timeZone)
val prevMonday = getNextDateForDayOfWeek(dDays - 7, MONDAY)
daysToMillis(prevMonday, timeZone)
case TRUNC_TO_QUARTER =>
val dDays = millisToDays(millis, timeZone)
millis = daysToMillis(truncDate(dDays, TRUNC_TO_MONTH), timeZone)
val cal = Calendar.getInstance()
cal.setTimeInMillis(millis)
val quarter = getQuarter(dDays)
val month = quarter match {
case 1 => Calendar.JANUARY
case 2 => Calendar.APRIL
case 3 => Calendar.JULY
case 4 => Calendar.OCTOBER
}
cal.set(Calendar.MONTH, month)
cal.getTimeInMillis()
case _ =>
        // the caller makes sure that this branch is never reached
sys.error(s"Invalid trunc level: $level")
}
truncated * MICROS_PER_MILLIS
}
def truncTimestamp(d: SQLTimestamp, level: Int): SQLTimestamp = {
truncTimestamp(d, level, defaultTimeZone())
}
/**
   * Returns the truncate level: one of TRUNC_TO_YEAR, TRUNC_TO_MONTH, TRUNC_TO_DAY, TRUNC_TO_HOUR,
   * TRUNC_TO_MINUTE, TRUNC_TO_SECOND, TRUNC_TO_WEEK, TRUNC_TO_QUARTER or TRUNC_INVALID.
   * TRUNC_INVALID means an unsupported truncate level.
*/
def parseTruncLevel(format: UTF8String): Int = {
if (format == null) {
TRUNC_INVALID
} else {
format.toString.toUpperCase(Locale.ROOT) match {
case "YEAR" | "YYYY" | "YY" => TRUNC_TO_YEAR
case "MON" | "MONTH" | "MM" => TRUNC_TO_MONTH
case "DAY" | "DD" => TRUNC_TO_DAY
case "HOUR" => TRUNC_TO_HOUR
case "MINUTE" => TRUNC_TO_MINUTE
case "SECOND" => TRUNC_TO_SECOND
case "WEEK" => TRUNC_TO_WEEK
case "QUARTER" => TRUNC_TO_QUARTER
case _ => TRUNC_INVALID
}
}
}
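  // Examples (added for illustration, not in the original source; UTF8String.fromString is
  // assumed as the usual way to build the argument):
  //   parseTruncLevel(UTF8String.fromString("YYYY")) == TRUNC_TO_YEAR
  //   parseTruncLevel(UTF8String.fromString("week")) == TRUNC_TO_WEEK   // matching is case-insensitive
  //   parseTruncLevel(UTF8String.fromString("decade")) == TRUNC_INVALID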
/**
   * Looks up the offset for the given local milliseconds since 1970-01-01 00:00:00 in the given timezone.
* TODO: Improve handling of normalization differences.
* TODO: Replace with JSR-310 or similar system - see SPARK-16788
*/
private[sql] def getOffsetFromLocalMillis(millisLocal: Long, tz: TimeZone): Long = {
var guess = tz.getRawOffset
// the actual offset should be calculated based on milliseconds in UTC
val offset = tz.getOffset(millisLocal - guess)
if (offset != guess) {
guess = tz.getOffset(millisLocal - offset)
if (guess != offset) {
// fallback to do the reverse lookup using java.sql.Timestamp
// this should only happen near the start or end of DST
val days = Math.floor(millisLocal.toDouble / MILLIS_PER_DAY).toInt
val year = getYear(days)
val month = getMonth(days)
val day = getDayOfMonth(days)
var millisOfDay = (millisLocal % MILLIS_PER_DAY).toInt
if (millisOfDay < 0) {
millisOfDay += MILLIS_PER_DAY.toInt
}
val seconds = (millisOfDay / 1000L).toInt
val hh = seconds / 3600
val mm = seconds / 60 % 60
val ss = seconds % 60
val ms = millisOfDay % 1000
val calendar = Calendar.getInstance(tz)
calendar.set(year, month - 1, day, hh, mm, ss)
calendar.set(Calendar.MILLISECOND, ms)
guess = (millisLocal - calendar.getTimeInMillis()).toInt
}
}
guess
}
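  // Illustrative DST edge case (added for illustration, not in the original source): in
  // America/Los_Angeles the wall clock jumps from 02:00 to 03:00 on 2016-03-13, so a local
  // millis value inside that gap has no unique UTC instant; the Calendar-based reverse lookup
  // above is what resolves such values.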
/**
* Convert the timestamp `ts` from one timezone to another.
*
   * TODO: Because of DST, the conversion between UTC and human time is not an exact one-to-one
   * mapping, so the conversion here may return a wrong result; we should make the timestamp
   * timezone-aware.
*/
def convertTz(ts: SQLTimestamp, fromZone: TimeZone, toZone: TimeZone): SQLTimestamp = {
// We always use local timezone to parse or format a timestamp
val localZone = defaultTimeZone()
val utcTs = if (fromZone.getID == localZone.getID) {
ts
} else {
// get the human time using local time zone, that actually is in fromZone.
val localTs = ts + localZone.getOffset(ts / 1000L) * 1000L // in fromZone
localTs - getOffsetFromLocalMillis(localTs / 1000L, fromZone) * 1000L
}
if (toZone.getID == localZone.getID) {
utcTs
} else {
val localTs = utcTs + toZone.getOffset(utcTs / 1000L) * 1000L // in toZone
// treat it as local timezone, convert to UTC (we could get the expected human time back)
localTs - getOffsetFromLocalMillis(localTs / 1000L, localZone) * 1000L
}
}
/**
* Returns a timestamp of given timezone from utc timestamp, with the same string
* representation in their timezone.
*/
def fromUTCTime(time: SQLTimestamp, timeZone: String): SQLTimestamp = {
convertTz(time, TimeZoneGMT, getTimeZone(timeZone))
}
/**
* Returns a utc timestamp from a given timestamp from a given timezone, with the same
* string representation in their timezone.
*/
def toUTCTime(time: SQLTimestamp, timeZone: String): SQLTimestamp = {
convertTz(time, getTimeZone(timeZone), TimeZoneGMT)
}
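  // Worked example (added for illustration, not in the original source): for a timestamp that
  // renders as 2016-08-31 00:00:00 in UTC, fromUTCTime(ts, "Asia/Seoul") yields a value that
  // renders as 2016-08-31 09:00:00, and toUTCTime applies the opposite shift (subject to the
  // DST caveats noted on convertTz above).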
/**
* Re-initialize the current thread's thread locals. Exposed for testing.
*/
private[util] def resetThreadLocals(): Unit = {
threadLocalGmtCalendar.remove()
threadLocalTimestampFormat.remove()
threadLocalDateFormat.remove()
}
}
| tejasapatil/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/DateTimeUtils.scala | Scala | apache-2.0 | 39,405 |
package com.alexitc.coinalerts.data.async
import javax.inject.Inject
import com.alexitc.coinalerts.config.DatabaseExecutionContext
import com.alexitc.coinalerts.data.{UserBlockingDataHandler, UserDataHandler}
import com.alexitc.coinalerts.models._
import com.alexitc.playsonify.core.FutureApplicationResult
import scala.concurrent.Future
class UserFutureDataHandler @Inject()(userBlockingDataHandler: UserBlockingDataHandler)(
implicit ec: DatabaseExecutionContext)
extends UserDataHandler[FutureApplicationResult] {
override def create(email: UserEmail, password: UserHiddenPassword): FutureApplicationResult[User] = Future {
userBlockingDataHandler.create(email, password)
}
override def createVerificationToken(userId: UserId): FutureApplicationResult[UserVerificationToken] = Future {
userBlockingDataHandler.createVerificationToken(userId)
}
override def verifyEmail(token: UserVerificationToken): FutureApplicationResult[User] = Future {
userBlockingDataHandler.verifyEmail(token)
}
override def getVerifiedUserPassword(email: UserEmail): FutureApplicationResult[UserHiddenPassword] = Future {
userBlockingDataHandler.getVerifiedUserPassword(email)
}
override def getVerifiedUserByEmail(email: UserEmail): FutureApplicationResult[User] = Future {
userBlockingDataHandler.getVerifiedUserByEmail(email)
}
override def getVerifiedUserById(userId: UserId): FutureApplicationResult[User] = Future {
userBlockingDataHandler.getVerifiedUserById(userId)
}
override def getUserPreferences(userId: UserId): FutureApplicationResult[UserPreferences] = Future {
userBlockingDataHandler.getUserPreferences(userId)
}
override def setUserPreferences(
userId: UserId,
preferencesModel: SetUserPreferencesModel): FutureApplicationResult[UserPreferences] = Future {
userBlockingDataHandler.setUserPreferences(userId, preferencesModel)
}
}
| AlexITC/crypto-coin-alerts | alerts-server/app/com/alexitc/coinalerts/data/async/UserFutureDataHandler.scala | Scala | gpl-3.0 | 1,923 |
/*
* Copyright 2015 Otto (GmbH & Co KG)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.flinkspector.matcher
import org.hamcrest.Description
import org.scalatest.Matchers
import org.scalatest.enablers.{Aggregating, Sequencing}
/**
* Wrapper around the [[Matchers]] library from ScalaTest.
*
* @see http://scalatest.org/
 * Offers several methods to create different [[ListMatcher]]s working on lists.
*/
object ListMatchers extends Matchers {
/**
   * Provides a [[ListMatcher]] to test whether a list contains only a set of elements.
*
* @example List(1,2,3,4) matched against List(1,2,3) is not valid.
* @param right expected list of elements
* @tparam T type to match
* @return concrete [[ListMatcher]]
*/
def containsOnly[T](right: List[T])(implicit aggregating: Aggregating[List[T]])
: ListMatcher[T] = {
new ListMatcher[T](right) {
override def matchesSafely(left: List[T]): Boolean = {
if (aggregating.containsOnly(left, right.distinct)) {
ensureFrequency(_ < _)(left, right)
} else {
false
}
}
override def toString: String = "only matcher"
override def describeTo(description: Description): Unit = {
description.appendText("contains only")
}
}
}
/**
   * Provides a [[ListMatcher]] to test whether a list contains a set of elements.
*
* @example List(1,3,2,4) matched against List(1,2,3) is valid.
* @param right expected list of elements.
* @tparam T type to match
* @return concrete [[ListMatcher]]
*/
def containsAll[T](right: List[T])(implicit aggregating: Aggregating[List[T]])
: ListMatcher[T] = {
new ListMatcher[T](right) {
override def matchesSafely(left: List[T]): Boolean = {
if (aggregating.containsAllOf(left, right)) {
ensureFrequency(_ < _)(left, right)
} else {
false
}
}
override def toString: String = "all matcher"
override def describeTo(description: Description): Unit = {
description.appendText("contains all")
}
}
}
/**
   * Provides a [[ListMatcher]] to test whether a list contains a sequence of elements.
   * The matcher permits other elements between the ordered elements and
   * also allows for duplicates.
*
* @example List(1,2,4,3,3,5) matched against List(1,2,3) is valid.
* @param right expected order of elements
* @tparam T type to match
* @return concrete [[ListMatcher]]
*/
def containsInOrder[T](right: List[T])(implicit sequencing: Sequencing[List[T]])
: ListMatcher[T] = {
new ListMatcher[T](right) {
override def matchesSafely(left: List[T]): Boolean =
sequencing.containsInOrder(left, right)
override def toString: String = "order matcher"
override def describeTo(description: Description): Unit = {
description.appendText("in order ")
describeOutput(right, description)
}
}
}
/**
   * Provides a [[ListMatcher]] to test whether a list contains another list
*
* @example List(1,2,3,4) matched against List(2,3) is valid.
* @param right expected list
* @tparam T type to match
* @return concrete [[ListMatcher]]
*/
def containsInSeries[T](right: List[T]): ListMatcher[T] = {
new ListMatcher[T](right) {
override def matchesSafely(left: List[T]): Boolean =
left.containsSlice(right)
override def toString: String =
"series matcher"
override def describeTo(description: Description): Unit = {
description.appendText("in series ")
describeOutput(right, description)
}
}
}
private def describeOutput[T](list: Seq[T], description: Description) = {
val builder = StringBuilder.newBuilder
builder.append("<[")
builder.append(list.mkString(", "))
builder.append("]>")
description.appendText(builder.toString())
}
/**
   * Provides a [[ListMatcher]] to test whether a list contains
* an element with the same number of occurrences.
*
* @example List(1,2,2,3,4,4) matched against List(1,2,2) is valid.
* @param right expected list
* @tparam T type to match
* @return concrete [[ListMatcher]]
*/
def sameFrequency[T](right: List[T]): ListMatcher[T] = {
new ListMatcher[T](right) {
override def matchesSafely(left: List[T]): Boolean =
ensureFrequency(_ != _)(left, right)
override def toString: String = {
"frequency matcher"
}
override def describeTo(description: Description): Unit = {
description.appendText("same frequency")
}
}
}
/**
* Helper function to compare the frequency of elements in lists.
*
* @param c function which compares the frequency of the elements.
* @param left first list.
* @param right second list.
* @tparam T generic type of the lists.
* @return true if comparator is true for each element else false.
*/
private def ensureFrequency[T](c: (Int, Int) => Boolean)(left: List[T], right: List[T]): Boolean = {
val countDuplicates = (l: List[T]) => l
.groupBy(identity)
.mapValues(_.size)
val leftDuplicates = countDuplicates(left)
val rightDuplicates = countDuplicates(right)
rightDuplicates.foreach {
case (elem, count) =>
if (leftDuplicates.contains(elem)) {
if (c(leftDuplicates(elem), count)) {
return false
}
}
}
true
}
/**
* Helper function to split a list into a [[Tuple3]].
*
* @param list to split
* @return (first element, second element, rest of elements)
*/
private def splitTo(list: List[Any]): (Any, Any, List[Any]) = {
(list.head, list.tail.head, list.tail.tail)
}
}
| ottogroup/flink-spector | flinkspector-core/src/main/scala/io/flinkspector/matcher/ListMatchers.scala | Scala | apache-2.0 | 6,331 |
package org.jetbrains.plugins.scala
package codeInspection
package hashCodeUsesVar
import com.intellij.codeInspection.LocalInspectionTool
import com.intellij.testFramework.EditorTestUtil
/**
* Daniyar Itegulov
* 2016-02-08
*/
class HashCodeUsesVarInspectionTest extends ScalaInspectionTestBase {
override protected val classOfInspection: Class[_ <: LocalInspectionTool] =
classOf[HashCodeUsesVarInspection]
override protected val description: String =
"Non-value field is accessed in 'hashCode()'"
def testReturnsVar(): Unit = {
val text = s"""class A {
| var a = 1
|
| override def hashCode(): Int = {
| ${START}a$END
| }
|}"""
checkTextHasError(text)
}
def testReturnsVal(): Unit = {
val text = s"""class A {
| val a = 1
|
| override def hashCode(): Int = {
| ${START}a$END
| }
|}"""
checkTextHasNoErrors(text)
}
def testDefineValThroughVar(): Unit = {
val text = s"""class A {
| var a = 1
|
| override def hashCode(): Int = {
| val c = ${START}a$END
| c
| }
|}"""
checkTextHasError(text)
}
def testUseVarFromAncestor(): Unit = {
val text = s"""class A {
| var a = 1
|}
|class B extends A {
| override def hashCode(): Int = {
| ${START}a$END
| }
|}"""
checkTextHasError(text)
}
def testUseVarTuple(): Unit = {
val text = s"""class A {
| var a = (1, 2)
|
| override def hashCode(): Int = {
| ${START}a$END._1
| }
|}"""
checkTextHasError(text)
}
def testUseVarOperations(): Unit = {
val text = s"""class A {
| var a = 1
|
| override def hashCode(): Int = {
| (7 + 14 * ${START}a$END) / 21
| }
|}"""
checkTextHasError(text)
}
def testUseVarInNonHashCode(): Unit = {
val text = s"""class A {
| var a = 1
|
| override def hashCode(): Unit = {
| ${START}a$END
| }
|}"""
checkTextHasNoErrors(text)
}
def testUseVarInInnerFun(): Unit = {
val text = s"""class A {
| var a = 1
|
| override def hashCode(): Int = {
| def f(): Int = ${START}a$END
| f()
| }
|}"""
checkTextHasError(text)
}
def testUseVarInInnerClass(): Unit = {
val text = s"""class A {
| var a = 1
|
| override def hashCode(): Int = {
| class B {
| def f(): Int = ${START}a$END
| }
| new B().f()
| }
|}"""
checkTextHasError(text)
}
  def testUseVarAsAccumulator(): Unit = {
val text = s"""class A {
| val a = 1
| val b = 1
|
| override def hashCode(): Int = {
| var r = 0
| r += a
| r += b
| r
| }
|}"""
checkTextHasNoErrors(text)
}
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/codeInspection/hashCodeUsesVar/HashCodeUsesVarInspectionTest.scala | Scala | apache-2.0 | 3,957 |
package com.hadooparchitecturebook.taxi360.server.hbase
import java.io.File
import com.sun.jersey.spi.container.servlet.ServletContainer
import org.apache.hadoop.hbase.HBaseConfiguration
import org.mortbay.jetty.Server
import org.mortbay.jetty.servlet.{Context, ServletHolder}
object HBaseRestServer {
def main(args:Array[String]): Unit = {
    if (args.length == 0) {
      println("<port> <configDir> <numberOfSalts> <appEventTableName>")
      return
    }
val port = args(0).toInt
val hbaseConfigFolder = args(1)
val numberOfSalts = args(2).toInt
val appEventTableName = args(3)
val conf = HBaseConfiguration.create()
conf.addResource(new File(hbaseConfigFolder + "hbase-site.xml").toURI.toURL)
HBaseGlobalValues.init(conf, numberOfSalts,
appEventTableName)
val server = new Server(port)
val sh = new ServletHolder(classOf[ServletContainer])
sh.setInitParameter("com.sun.jersey.config.property.resourceConfigClass", "com.sun.jersey.api.core.PackagesResourceConfig")
sh.setInitParameter("com.sun.jersey.config.property.packages", "com.hadooparchitecturebook.taxi360.server.hbase")
sh.setInitParameter("com.sun.jersey.api.json.POJOMappingFeature", "true")
val context = new Context(server, "/", Context.SESSIONS)
context.addServlet(sh, "/*")
println("starting HBase Rest Server")
server.start()
println("started HBase Rest Sserver")
server.join()
}
}
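// Example invocation (illustrative only; the classpath, config directory and table name below
// are assumptions, not taken from the project):
//   java -cp taxi360.jar com.hadooparchitecturebook.taxi360.server.hbase.HBaseRestServer \
//     8080 /etc/hbase/conf/ 4 app_event
// This starts the embedded Jetty server on port 8080, using 4 salt buckets for the app-event table.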
| hadooparchitecturebook/Taxi360 | src/main/scala/com/hadooparchitecturebook/taxi360/server/hbase/HBaseRestServer.scala | Scala | apache-2.0 | 1,432 |
package com.twitter.finagle.netty3.socks
import com.twitter.finagle.ConnectionFailedException
import com.twitter.finagle.netty3.SocketAddressResolveHandler
import com.twitter.finagle.socks.{Unauthenticated, UsernamePassAuthenticationSetting}
import java.net.{SocketAddress, InetAddress, InetSocketAddress}
import java.util.Arrays
import org.jboss.netty.buffer.{ChannelBuffers, ChannelBuffer}
import org.jboss.netty.channel._
import org.junit.runner.RunWith
import org.mockito.ArgumentCaptor
import org.mockito.Matchers._
import org.mockito.Mockito.{times, verify, when, atLeastOnce}
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
@RunWith(classOf[JUnitRunner])
class SocksConnectHandlerTest extends FunSuite with MockitoSugar {
class SocksConnectHandlerHelper {
val ctx = mock[ChannelHandlerContext]
val channel = mock[Channel]
when(ctx.getChannel) thenReturn channel
val pipeline = mock[ChannelPipeline]
when(ctx.getPipeline) thenReturn pipeline
when(channel.getPipeline) thenReturn pipeline
val closeFuture = Channels.future(channel)
when(channel.getCloseFuture) thenReturn closeFuture
val port = 80 // never bound
val portByte1 = (port >> 8).toByte
val portByte2 = (port & 0xFF).toByte
val remoteAddress = new InetSocketAddress(InetAddress.getByAddress(null, Array[Byte](0x7F, 0x0, 0x0, 0x1)), port)
when(channel.getRemoteAddress) thenReturn remoteAddress
val proxyAddress = mock[SocketAddress]
val connectFuture = Channels.future(channel, true)
val connectRequested = new DownstreamChannelStateEvent(
channel, connectFuture, ChannelState.CONNECTED, remoteAddress)
def sendBytesToServer(x: Byte, xs: Byte*) {
val ec = ArgumentCaptor.forClass(classOf[DownstreamMessageEvent])
verify(ctx, atLeastOnce()).sendDownstream(ec.capture)
val e = ec.getValue
assert(e.getMessage match {
case buf: ChannelBuffer =>
val a = Array(x, xs: _*)
val bufBytes = Array.ofDim[Byte](buf.readableBytes())
buf.getBytes(0, bufBytes)
Arrays.equals(bufBytes, a)
})
}
def receiveBytesFromServer(ch: SocksConnectHandler, bytes: Array[Byte]) {
ch.handleUpstream(ctx, new UpstreamMessageEvent(
channel, ChannelBuffers.wrappedBuffer(bytes), null))
}
def connectAndRemoveHandler(ch: SocksConnectHandler) {
assert(connectFuture.isDone)
verify(pipeline).remove(ch)
// we propagated the connect
val ec = ArgumentCaptor.forClass(classOf[UpstreamChannelStateEvent])
verify(ctx).sendUpstream(ec.capture)
val e = ec.getValue
assert(e.getChannel == channel)
assert(e.getState == ChannelState.CONNECTED)
assert(e.getValue == remoteAddress)
}
def checkDidClose() {
val ec = ArgumentCaptor.forClass(classOf[DownstreamChannelStateEvent])
verify(pipeline).sendDownstream(ec.capture)
val e = ec.getValue
assert(e.getChannel == channel)
assert(e.getFuture == closeFuture)
assert(e.getState == ChannelState.OPEN)
assert(e.getValue == java.lang.Boolean.FALSE)
}
}
test("SocksConnectHandler should with no authentication upon connect wrap the downstream connect request") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
val ec = ArgumentCaptor.forClass(classOf[DownstreamChannelStateEvent])
verify(ctx).sendDownstream(ec.capture)
val e = ec.getValue
assert(e.getChannel == channel)
assert(e.getFuture != connectFuture) // this is proxied
assert(e.getState == ChannelState.CONNECTED)
assert(e.getValue == proxyAddress)
}
test("SocksConnectHandler should with no authentication upon connect propagate cancellation") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
val ec = ArgumentCaptor.forClass(classOf[DownstreamChannelStateEvent])
verify(ctx).sendDownstream(ec.capture)
val e = ec.getValue
assert(!e.getFuture.isCancelled)
connectFuture.cancel()
assert(e.getFuture.isCancelled)
}
test("SocksConnectHandler should with no authentication when connect is successful not propagate success") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
ch.handleUpstream(ctx, new UpstreamChannelStateEvent(
channel, ChannelState.CONNECTED, remoteAddress))
assert(!connectFuture.isDone)
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
}
test("SocksConnectHandler should with no authentication when connect is successful propagate connection cancellation") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
ch.handleUpstream(ctx, new UpstreamChannelStateEvent(
channel, ChannelState.CONNECTED, remoteAddress))
assert(!connectFuture.isDone)
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
connectFuture.cancel()
checkDidClose()
}
test("SocksConnectHandler should with no authentication when connect is successful do SOCKS negotiation") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
ch.handleUpstream(ctx, new UpstreamChannelStateEvent(
channel, ChannelState.CONNECTED, remoteAddress))
assert(!connectFuture.isDone)
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
{
// on connect send init
sendBytesToServer(0x05, 0x01, 0x00)
}
{
// when init response is received send connect request
receiveBytesFromServer(ch, Array[Byte](0x05, 0x00))
sendBytesToServer(0x05, 0x01, 0x00, 0x01, 0x7F, 0x00, 0x00, 0x01, portByte1, portByte2)
}
{
// when connect response is received, propagate the connect and remove the handler
receiveBytesFromServer(ch,
Array[Byte](0x05, 0x00, 0x00, 0x01, 0x7F, 0x00, 0x00, 0x01, portByte1, portByte2))
connectAndRemoveHandler(ch)
}
}
test("SocksConnectHandler should with no authentication propagate connection failure") {
val h = new SocksConnectHandlerHelper
import h._
val ch = new SocksConnectHandler(proxyAddress, remoteAddress)
ch.handleDownstream(ctx, connectRequested)
val ec = ArgumentCaptor.forClass(classOf[DownstreamChannelStateEvent])
verify(ctx).sendDownstream(ec.capture)
val e = ec.getValue
val exc = new Exception("failed to connect")
assert(!connectFuture.isDone)
e.getFuture.setFailure(exc)
assert(connectFuture.isDone)
assert(connectFuture.getCause == exc)
}
test("SocksConnectHandler should with username and password authentication when connect is successful do SOCKS negotiation") {
val h = new SocksConnectHandlerHelper
import h._
val username = "u"
val password = "pass"
val ch = new SocksConnectHandler(proxyAddress, remoteAddress,
Seq(UsernamePassAuthenticationSetting(username, password)))
ch.handleDownstream(ctx, connectRequested)
ch.handleUpstream(ctx, new UpstreamChannelStateEvent(
channel, ChannelState.CONNECTED, remoteAddress))
assert(!connectFuture.isDone)
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
{
// on connect send init
sendBytesToServer(0x05, 0x01, 0x02)
}
{
// when init response is received send user name and pass
receiveBytesFromServer(ch, Array[Byte](0x05, 0x02))
sendBytesToServer(0x01, 0x01, 0x75, 0x04, 0x70, 0x61, 0x73, 0x73)
}
{
// when authenticated response is received send connect request
receiveBytesFromServer(ch, Array[Byte](0x01, 0x00))
sendBytesToServer(0x05, 0x01, 0x00, 0x01, 0x7F, 0x00, 0x00, 0x01, portByte1, portByte2)
}
{
// when connect response is received, propagate the connect and remove the handler
receiveBytesFromServer(ch,
Array[Byte](0x05, 0x00, 0x00, 0x01, 0x7F, 0x00, 0x00, 0x01, portByte1, portByte2))
connectAndRemoveHandler(ch)
}
}
test("SocksConnectHandler should with username and password authentication when connect is successful fail SOCKS negotiation when not authenticated") {
val h = new SocksConnectHandlerHelper
import h._
val username = "u"
val password = "pass"
val ch = new SocksConnectHandler(proxyAddress, remoteAddress,
Seq(UsernamePassAuthenticationSetting(username, password)))
ch.handleDownstream(ctx, connectRequested)
ch.handleUpstream(ctx, new UpstreamChannelStateEvent(
channel, ChannelState.CONNECTED, remoteAddress))
assert(!connectFuture.isDone)
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
{
// on connect send init
sendBytesToServer(0x05, 0x01, 0x02)
}
{
// when init response is received send user name and pass
receiveBytesFromServer(ch, Array[Byte](0x05, 0x02))
sendBytesToServer(0x01, 0x01, 0x75, 0x04, 0x70, 0x61, 0x73, 0x73)
}
{
// when not authenticated response is received disconnect
receiveBytesFromServer(ch, Array[Byte](0x01, 0x01))
assert(connectFuture.isDone)
assert(connectFuture.getCause.isInstanceOf[ConnectionFailedException])
checkDidClose()
}
}
test("SocksConnectHandler should not add socket address resolve handler when proxy address is resolved") {
val pipeline = new DefaultChannelPipeline
SocksConnectHandler.addHandler(
new InetSocketAddress(InetAddress.getLoopbackAddress, 2222),
new InetSocketAddress(InetAddress.getLoopbackAddress, 80),
Seq(Unauthenticated),
pipeline
)
assert(pipeline.get("socketAddressResolver") == null)
}
test("SocksConnectHandler should add socket address resolve handler when proxy address is unresolved") {
val pipeline = new DefaultChannelPipeline
SocksConnectHandler.addHandler(
InetSocketAddress.createUnresolved("meow.meow", 2222),
new InetSocketAddress(InetAddress.getLoopbackAddress, 80),
Seq(Unauthenticated),
pipeline
)
assert(pipeline.get("socketAddressResolver").isInstanceOf[SocketAddressResolveHandler])
}
}
| sveinnfannar/finagle | finagle-core/src/test/scala/com/twitter/finagle/netty3/socks/SocksConnectHandlerTest.scala | Scala | apache-2.0 | 10,613 |
import scala.collection.mutable
object Test {
val map = mutable.Map.empty[String, String]
def main(args: Array[String]): Unit = while (true) {
val time = System.currentTimeMillis.toString
map += (time -> time)
}
}
| som-snytt/dotty | tests/vulpix-tests/unit/infiniteAlloc.scala | Scala | apache-2.0 | 229 |
package com.swara.examples.learn
import scala.util.Random
import com.swara.learn.common._
import com.swara.learn.genetic.recombinators.EdgeRecombinator
import com.swara.learn.genetic.selectors.RouletteWheelSelector
import com.swara.learn.genetic.{Evaluator, Mutator, Population}
/**
 * The traveling salesman problem (TSP) asks the following question: "Given a list of cities and
* the distances between each pair of cities, what is the shortest possible route that visits each
* city exactly once and returns to the origin city?" (Wikipedia). The TSP is NP-complete; however,
* we will attempt to use Genetic Algorithms to solve it.
*/
object TravelingSalesmanProblem {
def apply(distance: Array[Array[Double]]): Seq[Int] = {
// The fitness of a route is equal to the negative sum of the distances between cities, because
    // the goal of the problem is to find the shortest possible route that visits all cities.
object RouteEvaluator extends Evaluator[Seq[Int]] {
override def fitness(genome: Seq[Int]): Double =
(genome :+ genome.head).sliding(2).foldLeft(0.0)((t, c) => t - distance(c(0))(c(1)))
}
    // With probability `rate`, randomly swaps the positions of two cities in the route.
class RouteMutator(rate: Double) extends Mutator[Seq[Int]] {
override def mutate(genome: Seq[Int]): Seq[Int] = {
val i = Random.nextInt(genome.size)
val j = Random.nextInt(genome.size)
if (Random.nextDouble() < this.rate) genome.swap(i, j) else genome
}
}
// Evolve a population of 100 random permutations of the cities for 50 generations, and then
// return the member of the population with the maximum fitness (shortest distance).
var population = Population(Seq.fill(100)(distance.indices.shuffle))
(1 to 50).foreach { _ =>
population = population.evolve(
new RouletteWheelSelector,
new EdgeRecombinator,
new RouteMutator(0.1),
RouteEvaluator,
elitism = 0.05
)
}
population.members.maxBy(RouteEvaluator.fitness)
}
}
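// A minimal usage sketch (not part of the original file; the demo object name and the 4-city
// distance matrix are made up for illustration). It feeds a small symmetric matrix to
// TravelingSalesmanProblem and prints the resulting visiting order of city indices.
object TravelingSalesmanProblemDemo {
  def main(args: Array[String]): Unit = {
    val distance = Array(
      Array(0.0, 2.0, 9.0, 10.0),
      Array(2.0, 0.0, 6.0, 4.0),
      Array(9.0, 6.0, 0.0, 3.0),
      Array(10.0, 4.0, 3.0, 0.0)
    )
    val route = TravelingSalesmanProblem(distance)
    println(route.mkString(" -> ")) // e.g. "0 -> 1 -> 3 -> 2"
  }
}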
| ashwin153/swara | swara-examples/src/main/scala/com/swara/examples/learn/TravelingSalesmanProblem.scala | Scala | mit | 2,048 |
package controllers
import org.specs2.mutable._
import play.api.test._
import play.api.test.Helpers._
/**
* You can mock out a whole application including requests, plugins etc.
* For more information, consult the wiki.
*/
class ApplicationIT extends Specification {
"Application" should {
"send 404 on a bad request" in {
running(FakeApplication()) {
route(FakeRequest(GET, "/boum")) must beNone
}
}
"render the index page" in {
running(FakeApplication()) {
val home = route(FakeRequest(GET, "/")).get
status(home) must equalTo(OK)
contentType(home) must beSome.which(_ == "text/html")
}
}
}
}
 | danihegglin/renga | web/test/controllers/ApplicationIT.scala | Scala | apache-2.0 | 680
/*
* Copyright (C) 2007 Lalit Pant <[email protected]>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.jiva.operators
import scala.util.Random
import net.kogics.jiva.Predef._
import net.kogics.jiva.population.{Population, Chromosome}
import net.kogics.jiva.util.{Shuffler, ShufflerImpl}
import net.kogics.jiva.util.collection.JList
abstract class AbstractCrossoverOp[A](opc: Option[Double], rgen: Random)
extends AbstractGeneticOp[A](opc, rgen) with CrossoverOp[A] {
var shuffler: Shuffler[Chromosome[A]] = new ShufflerImpl[Chromosome[A]]
protected [operators] def operate(pc: Double, pop: Population[A]): Population[A] = {
val selectedForCrossover = new JList[Chromosome[A]]
val candidates = new JList[Chromosome[A]]
pop.foreach { chr => if (rgen.nextDouble < pc) selectedForCrossover += chr else candidates += chr}
if (selectedForCrossover.size % 2 != 0) {
var add = true
if (candidates.size == 0) add = false else add = rgen.nextBoolean
if (add) {
moveRandomGene(candidates, selectedForCrossover)
}
else {
moveRandomGene(selectedForCrossover, candidates)
}
}
shuffler.shuffle(selectedForCrossover.underlying)
for (i <- Iterator.range(0, selectedForCrossover.size, 2)) {
val chr1 = selectedForCrossover(i)
val chr2 = selectedForCrossover(i+1)
val crossed = crossover(chr1, chr2)
candidates += crossed._1
candidates += crossed._2
}
return new Population(candidates)
}
def crossover(chr1: Chromosome[A], chr2: Chromosome[A]) : (Chromosome[A], Chromosome[A])
private def moveRandomGene(from: JList[Chromosome[A]], to: JList[Chromosome[A]]) : Unit = {
val idx = rgen.nextInt(from.size)
val chr = from.remove(idx)
    to += chr
}
}
| milliondreams/jiva-ng | src/main/scala/net/kogics/jiva/operators/AbstractCrossoverOp.scala | Scala | gpl-3.0 | 2,291 |
package domain
import play.api.libs.json.Json
case class Handshake(room: String, user:String)
object Handshake {
implicit val reads = Json.reads[Handshake]
def parse(msg: String): Handshake = reads.reads(Json.parse(msg)).get //todo: add validation and/or try/catch
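  // Example (added for illustration): parsing the raw message {"room":"lobby","user":"alice"}
  // yields Handshake("lobby", "alice"); malformed input currently throws, hence the todo above.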
}
 | vadim-shb/play-chat | app/domain/Handshake.scala | Scala | apache-2.0 | 275
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.coders
import java.nio.ByteBuffer
import org.apache.avro.{Schema => ASchema}
import org.apache.avro.generic.{GenericData, GenericRecord}
import org.apache.beam.sdk.coders.{Coder => BCoder}
import org.apache.beam.sdk.util.CoderUtils
import scala.jdk.CollectionConverters._
private[scio] object AvroBytesUtil {
val schema: ASchema = {
val s = ASchema.createRecord("AvroBytesRecord", null, null, false)
s.setFields(
List(
new ASchema.Field(
"bytes",
ASchema.create(ASchema.Type.BYTES),
null,
null.asInstanceOf[Object]
)
).asJava
)
s
}
def encode[T](coder: BCoder[T], obj: T): GenericRecord = {
val bytes = CoderUtils.encodeToByteArray(coder, obj)
val record = new GenericData.Record(schema)
record.put("bytes", ByteBuffer.wrap(bytes))
record
}
def decode[T](coder: BCoder[T], record: GenericRecord): T = {
val bb = record.get("bytes").asInstanceOf[ByteBuffer]
val bytes =
java.util.Arrays.copyOfRange(bb.array(), bb.position(), bb.limit())
CoderUtils.decodeFromByteArray(coder, bytes)
}
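  // Round-trip sketch (added for illustration; StringUtf8Coder is just one Beam coder that could
  // be plugged in here):
  //   val coder = org.apache.beam.sdk.coders.StringUtf8Coder.of()
  //   val record = AvroBytesUtil.encode(coder, "hello")
  //   AvroBytesUtil.decode(coder, record)  // returns "hello"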
}
| spotify/scio | scio-core/src/main/scala/com/spotify/scio/coders/AvroBytesUtil.scala | Scala | apache-2.0 | 1,747 |
object SCL10116 {
val policy =
s"""
|{
| "conditions": [
| ["starts-with", "$$Content-Type", "image/*"],
| ]
|}
""".<ref>stripMargin
}
 | ilinum/intellij-scala | testdata/resolve/failed/escapeSymbols/SCL10116.scala | Scala | apache-2.0 | 190
/*******************************************************************************
* Copyright (C) 2012 Łukasz Szpakowski.
*
* This library is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package org
/**
* @author Łukasz Szpakowski
*/
package object lkbgraph
{
val V = Vertex
implicit def tuple2ToDiEdge[V](x: (V, V)) =
new UnwDiEdge(x._1, x._2)
implicit def anyToUnwUndiEdgeAssoc[V](x: V) =
new UnwUndiEdgeAssoc(x)
implicit def wEdgeToWeighted[W, E[+Y, +Z] <: EdgeLike[Y, Z, E]](x: E[_, Weighted[W]]) =
x.toWeighted
}
| luckboy/LkbGraph | src/main/org/lkbgraph/package.scala | Scala | lgpl-3.0 | 1,243 |
package se.marcuslonnberg.stark
import java.io._
import java.security._
import java.security.cert.{Certificate, CertificateFactory}
import java.security.spec.PKCS8EncodedKeySpec
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}
import org.apache.commons.ssl.PKCS8Key
import spray.io.{SSLContextProvider, ServerSSLEngineProvider}
trait SSLSupport {
val algorithm = "SunX509"
val protocol = "TLS"
val ciphers = List(
"SSL_RSA_WITH_RC4_128_MD5",
"SSL_RSA_WITH_RC4_128_SHA",
"TLS_RSA_WITH_AES_128_CBC_SHA",
"TLS_ECDHE_RSA_WITH_RC4_128_SHA",
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
"TLS_RSA_WITH_AES_128_CBC_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256",
"SSL_RSA_WITH_3DES_EDE_CBC_SHA",
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA")
val protocols = List("SSLv3", "SSLv2Hello", "TLSv1", "TLSv1.1", "TLSv1.2")
implicit def sslEngineProvider(implicit context: SSLContextProvider): ServerSSLEngineProvider = {
ServerSSLEngineProvider { engine =>
val enabledCiphers = ciphers.intersect(engine.getSupportedCipherSuites)
engine.setEnabledCipherSuites(enabledCiphers.toArray)
val enabledProtocols = protocols.intersect(engine.getSupportedProtocols)
engine.setEnabledProtocols(enabledProtocols.toArray)
engine
}
}
def createSSLContext(certificateFilename: String, privateKeyFilename: String): SSLContext = {
val keyStore = KeyStore.getInstance("JKS")
val entryAlias = "entry"
val cert = readCert(certificateFilename)
val privateKey = readPrivateKey(privateKeyFilename)
val emptyPassword = "".toCharArray
keyStore.load(null, emptyPassword)
keyStore.setEntry(entryAlias, new KeyStore.PrivateKeyEntry(privateKey, cert), new KeyStore.PasswordProtection(emptyPassword))
val keyManagerFactory = KeyManagerFactory.getInstance(algorithm)
keyManagerFactory.init(keyStore, emptyPassword)
val trustManagerFactory = TrustManagerFactory.getInstance(algorithm)
trustManagerFactory.init(keyStore)
val context = SSLContext.getInstance(protocol)
context.init(keyManagerFactory.getKeyManagers, trustManagerFactory.getTrustManagers, new SecureRandom)
context
}
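  // Usage sketch (added for illustration; the certificate and key paths are assumptions):
  //   implicit val sslContext: SSLContext = createSSLContext("/etc/stark/cert.pem", "/etc/stark/key.pem")
  // The implicit sslEngineProvider above then restricts each engine to the cipher and protocol
  // lists defined in this trait.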
def readCert(certificateFilename: String): Array[Certificate] = {
val certificateStream = new FileInputStream(certificateFilename)
val certificateFactory = CertificateFactory.getInstance("X.509")
val certs = certificateFactory.generateCertificates(certificateStream)
var chain = new Array[Certificate](certs.size())
chain = certs.toArray(chain)
certificateStream.close()
chain
}
def readPrivateKey(privateKeyFilename: String): PrivateKey = {
val fileStream = new FileInputStream(privateKeyFilename)
val key = new PKCS8Key(fileStream, Array.empty[Char]) // Empty password
val encodedKey = key.getDecryptedBytes
val rsaKeyFactory = KeyFactory.getInstance("RSA")
rsaKeyFactory.generatePrivate(new PKCS8EncodedKeySpec(encodedKey))
}
}
| FredrikWendt/stark | src/main/scala/se/marcuslonnberg/stark/SSLSupport.scala | Scala | mit | 2,996 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.batch.table
import org.apache.flink.table.api._
import org.apache.flink.table.planner.factories.TestValuesTableFactory
import org.apache.flink.table.planner.runtime.utils.BatchTestBase
import org.apache.flink.table.planner.runtime.utils.TestData._
import org.apache.flink.util.ExceptionUtils
import org.junit.Assert.{assertEquals, assertTrue, fail}
import org.junit.Test
import scala.collection.JavaConversions._
class TableSinkITCase extends BatchTestBase {
@Test
def testDecimalOnOutputFormatTableSink(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE sink (
| `c` VARCHAR(5),
| `b` DECIMAL(10, 0),
| `d` CHAR(5)
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true',
| 'runtime-sink' = 'OutputFormat'
|)
|""".stripMargin)
registerCollection("MyTable", data3, type3, "a, b, c", nullablesOfData3)
val table = tEnv.from("MyTable")
.where('a > 20)
.select("12345", 55.cast(DataTypes.DECIMAL(10, 0)), "12345".cast(DataTypes.CHAR(5)))
execInsertTableAndWaitResult(table, "sink")
val result = TestValuesTableFactory.getResults("sink")
val expected = Seq("12345,55,12345")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testDecimalOnSinkFunctionTableSink(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE sink (
| `c` VARCHAR(5),
| `b` DECIMAL(10, 0),
| `d` CHAR(5)
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
registerCollection("MyTable", data3, type3, "a, b, c", nullablesOfData3)
val table = tEnv.from("MyTable")
.where('a > 20)
.select("12345", 55.cast(DataTypes.DECIMAL(10, 0)), "12345".cast(DataTypes.CHAR(5)))
execInsertTableAndWaitResult(table, "sink")
val result = TestValuesTableFactory.getResults("sink")
val expected = Seq("12345,55,12345")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testSinkWithKey(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE testSink (
| `a` INT,
| `b` DOUBLE,
| PRIMARY KEY (a) NOT ENFORCED
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
registerCollection("MyTable", simpleData2, simpleType2, "a, b", nullableOfSimpleData2)
val table = tEnv.from("MyTable")
.groupBy('a)
.select('a, 'b.sum())
execInsertTableAndWaitResult(table, "testSink")
val result = TestValuesTableFactory.getResults("testSink")
val expected = List(
"1,0.1",
"2,0.4",
"3,1.0",
"4,2.2",
"5,3.9")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testSinkWithoutKey(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE testSink (
| `a` INT,
| `b` DOUBLE
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
registerCollection("MyTable", simpleData2, simpleType2, "a, b", nullableOfSimpleData2)
val table = tEnv.from("MyTable")
.groupBy('a)
.select('a, 'b.sum())
execInsertTableAndWaitResult(table, "testSink")
val result = TestValuesTableFactory.getResults("testSink")
val expected = List(
"1,0.1",
"2,0.4",
"3,1.0",
"4,2.2",
"5,3.9")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testNotNullEnforcer(): Unit = {
val dataId = TestValuesTableFactory.registerData(nullData4)
tEnv.executeSql(
s"""
|CREATE TABLE nullable_src (
| category STRING,
| shopId INT,
| num INT
|) WITH (
| 'connector' = 'values',
| 'data-id' = '$dataId',
| 'bounded' = 'true'
|)
|""".stripMargin)
tEnv.executeSql(
s"""
|CREATE TABLE not_null_sink (
| category STRING,
| shopId INT,
| num INT NOT NULL
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
// default should fail, because there are null values in the source
try {
execInsertSqlAndWaitResult("INSERT INTO not_null_sink SELECT * FROM nullable_src")
fail("Execution should fail.")
} catch {
case t: Throwable =>
val exception = ExceptionUtils.findThrowableWithMessage(
t,
"Column 'num' is NOT NULL, however, a null value is being written into it. " +
"You can set job configuration 'table.exec.sink.not-null-enforcer'='drop' " +
"to suppress this exception and drop such records silently.")
assertTrue(exception.isPresent)
}
// enable drop enforcer to make the query can run
tEnv.getConfig.getConfiguration.setString("table.exec.sink.not-null-enforcer", "drop")
execInsertSqlAndWaitResult("INSERT INTO not_null_sink SELECT * FROM nullable_src")
val result = TestValuesTableFactory.getResults("not_null_sink")
val expected = List("book,1,12", "book,4,11", "fruit,3,44")
assertEquals(expected.sorted, result.sorted)
}
}
| tzulitai/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/batch/table/TableSinkITCase.scala | Scala | apache-2.0 | 6,168 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.fetch
import io.gatling.http.HeaderNames
import org.asynchttpclient.Request
object UserAgent {
val IE = "MSIE"
private val MsIeUserAgentRegex = "MSIE ([0-9]+.[0-9]+)".r
def getAgent(request: Request): Option[UserAgent] =
Option(request.getHeaders.get(HeaderNames.UserAgent)).flatMap(parseFromHeader)
def parseFromHeader(userAgent: String): Option[UserAgent] =
MsIeUserAgentRegex.findFirstMatchIn(userAgent) match {
case Some(res) => Some(UserAgent(UserAgent.IE, res.group(1).toFloat))
case None => None
}
}
case class UserAgent(name: String, version: Float)
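// Example (added for illustration): parseFromHeader("Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)")
// returns Some(UserAgent("MSIE", 7.0f)), while a user agent string without "MSIE" yields None.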
| timve/gatling | gatling-http/src/main/scala/io/gatling/http/fetch/UserAgent.scala | Scala | apache-2.0 | 1,244 |
package core.formatter.misc
import com.fasterxml.jackson.core.JsonGenerator
import com.fasterxml.jackson.databind.{ SerializerProvider, JsonSerializer }
import com.lvxingpai.model.misc.RichText
/**
* Created by pengyt on 2015/11/13.
*/
class RichTextSerializer extends JsonSerializer[RichText] {
override def serialize(richText: RichText, gen: JsonGenerator, serializers: SerializerProvider): Unit = {
gen.writeStartObject()
gen.writeStringField("title", Option(richText.title) getOrElse "")
gen.writeStringField("summary", Option(richText.summary) getOrElse "")
gen.writeStringField("body", Option(richText.body) getOrElse "")
gen.writeEndObject()
}
}
| Lvxingpai/Hanse | app/core/formatter/misc/RichTextSerializer.scala | Scala | apache-2.0 | 684 |
package com.github.fellowship_of_the_bus
package bruteb
package models
import scala.collection.mutable.Set
import lib.game.{IDMap, IDFactory, TopLeftCoordinates}
import lib.util.{TimerListener, TickTimer}
import rapture.json._
import rapture.json.jsonBackends.jackson._
sealed trait ProjectileID {
def imageList: List[Int]
def image = imageList(0)
}
case object ArrowProj extends ProjectileID {
override def imageList = List(R.drawable.arrow)
}
case object PoisonProj extends ProjectileID {
override def imageList = List(R.drawable.poison_gas)
}
case object FireProj extends ProjectileID {
override def imageList = List(R.drawable.fire)
}
case object LightningProj extends ProjectileID {
override def imageList = List(R.drawable.lightning4)
}
case class ProjAttr(
speed: Float)
object ProjIds {
val ids = Vector(ArrowProj, PoisonProj, FireProj, LightningProj)
}
abstract class BaseProjectile(val id: ProjectileID, val coord: Coordinate, val damage: Float) extends TopLeftCoordinates {
var active = true
def speed() : Float
def direction(): (Float, Float) // Direction that the projectile should move in
def isActive = active
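  // Advance the projectile by one step: position += direction * speed.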
def move() = {
val (dx, dy) = direction()
coord.x += dx * speed()
coord.y += dy * speed()
}
def deactivate() = {
active = false
}
override def x = coord.x
override def y = coord.y
override def width = 0.6f
override def height = 0.3f
def image = id.image
}
abstract class TimedProjectile(pid: ProjectileID, pcoord:Coordinate, val source: BaseTrap, val target: BaseBrute, val numFrames: Int) extends BaseProjectile(pid, pcoord, 0) with TimerListener {
  // A projectile that deals no damage and exists only to be drawn;
  // its damage is applied instantly when the projectile is fired.
  // For instance, a lightning bolt is drawn for a couple of frames,
  // but the damage is done at the moment the tower fires.
this += new TickTimer(numFrames, () => deactivate())
override def move() = {
tickOnce()
}
def tickOnce() = {
if (ticking()) {
tick(1)
} else {
cancelAll()
}
}
}
class ArrowProjectile(pid: ProjectileID, pcoord: Coordinate, pdamage: Float, val source: BaseTrap, val target: BaseBrute) extends BaseProjectile(pid, pcoord, pdamage) {
def direction(): (Float, Float) = {
val (dx, dy): (Float, Float) = (target.x - x, target.y - y)
val norm: Float = math.sqrt(dx*dx + dy*dy).toFloat
if (norm == 0) {
(0,0)
} else {
(dx/norm, dy/norm)
}
}
def speed() = 0.3f
  override def move() = {
    if (!target.isAlive) {
      // the target died before the arrow arrived: just drop the arrow
      deactivate()
    } else {
      val (dx, dy) = (target.coord.x - coord.x, target.coord.y - coord.y)
      val norm = math.sqrt(dx*dx + dy*dy)
      // still farther than one step away: keep flying; otherwise this frame is a hit
      if (norm >= speed) {
        super.move()
      } else {
        target.hit(source, damage)
        deactivate()
      }
    }
  }
}
class PoisonProjectile(pid:ProjectileID, pcoord: Coordinate, psource: BaseTrap, ptarget:BaseBrute) extends TimedProjectile(pid, pcoord, psource, ptarget, 10) {
override def width = 1f
override def height = 3/4f
override def direction = (0,0)
override def speed = 0f
}
class FireProjectile(pid:ProjectileID, pcoord: Coordinate, psource: BaseTrap, ptarget:BaseBrute) extends TimedProjectile(pid, pcoord, psource, ptarget, 5) {
override def width = 1f
override def height = 3/4f
override def direction = (0,0)
override def speed = 0f
}
class LightningProjectile(pid:ProjectileID, pcoord: Coordinate, psource: BaseTrap, ptarget:BaseBrute) extends TimedProjectile(pid, pcoord, psource, ptarget, 4) {
override def width = {
val (dx, dy) = (target.coord.x-coord.x, target.coord.y - coord.y)
math.sqrt(dx*dx + dy*dy).toFloat
}
override def height = 3/8f
override def direction = {
val dx = target.x - source.x
val dy = target.y - source.y
val norm : Float = math.sqrt(dx*dx + dy*dy).toFloat
if (norm == 0) {
(0,0)
} else {
(dx/norm, dy/norm)
}
}
override def speed = 0f
}
| Fellowship-of-the-Bus/Brute-Blitzkrieg | src/main/scala/models/Projectile.scala | Scala | apache-2.0 | 3,997 |
package controllers
import javax.inject.Inject
import com.mohiva.play.silhouette.api.{ Environment, LogoutEvent, Silhouette }
import com.mohiva.play.silhouette.impl.authenticators.JWTAuthenticator
import models.User
import play.api.libs.json.Json
import scala.concurrent.Future
/**
* The basic application controller.
*
* @param env The Silhouette environment.
*/
class ApplicationController @Inject() (implicit val env: Environment[User, JWTAuthenticator])
extends Silhouette[User, JWTAuthenticator] {
/**
* Returns the user.
*
* @return The result to display.
*/
def user = SecuredAction.async { implicit request =>
Future.successful(Ok(Json.toJson(request.identity)))
}
/**
* Manages the sign out action.
*/
def signOut = SecuredAction.async { implicit request =>
env.eventBus.publish(LogoutEvent(request.identity, request, request2lang))
request.authenticator.discard(Future.successful(Ok))
}
/**
* Provides the desired template.
*
* @param template The template to provide.
* @return The template.
*/
def view(template: String) = UserAwareAction { implicit request =>
template match {
case "home" => Ok(views.html.home())
case "signUp" => Ok(views.html.signUp())
case "signIn" => Ok(views.html.signIn())
case "navigation" => Ok(views.html.navigation.render())
case _ => NotFound
}
}
}
| BareHackers/BareQuotes | app/controllers/ApplicationController.scala | Scala | apache-2.0 | 1,404 |
package lila.importer
import com.typesafe.config.Config
import lila.common.PimpedConfig._
final class Env(
config: Config,
scheduler: akka.actor.Scheduler,
roundMap: akka.actor.ActorRef) {
private val Delay = config duration "delay"
lazy val forms = new DataForm
lazy val importer = new Importer(roundMap, Delay, scheduler)
}
object Env {
lazy val current = "[boot] importer" describes new Env(
config = lila.common.PlayApp loadConfig "importer",
scheduler = lila.common.PlayApp.system.scheduler,
roundMap = lila.round.Env.current.roundMap)
}
| Happy0/lila | modules/importer/src/main/Env.scala | Scala | mit | 583 |
package cromwell.engine.backend
import java.nio.file._
import cromwell.engine.backend.io.filesystem.gcs.{ContentTypeOption, GcsFileSystem, GcsFileSystemProvider, NioGcsPath}
import scala.util.{Success, Try}
package object io {
val defaultGCSFileSystem = GcsFileSystem.defaultGcsFileSystem
val defaultFileSystem = FileSystems.getDefault
val defaultFileSystems = List(defaultGCSFileSystem, defaultFileSystem)
implicit class PathString(val str: String) extends AnyVal {
def isGcsUrl: Boolean = str.startsWith("gs://")
def isUriWithProtocol: Boolean = "^[a-z]+://".r.findFirstIn(str).nonEmpty
def toPath(fss: List[FileSystem]): Path = PathFactory.buildPath(str, fss)
def toPath(fs: FileSystem): Path = str.toPath(List(fs))
def toAbsolutePath(fss: List[FileSystem]): Path = str.toPath(fss).toAbsolutePath
def toAbsolutePath(fs: FileSystem): Path = str.toAbsolutePath(List(fs))
def toDirectory(fss: List[FileSystem]): Path = PathFactory.buildPathAsDirectory(str, fss)
def toDirectory(fs: FileSystem): Path = str.toDirectory(List(fs))
}
implicit class PathEnhanced(val path: Path) extends AnyVal {
import better.files._
def hash = path match {
case gcs: NioGcsPath => gcs.getFileSystem.provider().asInstanceOf[GcsFileSystemProvider].crc32cHash(gcs)
case _ => path.md5
}
def writeAsJson(content: String): File = {
Files.write(path, content.getBytes, ContentTypeOption.Json)
}
def asDirectory = path.toString.toDirectory(path.getFileSystem)
}
}
| cowmoo/cromwell | engine/src/main/scala/cromwell/engine/backend/io/package.scala | Scala | bsd-3-clause | 1,536 |
package dispatch.as
import dispatch._
import java.nio.charset.Charset
import org.asynchttpclient
object Response {
def apply[T](f: asynchttpclient.Response => T) = f
}
object String extends (asynchttpclient.Response => String) {
  /** @return response body as a string, decoded with either the charset provided by the
   * Content-Type header of the response or ISO-8859-1 */
def apply(r: asynchttpclient.Response) = r.getResponseBody
/** @return a function that will return response body decoded in the provided charset */
case class charset(set: Charset) extends (asynchttpclient.Response => String) {
def apply(r: asynchttpclient.Response) = r.getResponseBody(set)
}
/** @return a function that will return response body as a utf8 decoded string */
object utf8 extends charset(Charset.forName("utf8"))
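  // Illustrative use with the usual dispatch combinators (executor name assumed):
  //   Http.default(request OK as.String.utf8)
  // decodes a successful response body as UTF-8.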
}
object Bytes extends (asynchttpclient.Response => Array[Byte]) {
def apply(r: asynchttpclient.Response) = r.getResponseBodyAsBytes
}
object File extends {
def apply(file: java.io.File) =
(new asynchttpclient.handler.resumable.ResumableAsyncHandler with OkHandler[asynchttpclient.Response])
.setResumableListener(
new asynchttpclient.handler.resumable.ResumableRandomAccessFileListener(
new java.io.RandomAccessFile(file, "rw")
)
)
}
| maiflai/reboot | core/src/main/scala/as/core.scala | Scala | lgpl-3.0 | 1,306 |
package com.overviewdocs.background.filegroupcleanup
import scala.concurrent.Future
import com.overviewdocs.database.HasDatabase
import com.overviewdocs.models.tables.FileGroups
trait DeletedFileGroupFinder {
def indexIds: Future[Iterable[Long]]
}
/** Finds FileGroup IDs that we need to delete.
*
* This finds deleted FileGroups, but *not* FileGroups that have an
* `addToDocumentSetId` set. (We can't delete those without delving into the
* add-documents innards; the add-documents logic will delete them.)
*/
object DeletedFileGroupFinder extends DeletedFileGroupFinder with HasDatabase {
import database.api._
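  /** Selects the ids of deleted FileGroups that are not owned by an add-documents job. */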
lazy val query = FileGroups.filter(_.deleted).filter(_.addToDocumentSetId.isEmpty).map(_.id)
override def indexIds: Future[Iterable[Long]] = database.seq(query)
}
| overview/overview-server | worker/src/main/scala/com/overviewdocs/background/filegroupcleanup/DeletedFileGroupFinder.scala | Scala | agpl-3.0 | 801 |
package ru.yandex.mysqlDiff
package vendor.mysql
import java.sql._
import ru.yandex.small.jdbc._
import jdbc._
import util._
import model._
import MetaDao._
import Implicits._
object MysqlMetaDao {
/** INFORMATION_SCHEMA.COLUMNS */
case class MysqlColumnInfo(
tableCatalog: String, tableSchema: String, tableName: String,
columnName: String, ordinalPosition: Long, columnDefault: String,
isNullable: Boolean, dataType: String,
characterMaximumLength: Long, characterOctetLength: Double,
numericPrecision: Long, numericScale: Long,
characterSetName: String, collationName: String,
columnType: String, /* skipped some columns */ columnComment: String
)
private def mapColumnsRow(rs: ResultSet) = {
import rs._
MysqlColumnInfo(
getString("table_catalog"), getString("table_schema"), getString("table_name"),
getString("column_name"), getLong("ordinal_position"), getString("column_default"),
getBoolean("is_nullable"), getString("data_type"),
getLong("character_maximum_length"), getDouble("character_octet_length"),
getLong("numeric_precision"), getLong("numeric_scale"),
getString("character_set_name"), getString("collation_name"),
getString("column_type"), getString("column_comment")
)
}
}
import MysqlMetaDao._
/**
* MySQL specific implementation of MetaDao. Uses INFORMATION_SCHEMA
*/
class MysqlMetaDao(jt: JdbcTemplate) extends MetaDao(jt) {
import MetaDao._
import jt._
// http://bugs.mysql.com/36699
private val PROPER_COLUMN_DEF_MIN_MYSQL_VERSION = MysqlServerVersion.parse("5.0.51")
// MySQL does not store default charset, collation only
// http://dev.mysql.com/doc/refman/5.1/en/tables-table.html
def mapTableOptions(rs: ResultSet) =
(rs.getString("TABLE_NAME"), Seq(
MysqlEngineTableOption(rs.getString("ENGINE")),
MysqlCollateTableOption(rs.getString("TABLE_COLLATION")),
            // stupid MySQL developers print some leftover information in the TABLE_COMMENT column
MysqlCommentTableOption(rs.getString("TABLE_COMMENT").replaceFirst("(; |^)InnoDB free: .*", ""))
))
private val INFORMATION_SCHEMA_TABLES_FIELDS = Seq("TABLE_NAME", "ENGINE", "TABLE_COLLATION", "TABLE_COMMENT")
def findMysqlTablesOptions(schema: String): Seq[(String, Seq[TableOption])] = {
val q = "SELECT " + INFORMATION_SCHEMA_TABLES_FIELDS.mkString(", ") + " FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ?"
query(q, schema).seq(mapTableOptions _)
}
def findMysqlTableOptions(schema: String, tableName: String): Seq[TableOption] = {
val q = "SELECT " + INFORMATION_SCHEMA_TABLES_FIELDS.mkString(", ") + " FROM INFORMATION_SCHEMA.TABLES WHERE table_schema = ? AND table_name = ?"
query(q, schema, tableName).single(mapTableOptions _)._2
}
    override def findTablesOptions(catalog: String, schema: String): Seq[(String, Seq[TableOption])] =
findMysqlTablesOptions(schema)
override def findTableOptions(catalog: String, schema: String, tableName: String): Seq[TableOption] =
findMysqlTableOptions(schema, tableName)
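    /** Buckets elements by key, preserving the original element order within each bucket. */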
protected def groupBy[A, B](seq: Seq[A])(f: A => B): Seq[(B, Seq[A])] = {
import scala.collection.mutable._
val result = new HashMap[B, ListBuffer[A]]()
for (a <- seq) {
val list = result.getOrElseUpdate(f(a), new ListBuffer[A]())
list += a
}
result.toSeq
}
def findMysqlTablesColumns(catalog: String, schema: String): Seq[(String, Seq[MysqlColumnInfo])] = {
Validate.notNull(schema)
val q = "SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? ORDER BY table_name"
val columns = query(q, schema).seq(mapColumnsRow _)
groupBy[MysqlColumnInfo, String](columns)(_.tableName)
}
def findMysqlTableColumns(catalog: String, schema: String, tableName: String) = {
Validate.notNull(schema, "schema")
Validate.notNull(tableName, "tableName")
val q = "SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE table_schema = ? AND table_name = ?"
query(q, schema, tableName).seq(mapColumnsRow _) match {
case Seq() => throw new MysqlDiffException(
"no columns found in schema "+ schema +", table "+ tableName +"")
case l => l
}
}
protected override def mapIndexInfoRowToIndexColumn(row: IndexInfoRow) =
IndexColumn(row.columnName, row.ascOrDesc.getOrElse(true), row.vendorSpecific.asInstanceOf[Option[Int]])
protected override def findIndexInfoRows(catalog: String, schema: String, tableName: String) =
jt.query("SHOW INDEX FROM " + tableName).seq { rs =>
IndexInfoRow(rs.getString("KEY_NAME"), rs.getBoolean("NON_UNIQUE"),
rs.getInt("SEQ_IN_INDEX"), rs.getString("COLUMN_NAME"),
mapAscOrDesc(rs.getString("COLLATION")), rs.getIntOption("SUB_PART"))
}
protected override def mapPrimaryKeyInfoRowToPrimaryKeyColumn(row: PrimaryKeyInfoRow) = {
// XXX: primary key should not store indexing info, implicit (or explicit) index should
val (ascOrDesc, subPart) =
row.vendorSpecific.asInstanceOf[(Option[Boolean], Option[Int])]
IndexColumn(row.columnName, ascOrDesc.getOrElse(true), subPart)
}
protected override def findPrimaryKeyInfoRows(catalog: String, schema: String, tableName: String) =
(jt.query("SHOW INDEX FROM " + tableName).seq { rs =>
val keyName = rs.getString("KEY_NAME")
keyName match {
case "PRIMARY" =>
Some(PrimaryKeyInfoRow(keyName, rs.getString("COLUMN_NAME"), rs.getInt("SEQ_IN_INDEX"),
(mapAscOrDesc(rs.getString("COLLATION")), rs.getIntOption("SUB_PART"))))
case _ => None
}
}).flatMap((x: Option[PrimaryKeyInfoRow]) => x)
}
object MysqlMetaDaoTests extends DbMetaDaoTests(vendor.mysql.MysqlTestDataSourceParameters.ds) {
}
class MysqlJdbcModelExtractor(connectedContext: MysqlConnectedContext)
extends JdbcModelExtractor(connectedContext)
{
import connectedContext._
import context._
override def useParserToParseDefaultValue = false
override def urlDbIsCatalog = false
protected class MysqlAllTablesSchemaExtractor extends AllTablesSchemaExtractor
protected class MysqlSingleTableSchemaExtractor extends SingleTableSchemaExtractor
trait MysqlSchemaExtractor extends SchemaExtractor {
import jt._
override def extractTableColumns(tableName: String): Seq[ColumnModel] = metaData { data =>
// copy-paste of super plus hacks
val columns = data.getColumns(currentCatalog, currentSchema, tableName, "%")
val mysqlColumns = getMysqlColumns(tableName)
columns.read { columns =>
val base = parseTableColumn(columns)
val mysqlColumn = mysqlColumns.find(_.columnName == base.name).get
val defaultValueFromDb =
// http://bugs.mysql.com/36699
if (true) mysqlColumn.columnDefault
else columns.getString("COLUMN_DEF")
lazy val characterSet = Some(mysqlColumn.characterSetName)
.filter(x => x != null && x != "")
lazy val collate = Some(mysqlColumn.collationName)
.filter(x => x != null && x != "")
lazy val DataTypeWithLength(_, length) = base.dataType
val columnType = mysqlColumn.dataType.toUpperCase
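                // Rebuild the data type from INFORMATION_SCHEMA data: ENUM/SET definitions,
                // character set and collation are not available through plain JDBC metadata.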
val dataType =
                    if (columnType.toUpperCase.matches("(ENUM|SET)\\b.*")) {
MysqlParserCombinator.parseDataType(mysqlColumn.columnType)
} else if (MysqlDataTypes.characterDataTypeNames.contains(columnType)) {
val length = mysqlColumn.characterMaximumLength match {
case x if x <= 0 || x >= Int.MaxValue => None
case x => Some(x.toInt)
}
new MysqlCharacterDataType(columnType, length, characterSet, collate)
} else if (MysqlDataTypes.textDataTypeNames.contains(columnType)) {
new MysqlTextDataType(columnType, None, characterSet, collate)
} else if (MysqlDataTypes.numericDataTypeNames.contains(columnType)) {
// XXX: fetch unsigned, zerofill
MysqlParserCombinator.parseDataType(mysqlColumn.columnType)
} else {
base.dataType
}
val defaultValue = parseDefaultValueFromDb(defaultValueFromDb, dataType).map(DefaultValue(_))
val props = Seq[ColumnProperty]() ++ defaultValue ++ Some(MysqlComment(mysqlColumn.columnComment))
base.overrideProperties(props).copy(dataType=dataType)
}
}
def getMysqlColumns(tableName: String): Seq[MysqlColumnInfo]
}
protected override def newAllTablesSchemaExtractor() =
new AllTablesSchemaExtractor with MysqlSchemaExtractor {
val cachedMysqlColumns = new Lazy(metaDao.findMysqlTablesColumns(currentCatalog, currentSchema))
override def getMysqlColumns(tableName: String) =
cachedMysqlColumns.get.find(_._1 == tableName) match {
case Some(x) => x._2
case None => throw new MysqlDiffException("no definition for table: " + tableName)
}
}
protected override def newSingleTableSchemaExtractor() =
new SingleTableSchemaExtractor with MysqlSchemaExtractor {
override def getMysqlColumns(tableName: String) =
metaDao.findMysqlTableColumns(currentCatalog, currentSchema, tableName)
}
}
object MysqlJdbcModelExtractorTests
extends JdbcModelExtractorTests(MysqlTestDataSourceParameters.connectedContext)
{
import MysqlTestDataSourceParameters.connectedContext._
import MysqlTestDataSourceParameters.connectedContext.context._
import jt.execute
"Simple Table" in {
dropTable("bananas")
execute("CREATE TABLE bananas (id INT, color VARCHAR(100), PRIMARY KEY(id))")
val table = extractTable("bananas")
assert("bananas" == table.name)
assert("id" == table.columns(0).name)
assert("INT" == table.columns(0).dataType.name)
assert("color" == table.columns(1).name)
table.columns(1).dataType must beLike {
case MysqlCharacterDataType("VARCHAR", Some(100), _, _) => true
case _ => false
}
table.primaryKey.get.columnNames.toList must_== List("id")
}
"Indexes" in {
dropTable("users")
execute("CREATE TABLE users (first_name VARCHAR(20), last_name VARCHAR(20), age INT, INDEX age_k(age), UNIQUE KEY(first_name, last_name), KEY(age, last_name))")
val table = extractTable("users")
val ageK = table.indexes.find(_.name.get == "age_k").get
List("age") must_== ageK.columnNames.toList
val firstLastK = table.uniqueKeyWithColumns("first_name", "last_name")
val ageLastK = table.indexWithColumns("age", "last_name")
()
}
"INDEX column part" in {
ddlTemplate.recreateTable(
"CREATE TABLE index_c_p (c1 VARCHAR(100), c2 VARCHAR(50), INDEX ind (c1(10), c2)) ENGINE=InnoDB")
val table = extractTable("index_c_p")
table.indexes must haveSize(1)
val index = table.indexes.head
index.columns must haveSize(2)
index.columns(0).length must_== Some(10)
index.columns(1).length must_== None
()
}
"PRIMARY KEY INDEX column part" in {
ddlTemplate.recreateTable(
"CREATE TABLE pk_lp_extr (name VARCHAR(20), PRIMARY KEY (name(3)))")
val table = extractTable("pk_lp_extr")
table.primaryKey.get.columns.head must_== IndexColumn("name", true, Some(3))
}
"PK is not in indexes list" in {
dropTable("files")
execute("CREATE TABLE files (id INT, PRIMARY KEY(id))")
val table = extractTable("files")
table.primaryKey.get.columnNames.toList must_== List("id")
}
"Foreign keys" in {
dropTable("citizen")
dropTable("city")
dropTable("person")
execute("CREATE TABLE city (id INT PRIMARY KEY, name VARCHAR(10)) ENGINE=InnoDB")
execute("CREATE TABLE person(id1 INT, id2 INT, PRIMARY KEY(id1, id2)) ENGINE=InnoDB")
// http://community.livejournal.com/levin_matveev/20802.html
execute("CREATE TABLE citizen (id INT PRIMARY KEY, city_id INT, pid1 INT, pid2 INT, " +
"FOREIGN KEY (city_id) REFERENCES city(id), " +
"CONSTRAINT fk2c FOREIGN KEY fk2i(pid1, pid2) REFERENCES person(id1, id2)" +
") ENGINE=InnoDB")
val citizen = extractTable("citizen")
val city = extractTable("city")
val person = extractTable("person")
citizen.foreignKeys must haveSize(2)
citizen.indexes must haveSize(3)
val fkc = citizen.foreignKeys.find(_.localColumnNames.toList == List("city_id")).get
fkc.localColumnNames.toList must_== List("city_id")
fkc.externalColumns must beLike { case Seq("id") => true }
fkc.externalTable must_== "city"
val ic = citizen.indexes.find(_.columnNames.toList == List("city_id"))
val fkp = citizen.foreignKeys.find(_.localColumnNames.toList == List("pid1", "pid2")).get
fkp.localColumnNames.toList must_== List("pid1", "pid2")
fkp.externalColumns must beLike { case Seq("id1", "id2") => true }
fkp.externalTable must_== "person"
fkp.name must_== Some("fk2c")
val ip = citizen.indexes.find(_.columnNames.toList == List("pid1", "pid2")).get
// behavior is different in mysql 5.0 and 5.1
// http://bitbucket.org/stepancheg/mysql-diff/wiki/EvilMysql/ForeignKeyConstraint
Seq("fk2i", "fk2c") must contain(ip.name.get)
city.foreignKeys must haveSize(0)
person.foreignKeys must haveSize(0)
}
"FOREIGN KEY actions" in {
dropTable("ggg")
dropTable("rrr")
execute("CREATE TABLE rrr (id INT PRIMARY KEY) ENGINE=InnoDB")
for (updateDelete <- List(true, false)) {
for (action <- List(ImportedKeyNoAction, ImportedKeyCascade, ImportedKeySetNull)) {
dropTable("ggg")
val text =
(if (updateDelete) "ON UPDATE"
else "ON DELETE") +
" " +
(action match {
case ImportedKeyNoAction => "NO ACTION"
case ImportedKeyCascade => "CASCADE"
case ImportedKeySetNull => "SET NULL"
})
execute("CREATE TABLE ggg (r_id INT, FOREIGN KEY (r_id) REFERENCES rrr(id) " + text + ") ENGINE=InnoDB")
val table = extractTable("ggg")
val Seq(fk) = table.foreignKeys
val gotRule = if (updateDelete) fk.updateRule else fk.deleteRule
gotRule must_== Some(action)
}
}
}
"fetch table option ENGINE" in {
dropTable("dogs")
execute("CREATE TABLE dogs (id INT) ENGINE=InnoDB")
val table = extractTable("dogs")
table.options.properties must contain(MysqlEngineTableOption("InnoDB"))
}
"fetch table option COLLATE" in {
dropTable("cats")
execute("CREATE TABLE cats (id INT) COLLATE=cp1251_bin")
val table = extractTable("cats")
table.options.properties must contain(MysqlCollateTableOption("cp1251_bin"))
}
"fetch TABLE COMMENT MyISAM" in {
ddlTemplate.recreateTable("CREATE TABLE table_comment_fetch_myisam (id INT) COMMENT='stone' ENGINE=MyISAM")
val table = extractTable("table_comment_fetch_myisam")
table.options.properties must contain(MysqlCommentTableOption("stone"))
}
"fetch TABLE COMMENT InnoDB" in {
ddlTemplate.recreateTable("CREATE TABLE table_comment_fetch_innodb (id INT) COMMENT='stone' ENGINE=InnoDB")
val table = extractTable("table_comment_fetch_innodb")
table.options.properties must contain(MysqlCommentTableOption("stone"))
}
"fetch TABLE empty COMMENT InnoDB" in {
ddlTemplate.recreateTable("CREATE TABLE table_comment_fetch_innodb_empty (id INT) ENGINE=InnoDB")
val table = extractTable("table_comment_fetch_innodb_empty")
table.options.properties must contain(MysqlCommentTableOption(""))
}
"DEFAULT NOW()" in {
dropTable("cars")
execute("CREATE TABLE cars (id INT, created TIMESTAMP DEFAULT NOW())")
val table = extractTable("cars")
val created = table.column("created")
created.defaultValue must_== Some(NowValue)
}
"MySQL string DEFAULT values" in {
dropTable("jets")
execute("CREATE TABLE jets (a VARCHAR(2), b VARCHAR(2) DEFAULT '', c VARCHAR(2) DEFAULT 'x', " +
"d VARCHAR(2) NOT NULL, e VARCHAR(2) NOT NULL DEFAULT '', f VARCHAR(2) NOT NULL DEFAULT 'y')")
val table = extractTable("jets")
//table.column("a").defaultValue must_== None
table.column("b").defaultValue must_== Some(StringValue(""))
table.column("c").defaultValue must_== Some(StringValue("x"))
//table.column("d").defaultValue must_== None
table.column("e").defaultValue must_== Some(StringValue(""))
table.column("f").defaultValue must_== Some(StringValue("y"))
}
"various types" in {
dropTable("various_types")
execute("CREATE TABLE various_types (t TEXT, lt LONGTEXT, v VARCHAR(100))")
val table = extractTable("various_types")
()
}
"unspecified AUTO_INCREMENT" in {
dropTable("ships")
execute("CREATE TABLE ships (id INT NOT NULL, name VARCHAR(10), PRIMARY KEY(id))")
val t = extractTable("ships")
t.column("id").properties.find(MysqlAutoIncrementPropertyType) must_== Some(MysqlAutoIncrement(false))
//t.column("name").properties.autoIncrement must_== None
}
"COLUMN CHARACTER SET and COLLATE" in {
dropTable("qwqw")
execute("CREATE TABLE qwqw (a VARCHAR(2), b VARCHAR(2) CHARACTER SET utf8 COLLATE utf8_bin)")
val table = extractTable("qwqw")
val a = table.column("a")
val b = table.column("b")
b.dataType must beLike {
case MysqlCharacterDataType("VARCHAR", Some(2), Some("utf8"), Some("utf8_bin")) => true
case _ => false
}
}
"ENUM" in {
ddlTemplate.recreateTable(
"CREATE TABLE s_ev (season ENUM('winter', 'spring', 'summer', 'autumn')," +
" probability ENUM('yes', 'no', 'maybe', 'definitely') DEFAULT 'yes')")
val table = extractTable("s_ev")
table.column("season").dataType must_== new MysqlEnumDataType(Seq("winter", "spring", "summer", "autumn"))
table.column("season").defaultValue must_== Some(NullValue)
table.column("probability").defaultValue must_== Some(StringValue("yes"))
}
"BOOLEAN" in {
ddlTemplate.recreateTable(
"CREATE TABLE t_b (a BOOLEAN)")
val t = extractTable("t_b")
t.column("a").dataType must beLike {
case MysqlNumericDataType("TINYINT", Some(1), _, _, _) => true
case _ => false
}
}
"BIT DEFAULT b'0'" in {
ddlTemplate.recreateTable(
"CREATE TABLE bit_with_default_0 (aux BIT NOT NULL DEFAULT b'0') ENGINE=InnoDB CHARSET=latin1")
val t = extractTable("bit_with_default_0")
val c = t.column("aux")
c.defaultValue must_== Some(NumberValue(0))
}
"COLUMN COMMENT" in {
dropTable("beers")
execute("CREATE TABLE beers (name VARCHAR(100) COMMENT 'short name')")
val t = extractTable("beers")
val c = t.column("name")
c.properties.find(MysqlCommentPropertyType) must_== Some(MysqlComment("short name"))
}
}
// vim: set ts=4 sw=4 et:
| hkerem/mysql-diff | src/main/scala/ru/yandex/mysqlDiff/vendor/mysql/mysql-jdbc.scala | Scala | bsd-3-clause | 21,073 |
package com.databricks.spark.corenlp
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
import org.scalatest.{BeforeAndAfterAll, FunSuite}
trait SparkFunSuite extends FunSuite with BeforeAndAfterAll {
@transient var sc: SparkContext = _
@transient var sqlContext: SQLContext = _
override def beforeAll(): Unit = {
sc = SparkContext.getOrCreate(
new SparkConf()
.setMaster("local[2]")
.setAppName(this.getClass.getSimpleName)
)
sqlContext = SQLContext.getOrCreate(sc)
}
override def afterAll(): Unit = {
sc.stop()
sc = null
sqlContext = null
}
}
| mengxr/spark-corenlp | src/test/scala/com/databricks/spark/corenlp/SparkFunSuite.scala | Scala | gpl-3.0 | 645 |
package ua.nure.lab2
import org.specs2.mutable.Specification
/**
* @author Bohdan_Suprun
*/
class SavingsAccountTest extends Specification {
"SavingsAccount" should {
"not charge the customer 3times in a month" in {
val account = new SavingsAccount(100)
account deposit 10 mustEqual 110
account deposit 10 mustEqual 120
account deposit 10 mustEqual 130
account deposit 10 mustEqual 139
}
"increase monthly counter and add percents to the customer's account" in {
val account = new SavingsAccount(100)
account deposit 10 mustEqual 110
account deposit 10 mustEqual 120
account deposit 10 mustEqual 130
account deposit 10 mustEqual 139
account.earnMonthlyInterest()
account deposit 10 mustEqual 139 + 139 * 0.1 + 10
}
}
}
| isCompetent/spp | src/test/scala/ua/nure/lab2/SavingsAccountTest.scala | Scala | mit | 818 |
package step1
object Runner {
def main(arr: Array[String]): Unit = {
val t3 = NumberT3.fromInt(12)
val t2 = NumberT2.fromInt(12)
val t1 = NumberT1.fromInt(12)
val t4 = NumberT4.count(t3, t2, t1)
println(t4.length)
}
}
| djx314/ubw | a36-快乐智多星/src/main/scala/step1/Runner.scala | Scala | bsd-3-clause | 245 |
/*
* Copyright (c) 2014 Oculus Info Inc.
* http://www.oculusinfo.com/
*
* Released under the MIT License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.oculusinfo.tilegen.graph.util
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.graphx._
import com.oculusinfo.tilegen.util.ArgumentParser
//import com.oculusinfo.tilegen.graph.util.ForceDirectedLayout
import com.oculusinfo.tilegen.spark.MavenReference
import com.oculusinfo.tilegen.spark.SparkConnector
object ClusteredGraphLayoutApp {
def main(args: Array[String]) {
val argParser = new ArgumentParser(args)
argParser.debug
val jars =
Seq(new MavenReference("com.oculusinfo", "tile-generation", "0.3-SNAPSHOT")
) union SparkConnector.getDefaultLibrariesFromMaven
val sc = argParser.getSparkConnector(jars).getSparkContext("Clustered Graph Layout")
val sourceDir = argParser.getString("source", "The source directory where to find clustered graph data")
val outputDir = argParser.getString("output", "The output location where to save data")
val partitions = argParser.getInt("parts", "The number of partitions into which to read the raw data", Some(0))
val consolidationPartitions = argParser.getInt("p", "The number of partitions for data processing. Default=based on input partitions", Some(0))
val dataDelimiter = argParser.getString("d", "Delimiter for the source graph data. Default is tab-delimited", Some("\\t"))
val maxIterations = argParser.getInt("i", "Max number of iterations for force-directed algorithm", Some(500))
val maxHierarchyLevel = argParser.getInt("maxLevel","Max cluster hierarchy level to use for determining graph layout", Some(0))
val borderPercent = argParser.getDouble("border","Percent of parent bounding box to leave as whitespace between neighbouring communities during initial layout. Default is 2 percent", Some(2.0))
val layoutLength = argParser.getDouble("layoutLength", "Desired width/height length of the total node layout region. Default = 256.0", Some(256.0))
val numNodesThres = argParser.getInt("nThres", "Community size threshold to use for grouping sub-communities together into one force-directed layout task", Some(1000))
val nodeAreaPercent = argParser.getInt("nArea", "Used for Hierarchical Force-directed layout ONLY. Sets the area of all node 'circles' within the boundingBox vs whitespace. Default is 30 percent", Some(30))
val bUseEdgeWeights = argParser.getBoolean("eWeight", "Use edge weights, if present, to scale force-directed attraction forces. Default is false", Some(false))
val gravity = argParser.getDouble("g", "Amount of gravitational force to use for Force-Directed layout to prevent outer nodes from spreading out too far. Default = 0 (no gravity)", Some(0.0))
val fileStartTime = System.currentTimeMillis()
// Hierarchical Force-Directed layout scheme
val layouter = new HierarchicFDLayout()
layouter.determineLayout(sc,
maxIterations,
maxHierarchyLevel,
partitions,
consolidationPartitions,
sourceDir,
dataDelimiter,
(layoutLength,layoutLength),
borderPercent,
nodeAreaPercent,
bUseEdgeWeights,
gravity,
outputDir)
val fileEndTime = System.currentTimeMillis()
println("Finished hierarchic graph layout job in "+((fileEndTime-fileStartTime)/60000.0)+" minutes")
println("DONE!!")
}
} | aashish24/aperture-tiles | tile-generation/src/main/scala/com/oculusinfo/tilegen/graph/util/ClusteredGraphLayoutApp.scala | Scala | mit | 4,544 |
package scala.lms
package common
import java.io.PrintWriter
import internal._
import scala.reflect.SourceContext
trait UncheckedOps extends Base {
def unchecked[T:Typ](s: Any*): Rep[T]
def uncheckedPure[T:Typ](s: Any*): Rep[T]
implicit class richQuote(c: StringContext) {
class QuoteOps(args: Thunk[Rep[Any]]*) {
def as[T:Typ]: Rep[T] = {
//reflect(c.s(args map (a => reify(a.eval())):_*))
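        // interleave the literal string parts with the evaluated arguments: part0, arg0, part1, arg1, ...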
def merge(a: List[Any], b: List[Any]): List[Any] = a match {
case Nil => Nil
          case x::xs => x::merge(b,xs)
}
unchecked[T](merge(c.parts.toList, args.toList.map(_.eval())):_*)
}
}
def raw(args: Thunk[Rep[Any]]*) = new QuoteOps(args:_*)
}
// args: =>Code* is not allowed so we make thunks explicit
case class Thunk[+A](eval: () => A)
implicit def toThunk[A](x: =>A) = new Thunk(() => x)
}
trait UncheckedOpsExp extends EffectExp {
// TODO: use reifyEffects
case class Unchecked[T](s: List[Any]) extends Def[T]
def unchecked[T:Typ](s: Any*): Rep[T] = reflectEffect[T](Unchecked(s.toList))
def uncheckedPure[T:Typ](s: Any*): Rep[T] = toAtom[T](Unchecked(s.toList))
override def mirror[A:Typ](e: Def[A], f: Transformer)(implicit pos: SourceContext): Exp[A] = (e match {
//case Reflect(ThrowException(s), u, es) => reflectMirrored(Reflect(ThrowException(f(s)), mapOver(f,u), f(es)))(mtype(manifest[A]))
// TODO mirror Unchecked and Reflect(Unchecked)
case _ => super.mirror(e,f)
}).asInstanceOf[Exp[A]]
}
trait ScalaGenUncheckedOps extends ScalaGenBase {
val IR: UncheckedOpsExp
import IR._
override def emitNode(sym: Sym[Any], rhs: Def[Any]) = rhs match {
case Unchecked(xs) =>
emitValDef(sym, xs map ((x:Any)=> x match { case x: Exp[_] => quote(x) case x => x.toString }) mkString "")
case _ => super.emitNode(sym, rhs)
}
}
trait CGenUncheckedOps extends CGenBase {
val IR: UncheckedOpsExp
import IR._
override def emitNode(sym: Sym[Any], rhs: Def[Any]) = rhs match {
case Unchecked(xs) =>
emitValDef(sym, xs map ((x:Any)=> x match { case x: Exp[_] => quote(x) case x => x.toString }) mkString "")
case _ => super.emitNode(sym, rhs)
}
}
| astojanov/virtualization-lms-core | src/common/Unchecked.scala | Scala | bsd-3-clause | 2,203 |
import java.sql.DriverManager
/**
* Created by yww08 on 2019-01-07.
*/
object GZM {
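  // Minimal JDBC check: connects to the warehouse database and prints every SENSOR_ID in T_DIM_SENSOR.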
def main(args: Array[String]): Unit = {
Class.forName("com.microsoft.sqlserver.jdbc.SQLServerDriver")
val connStr = "jdbc:sqlserver://10.8.30.32:1433;databaseName=DW_iSecureCloud_Empty_20181118"
val dbConn = DriverManager.getConnection(connStr, "sa", "123")
val stmt = dbConn.createStatement()
val rs = stmt.executeQuery("select * from T_DIM_SENSOR")
while (rs != null && rs.next()) {
val sid = rs.getInt("SENSOR_ID")
println(sid)
}
dbConn.close()
}
} | yinweiwen/study | demo/datamover/src/main/scala/GZM.scala | Scala | mit | 569 |
package org.dama.datasynth.common.generators.structure
import org.apache.hadoop.conf.Configuration
import scala.util.Try
/**
* Created by aprat on 20/04/17.
*/
object StructureGenerator {
def getInstance( name : String ) : Try[StructureGenerator] = {
Try(Class.forName(name).newInstance().asInstanceOf[StructureGenerator])
}
}
abstract class StructureGenerator {
def run( num : Long, hdfsConf : Configuration, path : String )
}
| DAMA-UPC/DataSynth | src/main/scala/org/dama/datasynth/common/generators/structure/StructureGenerator.scala | Scala | gpl-3.0 | 447 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala
import java.util.concurrent.TimeUnit
import _root_.scala.concurrent.duration.Duration
import com.mongodb.{ MongoCredential => JMongoCredential }
import org.bson.BsonDocumentWrapper
import org.bson.codecs.DocumentCodec
import org.mongodb.scala
import org.mongodb.scala.MongoClient.DEFAULT_CODEC_REGISTRY
import org.mongodb.scala.bson._
import org.mongodb.scala.model._
class ScalaPackageSpec extends BaseSpec {
it should "be able to create Observable, Observers and Subscriptions" in {
var success = false
val observerable = new Observable[Int] {
override def subscribe(observer: Observer[_ >: Int]): Unit = {
val subscription = new Subscription {
override def isUnsubscribed: Boolean = false
override def request(l: Long): Unit = observer.onComplete()
override def unsubscribe(): Unit = {}
}
observer.onSubscribe(subscription)
}
}
val observer = new Observer[Int] {
override def onError(throwable: Throwable): Unit = {}
override def onSubscribe(subscription: Subscription): Unit = subscription.request(1)
override def onComplete(): Unit = success = true
override def onNext(tResult: Int): Unit = {}
}
observerable.subscribe(observer)
success shouldBe true
}
it should "be able to create MongoClientSettings" in {
val settings = scala.MongoClientSettings.builder().readPreference(ReadPreference.primary()).build()
settings shouldBe a[com.mongodb.MongoClientSettings]
}
it should "be able to create Documents" in {
val doc = Document("a" -> BsonString("1"))
val doc2 = collection.Document("a" -> BsonString("1"))
doc shouldBe a[collection.immutable.Document]
doc should equal(doc2)
}
it should "be able to create BulkWriteOptions" in {
val options = BulkWriteOptions()
options shouldBe a[com.mongodb.client.model.BulkWriteOptions]
}
it should "be able to create MongoNamespace" in {
val namespace = MongoNamespace("db.coll")
namespace shouldBe a[com.mongodb.MongoNamespace]
val namespace2 = MongoNamespace("db", "coll")
namespace2 shouldBe a[com.mongodb.MongoNamespace]
}
it should "be able to create WriteConcern" in {
WriteConcern.ACKNOWLEDGED should equal(com.mongodb.WriteConcern.ACKNOWLEDGED)
WriteConcern.W1 should equal(new com.mongodb.WriteConcern(1))
WriteConcern.W2 should equal(new com.mongodb.WriteConcern(2))
WriteConcern.W3 should equal(new com.mongodb.WriteConcern(3))
WriteConcern.UNACKNOWLEDGED should equal(com.mongodb.WriteConcern.UNACKNOWLEDGED)
WriteConcern.JOURNALED should equal(com.mongodb.WriteConcern.JOURNALED)
WriteConcern.MAJORITY should equal(com.mongodb.WriteConcern.MAJORITY)
WriteConcern(1) should equal(new com.mongodb.WriteConcern(1))
WriteConcern("Majority") should equal(new com.mongodb.WriteConcern("Majority"))
WriteConcern(1).withJournal(true) should equal(new com.mongodb.WriteConcern(1).withJournal(true))
WriteConcern("Majority").withWTimeout(Duration(10, TimeUnit.MILLISECONDS)) should equal(
new com.mongodb.WriteConcern("Majority").withWTimeout(10, TimeUnit.MILLISECONDS)
)
WriteConcern(1).withWTimeout(Duration(10, TimeUnit.MILLISECONDS)) should equal(
new com.mongodb.WriteConcern(1).withWTimeout(10, TimeUnit.MILLISECONDS)
)
}
it should "create MongoCredential" in {
val scalaCredential = MongoCredential.createCredential("userName", "database", "password".toCharArray)
val javaCredential = JMongoCredential.createCredential("userName", "database", "password".toCharArray)
scalaCredential should equal(javaCredential)
val scalaCredential1 = MongoCredential.createScramSha1Credential("userName", "database", "password".toCharArray)
val javaCredential1 = JMongoCredential.createScramSha1Credential("userName", "database", "password".toCharArray)
scalaCredential1 should equal(javaCredential1)
val scalaCredential2 = MongoCredential.createMongoX509Credential("userName")
val javaCredential2 = JMongoCredential.createMongoX509Credential("userName")
scalaCredential2 should equal(javaCredential2)
val scalaCredential3 = MongoCredential.createMongoX509Credential()
val javaCredential3 = JMongoCredential.createMongoX509Credential()
scalaCredential3 should equal(javaCredential3)
val scalaCredential4 = MongoCredential.createPlainCredential("userName", "database", "password".toCharArray)
val javaCredential4 = JMongoCredential.createPlainCredential("userName", "database", "password".toCharArray)
scalaCredential4 should equal(javaCredential4)
val scalaCredential5 = MongoCredential.createGSSAPICredential("userName")
val javaCredential5 = JMongoCredential.createGSSAPICredential("userName")
scalaCredential5 should equal(javaCredential5)
}
it should "implicitly convert to org.bson.document with type fidelity" in {
val bsonDocument = Document(
"null" -> BsonNull(),
"int32" -> BsonInt32(32),
"int64" -> BsonInt64(Long.MaxValue),
"decimal128" -> BsonDecimal128(128.1),
"boolean" -> BsonBoolean(true),
"date" -> BsonDateTime(123456789),
"double" -> BsonDouble(1.1),
"string" -> BsonString("String"),
"minKey" -> BsonMinKey(),
"maxKey" -> BsonMaxKey(),
"javaScript" -> BsonJavaScript("function () {}"),
"objectId" -> BsonObjectId(),
"codeWithScope" -> BsonJavaScriptWithScope("function () {}", Document()),
"regex" -> BsonRegularExpression("/(.*)/"),
"symbol" -> BsonSymbol(Symbol("sym")),
"timestamp" -> BsonTimestamp(),
"undefined" -> BsonUndefined(),
"binary" -> BsonBinary(Array[Byte](128.toByte)),
"array" -> BsonArray(List("a", "b", "c")),
"document" -> Document("a" -> 1, "b" -> List(1, 2, 3))
)
val document: org.bson.Document = bsonDocument
BsonDocumentWrapper.asBsonDocument(document, DEFAULT_CODEC_REGISTRY) should equal(bsonDocument.underlying)
}
}
| jyemin/mongo-java-driver | driver-scala/src/test/scala/org/mongodb/scala/ScalaPackageSpec.scala | Scala | apache-2.0 | 6,644 |
package lila.mod
import lila.db.api._
import lila.db.Implicits._
import tube.modlogTube
import play.api.libs.json.Json
final class ModlogApi {
def streamConfig(mod: String) = add {
Modlog(mod, none, Modlog.streamConfig)
}
def engine(mod: String, user: String, v: Boolean) = add {
Modlog(mod, user.some, v.fold(Modlog.engine, Modlog.unengine))
}
def booster(mod: String, user: String, v: Boolean) = add {
Modlog(mod, user.some, v.fold(Modlog.booster, Modlog.unbooster))
}
def troll(mod: String, user: String, v: Boolean) = add {
Modlog(mod, user.some, v.fold(Modlog.troll, Modlog.untroll))
}
def ban(mod: String, user: String, v: Boolean) = add {
Modlog(mod, user.some, v.fold(Modlog.ipban, Modlog.ipunban))
}
def closeAccount(mod: String, user: String) = add {
Modlog(mod, user.some, Modlog.closeAccount)
}
def reopenAccount(mod: String, user: String) = add {
Modlog(mod, user.some, Modlog.reopenAccount)
}
def setTitle(mod: String, user: String, title: Option[String]) = add {
val name = title flatMap lila.user.User.titlesMap.get
Modlog(mod, user.some, name.isDefined.fold(Modlog.setTitle, Modlog.removeTitle), details = name)
}
def setEmail(mod: String, user: String) = add {
Modlog(mod, user.some, Modlog.setEmail)
}
def ipban(mod: String, ip: String) = add {
Modlog(mod, none, Modlog.ipban, ip.some)
}
def deletePost(mod: String, user: Option[String], author: Option[String], ip: Option[String], text: String) = add {
Modlog(mod, user, Modlog.deletePost, details = Some(
author.??(_ + " ") + ip.??(_ + " ") + text.take(140)
))
}
def toggleCloseTopic(mod: String, categ: String, topic: String, closed: Boolean) = add {
Modlog(mod, none, closed ? Modlog.closeTopic | Modlog.openTopic, details = Some(
categ + " / " + topic
))
}
def toggleHideTopic(mod: String, categ: String, topic: String, hidden: Boolean) = add {
Modlog(mod, none, hidden ? Modlog.hideTopic | Modlog.showTopic, details = Some(
categ + " / " + topic
))
}
def deleteQaQuestion(mod: String, user: String, title: String) = add {
Modlog(mod, user.some, Modlog.deleteQaQuestion, details = Some(title take 140))
}
def deleteQaAnswer(mod: String, user: String, text: String) = add {
Modlog(mod, user.some, Modlog.deleteQaAnswer, details = Some(text take 140))
}
def deleteQaComment(mod: String, user: String, text: String) = add {
Modlog(mod, user.some, Modlog.deleteQaComment, details = Some(text take 140))
}
def recent = $find($query($select.all) sort $sort.naturalDesc, 100)
def wasUnengined(userId: String) = $count.exists(Json.obj(
"user" -> userId,
"action" -> Modlog.unengine
))
def wasUnbooster(userId: String) = $count.exists(Json.obj(
"user" -> userId,
"action" -> Modlog.unbooster
))
private def add(m: Modlog): Funit = {
play.api.Logger("ModApi").info(m.toString)
$insert(m)
}
}
| TangentialAlan/lila | modules/mod/src/main/ModlogApi.scala | Scala | mit | 2,978 |
package jp.co.bizreach.elasticsearch4s
import JsonUtils._
sealed trait BulkAction {
def jsonString: String
}
object BulkAction {
case class Index(config: ESConfig, doc: AnyRef) extends BulkAction {
def jsonString: String = {
s"""{ "index" : { "_index" : "${config.indexName}", "_type" : "${config.typeName.getOrElse("_doc")}" } }
|${singleLine(serialize(doc))}""".stripMargin
}
}
case class Create(config: ESConfig, doc: AnyRef, id: String) extends BulkAction {
def jsonString: String = {
s"""{ "create" : { "_index" : "${config.indexName}", "_type" : "${config.typeName.getOrElse("_doc")}", "_id": "${id}"} }
|${singleLine(serialize(doc))}""".stripMargin
}
}
case class Update(config: ESConfig, doc: AnyRef, id: String) extends BulkAction {
def jsonString: String = {
s"""{ "update" : { "_index" : "${config.indexName}", "_type" : "${config.typeName.getOrElse("_doc")}", "_id": "${id}"} }
|{ "doc": ${singleLine(serialize(doc))} }""".stripMargin
}
}
case class Script(config: ESConfig, script: String, id: String) extends BulkAction {
def jsonString: String = {
s"""{ "update" : { "_index" : "${config.indexName}", "_type" : "${config.typeName.getOrElse("_doc")}", "_id": "${id}"} }
|{ "script": ${script} }""".stripMargin
}
}
case class Delete(config: ESConfig, id: String) extends BulkAction {
def jsonString: String = {
s"""{ "delete" : { "_index" : "${config.indexName}", "_type" : "${config.typeName.getOrElse("_doc")}", "_id": "${id}"} }"""
}
}
  private def singleLine(str: String): String = str.replace("\n", "").replace("\r", "")
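  // Illustrative example (assuming an ESConfig built as ESConfig("blog") with no explicit type
  // name, and a serializable case class Blog(title: String)):
  //   BulkAction.Index(ESConfig("blog"), Blog("hello")).jsonString
  // produces the two-line payload expected by the Elasticsearch bulk API:
  //   { "index" : { "_index" : "blog", "_type" : "_doc" } }
  //   {"title":"hello"}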
}
| bizreach/elastic-scala-httpclient | elastic-scala-httpclient/src/main/scala/jp/co/bizreach/elasticsearch4s/BulkAction.scala | Scala | apache-2.0 | 1,680 |
package com.twitter.zipkin.storage.redis
import com.google.common.io.Closer
import com.twitter.finagle.redis.Client
import com.twitter.util.{Duration, Future}
import com.twitter.zipkin.common.Span
import com.twitter.zipkin.storage._
import java.nio.ByteBuffer
/**
* @param client the redis client to use
* @param ttl expires keys older than this many seconds.
*/
class RedisSpanStore(client: Client, ttl: Option[Duration]) extends SpanStore {
private[this] val closer = Closer.create();
private[this] val index = closer.register(new RedisIndex(client, ttl))
private[this] val storage = closer.register(new RedisStorage(client, ttl))
private[this] def call[T](f: => T): Future[T] = synchronized { Future(f) }
/** For testing, clear this store. */
private[redis] def clear(): Future[Unit] = client.flushDB()
override def close() = closer.close()
def apply(newSpans: Seq[Span]): Future[Unit] = Future.collect(newSpans.flatMap {
span =>
Seq(storage.storeSpan(span),
index.indexServiceName(span),
index.indexSpanNameByService(span),
index.indexTraceIdByServiceAndName(span),
index.indexSpanByAnnotations(span),
index.indexSpanDuration(span))
}).unit
// Used for pinning
def setTimeToLive(traceId: Long, ttl: Duration): Future[Unit] = {
storage.setTimeToLive(traceId, ttl)
}
def getTimeToLive(traceId: Long): Future[Duration] = {
storage.getTimeToLive(traceId)
}
override def getDataTimeToLive = Future.value(ttl.map(_.inSeconds).getOrElse(Int.MaxValue))
def tracesExist(traceIds: Seq[Long]): Future[Set[Long]] = {
storage.tracesExist(traceIds)
}
def getSpansByTraceIds(traceIds: Seq[Long]): Future[Seq[Seq[Span]]] = {
storage.getSpansByTraceIds(traceIds)
}
def getSpansByTraceId(traceId: Long): Future[Seq[Span]] = {
storage.getSpansByTraceId(traceId)
}
def getTraceIdsByName(
serviceName: String,
spanName: Option[String],
endTs: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = {
index.getTraceIdsByName(serviceName, spanName, endTs, limit)
}
def getTraceIdsByAnnotation(
serviceName: String,
annotation: String,
value: Option[ByteBuffer],
endTs: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = {
index.getTraceIdsByAnnotation(serviceName, annotation, value, endTs, limit)
}
def getTracesDuration(traceIds: Seq[Long]): Future[Seq[TraceIdDuration]] = index.getTracesDuration(traceIds)
def getAllServiceNames: Future[Set[String]] = {
index.getServiceNames
}
def getSpanNames(serviceName: String): Future[Set[String]] = index.getSpanNames(serviceName)
}
| hydro2k/zipkin | zipkin-redis/src/main/scala/com/twitter/zipkin/storage/redis/RedisSpanStore.scala | Scala | apache-2.0 | 2,651 |
package es.weso.slang
import es.weso.rdf.nodes.IRI
import es.weso.shex.{
BNodeKind => ShExBNodeKind,
BNodeLabel => ShExBNodeLabel,
IRIKind => ShExIRIKind,
IRILabel => ShExIRILabel,
IntMax => ShExIntMax,
Max => ShExMax,
Star => ShExStar, _}
import es.weso.slang
// import es.weso.utils.EitherUtils._
import cats.effect.IO
import cats.data._
import cats.implicits._
trait ShEx2SLang {
def shex2SLang(schema: Schema): IO[SchemaS] = {
val e = for {
keyValues <- schema.shapesMap.toList.map(cnvlabelShape(schema)).sequence
} yield SchemaS(keyValues.toMap)
e.value.flatMap(e => e match {
case Left(s) => IO.raiseError(new RuntimeException(s))
case Right(schema) => IO.pure(schema)
})
}
private def cnvlabelShape(schema: Schema)(pair: (ShapeLabel, ShapeExpr)): EitherT[IO, String, (Label,SLang)] = {
val (label,se) = pair
for {
lbl <- cnvLabel(label)
s <- cnvShapeExpr(se,schema)
} yield (lbl,s)
}
private def cnvLabel(lbl: ShapeLabel): EitherT[IO,String, Label] = lbl match {
case ShExBNodeLabel(bnode) => ok(slang.BNodeLabel(bnode))
case ShExIRILabel(iri) => ok(slang.IRILabel(iri))
case Start => err(s"Unimplemented conversion of Start to SLang")
}
private def cnvShapeExpr(se: ShapeExpr, schema: Schema): EitherT[IO,String, SLang] = se match {
case ShapeAnd(_,ses, _, _) => for {
ss <- ses.map(se => cnvShapeExpr(se,schema)).sequence
} yield ss.foldRight(SLang.strue)(And)
case ShapeOr(_,ses,_,_) => for {
ss <- ses.map(se => cnvShapeExpr(se,schema)).sequence
} yield ss.foldRight(SLang.sfalse)(SLang.or)
case nk: NodeConstraint => for {
s <- cnvNodeConstraint(nk)
} yield s
case ShapeNot(_,s,_,_) => for {
sn <- cnvShapeExpr(s,schema)
} yield Not(sn)
case ShapeRef(ref,_,_) => for {
lbl <- cnvLabel(ref)
} yield Ref(lbl)
case s: Shape => cnvShape(s,schema)
case _ => err(s"shex2slang: Unimplemented $se")
}
private def cnvNodeConstraint(nc: NodeConstraint): EitherT[IO,String,SLang] = for {
nks <- nc.nodeKind.map(cnvNodeKind(_)).sequence
datatypes <- nc.datatype.map(cnvDatatype(_)).sequence
// TODO convert the rest: xsfacets, values...
r <- {
val maybeS : Option[SLang] = (nks ++ datatypes).reduceOption(SLang.and)
maybeS match {
case None => err(s"cnvNodeConstraint($nc): No values in constraint")
case Some(s) => ok(s)
}
}
} yield r
private def cnvNodeKind(nk: NodeKind): EitherT[IO,String,SLang] = nk match {
case ShExIRIKind => ok(slang.IRIKind)
case ShExBNodeKind => ok(slang.BNodeKind)
case _ => err(s"shex2slang (cnvNodeKind): Unimplemented $nk")
}
private def ok[A](x:A): EitherT[IO,String,A] = EitherT.fromEither(x.asRight)
private def err[A](msg: String): EitherT[IO,String,A] = EitherT.fromEither[IO](msg.asLeft[A])
private def cnvDatatype(dt: IRI): EitherT[IO,String,SLang] =
ok(Datatype(dt))
// TODO: Handle Closed, Extras, etc....
private def cnvShape(s: Shape, schema: Schema): EitherT[IO,String,SLang] = s.expression match {
case None => EitherT.fromEither(SLang.strue.asRight)
case Some(expr) => cnvTripleExpr(expr,schema)
}
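  // EachOf becomes the conjunction of its converted parts plus a constraint forbidding arcs whose
  // predicate is not mentioned in the expression; a TripleConstraint becomes a qualified-arc
  // requirement together with the negation of arcs whose value fails the converted shape.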
private def cnvTripleExpr(te: TripleExpr, schema: Schema): EitherT[IO,String,SLang] = te match {
case eo: EachOf => for {
es <- eo.expressions.map(cnvTripleExpr(_,schema)).sequence
preds = eo.predicates(schema)
} yield And(
es.foldRight(SLang.strue)(And),
Not(QualifiedArc(NoPreds(preds.toSet), SLang.strue, Card.oneStar))
)
case tc: TripleConstraint => for {
s <- tc.valueExpr.map(cnvShapeExpr(_,schema)) match {
case None => EitherT.fromEither[IO](SLang.strue.asRight[String])
case Some(r) => r
}
card = cnvCard(tc.min,tc.max)
} yield
And(
QualifiedArc(Pred(tc.predicate),s,card),
Not(QualifiedArc(Pred(tc.predicate), Not(s), Card.oneStar))
)
case _ => EitherT.fromEither(s"shex2slang (cnvTripleExpr): Unimplemented $te".asLeft)
}
private def cnvCard(min: Int, max: ShExMax): Card =
Card(min,max match {
case ShExIntMax(n) => IntMax(n)
case ShExStar => Star
})
} | labra/shaclex | modules/slang/src/main/scala/es/weso/slang/ShEx2SLang.scala | Scala | mit | 4,344 |
package com.eevolution.context.dictionary.infrastructure.service
import java.util.UUID
import akka.NotUsed
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.TabTrl
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.api.{Service, ServiceCall}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
* Created by [email protected] , www.e-evolution.com
*/
/**
* Tab Trl Service
*/
trait TabTrlService extends Service with api.service.TabTrlService {
override def getAll() : ServiceCall[NotUsed, List[TabTrl]]
override def getById(id: Int): ServiceCall[NotUsed, TabTrl]
override def getByUUID(uuid :UUID): ServiceCall[NotUsed, TabTrl]
override def getAllByPage(pageNo: Option[Int], pageSize: Option[Int]): ServiceCall[NotUsed, PaginatedSequence[TabTrl]]
def descriptor = {
import Service._
named("tabTrl").withCalls(
pathCall("/api/v1_0_0/tabTrl/all", getAll _) ,
pathCall("/api/v1_0_0/tabTrl/:id", getById _),
pathCall("/api/v1_0_0/tabTrl/:uuid", getByUUID _) ,
pathCall("/api/v1_0_0/tabTrl?pageNo&pageSize", getAllByPage _)
)
}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/TabTrlService.scala | Scala | gpl-3.0 | 1,978 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This file was taken from Apache Spark org/apache/spark/sql/kafka010/KafkaOffsetReader.scala
*
* There are some modifications:
* 1. Parameters and API were adapted to RocketMQ
*/
package org.apache.spark.sql.rocketmq
import java.{util => ju}
import org.apache.rocketmq.client.consumer.MQPullConsumer
import org.apache.rocketmq.common.message.MessageQueue
import org.apache.spark.internal.Logging
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
/**
* This class uses RocketMQ's own [[MQPullConsumer]] API to read data offsets from RocketMQ.
*/
private class RocketMQOffsetReader(
driverRocketMQParams: ju.Map[String, String],
readerOptions: Map[String, String],
driverGroupIdPrefix: String) extends Logging {
val topic: String = driverRocketMQParams.get(RocketMQConf.CONSUMER_TOPIC)
/**
* Place [[groupId]] and [[nextId]] here so that they are initialized before any consumer is
* created -- see SPARK-19564.
*/
private var groupId: String = _
private var nextId = 0
/**
* A RocketMQConsumer used in the driver to query the latest RocketMQ offsets. This only queries the
* offsets and never commits them.
*/
protected var consumer: MQPullConsumer = createConsumer()
private val maxOffsetFetchAttempts =
readerOptions.getOrElse("fetchOffset.numRetries", "3").toInt
private val offsetFetchAttemptIntervalMs =
readerOptions.getOrElse("fetchOffset.retryIntervalMs", "1000").toLong
private def nextGroupId(): String = {
groupId = driverGroupIdPrefix + "-" + nextId
nextId += 1
groupId
}
/**
* Closes the connection to RocketMQ, and cleans up state.
*/
def close(): Unit = {
consumer.shutdown()
}
/**
* @return The Set of MessageQueue for a given topic
*/
def fetchTopicPartitions(): Set[MessageQueue] = {
val partitions = consumer.fetchSubscribeMessageQueues(topic)
partitions.asScala.toSet
}
/**
* Resolves the specific offsets based on RocketMQ seek positions.
* This method resolves offset value -1 to the latest and -2 to the
* earliest RocketMQ seek position.
*
* @param partitionOffsets the specific offsets to resolve
* @param reportDataLoss callback to either report or log data loss depending on setting
*/
def fetchSpecificOffsets(
partitionOffsets: Map[MessageQueue, Long],
reportDataLoss: String => Unit): RocketMQSourceOffset = {
val fetched = {
withRetries {
val partitions = consumer.fetchSubscribeMessageQueues(topic)
assert(partitions.asScala == partitionOffsets.keySet,
"If startingOffsets contains specific offsets, you must specify all TopicPartitions.\n" +
"Use -1 for latest, -2 for earliest, if you don't care.\n" +
s"Specified: ${partitionOffsets.keySet} Assigned: ${partitions.asScala}")
logDebug(s"Partitions assigned to consumer: $partitions. Seeking to $partitionOffsets")
partitionOffsets.foreach {
case (mq, RocketMQOffsetRangeLimit.LATEST) =>
consumer.updateConsumeOffset(mq, consumer.maxOffset(mq))
case (mq, RocketMQOffsetRangeLimit.EARLIEST) =>
consumer.updateConsumeOffset(mq, consumer.minOffset(mq))
case (mq, offset) => consumer.updateConsumeOffset(mq, offset)
}
partitionOffsets.map {
case (mq, _) => mq -> consumer.fetchConsumeOffset(mq, false)
}
}
}
partitionOffsets.foreach {
case (tp, off) if off != RocketMQOffsetRangeLimit.LATEST &&
off != RocketMQOffsetRangeLimit.EARLIEST =>
if (fetched(tp) != off) {
reportDataLoss(
s"startingOffsets for $tp was $off but consumer reset to ${fetched(tp)}")
}
case _ =>
// no real way to check that beginning or end is reasonable
}
RocketMQSourceOffset(fetched)
}
/**
* Fetch the earliest offsets for the topic partitions
*/
def fetchEarliestOffsets(): Map[MessageQueue, Long] = {
withRetries {
val partitions = consumer.fetchSubscribeMessageQueues(topic)
logDebug(s"Partitions assigned to consumer: $partitions. Seeking to the beginning")
val partitionOffsets = partitions.asScala.map(p => p -> consumer.minOffset(p)).toMap
logDebug(s"Got earliest offsets for partition : $partitionOffsets")
partitionOffsets
}
}
/**
* Fetch the latest offsets for the topic partitions
*/
def fetchLatestOffsets(): Map[MessageQueue, Long] = {
withRetries {
val partitions = consumer.fetchSubscribeMessageQueues(topic)
logDebug(s"Partitions assigned to consumer: $partitions. Seeking to the end.")
val partitionOffsets = partitions.asScala.map(p => p -> consumer.maxOffset(p)).toMap
logDebug(s"Got latest offsets for partition : $partitionOffsets")
partitionOffsets
}
}
/**
* Fetch the earliest offsets for specific topic partitions.
* The return result may not contain some partitions if they are deleted.
*/
def fetchEarliestOffsets(
newPartitions: Seq[MessageQueue]): Map[MessageQueue, Long] = {
if (newPartitions.isEmpty) {
Map.empty[MessageQueue, Long]
} else {
withRetries {
val partitions = consumer.fetchSubscribeMessageQueues(topic)
logDebug(s"\tPartitions assigned to consumer: $partitions")
// Get the earliest offset of each partition
val partitionOffsets = newPartitions.filter { p =>
          // If topics are being deleted at the same time, some partitions may not be in
          // `partitions`, so we need to ignore them.
partitions.contains(p)
}.map(p => p -> consumer.minOffset(p)).toMap
logDebug(s"Got earliest offsets for new partitions: $partitionOffsets")
partitionOffsets
}
}
}
/**
   * Helper function that retries a body of code that returns offsets.
   * Retries are needed to handle transient failures. For example, race conditions between getting
   * the assignment and getting the position while topics/partitions are being deleted can cause NPEs.
*/
private def withRetries(
body: => Map[MessageQueue, Long]): Map[MessageQueue, Long] = synchronized {
var result: Option[Map[MessageQueue, Long]] = None
var attempt = 1
var lastException: Throwable = null
while (result.isEmpty && attempt <= maxOffsetFetchAttempts) {
try {
result = Some(body)
} catch {
case NonFatal(e) =>
lastException = e
logWarning(s"Error in attempt $attempt getting RocketMQ offsets: ", e)
attempt += 1
Thread.sleep(offsetFetchAttemptIntervalMs)
resetConsumer()
}
}
if (result.isEmpty) {
assert(attempt > maxOffsetFetchAttempts)
assert(lastException != null)
throw lastException
}
result.get
}
/**
   * Create a consumer using a newly generated group id. We always create a new consumer so that
   * retries after RocketMQ errors do not reuse a potentially broken consumer, which would likely
   * fail again.
*/
private def createConsumer(): MQPullConsumer = synchronized {
val newRocketMQParams = new ju.HashMap[String, String](driverRocketMQParams)
val groupId = nextGroupId()
RocketMQUtils.makePullConsumer(groupId, newRocketMQParams)
}
private def resetConsumer(): Unit = synchronized {
consumer.shutdown()
consumer = createConsumer()
}
}
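
// Hedged usage sketch (illustrative only): driver-side offset lookup. The parameter map contents,
// topic name and group-id prefix are assumptions; a reachable RocketMQ name server is required.
//
//   val params = new java.util.HashMap[String, String]()
//   params.put(RocketMQConf.CONSUMER_TOPIC, "my-topic")
//   val reader = new RocketMQOffsetReader(params, Map.empty, driverGroupIdPrefix = "spark-driver")
//   val latest = reader.fetchLatestOffsets()   // Map[MessageQueue, Long]
//   reader.close()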
| StyleTang/incubator-rocketmq-externals | rocketmq-spark/src/main/scala/org/apache/spark/sql/rocketmq/RocketMQOffsetReader.scala | Scala | apache-2.0 | 8,240 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.createTable
import java.io.{File, IOException}
import java.sql.Timestamp
import java.util
import org.apache.avro
import org.apache.commons.io.FileUtils
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.junit.Assert
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.metadata.datatype.DataTypes
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.sdk.file._
import scala.collection.JavaConverters._
class TestNonTransactionalCarbonTableJsonWriter extends QueryTest with BeforeAndAfterAll {
var writerPath = new File(this.getClass.getResource("/").getPath
+ "../."
+ "./target/SparkCarbonFileFormat/WriterOutput/").getCanonicalPath
//getCanonicalPath gives path with \\, but the code expects /.
  writerPath = writerPath.replace("\\", "/")
var backupdateFormat = CarbonProperties.getInstance().getProperty(
CarbonCommonConstants.CARBON_DATE_FORMAT, CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
var backupTimeFormat = CarbonProperties.getInstance().getProperty(
CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
override def beforeAll(): Unit = {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
sql("DROP TABLE IF EXISTS sdkOutputTable")
}
override def afterAll(): Unit = {
sql("DROP TABLE IF EXISTS sdkOutputTable")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
backupTimeFormat)
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT,
backupdateFormat)
}
/**
   * Utility function to read a whole file as a string.
   * Must not be used if the file is very large, as it may result in memory exhaustion.
   *
   * @param filePath path of the file to read
   * @return the file contents as a UTF-8 string, or an error message if reading fails
*/
def readFromFile(filePath: String): String = {
val file = new File(filePath)
val uri = file.toURI
try {
val bytes = java.nio.file.Files.readAllBytes(java.nio.file.Paths.get(uri))
new String(bytes, "UTF-8")
} catch {
case e: IOException =>
e.printStackTrace()
return "ERROR loading file " + filePath
}
}
private def writeCarbonFileFromJsonRowInput(jsonRow: String,
carbonSchema: Schema) = {
try {
      val options: util.Map[String, String] = Map("bAd_RECords_action" -> "FAIL", "quotechar" -> "\"").asJava
val writer = CarbonWriter.builder
.outputPath(writerPath).isTransactionalTable(false)
.uniqueIdentifier(System.currentTimeMillis())
.withLoadOptions(options)
.buildWriterForJsonInput(carbonSchema)
writer.write(jsonRow)
writer.close()
}
catch {
case e: Exception => {
e.printStackTrace()
Assert.fail(e.getMessage)
}
}
}
// test all primitive type
test("Read sdk writer Json output of all primitive type") {
FileUtils.deleteDirectory(new File(writerPath))
var dataPath: String = null
dataPath = resourcesPath + "/jsonFiles/data/allPrimitiveType.json"
val fields = new Array[Field](9)
fields(0) = new Field("stringField", DataTypes.STRING)
fields(1) = new Field("intField", DataTypes.INT)
fields(2) = new Field("shortField", DataTypes.SHORT)
fields(3) = new Field("longField", DataTypes.LONG)
fields(4) = new Field("doubleField", DataTypes.DOUBLE)
fields(5) = new Field("boolField", DataTypes.BOOLEAN)
fields(6) = new Field("dateField", DataTypes.DATE)
fields(7) = new Field("timeField", DataTypes.TIMESTAMP)
fields(8) = new Field("decimalField", DataTypes.createDecimalType(8, 2))
val jsonRow = readFromFile(dataPath)
writeCarbonFileFromJsonRowInput(jsonRow, new Schema(fields))
assert(new File(writerPath).exists())
sql("DROP TABLE IF EXISTS sdkOutputTable")
sql(
s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
|'$writerPath' """.stripMargin)
checkAnswer(sql("select * from sdkOutputTable"),
Seq(Row("ajantha\\"bhat\\"",
26,
26,
1234567,
23.3333,
false,
java.sql.Date.valueOf("2019-03-02"),
Timestamp.valueOf("2019-02-12 03:03:34"),
55.35)))
sql("DROP TABLE sdkOutputTable")
// drop table should not delete the files
assert(new File(writerPath).listFiles().length > 0)
FileUtils.deleteDirectory(new File(writerPath))
}
// test all primitive type with bad record
test("Read sdk writer Json output of all primitive type with Bad record") {
FileUtils.deleteDirectory(new File(writerPath))
var dataPath: String = null
dataPath = resourcesPath + "/jsonFiles/data/allPrimitiveTypeBadRecord.json"
val fields = new Array[Field](9)
fields(0) = new Field("stringField", DataTypes.STRING)
fields(1) = new Field("intField", DataTypes.INT)
fields(2) = new Field("shortField", DataTypes.SHORT)
fields(3) = new Field("longField", DataTypes.LONG)
fields(4) = new Field("doubleField", DataTypes.DOUBLE)
fields(5) = new Field("boolField", DataTypes.BOOLEAN)
fields(6) = new Field("dateField", DataTypes.DATE)
fields(7) = new Field("timeField", DataTypes.TIMESTAMP)
fields(8) = new Field("decimalField", DataTypes.createDecimalType(8, 2))
val jsonRow = readFromFile(dataPath)
var exception = intercept[java.lang.AssertionError] {
writeCarbonFileFromJsonRowInput(jsonRow, new Schema(fields))
}
assert(exception.getMessage()
.contains("Data load failed due to bad record"))
FileUtils.deleteDirectory(new File(writerPath))
}
// test array Of array Of array Of Struct
test("Read sdk writer Json output of array Of array Of array Of Struct") {
FileUtils.deleteDirectory(new File(writerPath))
var dataPath = resourcesPath + "/jsonFiles/data/arrayOfarrayOfarrayOfStruct.json"
// for testing purpose get carbonSchema from avro schema.
// Carbon schema will be passed without AVRO in the real scenarios
var schemaPath = resourcesPath + "/jsonFiles/schema/arrayOfarrayOfarrayOfStruct.avsc"
val avroSchema = new avro.Schema.Parser().parse(readFromFile(schemaPath))
val carbonSchema = AvroCarbonWriter.getCarbonSchemaFromAvroSchema(avroSchema)
val jsonRow = readFromFile(dataPath)
writeCarbonFileFromJsonRowInput(jsonRow, carbonSchema)
assert(new File(writerPath).exists())
sql("DROP TABLE IF EXISTS sdkOutputTable")
sql(
s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
|'$writerPath' """.stripMargin)
sql("select * from sdkOutputTable").show(false)
/*
* +-------+---+-----------------------------------------+
|name |age|BuildNum |
+-------+---+-----------------------------------------+
|ajantha|26 |[WrappedArray(WrappedArray([abc,city1]))]|
+-------+---+-----------------------------------------+
*
*/
sql("DROP TABLE sdkOutputTable")
// drop table should not delete the files
assert(new File(writerPath).listFiles().length > 0)
FileUtils.deleteDirectory(new File(writerPath))
}
// test array Of Struct Of Struct
test("Read sdk writer Json output of array Of Struct Of Struct") {
FileUtils.deleteDirectory(new File(writerPath))
var dataPath = resourcesPath + "/jsonFiles/data/arrayOfStructOfStruct.json"
// for testing purpose get carbonSchema from avro schema.
// Carbon schema will be passed without AVRO in the real scenarios
var schemaPath = resourcesPath + "/jsonFiles/schema/arrayOfStructOfStruct.avsc"
val avroSchema = new avro.Schema.Parser().parse(readFromFile(schemaPath))
val carbonSchema = AvroCarbonWriter.getCarbonSchemaFromAvroSchema(avroSchema)
val jsonRow = readFromFile(dataPath)
writeCarbonFileFromJsonRowInput(jsonRow, carbonSchema)
assert(new File(writerPath).exists())
sql("DROP TABLE IF EXISTS sdkOutputTable")
sql(
s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
|'$writerPath' """.stripMargin)
sql("select * from sdkOutputTable").show(false)
/*
* +----+---+-------------------+
* |name|age|doorNum |
* +----+---+-------------------+
* |bob |10 |[[abc,city1,[a,1]]]|
* +----+---+-------------------+
* */
sql("DROP TABLE sdkOutputTable")
// drop table should not delete the files
assert(new File(writerPath).listFiles().length > 0)
FileUtils.deleteDirectory(new File(writerPath))
}
// test struct of all types
test("Read sdk writer Json output of Struct of all types") {
FileUtils.deleteDirectory(new File(writerPath))
var dataPath = resourcesPath + "/jsonFiles/data/StructOfAllTypes.json"
// for testing purpose get carbonSchema from avro schema.
// Carbon schema will be passed without AVRO in the real scenarios
var schemaPath = resourcesPath + "/jsonFiles/schema/StructOfAllTypes.avsc"
val avroSchema = new avro.Schema.Parser().parse(readFromFile(schemaPath))
val carbonSchema = AvroCarbonWriter.getCarbonSchemaFromAvroSchema(avroSchema)
val jsonRow = readFromFile(dataPath)
writeCarbonFileFromJsonRowInput(jsonRow, carbonSchema)
assert(new File(writerPath).exists())
sql("DROP TABLE IF EXISTS sdkOutputTable")
sql(
s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
|'$writerPath' """.stripMargin)
assert(sql("select * from sdkOutputTable").collectAsList().toString.equals(
"[[[bob,10,12345678,123400.78,true,WrappedArray(1, 2, 3, 4, 5, 6),WrappedArray(abc, def)," +
"WrappedArray(1234567, 2345678),WrappedArray(1.0, 2.0, 33.33),WrappedArray(true, false, " +
"false, true)]]]"))
sql("DROP TABLE sdkOutputTable")
// drop table should not delete the files
assert(new File(writerPath).listFiles().length > 0)
FileUtils.deleteDirectory(new File(writerPath))
}
// test : One element as null
test("Read sdk writer Json output of primitive type with one element as null") {
FileUtils.deleteDirectory(new File(writerPath))
var dataPath: String = null
dataPath = resourcesPath + "/jsonFiles/data/PrimitiveTypeWithNull.json"
val fields = new Array[Field](2)
fields(0) = new Field("stringField", DataTypes.STRING)
fields(1) = new Field("intField", DataTypes.INT)
val jsonRow = readFromFile(dataPath)
writeCarbonFileFromJsonRowInput(jsonRow, new Schema(fields))
assert(new File(writerPath).exists())
sql("DROP TABLE IF EXISTS sdkOutputTable")
sql(
s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
|'$writerPath' """.stripMargin)
checkAnswer(sql("select * from sdkOutputTable"),
Seq(Row(null,
26)))
sql("DROP TABLE sdkOutputTable")
// drop table should not delete the files
assert(new File(writerPath).listFiles().length > 0)
FileUtils.deleteDirectory(new File(writerPath))
}
// test : Schema length is greater than array length
test("Read Json output of primitive type with Schema length is greater than array length") {
FileUtils.deleteDirectory(new File(writerPath))
var dataPath: String = null
dataPath = resourcesPath + "/jsonFiles/data/PrimitiveTypeWithNull.json"
val fields = new Array[Field](5)
fields(0) = new Field("stringField", DataTypes.STRING)
fields(1) = new Field("intField", DataTypes.INT)
fields(2) = new Field("shortField", DataTypes.SHORT)
fields(3) = new Field("longField", DataTypes.LONG)
fields(4) = new Field("doubleField", DataTypes.DOUBLE)
val jsonRow = readFromFile(dataPath)
writeCarbonFileFromJsonRowInput(jsonRow, new Schema(fields))
assert(new File(writerPath).exists())
sql("DROP TABLE IF EXISTS sdkOutputTable")
sql(
s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
|'$writerPath' """.stripMargin)
checkAnswer(sql("select * from sdkOutputTable"),
Seq(Row(null, 26, null, null, null)))
sql("DROP TABLE sdkOutputTable")
// drop table should not delete the files
assert(new File(writerPath).listFiles().length > 0)
FileUtils.deleteDirectory(new File(writerPath))
}
// test : Schema length is lesser than array length
test("Read Json output of primitive type with Schema length is lesser than array length") {
FileUtils.deleteDirectory(new File(writerPath))
var dataPath: String = null
dataPath = resourcesPath + "/jsonFiles/data/allPrimitiveType.json"
val fields = new Array[Field](2)
fields(0) = new Field("stringField", DataTypes.STRING)
fields(1) = new Field("intField", DataTypes.INT)
val jsonRow = readFromFile(dataPath)
writeCarbonFileFromJsonRowInput(jsonRow, new Schema(fields))
assert(new File(writerPath).exists())
sql("DROP TABLE IF EXISTS sdkOutputTable")
sql(
s"""CREATE EXTERNAL TABLE sdkOutputTable STORED BY 'carbondata' LOCATION
|'$writerPath' """.stripMargin)
checkAnswer(sql("select * from sdkOutputTable"),
Seq(Row("ajantha\\"bhat\\"", 26)))
sql("DROP TABLE sdkOutputTable")
// drop table should not delete the files
assert(new File(writerPath).listFiles().length > 0)
FileUtils.deleteDirectory(new File(writerPath))
}
}
| jatin9896/incubator-carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/createTable/TestNonTransactionalCarbonTableJsonWriter.scala | Scala | apache-2.0 | 14,562 |
import scala.quoted.*
object scalatest {
inline def assert(condition: => Boolean): Unit = ${ assertImpl('condition, '{""}) }
def assertImpl(cond: Expr[Boolean], clue: Expr[Any])(using Quotes): Expr[Unit] = {
import quotes.reflect.*
import util.*
def isImplicitMethodType(tp: TypeRepr): Boolean = tp match
case tp: MethodType => tp.isImplicit
case _ => false
cond.asTerm.underlyingArgument match {
case t @ Apply(sel @ Select(lhs, op), rhs :: Nil) =>
ValDef.let(Symbol.spliceOwner, lhs) { left =>
ValDef.let(Symbol.spliceOwner, rhs) { right =>
val app = left.select(sel.symbol).appliedTo(right)
ValDef.let(Symbol.spliceOwner, app) { result =>
val l = left.asExpr
val r = right.asExpr
val b = result.asExprOf[Boolean]
val code = '{ scala.Predef.assert($b) }
code.asTerm
}
}
}.asExprOf[Unit]
case Apply(f @ Apply(sel @ Select(Apply(qual, lhs :: Nil), op), rhs :: Nil), implicits)
if isImplicitMethodType(f.tpe) =>
ValDef.let(Symbol.spliceOwner, lhs) { left =>
ValDef.let(Symbol.spliceOwner, rhs) { right =>
val app = qual.appliedTo(left).select(sel.symbol).appliedTo(right)
ValDef.let(Symbol.spliceOwner, Apply(app, implicits)) { result =>
val l = left.asExpr
val r = right.asExpr
val b = result.asExprOf[Boolean]
val code = '{ scala.Predef.assert($b) }
code.asTerm
}
}
}.asExprOf[Unit]
}
}
}
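
// Hedged usage sketch (illustrative only): because `assert` is an inline macro, it has to be
// called from a separate compilation unit, e.g.
//
//   object Test {
//     def main(args: Array[String]): Unit =
//       scalatest.assert(List(1, 2, 3).length == 3)
//   }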
| dotty-staging/dotty | tests/run-macros/reflect-dsl/assert_1.scala | Scala | apache-2.0 | 1,636 |
/*
* Copyright (C) 2016 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.persistence.cassandra.snapshot
import scala.concurrent.duration._
import akka.persistence.cassandra.testkit.CassandraLauncher
import java.lang.{ Long => JLong }
import java.lang.{ Integer => JInteger }
import java.nio.ByteBuffer
import akka.persistence._
import akka.persistence.SnapshotProtocol._
import akka.persistence.cassandra.{ CassandraMetricsRegistry, CassandraLifecycle }
import akka.persistence.snapshot.SnapshotStoreSpec
import akka.testkit.TestProbe
import com.datastax.driver.core._
import com.typesafe.config.ConfigFactory
import scala.concurrent.Await
object CassandraSnapshotStoreConfiguration {
lazy val config = ConfigFactory.parseString(
s"""
|cassandra-journal.keyspace=CassandraSnapshotStoreSpec
|cassandra-snapshot-store.keyspace=CassandraSnapshotStoreSpecSnapshot
|cassandra-snapshot-store.max-metadata-result-size = 2
""".stripMargin
).withFallback(CassandraLifecycle.config)
lazy val protocolV3Config = ConfigFactory.parseString(
s"""
cassandra-journal.protocol-version = 3
cassandra-journal.keyspace=CassandraSnapshotStoreProtocolV3Spec
cassandra-snapshot-store.keyspace=CassandraSnapshotStoreProtocolV3Spec
"""
).withFallback(config)
}
class CassandraSnapshotStoreSpec extends SnapshotStoreSpec(CassandraSnapshotStoreConfiguration.config) with CassandraLifecycle {
val storeConfig = new CassandraSnapshotStoreConfig(system, system.settings.config.getConfig("cassandra-snapshot-store"))
val storeStatements = new CassandraStatements { def config = storeConfig }
var session: Session = _
import storeConfig._
import storeStatements._
override def systemName: String = "CassandraSnapshotStoreSpec"
override def beforeAll(): Unit = {
super.beforeAll()
import system.dispatcher
session = Await.result(storeConfig.sessionProvider.connect(), 5.seconds)
}
override def afterAll(): Unit = {
session.close()
session.getCluster.close()
super.afterAll()
}
// ByteArraySerializer
val serId: JInteger = 4
"A Cassandra snapshot store" must {
"insert Cassandra metrics to Cassandra Metrics Registry" in {
val registry = CassandraMetricsRegistry(system).getRegistry
val snapshots = registry.getNames.toArray()
snapshots.length should be > 0
}
"make up to 3 snapshot loading attempts" in {
val probe = TestProbe()
// load most recent snapshot
snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest, Long.MaxValue), probe.ref)
// get most recent snapshot
val expected = probe.expectMsgPF() { case LoadSnapshotResult(Some(snapshot), _) => snapshot }
// write two more snapshots that cannot be de-serialized.
session.execute(writeSnapshot, pid, 17L: JLong, 123L: JLong, serId, "", ByteBuffer.wrap("fail-1".getBytes("UTF-8")), null)
session.execute(writeSnapshot, pid, 18L: JLong, 124L: JLong, serId, "", ByteBuffer.wrap("fail-2".getBytes("UTF-8")), null)
// load most recent snapshot, first two attempts will fail ...
snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest, Long.MaxValue), probe.ref)
// third attempt succeeds
probe.expectMsg(LoadSnapshotResult(Some(expected), Long.MaxValue))
}
"give up after 3 snapshot loading attempts" in {
val probe = TestProbe()
// load most recent snapshot
snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest, Long.MaxValue), probe.ref)
// wait for most recent snapshot
probe.expectMsgPF() { case LoadSnapshotResult(Some(snapshot), _) => snapshot }
// write three more snapshots that cannot be de-serialized.
session.execute(writeSnapshot, pid, 17L: JLong, 123L: JLong, serId, "", ByteBuffer.wrap("fail-1".getBytes("UTF-8")), null)
session.execute(writeSnapshot, pid, 18L: JLong, 124L: JLong, serId, "", ByteBuffer.wrap("fail-2".getBytes("UTF-8")), null)
session.execute(writeSnapshot, pid, 19L: JLong, 125L: JLong, serId, "", ByteBuffer.wrap("fail-3".getBytes("UTF-8")), null)
// load most recent snapshot, first three attempts will fail ...
snapshotStore.tell(LoadSnapshot(pid, SnapshotSelectionCriteria.Latest, Long.MaxValue), probe.ref)
// no 4th attempt has been made
probe.expectMsgType[LoadSnapshotFailed]
}
}
}
/**
* Cassandra 2.2.0 or later should support protocol version V4, but as long as we
* support 2.1.6+ we do some compatibility testing with V3.
*/
class CassandraSnapshotStoreProtocolV3Spec extends SnapshotStoreSpec(CassandraSnapshotStoreConfiguration.protocolV3Config)
with CassandraLifecycle {
override def systemName: String = "CassandraSnapshotStoreProtocolV3Spec"
}
| kpbochenek/akka-persistence-cassandra | src/test/scala/akka/persistence/cassandra/snapshot/CassandraSnapshotStoreSpec.scala | Scala | apache-2.0 | 4,802 |
package shield.routing
import org.specs2.mutable.Specification
import shield.config.{HttpServiceLocation, ServiceLocation}
import spray.http.Uri
import scala.util.Try
class UpstreamServiceLocationSpec extends Specification {
"UpstreamServiceLocation" should {
"accept valid https urls" in {
val svc = HttpServiceLocation(Uri("https://example.edu"))
svc.baseUrl.scheme must be equalTo "https"
svc.baseUrl.authority.host.address must be equalTo "example.edu"
}
"accept valid http urls" in {
val svc = HttpServiceLocation(Uri("http://example.edu"))
svc.baseUrl.scheme must be equalTo "http"
svc.baseUrl.authority.host.address must be equalTo "example.edu"
}
"use the correct port" in {
val default_http = HttpServiceLocation(Uri("http://example.edu"))
default_http.baseUrl.authority.port must be equalTo 0
val custom_http = HttpServiceLocation(Uri("http://example.edu:5001"))
custom_http.baseUrl.authority.port must be equalTo 5001
val default_https = HttpServiceLocation(Uri("https://example.edu"))
default_https.baseUrl.authority.port must be equalTo 0
val custom_https = HttpServiceLocation(Uri("https://example.edu:8443"))
custom_https.baseUrl.authority.port must be equalTo 8443
}
"reject unrecognized schemes" in {
val ftp = Try { HttpServiceLocation(Uri("ftp://example.edu")) }
ftp.isSuccess must be equalTo false
val mailto = Try { HttpServiceLocation(Uri("mailto://example.edu")) }
mailto.isSuccess must be equalTo false
}
"ignore case in urls" in {
val svc = HttpServiceLocation(Uri("HTTPs://EXamPLE.edu"))
svc.baseUrl.authority.host.address must be equalTo "example.edu"
svc.baseUrl.scheme must be equalTo "https"
}
"reject urls with a path" in {
val empty = Try { HttpServiceLocation(Uri("http://example.edu/")) }
empty.isSuccess must be equalTo false
val nonempty = Try { HttpServiceLocation(Uri("http://example.edu/foobar")) }
nonempty.isSuccess must be equalTo false
}
"reject urls with a fragment" in {
val empty = Try { HttpServiceLocation(Uri("http://example.edu#")) }
empty.isSuccess must be equalTo false
val nonempty = Try { HttpServiceLocation(Uri("http://example.edu#foobar")) }
nonempty.isSuccess must be equalTo false
}
"reject urls with a query" in {
val empty = Try { HttpServiceLocation(Uri("http://example.edu?")) }
empty.isSuccess must be equalTo false
val nonempty = Try { HttpServiceLocation(Uri("http://example.edu?foo=bar")) }
nonempty.isSuccess must be equalTo false
}
"reject urls with a auth information" in {
val empty = Try { HttpServiceLocation(Uri("http://:@example.edu")) }
empty.isSuccess must be equalTo false
val nonempty = Try { HttpServiceLocation(Uri("http://foo:[email protected]")) }
nonempty.isSuccess must be equalTo false
}
"reject relative urls" in {
val relative = Try { HttpServiceLocation(Uri("/foobar")) }
relative.isSuccess must be equalTo false
}
"reject empty urls" in {
val relative = Try { HttpServiceLocation(Uri("")) }
relative.isSuccess must be equalTo false
}
}
}
| RetailMeNot/shield | src/test/scala/shield/routing/UpstreamServiceLocationSpec.scala | Scala | mit | 3,285 |
class Ref[T]
abstract class Outer {type T}
trait D {
val x: Ref[(_ <: Outer with Singleton)#T]
val y: Ref[x_type # T] forSome {type x_type <: Outer with Singleton} = x
} | ilinum/intellij-scala | testdata/resolve/testAllResolve/complex/Existential.scala | Scala | apache-2.0 | 173 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.message
import java.io.{InputStream, ByteArrayInputStream, ByteArrayOutputStream}
import java.nio.ByteBuffer
import java.util.Random
import junit.framework.Assert._
import org.junit.Test
import org.scalatest.junit.JUnitSuite
class MessageWriterTest extends JUnitSuite {
private val rnd = new Random()
private def mkRandomArray(size: Int): Array[Byte] = {
(0 until size).map(_ => rnd.nextInt(10).toByte).toArray
}
private def mkMessageWithWriter(key: Array[Byte] = null, bytes: Array[Byte], codec: CompressionCodec): Message = {
val writer = new MessageWriter(100)
writer.write(key = key, codec = codec) { output =>
val out = if (codec == NoCompressionCodec) output else CompressionFactory(codec, output)
try {
val p = rnd.nextInt(bytes.length)
out.write(bytes, 0, p)
out.write(bytes, p, bytes.length - p)
} finally {
out.close()
}
}
val bb = ByteBuffer.allocate(writer.size)
writer.writeTo(bb)
bb.rewind()
new Message(bb)
}
private def compress(bytes: Array[Byte], codec: CompressionCodec): Array[Byte] = {
val baos = new ByteArrayOutputStream()
val out = CompressionFactory(codec, baos)
out.write(bytes)
out.close()
baos.toByteArray
}
private def decompress(compressed: Array[Byte], codec: CompressionCodec): Array[Byte] = {
toArray(CompressionFactory(codec, new ByteArrayInputStream(compressed)))
}
private def toArray(in: InputStream): Array[Byte] = {
val out = new ByteArrayOutputStream()
val buf = new Array[Byte](100)
var amount = in.read(buf)
while (amount >= 0) {
out.write(buf, 0, amount)
amount = in.read(buf)
}
out.toByteArray
}
private def toArray(bb: ByteBuffer): Array[Byte] = {
val arr = new Array[Byte](bb.limit())
bb.get(arr)
bb.rewind()
arr
}
@Test
def testBufferingOutputStream(): Unit = {
val out = new BufferingOutputStream(50)
out.write(0)
out.write(1)
out.write(2)
val r = out.reserve(100)
out.write((103 until 200).map(_.toByte).toArray)
r.write((3 until 103).map(_.toByte).toArray)
val buf = ByteBuffer.allocate(out.size)
out.writeTo(buf)
buf.rewind()
assertEquals((0 until 200).map(_.toByte), buf.array.toSeq)
}
@Test
def testWithNoCompressionAttribute(): Unit = {
val bytes = mkRandomArray(4096)
val actual = mkMessageWithWriter(bytes = bytes, codec = NoCompressionCodec)
val expected = new Message(bytes, NoCompressionCodec)
assertEquals(expected.buffer, actual.buffer)
}
@Test
def testWithCompressionAttribute(): Unit = {
val bytes = mkRandomArray(4096)
val actual = mkMessageWithWriter(bytes = bytes, codec = SnappyCompressionCodec)
val expected = new Message(compress(bytes, SnappyCompressionCodec), SnappyCompressionCodec)
assertEquals(
decompress(toArray(expected.payload), SnappyCompressionCodec).toSeq,
decompress(toArray(actual.payload), SnappyCompressionCodec).toSeq
)
}
@Test
def testWithKey(): Unit = {
val key = mkRandomArray(123)
val bytes = mkRandomArray(4096)
val actual = mkMessageWithWriter(bytes = bytes, key = key, codec = NoCompressionCodec)
val expected = new Message(bytes = bytes, key = key, codec = NoCompressionCodec)
assertEquals(expected.buffer, actual.buffer)
}
}
| WillCh/cs286A | dataMover/kafka/core/src/test/scala/unit/kafka/message/MessageWriterTest.scala | Scala | bsd-2-clause | 4,181 |
package de.choffmeister.secpwd
import org.specs2.mutable._
import org.specs2.runner.JUnitRunner
import org.junit.runner.RunWith
import java.io.File
import java.util.UUID
import de.choffmeister.secpwd.utils.RichFile._
import de.choffmeister.secpwd.utils.SshConnectionInfo
@RunWith(classOf[JUnitRunner])
class ConfigSpec extends Specification {
def tmp = new File(System.getProperty("java.io.tmpdir"), UUID.randomUUID.toString)
"load not existing" in {
val dir = tmp
dir.mkdirs()
val confFile = new File(dir, "config")
Config.load(dir) === Config()
}
"load empty" in {
val dir = tmp
dir.mkdirs()
val confFile = new File(dir, "config")
confFile.text = ""
Config.load(dir) === Config()
}
"load non empty" in {
val dir = tmp
dir.mkdirs()
val confFile = new File(dir, "config")
confFile.text = """
host = test-host
username = test-username"""
Config.load(dir) === Config(
Some(SshConnectionInfo(
host = "test-host",
userName = "test-username"
)),
None
)
confFile.text = """
host = test-host
username = test-username
password = test-password
keyfile = /tmp/keyfile
remote_dir = /tmp/remote_dir
port = 22222"""
Config.load(dir) === Config(
Some(SshConnectionInfo(
host = "test-host",
userName = "test-username",
password = Some("test-password"),
keyFile = Some(new File("/tmp/keyfile")),
keyFilePass = None,
port = 22222
)),
Some("/tmp/remote_dir")
)
confFile.text = """
host = test-host
username = test-username
keyfile_pass = test-keyfile-pass"""
Config.load(dir).syncConnInfo.get.keyFilePass.get.toList === "test-keyfile-pass".getBytes("UTF-8").toList
}
}
| choffmeister/secpwd | src/test/scala/de/choffmeister/secpwd/ConfigSpec.scala | Scala | apache-2.0 | 1,802 |
// Firebase Rules Generator
// Bloom Technologies Inc. Copyright 2017
//
// Authors: Raphael Javaux <[email protected]>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package com.bloomlife.fbrules.types
import com.bloomlife.fbrules.Rules.Generator
import com.bloomlife.fbrules.ruleexpr.{NewData}
import com.bloomlife.fbrules.ruleexpr.Implicits._
object FbInteger {
def apply(min: Option[Int] = None, max: Option[Int] = None): FbNode = {
def toDouble(optInt: Option[Int]) = optInt.map(_.toDouble)
FbNumber(toDouble(min), toDouble(max)).
validateIf((NewData.asNumber % 1) === 0.0)
}
}
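
// Hedged example (not part of the original library): an integer field constrained to a range.
// The bounds are assumptions chosen purely for illustration.
object FbIntegerExample {
  val age: FbNode = FbInteger(min = Some(0), max = Some(120))
}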
| RaphaelJ/firebase-rules-generator | src/main/scala/types/FbInteger.scala | Scala | gpl-3.0 | 1,210 |
package com.twitter.scalding
import org.scalatest.WordSpec
import com.twitter.scalding.typed.CumulativeSum._
class AddRankingWithCumulativeSum(args: Args) extends Job(args) {
TypedPipe.from(TypedTsv[(String, Double)]("input1"))
.map {
case (gender, height) =>
(gender, (height, 1L))
}
.cumulativeSum
.map {
case (gender, (height, rank)) =>
(gender, height, rank)
}
.write(TypedTsv("result1"))
}
class AddRankingWithPartitionedCumulativeSum(args: Args) extends Job(args) {
TypedPipe.from(TypedTsv[(String, Double)]("input1"))
.map {
case (gender, height) =>
(gender, (height, 1L))
}
.cumulativeSum { h => (h / 100).floor.toLong }
.map {
case (gender, (height, rank)) =>
(gender, height, rank)
}
.write(TypedTsv("result1"))
}
class CumulativeSumTest1 extends WordSpec {
// --- A simple ranking job
val sampleInput1 = List(
("male", "165.2"),
("female", "172.2"),
("male", "184.1"),
("male", "125.4"),
("female", "128.6"),
("male", "265.2"),
("female", "272.2"),
("male", "284.1"),
("male", "225.4"),
("female", "228.6"))
  // Within each group the heights are sorted ascending and a cumulative rank is added,
  // so rank 1 is the shortest person in the group
val expectedOutput1 = Set(
("male", 184.1, 3),
("male", 165.2, 2),
("male", 125.4, 1),
("female", 172.2, 2),
("female", 128.6, 1),
("male", 284.1, 6),
("male", 265.2, 5),
("male", 225.4, 4),
("female", 272.2, 4),
("female", 228.6, 3))
"A simple ranking cumulative sum job" should {
JobTest("com.twitter.scalding.AddRankingWithCumulativeSum")
.source(TypedTsv[(String, Double)]("input1"), sampleInput1)
.sink[(String, Double, Long)](TypedTsv[(String, Double, Long)]("result1")) { outBuf1 =>
"produce correct number of records when filtering out null values" in {
assert(outBuf1.size === 10)
}
"create correct ranking per group, 1st being the heighest person of that group" in {
assert(outBuf1.toSet === expectedOutput1)
}
}
.run
.finish
}
"A partitioned ranking cumulative sum job" should {
JobTest("com.twitter.scalding.AddRankingWithPartitionedCumulativeSum")
.source(TypedTsv[(String, Double)]("input1"), sampleInput1)
.sink[(String, Double, Long)](TypedTsv[(String, Double, Long)]("result1")) { outBuf1 =>
"produce correct number of records when filtering out null values" in {
assert(outBuf1.size === 10)
}
"create correct ranking per group, 1st being the heighest person of that group" in {
assert(outBuf1.toSet === expectedOutput1)
}
}
.run
.finish
}
}
| nvoron23/scalding | scalding-core/src/test/scala/com/twitter/scalding/CumulitiveSumTest.scala | Scala | apache-2.0 | 2,729 |
package net.lshift.diffa.schema.cleaner
import java.sql.SQLException
import net.lshift.diffa.schema.hibernate.SessionHelper.sessionFactoryToSessionHelper
import net.lshift.diffa.schema.environment.DatabaseEnvironment
/**
* Implements SchemaCleaner for MySQL databases.
*/
object MySqlSchemaCleaner extends SchemaCleaner {
override def clean(sysEnvironment: DatabaseEnvironment, appEnvironment: DatabaseEnvironment) {
val configuration = sysEnvironment.getHibernateConfigurationWithoutMappingResources
val sessionFactory = configuration.buildSessionFactory
val dropDbStatement = """drop schema %s""".format(appEnvironment.dbName)
val createDbStatement = """create schema %s""".format(appEnvironment.dbName)
val statements = (dropDbStatement :: createDbStatement :: Nil)
sessionFactory.executeOnSession(connection => {
val stmt = connection.createStatement
statements foreach {
stmtText => {
try {
stmt.execute(stmtText)
} catch {
case ex: SQLException =>
println("Failed to execute prepared statement: %s".format(stmtText))
}
}
}
stmt.close
})
}
}
| lshift/diffa | schema/src/main/scala/net/lshift/diffa/schema/cleaner/MySqlSchemaCleaner.scala | Scala | apache-2.0 | 1,194 |
package com.nabijaczleweli.fancymagicks.render.entity
import com.nabijaczleweli.fancymagicks.entity.EntityBugs
import com.nabijaczleweli.fancymagicks.util.ResourceLocationFancyMagicks
import cpw.mods.fml.relauncher.{SideOnly, Side}
import net.minecraft.client.renderer.entity.RenderLiving
import net.minecraft.entity.{EntityLivingBase, Entity}
import scala.collection.immutable.HashMap
@SideOnly(Side.CLIENT)
object RenderBugs extends RenderLiving(new ModelBugs, 0) {
private val entityTexture = ResourceLocationFancyMagicks("textures/entity/bug.png")
private var entityToModel: Map[Int, ModelBugs] = new HashMap
setRenderPassModel(new ModelBugs)
override def getEntityTexture(entity: Entity) =
entityTexture
override def doRender(entity: Entity, d0: Double, d1: Double, d2: Double, f0: Float, f1: Float) {
entityToModel get entity.getEntityId match {
case Some(model) =>
setRenderPassModel(model)
case None =>
entityToModel += entity.getEntityId -> new ModelBugs
}
super.doRender(entity.asInstanceOf[EntityBugs], d0, d1, d2, f0, f1)
}
override def shouldRenderPass(entity: EntityLivingBase, pass: Int, renderPartialTicks: Float) =
pass match {
case 0 =>
1
case _ =>
-1
}
}
| nabijaczleweli/Magicks | src/main/scala/com/nabijaczleweli/fancymagicks/render/entity/RenderBugs.scala | Scala | mit | 1,229 |
/* Generated File */
package models.address
import com.kyleu.projectile.models.result.data.{DataField, DataFieldModel, DataSummary}
import com.kyleu.projectile.util.DateUtils
import com.kyleu.projectile.util.JsonSerializers._
import java.time.ZonedDateTime
object AddressRow {
implicit val jsonEncoder: Encoder[AddressRow] = (r: AddressRow) => io.circe.Json.obj(
("addressId", r.addressId.asJson),
("address", r.address.asJson),
("address2", r.address2.asJson),
("district", r.district.asJson),
("cityId", r.cityId.asJson),
("postalCode", r.postalCode.asJson),
("phone", r.phone.asJson),
("lastUpdate", r.lastUpdate.asJson)
)
implicit val jsonDecoder: Decoder[AddressRow] = (c: io.circe.HCursor) => for {
addressId <- c.downField("addressId").as[Int]
address <- c.downField("address").as[String]
address2 <- c.downField("address2").as[Option[String]]
district <- c.downField("district").as[String]
cityId <- c.downField("cityId").as[Int]
postalCode <- c.downField("postalCode").as[Option[String]]
phone <- c.downField("phone").as[String]
lastUpdate <- c.downField("lastUpdate").as[ZonedDateTime]
} yield AddressRow(addressId, address, address2, district, cityId, postalCode, phone, lastUpdate)
def empty(
addressId: Int = 0,
address: String = "",
address2: Option[String] = None,
district: String = "",
cityId: Int = 0,
postalCode: Option[String] = None,
phone: String = "",
lastUpdate: ZonedDateTime = DateUtils.nowZoned
) = {
AddressRow(addressId, address, address2, district, cityId, postalCode, phone, lastUpdate)
}
}
final case class AddressRow(
addressId: Int,
address: String,
address2: Option[String],
district: String,
cityId: Int,
postalCode: Option[String],
phone: String,
lastUpdate: ZonedDateTime
) extends DataFieldModel {
override def toDataFields = Seq(
DataField("addressId", Some(addressId.toString)),
DataField("address", Some(address)),
DataField("address2", address2),
DataField("district", Some(district)),
DataField("cityId", Some(cityId.toString)),
DataField("postalCode", postalCode),
DataField("phone", Some(phone)),
DataField("lastUpdate", Some(lastUpdate.toString))
)
def toSummary = DataSummary(model = "addressRow", pk = addressId.toString, entries = Map(
"Address Id" -> Some(addressId.toString),
"Address" -> Some(address),
"Address2" -> address2,
"District" -> Some(district),
"City Id" -> Some(cityId.toString),
"Postal Code" -> postalCode,
"Phone" -> Some(phone),
"Last Update" -> Some(lastUpdate.toString)
))
}
| KyleU/boilerplay | app/models/address/AddressRow.scala | Scala | cc0-1.0 | 2,671 |
package com.levent.hive.udfs
import org.apache.hadoop.hive.ql.exec.Description;
import org.apache.hadoop.hive.ql.exec.UDF
import java.net.URLDecoder
@Description(
name = "lurlDecode",
value = "_FUNC_(string) URLDecode - decodes application/x-www-form-urlencoded type into string (UTF-8)",
extended = "SELECT lurlDecode(string) FROM test LIMIT 1;")
class lurlDecode extends UDF {
  def evaluate(url: String): String = {
    if (url == null)
      return null
    URLDecoder.decode(url, "UTF-8")
  }
}
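
// Hedged example (not part of the original UDF): exercising evaluate() directly on the JVM,
// outside of Hive. The sample input is an assumption chosen purely for illustration.
object lurlDecodeExample {
  def main(args: Array[String]): Unit = {
    val udf = new lurlDecode
    // "a%20b%26c" decodes to "a b&c"
    println(udf.evaluate("a%20b%26c"))
    // a null input is passed through unchanged
    println(udf.evaluate(null))
  }
}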
| lserinol/Hive-udfs | src/main/scala/com/levent/hive/udfs/lurldecode.scala | Scala | gpl-3.0 | 569 |
/*
* Copyright 2016 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.testing
import java.io.OutputStream
object NullOutStream extends OutputStream {
override def write(b: Int): Unit = {
// do nothing
}
}
| rossabaker/http4s | testing/shared/src/test/scala/org/http4s/testing/NullOutStream.scala | Scala | apache-2.0 | 759 |
package fr.univnantes.vroom.control.update
import fr.univnantes.vroom.control.Command
import fr.univnantes.vroom.core.Systeme
import fr.univnantes.vroom.core.dto.materiel.MaterielMobileDTO
import fr.univnantes.vroom.datacontroller.Mediator
/**
 * Command that updates an available mobile equipment item
 * @param system Object used to run the application
 * @param mediator Object used for data persistence
 * @param old_materiel the old DTO
 * @param new_materiel the new DTO
*/
class UpdateMaterielMobileDisponibleCommande(
system: Systeme,
mediator: Mediator,
old_materiel: MaterielMobileDTO,
new_materiel: MaterielMobileDTO) extends Command[Unit](system) {
/**
   * Executes the command against the system
*/
override def execute(): Unit = {
system.popMaterielMobileDisponible(old_materiel)
system.addMaterielMobileDisponible(new_materiel)
mediator.update(new_materiel)
}
}
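
// Hedged usage sketch (illustrative only): the surrounding control layer is assumed to supply
// the Systeme, the Mediator and the two DTO values.
//
//   val cmd = new UpdateMaterielMobileDisponibleCommande(system, mediator, oldDto, newDto)
//   cmd.execute() // replaces the old DTO in the available-equipment set and persists the new one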
| Callidon/v-room | src/main/scala/fr/univnantes/vroom/control/update/UpdateMaterielMobileDisponibleCommande.scala | Scala | mit | 1,127 |
// This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
package ducttape.util
import ducttape.syntax.AbstractSyntaxTree._
import ducttape.syntax.GrammarParser._
import org.scalatest.Assertions
import scala.util.parsing.combinator.Parsers
object Tests {
/** Verify that a test case succeeded. */
def verify(testCase:Assertions, result:ParseResult[Any]) : Unit = {
result match {
case Success(res, _) => ()
      case Failure(msg, next) => testCase.fail("At position " + next.pos.toString + ": " + msg)
      case Error(msg, next)   => testCase.fail("At position " + next.pos.toString + ": " + msg)
}
}
/** Verify that a test case failed in a way that the parser will not attempt to backtrack. */
def verifyError(testCase:Assertions, result:ParseResult[Any]) : Unit = {
result match {
case Success(res, next) => testCase.fail("At position " + next.pos.toString + ": " + res.toString)
      case Failure(msg, next) => testCase.fail("At position " + next.pos.toString + ": Encountered Failure instead of Error: " + msg)
case Error(msg, _) => ()
}
}
/** Verify that a test case failed in a way that the parser will attempt to backtrack. */
def verifyFailure(testCase:Assertions, result:ParseResult[Any]) : Unit = {
result match {
case Success(res, next) => testCase.fail("At position " + next.pos.toString + ": " + res.toString)
case Failure(msg, _) => ()
      case Error(msg, next) => testCase.fail("At position " + next.pos.toString + ": Encountered Error instead of Failure: " + msg)
}
}
}
| jhclark/ducttape | src/test/scala/ducttape/util/Tests.scala | Scala | mpl-2.0 | 1,751 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.util
import scala.annotation.varargs
import scala.reflect.ClassTag
import org.apache.spark.{SparkContext, SparkException}
import org.apache.spark.annotation.Since
import org.apache.spark.internal.Logging
import org.apache.spark.ml.linalg.{MatrixUDT => MLMatrixUDT, VectorUDT => MLVectorUDT}
import org.apache.spark.mllib.linalg._
import org.apache.spark.mllib.linalg.BLAS.dot
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.{PartitionwiseSampledRDD, RDD}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.datasources.text.TextFileFormat
import org.apache.spark.sql.functions._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.random.BernoulliCellSampler
/**
* Helper methods to load, save and pre-process data used in MLLib.
*/
@Since("0.8.0")
object MLUtils extends Logging {
private[mllib] lazy val EPSILON = {
var eps = 1.0
while ((1.0 + (eps / 2.0)) != 1.0) {
eps /= 2.0
}
eps
}
/**
* Loads labeled data in the LIBSVM format into an RDD[LabeledPoint].
* The LIBSVM format is a text-based format used by LIBSVM and LIBLINEAR.
* Each line represents a labeled sparse feature vector using the following format:
* {{{label index1:value1 index2:value2 ...}}}
* where the indices are one-based and in ascending order.
* This method parses each line into a [[org.apache.spark.mllib.regression.LabeledPoint]],
* where the feature indices are converted to zero-based.
* @param sc Spark context
* @param path file or directory path in any Hadoop-supported file system URI
* @param numFeatures number of features, which will be determined from the input data if a
* nonpositive value is given. This is useful when the dataset is already split
* into multiple files and you want to load them separately, because some
* features may not present in certain files, which leads to inconsistent
* feature dimensions.
* @param minPartitions min number of partitions
* @return labeled data stored as an RDD[LabeledPoint]
*/
@Since("1.0.0")
def loadLibSVMFile(
sc: SparkContext,
path: String,
numFeatures: Int,
minPartitions: Int): RDD[LabeledPoint] = {
val parsed = parseLibSVMFile(sc, path, minPartitions)
// Determine number of features.
val d = if (numFeatures > 0) {
numFeatures
} else {
parsed.persist(StorageLevel.MEMORY_ONLY)
computeNumFeatures(parsed)
}
parsed.map { case (label, indices, values) =>
LabeledPoint(label, Vectors.sparse(d, indices, values))
}
}
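  // Hedged usage sketch (illustrative only; the path and feature count are assumptions):
  //
  //   val points = MLUtils.loadLibSVMFile(sc, "data/sample_libsvm_data.txt",
  //     numFeatures = 692, minPartitions = 4)
  //   points.take(1).foreach(println)   // LabeledPoint(label, sparse feature vector)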
private[spark] def computeNumFeatures(rdd: RDD[(Double, Array[Int], Array[Double])]): Int = {
rdd.map { case (label, indices, values) =>
indices.lastOption.getOrElse(0)
}.reduce(math.max) + 1
}
private[spark] def parseLibSVMFile(
sc: SparkContext,
path: String,
minPartitions: Int): RDD[(Double, Array[Int], Array[Double])] = {
sc.textFile(path, minPartitions)
.map(_.trim)
.filter(line => !(line.isEmpty || line.startsWith("#")))
.map(parseLibSVMRecord)
}
private[spark] def parseLibSVMFile(
sparkSession: SparkSession, paths: Seq[String]): RDD[(Double, Array[Int], Array[Double])] = {
val lines = sparkSession.baseRelationToDataFrame(
DataSource.apply(
sparkSession,
paths = paths,
className = classOf[TextFileFormat].getName
).resolveRelation(checkFilesExist = false))
.select("value")
import lines.sqlContext.implicits._
lines.select(trim($"value").as("line"))
.filter(not((length($"line") === 0).or($"line".startsWith("#"))))
.as[String]
.rdd
.map(MLUtils.parseLibSVMRecord)
}
private[spark] def parseLibSVMRecord(line: String): (Double, Array[Int], Array[Double]) = {
val items = line.split(' ')
val label = items.head.toDouble
val (indices, values) = items.tail.filter(_.nonEmpty).map { item =>
val indexAndValue = item.split(':')
val index = indexAndValue(0).toInt - 1 // Convert 1-based indices to 0-based.
val value = indexAndValue(1).toDouble
(index, value)
}.unzip
// check if indices are one-based and in ascending order
var previous = -1
var i = 0
val indicesLength = indices.length
while (i < indicesLength) {
val current = indices(i)
require(current > previous, s"indices should be one-based and in ascending order;"
+ s""" found current=$current, previous=$previous; line="$line"""")
previous = current
i += 1
}
(label, indices.toArray, values.toArray)
}
/**
* Loads labeled data in the LIBSVM format into an RDD[LabeledPoint], with the default number of
* partitions.
*/
@Since("1.0.0")
def loadLibSVMFile(
sc: SparkContext,
path: String,
numFeatures: Int): RDD[LabeledPoint] =
loadLibSVMFile(sc, path, numFeatures, sc.defaultMinPartitions)
/**
* Loads binary labeled data in the LIBSVM format into an RDD[LabeledPoint], with number of
* features determined automatically and the default number of partitions.
*/
@Since("1.0.0")
def loadLibSVMFile(sc: SparkContext, path: String): RDD[LabeledPoint] =
loadLibSVMFile(sc, path, -1)
/**
* Save labeled data in LIBSVM format.
* @param data an RDD of LabeledPoint to be saved
* @param dir directory to save the data
* @see `org.apache.spark.mllib.util.MLUtils.loadLibSVMFile`
*/
@Since("1.0.0")
def saveAsLibSVMFile(data: RDD[LabeledPoint], dir: String): Unit = {
// TODO: allow to specify label precision and feature precision.
val dataStr = data.map { case LabeledPoint(label, features) =>
val sb = new StringBuilder(label.toString)
features.foreachActive { case (i, v) =>
sb += ' '
sb ++= s"${i + 1}:$v"
}
sb.mkString
}
dataStr.saveAsTextFile(dir)
}
/**
* Loads vectors saved using `RDD[Vector].saveAsTextFile`.
* @param sc Spark context
* @param path file or directory path in any Hadoop-supported file system URI
* @param minPartitions min number of partitions
* @return vectors stored as an RDD[Vector]
*/
@Since("1.1.0")
def loadVectors(sc: SparkContext, path: String, minPartitions: Int): RDD[Vector] =
sc.textFile(path, minPartitions).map(Vectors.parse)
/**
* Loads vectors saved using `RDD[Vector].saveAsTextFile` with the default number of partitions.
*/
@Since("1.1.0")
def loadVectors(sc: SparkContext, path: String): RDD[Vector] =
sc.textFile(path, sc.defaultMinPartitions).map(Vectors.parse)
/**
* Loads labeled points saved using `RDD[LabeledPoint].saveAsTextFile`.
* @param sc Spark context
* @param path file or directory path in any Hadoop-supported file system URI
* @param minPartitions min number of partitions
* @return labeled points stored as an RDD[LabeledPoint]
*/
@Since("1.1.0")
def loadLabeledPoints(sc: SparkContext, path: String, minPartitions: Int): RDD[LabeledPoint] =
sc.textFile(path, minPartitions).map(LabeledPoint.parse)
/**
* Loads labeled points saved using `RDD[LabeledPoint].saveAsTextFile` with the default number of
* partitions.
*/
@Since("1.1.0")
def loadLabeledPoints(sc: SparkContext, dir: String): RDD[LabeledPoint] =
loadLabeledPoints(sc, dir, sc.defaultMinPartitions)
/**
   * Returns a k-element array of (training, validation) RDD pairs, where k = numFolds.
   * For each pair, the validation RDD contains a unique 1/k-th of the data and the training
   * RDD contains the complement.
*/
@Since("1.0.0")
def kFold[T: ClassTag](rdd: RDD[T], numFolds: Int, seed: Int): Array[(RDD[T], RDD[T])] = {
kFold(rdd, numFolds, seed.toLong)
}
/**
* Version of `kFold()` taking a Long seed.
*/
@Since("2.0.0")
def kFold[T: ClassTag](rdd: RDD[T], numFolds: Int, seed: Long): Array[(RDD[T], RDD[T])] = {
val numFoldsF = numFolds.toFloat
(1 to numFolds).map { fold =>
val sampler = new BernoulliCellSampler[T]((fold - 1) / numFoldsF, fold / numFoldsF,
complement = false)
val validation = new PartitionwiseSampledRDD(rdd, sampler, true, seed)
val training = new PartitionwiseSampledRDD(rdd, sampler.cloneComplement(), true, seed)
(training, validation)
}.toArray
}
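  // Hedged usage sketch (illustrative only): 3-fold splits over an RDD of labeled points.
  //
  //   val folds = MLUtils.kFold(points, numFolds = 3, seed = 11L)
  //   folds.foreach { case (training, validation) =>
  //     // train on `training`, evaluate on `validation`
  //   }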
/**
* Version of `kFold()` taking a fold column name.
*/
@Since("3.1.0")
def kFold(df: DataFrame, numFolds: Int, foldColName: String): Array[(RDD[Row], RDD[Row])] = {
val foldCol = df.col(foldColName)
val checker = udf { foldNum: Int =>
// Valid fold number is in range [0, numFolds).
if (foldNum < 0 || foldNum >= numFolds) {
throw new SparkException(s"Fold number must be in range [0, $numFolds), but got $foldNum.")
}
true
}
(0 until numFolds).map { fold =>
val training = df
.filter(checker(foldCol) && foldCol =!= fold)
.drop(foldColName).rdd
val validation = df
.filter(checker(foldCol) && foldCol === fold)
.drop(foldColName).rdd
if (training.isEmpty()) {
throw new SparkException(s"The training data at fold $fold is empty.")
}
if (validation.isEmpty()) {
throw new SparkException(s"The validation data at fold $fold is empty.")
}
(training, validation)
}.toArray
}
/**
* Returns a new vector with `1.0` (bias) appended to the input vector.
*/
@Since("1.0.0")
def appendBias(vector: Vector): Vector = {
vector match {
case dv: DenseVector =>
val inputValues = dv.values
val inputLength = inputValues.length
val outputValues = Array.ofDim[Double](inputLength + 1)
System.arraycopy(inputValues, 0, outputValues, 0, inputLength)
outputValues(inputLength) = 1.0
Vectors.dense(outputValues)
case sv: SparseVector =>
val inputValues = sv.values
val inputIndices = sv.indices
val inputValuesLength = inputValues.length
val dim = sv.size
val outputValues = Array.ofDim[Double](inputValuesLength + 1)
val outputIndices = Array.ofDim[Int](inputValuesLength + 1)
System.arraycopy(inputValues, 0, outputValues, 0, inputValuesLength)
System.arraycopy(inputIndices, 0, outputIndices, 0, inputValuesLength)
outputValues(inputValuesLength) = 1.0
outputIndices(inputValuesLength) = dim
Vectors.sparse(dim + 1, outputIndices, outputValues)
case _ => throw new IllegalArgumentException(s"Do not support vector type ${vector.getClass}")
}
}
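  // Editor's note (illustrative, not in the original source):
  //   appendBias(Vectors.dense(1.0, 2.0))                 == Vectors.dense(1.0, 2.0, 1.0)
  //   appendBias(Vectors.sparse(3, Array(1), Array(5.0))) == Vectors.sparse(4, Array(1, 3), Array(5.0, 1.0))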
/**
* Converts vector columns in an input Dataset from the [[org.apache.spark.mllib.linalg.Vector]]
* type to the new [[org.apache.spark.ml.linalg.Vector]] type under the `spark.ml` package.
* @param dataset input dataset
* @param cols a list of vector columns to be converted. New vector columns will be ignored. If
* unspecified, all old vector columns will be converted except nested ones.
* @return the input `DataFrame` with old vector columns converted to the new vector type
*/
@Since("2.0.0")
@varargs
def convertVectorColumnsToML(dataset: Dataset[_], cols: String*): DataFrame = {
val schema = dataset.schema
val colSet = if (cols.nonEmpty) {
cols.flatMap { c =>
val dataType = schema(c).dataType
if (dataType.getClass == classOf[VectorUDT]) {
Some(c)
} else {
// ignore new vector columns and raise an exception on other column types
require(dataType.getClass == classOf[MLVectorUDT],
s"Column $c must be old Vector type to be converted to new type but got $dataType.")
None
}
}.toSet
} else {
schema.fields
.filter(_.dataType.getClass == classOf[VectorUDT])
.map(_.name)
.toSet
}
if (colSet.isEmpty) {
return dataset.toDF()
}
logWarning("Vector column conversion has serialization overhead. " +
"Please migrate your datasets and workflows to use the spark.ml package.")
// TODO: This implementation has performance issues due to unnecessary serialization.
// TODO: It is better (but trickier) if we can cast the old vector type to new type directly.
val convertToML = udf { v: Vector => v.asML }
val exprs = schema.fields.map { field =>
val c = field.name
if (colSet.contains(c)) {
convertToML(col(c)).as(c, field.metadata)
} else {
col(c)
}
}
dataset.select(exprs: _*)
}
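  // Editor's note (illustrative, not in the original source): typical migration usage is
  //   val mlDF = convertVectorColumnsToML(oldDF)                      // convert all old vector columns
  //   val onlyFeatures = convertVectorColumnsToML(oldDF, "features")  // convert a single column
  // where `oldDF` and "features" are placeholders for the caller's data.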
/**
* Converts vector columns in an input Dataset to the [[org.apache.spark.mllib.linalg.Vector]]
* type from the new [[org.apache.spark.ml.linalg.Vector]] type under the `spark.ml` package.
* @param dataset input dataset
* @param cols a list of vector columns to be converted. Old vector columns will be ignored. If
* unspecified, all new vector columns will be converted except nested ones.
* @return the input `DataFrame` with new vector columns converted to the old vector type
*/
@Since("2.0.0")
@varargs
def convertVectorColumnsFromML(dataset: Dataset[_], cols: String*): DataFrame = {
val schema = dataset.schema
val colSet = if (cols.nonEmpty) {
cols.flatMap { c =>
val dataType = schema(c).dataType
if (dataType.getClass == classOf[MLVectorUDT]) {
Some(c)
} else {
// ignore old vector columns and raise an exception on other column types
require(dataType.getClass == classOf[VectorUDT],
s"Column $c must be new Vector type to be converted to old type but got $dataType.")
None
}
}.toSet
} else {
schema.fields
.filter(_.dataType.getClass == classOf[MLVectorUDT])
.map(_.name)
.toSet
}
if (colSet.isEmpty) {
return dataset.toDF()
}
logWarning("Vector column conversion has serialization overhead. " +
"Please migrate your datasets and workflows to use the spark.ml package.")
// TODO: This implementation has performance issues due to unnecessary serialization.
// TODO: It is better (but trickier) if we can cast the new vector type to old type directly.
val convertFromML = udf { Vectors.fromML _ }
val exprs = schema.fields.map { field =>
val c = field.name
if (colSet.contains(c)) {
convertFromML(col(c)).as(c, field.metadata)
} else {
col(c)
}
}
dataset.select(exprs: _*)
}
/**
* Converts Matrix columns in an input Dataset from the [[org.apache.spark.mllib.linalg.Matrix]]
* type to the new [[org.apache.spark.ml.linalg.Matrix]] type under the `spark.ml` package.
* @param dataset input dataset
* @param cols a list of matrix columns to be converted. New matrix columns will be ignored. If
* unspecified, all old matrix columns will be converted except nested ones.
* @return the input `DataFrame` with old matrix columns converted to the new matrix type
*/
@Since("2.0.0")
@varargs
def convertMatrixColumnsToML(dataset: Dataset[_], cols: String*): DataFrame = {
val schema = dataset.schema
val colSet = if (cols.nonEmpty) {
cols.flatMap { c =>
val dataType = schema(c).dataType
if (dataType.getClass == classOf[MatrixUDT]) {
Some(c)
} else {
// ignore new matrix columns and raise an exception on other column types
require(dataType.getClass == classOf[MLMatrixUDT],
s"Column $c must be old Matrix type to be converted to new type but got $dataType.")
None
}
}.toSet
} else {
schema.fields
.filter(_.dataType.getClass == classOf[MatrixUDT])
.map(_.name)
.toSet
}
if (colSet.isEmpty) {
return dataset.toDF()
}
logWarning("Matrix column conversion has serialization overhead. " +
"Please migrate your datasets and workflows to use the spark.ml package.")
val convertToML = udf { v: Matrix => v.asML }
val exprs = schema.fields.map { field =>
val c = field.name
if (colSet.contains(c)) {
convertToML(col(c)).as(c, field.metadata)
} else {
col(c)
}
}
dataset.select(exprs: _*)
}
/**
* Converts matrix columns in an input Dataset to the [[org.apache.spark.mllib.linalg.Matrix]]
* type from the new [[org.apache.spark.ml.linalg.Matrix]] type under the `spark.ml` package.
* @param dataset input dataset
* @param cols a list of matrix columns to be converted. Old matrix columns will be ignored. If
* unspecified, all new matrix columns will be converted except nested ones.
* @return the input `DataFrame` with new matrix columns converted to the old matrix type
*/
@Since("2.0.0")
@varargs
def convertMatrixColumnsFromML(dataset: Dataset[_], cols: String*): DataFrame = {
val schema = dataset.schema
val colSet = if (cols.nonEmpty) {
cols.flatMap { c =>
val dataType = schema(c).dataType
if (dataType.getClass == classOf[MLMatrixUDT]) {
Some(c)
} else {
// ignore old matrix columns and raise an exception on other column types
require(dataType.getClass == classOf[MatrixUDT],
s"Column $c must be new Matrix type to be converted to old type but got $dataType.")
None
}
}.toSet
} else {
schema.fields
.filter(_.dataType.getClass == classOf[MLMatrixUDT])
.map(_.name)
.toSet
}
if (colSet.isEmpty) {
return dataset.toDF()
}
logWarning("Matrix column conversion has serialization overhead. " +
"Please migrate your datasets and workflows to use the spark.ml package.")
val convertFromML = udf { Matrices.fromML _ }
val exprs = schema.fields.map { field =>
val c = field.name
if (colSet.contains(c)) {
convertFromML(col(c)).as(c, field.metadata)
} else {
col(c)
}
}
dataset.select(exprs: _*)
}
/**
* Returns the squared Euclidean distance between two vectors. The following formula will be used
* if it does not introduce too much numerical error:
* <pre>
* \\|a - b\\|_2^2 = \\|a\\|_2^2 + \\|b\\|_2^2 - 2 a^T b.
* </pre>
* When both vector norms are given, this is faster than computing the squared distance directly,
* especially when one of the vectors is a sparse vector.
* @param v1 the first vector
* @param norm1 the norm of the first vector, non-negative
* @param v2 the second vector
* @param norm2 the norm of the second vector, non-negative
* @param precision desired relative precision for the squared distance
* @return squared distance between v1 and v2 within the specified precision
*/
private[mllib] def fastSquaredDistance(
v1: Vector,
norm1: Double,
v2: Vector,
norm2: Double,
precision: Double = 1e-6): Double = {
val n = v1.size
require(v2.size == n,
s"Both vectors should have same length, found v1 is $n while v2 is ${v2.size}")
require(norm1 >= 0.0 && norm2 >= 0.0,
s"Both norms should be greater or equal to 0.0, found norm1=$norm1, norm2=$norm2")
var sqDist = 0.0
/*
* The relative error is
* <pre>
     * EPSILON * ( \\|a\\|_2^2 + \\|b\\|_2^2 + 2 |a^T b|) / ( \\|a - b\\|_2^2 ),
* </pre>
* which is bounded by
* <pre>
* 2.0 * EPSILON * ( \\|a\\|_2^2 + \\|b\\|_2^2 ) / ( (\\|a\\|_2 - \\|b\\|_2)^2 ).
* </pre>
* The bound doesn't need the inner product, so we can use it as a sufficient condition to
* check quickly whether the inner product approach is accurate.
*/
if (v1.isInstanceOf[DenseVector] && v2.isInstanceOf[DenseVector]) {
sqDist = Vectors.sqdist(v1, v2)
} else {
val sumSquaredNorm = norm1 * norm1 + norm2 * norm2
val normDiff = norm1 - norm2
val precisionBound1 = 2.0 * EPSILON * sumSquaredNorm / (normDiff * normDiff + EPSILON)
if (precisionBound1 < precision) {
sqDist = sumSquaredNorm - 2.0 * dot(v1, v2)
} else {
val dotValue = dot(v1, v2)
sqDist = math.max(sumSquaredNorm - 2.0 * dotValue, 0.0)
val precisionBound2 = EPSILON * (sumSquaredNorm + 2.0 * math.abs(dotValue)) /
(sqDist + EPSILON)
if (precisionBound2 > precision) {
sqDist = Vectors.sqdist(v1, v2)
}
}
}
sqDist
}
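  // Editor's note (illustrative, not in the original source): callers typically precompute the
  // norms once and reuse them across many distance evaluations, e.g.
  //   val n1 = Vectors.norm(v1, 2.0); val n2 = Vectors.norm(v2, 2.0)
  //   val d2 = fastSquaredDistance(v1, n1, v2, n2)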
/**
* When `x` is positive and large, computing `math.log(1 + math.exp(x))` will lead to arithmetic
   * overflow. This will happen when `x > 709.78`, which is not a very large number.
* It can be addressed by rewriting the formula into `x + math.log1p(math.exp(-x))` when `x > 0`.
* @param x a floating-point value as input.
* @return the result of `math.log(1 + math.exp(x))`.
*/
private[spark] def log1pExp(x: Double): Double = {
if (x > 0) {
x + math.log1p(math.exp(-x))
} else {
math.log1p(math.exp(x))
}
}
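  // Editor's note (illustrative, not in the original source): math.log(1 + math.exp(800.0))
  // overflows to Infinity, while log1pExp(800.0) returns 800.0 because the x > 0 branch keeps
  // the exponential argument negative.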
}
| dbtsai/spark | mllib/src/main/scala/org/apache/spark/mllib/util/MLUtils.scala | Scala | apache-2.0 | 22,080 |
import data.{BezierCurve, Point}
import math.Calculator
/**
* Created by nchampagne on 7/27/15.
*/
object Main {
  var offset: Int = 25
  def main(args: Array[String]): Unit = {
    println("Hello, world!")
    val bezierCurve: BezierCurve = Calculator.calculateCubicBezierCurve(new Point(1, 2), new Point(5, 3), new Point(10, 10), new Point(4, 4))
    // Partial function
    val partialBezierCurveFunc = Calculator.calculateCubicBezierCurve(new Point(1, 2), _: Point, _: Point, new Point(4, 4))
    val calcedBezierCurve = partialBezierCurveFunc.apply(new Point(50, 50), new Point(42, 42))
    // Closure test
    val offsetBezierCurve = Calculator.calculateCubicBezierCurveWithOffset(createPointWithOffset, (2, 2), (2, 4), (2, 6), (2, 8))
    // Partial function w/ closure
    val partialOffsetBezierCurveFunc = Calculator.calculateCubicBezierCurveWithOffset(_: Tuple2[Int, Int] => Point, (2, 2), (2, 4), (2, 6), _: Tuple2[Int, Int])
    val calculatedOffsetBezierCurve = partialOffsetBezierCurveFunc(createPointWithOffset, (100, 100))
  }
  def createPointWithOffset(point: Tuple2[Int, Int]): Point = {
    val newPoint = new Point(point._1 + offset, point._2 + offset)
    println("POINT CREATED (" + point._1 + ", " + point._2 + ") --> (" + newPoint.pointX + ", " + newPoint.pointY + ")")
    newPoint
  }
}
| Visitor15/bezierCurve-scala | src/main/scala/Main.scala | Scala | gpl-2.0 | 1,333 |
/*
* Copyright 2015 the original author or authors.
* @https://github.com/scouter-project/scouter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.tagcnt;
import scouter.lang.pack.AlertPack
import scouter.lang.value.NullValue
import scouter.server.Configure
import scouter.server.Logger
import scouter.server.core.AgentManager
import scouter.server.core.CoreRun
import scouter.server.util.ThreadScala
import scouter.util.RequestQueue
import scouter.lang.value.TextHashValue
import scouter.lang.value.DecimalValue
import scouter.lang.value.TextValue
object AlertTagCount {
val queue = new RequestQueue[AlertPack](CoreRun.MAX_QUE_SIZE);
ThreadScala.startDaemon("scouter.server.tagcnt.AlertTagCount") {
val conf = Configure.getInstance();
while (CoreRun.running) {
val m = queue.get();
try {
val objInfo = AgentManager.getAgent(m.objHash)
if (objInfo != null) {
process(objInfo.objType, m)
}
} catch {
case e: Exception => Logger.println("S180", e.toString())
}
}
}
def add(p: AlertPack) {
        val ok = queue.put(p)
        if (!ok) {
Logger.println("S181", 10, "AlertTagCount queue exceeded!!");
}
}
def process(objType: String, x: AlertPack) {
TagCountProxy.add(x.time, objType, TagCountConfig.alert.total, NullValue.value, 1)
TagCountProxy.add(x.time, objType, TagCountConfig.alert.objectName, new TextHashValue(x.objHash), 1)
TagCountProxy.add(x.time, objType, TagCountConfig.alert.level, new DecimalValue(x.level), 1)
TagCountProxy.add(x.time, objType, TagCountConfig.alert.title, new TextValue(x.title), 1)
}
}
| scouter-project/scouter | scouter.server/src/main/scala/scouter/server/tagcnt/AlertTagCount.scala | Scala | apache-2.0 | 2,324 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import scala.concurrent._
import scala.concurrent.duration._
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.Expression
import org.apache.spark.sql.catalyst.plans.physical.{Distribution, Partitioning, UnspecifiedDistribution}
import org.apache.spark.sql.execution.{BinaryNode, SQLExecution, SparkPlan}
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.util.ThreadUtils
import org.apache.spark.{InternalAccumulator, TaskContext}
/**
* :: DeveloperApi ::
* Performs an inner hash join of two child relations. When the output RDD of this operator is
* being constructed, a Spark job is asynchronously started to calculate the values for the
* broadcasted relation. This data is then placed in a Spark broadcast variable. The streamed
* relation is not shuffled.
*/
@DeveloperApi
case class BroadcastHashJoin(
leftKeys: Seq[Expression],
rightKeys: Seq[Expression],
buildSide: BuildSide,
left: SparkPlan,
right: SparkPlan)
extends BinaryNode with HashJoin {
override private[sql] lazy val metrics = Map(
"numLeftRows" -> SQLMetrics.createLongMetric(sparkContext, "number of left rows"),
"numRightRows" -> SQLMetrics.createLongMetric(sparkContext, "number of right rows"),
"numOutputRows" -> SQLMetrics.createLongMetric(sparkContext, "number of output rows"))
val timeout: Duration = {
val timeoutValue = sqlContext.conf.broadcastTimeout
if (timeoutValue < 0) {
Duration.Inf
} else {
timeoutValue.seconds
}
}
override def outputPartitioning: Partitioning = streamedPlan.outputPartitioning
override def requiredChildDistribution: Seq[Distribution] =
UnspecifiedDistribution :: UnspecifiedDistribution :: Nil
// Use lazy so that we won't do broadcast when calling explain but still cache the broadcast value
// for the same query.
@transient
private lazy val broadcastFuture = {
val numBuildRows = buildSide match {
case BuildLeft => longMetric("numLeftRows")
case BuildRight => longMetric("numRightRows")
}
// broadcastFuture is used in "doExecute". Therefore we can get the execution id correctly here.
val executionId = sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
future {
// This will run in another thread. Set the execution id so that we can connect these jobs
// with the correct execution.
SQLExecution.withExecutionId(sparkContext, executionId) {
// Note that we use .execute().collect() because we don't want to convert data to Scala
// types
val input: Array[InternalRow] = buildPlan.execute().map { row =>
numBuildRows += 1
row.copy()
}.collect()
// The following line doesn't run in a job so we cannot track the metric value. However, we
// have already tracked it in the above lines. So here we can use
// `SQLMetrics.nullLongMetric` to ignore it.
val hashed = HashedRelation(
input.iterator, SQLMetrics.nullLongMetric, buildSideKeyGenerator, input.size)
sparkContext.broadcast(hashed)
}
}(BroadcastHashJoin.broadcastHashJoinExecutionContext)
}
protected override def doPrepare(): Unit = {
broadcastFuture
}
protected override def doExecute(): RDD[InternalRow] = {
val numStreamedRows = buildSide match {
case BuildLeft => longMetric("numRightRows")
case BuildRight => longMetric("numLeftRows")
}
val numOutputRows = longMetric("numOutputRows")
val broadcastRelation = Await.result(broadcastFuture, timeout)
streamedPlan.execute().mapPartitions { streamedIter =>
val hashedRelation = broadcastRelation.value
hashedRelation match {
case unsafe: UnsafeHashedRelation =>
TaskContext.get().internalMetricsToAccumulators(
InternalAccumulator.PEAK_EXECUTION_MEMORY).add(unsafe.getUnsafeSize)
case _ =>
}
hashJoin(streamedIter, numStreamedRows, hashedRelation, numOutputRows)
}
}
}
object BroadcastHashJoin {
private[joins] val broadcastHashJoinExecutionContext = ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonCachedThreadPool("broadcast-hash-join", 128))
}
| tophua/spark1.52 | sql/core/src/main/scala/org/apache/spark/sql/execution/joins/BroadcastHashJoin.scala | Scala | apache-2.0 | 6,059 |
package com.github.libsml.optimization.lbfgs
import com.github.libsml.commons.LibsmlException
import com.github.libsml.math.linalg.{BLAS, Vector}
import com.github.libsml.math.util.VectorUtils
/**
* Created by huangyu on 15/9/14.
*/
class SingleDirectSearch(val m: Int) extends DirectSearch {
require(m > 0, s"Single direct search exception:m=${m} should > 0")
private[this] var k: Int = 1
private[this] var end: Int = 0
private[this] var lm: Array[IterationData] = null
  // `d` must already be initialized by the caller; when `sg` is supplied it is overwritten below.
override def direct(d: Vector, x: Vector, xp: Vector, g: Vector, gp: Vector, sg: Option[Vector] = None): Vector = {
if (lm == null) {
lm = Array.fill(m)(new IterationData(0.0, VectorUtils.newVectorAs(x), VectorUtils.newVectorAs(x), 0.0))
}
sg.foreach(BLAS.ncopy(_, d))
var it = lm(end)
BLAS.copy(x, it.s)
BLAS.axpy(-1, xp, it.s)
BLAS.copy(g, it.y)
BLAS.axpy(-1, gp, it.y)
    it.ys = BLAS.dot(it.y, it.s)
    // Capture ys and yy of the newest correction pair now: `it` is reused while walking the
    // history below, and the initial Hessian scaling gamma = ys / yy must use this pair.
    val ys: Double = it.ys
    val yy: Double = BLAS.dot(it.y, it.y)
val bound = if (m <= k) m else k
k += 1
end = (end + 1) % m
    // First loop of the L-BFGS two-loop recursion: walk backwards over the stored pairs,
    // computing alpha_j and subtracting alpha_j * y_j from the direction d.
    var j = end
    var i = 0
    while (i < bound) {
j = (j + m - 1) % m
i += 1
it = lm(j)
it.alpha = BLAS.dot(it.s, d)
it.alpha /= it.ys
BLAS.axpy(-it.alpha, it.y, d)
}
    // Scale by gamma = ys / yy, the initial inverse-Hessian approximation.
    BLAS.scal(ys / yy, d)
    // Second loop: walk forwards over the same pairs, applying the beta corrections
    // (d += (alpha_j - beta_j) * s_j).
    i = 0
    while (i < bound) {
it = lm(j)
var beta = BLAS.dot(it.y, d)
beta /= it.ys
BLAS.axpy(it.alpha - beta, it.s, d)
j = (j + 1) % m
i += 1
}
d
}
override def clear(): Unit = {
k = 1
end = 0
// lm = null
}
}
case class IterationData(var alpha: Double, s: Vector, y: Vector, var ys: Double)
| libsml/libsml | optimization/src/main/scala/com/github/libsml/optimization/lbfgs/SingleDirectSearch.scala | Scala | apache-2.0 | 1,676 |
package com.appliedscala.generator.services
class PreviewService {
private val PreviewSplitter = """\[\/\/\]\: \# \"__PREVIEW__\""""
def extractPreview(contentMd: String): Option[String] = {
val contentLength = contentMd.length
val previewParts = contentMd.split(PreviewSplitter)
if (previewParts.length > 1 && previewParts(1).trim.length > 0) {
Some(previewParts(1))
} else if (previewParts.nonEmpty && previewParts(0).trim.length > 0 && previewParts(0).length < contentLength) {
Some(previewParts(0))
} else {
None
}
}
}
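// Editor's note: illustrative sketch, not part of the original project. It assumes the preview
// is wrapped between two `[//]: # "__PREVIEW__"` marker lines; the sample Markdown and the
// object name below are made up for demonstration.
object PreviewServiceExample {
  def main(args: Array[String]): Unit = {
    val md =
      """Some introduction that is not part of the preview.
        |[//]: # "__PREVIEW__"
        |This paragraph is extracted as the preview.
        |[//]: # "__PREVIEW__"
        |The rest of the post follows here.
        |""".stripMargin
    // Prints Some(...) containing the paragraph between the two markers.
    println(new PreviewService().extractPreview(md))
  }
}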
| denisftw/s2gen | src/main/scala/com/appliedscala/generator/services/PreviewService.scala | Scala | mit | 574 |
package com.sksamuel.elastic4s.monix
import com.sksamuel.elastic4s.http.{ElasticRequest, Executor, HttpClient, HttpResponse}
import monix.eval.Task
import monix.execution.Cancelable
class TaskExecutor extends Executor[Task] {
override def exec(client: HttpClient, request: ElasticRequest): Task[HttpResponse] = {
Task.async { case (_, callback) =>
client.send(request, {
case Left(t) => callback.onError(t)
case Right(v) => callback.onSuccess(v)
})
Cancelable.empty
}
}
}
| Tecsisa/elastic4s | elastic4s-monix/src/main/scala/com/sksamuel/elastic4s/monix/TaskExecutor.scala | Scala | apache-2.0 | 520 |
/*
The MIT License (MIT)
Copyright (c) 2015-2016 Raymond Dodge
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package com.rayrobdod.divReduce
import org.scalajs.dom.document
import org.scalajs.dom.raw.HTMLTextAreaElement
object WebConverter {
def main(args:Array[String]):Unit = {
document.addEventListener("DOMContentLoaded", {(x:Any) =>
document.getElementById("input").addEventListener("keyup", updateOutput _)
updateOutput("?")
})
}
def updateOutput(dunno:Any):Unit = {
val inText = document.getElementById("input").asInstanceOf[HTMLTextAreaElement].value.split("\n")
val outText = toHtml(parse(java.util.Arrays.asList(inText:_*)))
document.getElementById("output").textContent = outText
}
}
| rayrobdod/div-reduce | webpage/src/main/scala/WebConverter.scala | Scala | mit | 1,723 |
package com.twitter.finagle.stats
import com.twitter.app.GlobalFlag
import com.twitter.finagle.http.{HttpMuxHandler, Route, RouteIndex}
import com.twitter.logging.{Level, Logger}
import com.twitter.util.lint.{Category, GlobalRules, Issue, Rule}
import java.util.concurrent.atomic.LongAdder
import scala.collection.JavaConverters._
// The ordering issue is that LoadService is run early in the startup
// lifecycle, typically before Flags are loaded. By using a system
// property you can avoid that brittleness.
object debugLoggedStatNames
extends GlobalFlag[Set[String]](
Set.empty,
"Comma separated stat names for logging observed values" +
" (set via a -D system property to avoid load ordering issues)"
)
// It's possible to override the scope separator (the default value for `MetricsStatsReceiver` is
// `"/"`), which is used to separate scopes defined by `StatsReceiver`. This flag might be useful
// while migrating from Commons Stats (i.e., `CommonsStatsReceiver`), which is configured to use
// `"_"` as scope separator.
object scopeSeparator
extends GlobalFlag[String](
"/",
"Override the scope separator."
)
object MetricsStatsReceiver {
val defaultRegistry = new Metrics()
private[stats] val defaultHostRegistry = Metrics.createDetached()
/**
* A semi-arbitrary value, but should a service call any counter/stat/addGauge
* this often, it's a good indication that they are not following best practices.
*/
private val CreateRequestLimit = 100000L
}
/**
* The standard implementation of StatsReceiver, optimized to be high precision
* and low overhead.
*
* Note: Histogram is configured to store events in 60 second snapshots. It
* means that when you add a value, you need to wait at most 20 seconds before
* this value will be aggregated in the exported metrics.
*/
class MetricsStatsReceiver(val registry: Metrics)
extends StatsReceiverWithCumulativeGauges
with WithHistogramDetails {
import MetricsStatsReceiver._
def this() = this(MetricsStatsReceiver.defaultRegistry)
def repr: MetricsStatsReceiver = this
def histogramDetails: Map[String, HistogramDetail] = registry.histoDetails.asScala.toMap
// Used to store underlying histogram counts
private[this] val log = Logger.get()
private[this] val counterRequests = new LongAdder()
private[this] val statRequests = new LongAdder()
private[this] val gaugeRequests = new LongAdder()
private[this] def checkRequestsLimit(which: String, adder: LongAdder): Option[Issue] = {
// todo: ideally these would be computed as rates over time, but this is a
// relatively simple proxy for bad behavior.
val count = adder.sum()
if (count > CreateRequestLimit)
Some(Issue(s"StatReceiver.$which() has been called $count times"))
else
None
}
GlobalRules.get.add(
Rule(
Category.Performance,
"Elevated metric creation requests",
"For best performance, metrics should be created and stored in member variables " +
"and not requested via `StatsReceiver.{counter,stat,addGauge}` at runtime. " +
"Large numbers are an indication that these metrics are being requested " +
"frequently at runtime."
) {
Seq(
checkRequestsLimit("counter", counterRequests),
checkRequestsLimit("stat", statRequests),
checkRequestsLimit("addGauge", gaugeRequests)
).flatten
}
)
// Scope separator, a string value used to separate scopes defined by `StatsReceiver`.
private[this] val separator: String = scopeSeparator()
require(separator.length == 1, s"Scope separator should be one symbol: '$separator'")
override def toString: String = "MetricsStatsReceiver"
/**
* Create and register a counter inside the underlying Metrics library
*/
def counter(verbosity: Verbosity, names: String*): Counter = {
if (log.isLoggable(Level.TRACE))
log.trace(s"Calling StatsReceiver.counter on $names")
counterRequests.increment()
val storeCounter = registry.getOrCreateCounter(verbosity, names)
storeCounter.counter
}
/**
* Create and register a stat (histogram) inside the underlying Metrics library
*/
def stat(verbosity: Verbosity, names: String*): Stat = {
if (log.isLoggable(Level.TRACE))
log.trace(s"Calling StatsReceiver.stat for $names")
statRequests.increment()
val storeStat = registry.getOrCreateStat(verbosity, names)
storeStat.stat
}
override def addGauge(verbosity: Verbosity, name: String*)(f: => Float): Gauge = {
if (log.isLoggable(Level.TRACE))
log.trace(s"Calling StatsReceiver.addGauge for $name")
gaugeRequests.increment()
super.addGauge(verbosity, name: _*)(f)
}
protected[this] def registerGauge(verbosity: Verbosity, name: Seq[String], f: => Float): Unit =
registry.registerGauge(verbosity, name, f)
protected[this] def deregisterGauge(name: Seq[String]): Unit = registry.unregisterGauge(name)
}
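// Editor's note: illustrative sketch, not part of the original file. Per the lint rule
// registered above, metrics should be created once and held in member variables rather than
// requested on every call, e.g.
//
//   class MyService(stats: StatsReceiver) {          // `MyService` is a made-up example
//     private[this] val requests = stats.counter(Verbosity.Default, "requests")
//     def handle(): Unit = requests.incr()
//   }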
class MetricsExporter(val registry: Metrics)
extends JsonExporter(registry)
with HttpMuxHandler
with MetricsRegistry {
def this() = this(MetricsStatsReceiver.defaultRegistry)
val pattern = "/admin/metrics.json"
def route: Route =
Route(
pattern = pattern,
handler = this,
index = Some(
RouteIndex(
alias = "Metrics",
group = "Metrics",
path = Some("/admin/metrics.json?pretty=true")
)
)
)
}
| mkhq/finagle | finagle-stats/src/main/scala/com/twitter/finagle/stats/MetricsStatsReceiver.scala | Scala | apache-2.0 | 5,472 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.sparkline.druid.index.service
import java.net.URLClassLoader
import com.fasterxml.jackson.databind.{ObjectWriter, ObjectMapper}
import com.google.inject.Injector
import io.druid.guice.GuiceInjectors
import org.apache.spark.sql.SQLContext
object DruidIndexer extends Function2[String, SQLContext, Unit] {
def apply(spec : String, sqlContext : SQLContext) : Unit = {
import scala.collection.JavaConversions._
this.getClass.getClassLoader.asInstanceOf[URLClassLoader].getURLs.foreach(println)
val sc = sqlContext.sparkContext
val injector: Injector = GuiceInjectors.makeStartupInjector
implicit val objectMapper = injector.getInstance(classOf[ObjectMapper])
val indexSpec = DruidIndexSpec.fromString(spec)
indexSpec.getProperties.foreach {
case (k,v) => sc.hadoopConfiguration.set(k,v)
}
}
}
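// Editor's note: illustrative usage, not part of the original file. Because DruidIndexer
// extends Function2, an indexing spec (a JSON string) is applied directly:
//
//   DruidIndexer(indexSpecJson, sqlContext)   // `indexSpecJson` is a placeholder value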
| SparklineData/spark-druid-indexer | src/main/scala/org/sparkline/druid/index/service/DruidIndexer.scala | Scala | apache-2.0 | 1,655 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
import scala.tools.nsc.doc.DocFactory
import scala.tools.nsc.reporters.ConsoleReporter
import scala.tools.nsc.settings.DefaultPathFactory
import scala.reflect.internal.util.{FakePos, Position}
/** The main class for scaladoc, a front-end for the Scala compiler
* that generates documentation from source files.
*/
class ScalaDoc {
val versionMsg = s"Scaladoc ${Properties.versionString} -- ${Properties.copyrightString}"
def process(args: Array[String]): Boolean = {
var reporter: ScalaDocReporter = null
val docSettings = new doc.Settings(msg => reporter.error(FakePos("scaladoc"), msg + "\n scaladoc -help gives more information"),
msg => reporter.echo(msg),
DefaultPathFactory)
reporter = new ScalaDocReporter(docSettings)
val command = new ScalaDoc.Command(args.toList, docSettings)
def hasFiles = command.files.nonEmpty || docSettings.uncompilableFiles.nonEmpty
if (docSettings.version.value)
reporter.echo(versionMsg)
else if (docSettings.Xhelp.value)
reporter.echo(command.xusageMsg)
else if (docSettings.Yhelp.value)
reporter.echo(command.yusageMsg)
else if (docSettings.showPlugins.value)
reporter.warning(null, "Plugins are not available when using Scaladoc")
else if (docSettings.showPhases.value)
reporter.warning(null, "Phases are restricted when using Scaladoc")
else if (docSettings.help.value || !hasFiles)
reporter.echo(command.usageMsg)
else
try { new DocFactory(reporter, docSettings) document command.files }
catch {
case ex @ FatalError(msg) =>
if (docSettings.isDebug) ex.printStackTrace()
reporter.error(null, "fatal error: " + msg)
}
finally reporter.finish()
!reporter.reallyHasErrors
}
}
/** The Scaladoc reporter adds summary messages to the `ConsoleReporter`
*
* Use the `summaryX` methods to add unique summarizing message to the end of
* the run.
*/
class ScalaDocReporter(settings: Settings) extends ConsoleReporter(settings) {
import scala.collection.mutable.LinkedHashMap
// need to do sometimes lie so that the Global instance doesn't
// trash all the symbols just because there was an error
override def hasErrors = false
def reallyHasErrors = super.hasErrors
private[this] val delayedMessages: LinkedHashMap[(Position, String), () => Unit] =
LinkedHashMap.empty
/** Eliminates messages if both `pos` and `msg` are equal to existing element */
def addDelayedMessage(pos: Position, msg: String, print: () => Unit): Unit =
delayedMessages += ((pos, msg) -> print)
def printDelayedMessages(): Unit = delayedMessages.values.foreach(_.apply())
override def finish(): Unit = {
printDelayedMessages()
super.finish()
}
}
object ScalaDoc extends ScalaDoc {
class Command(arguments: List[String], settings: doc.Settings) extends CompilerCommand(arguments, settings) {
override def cmdName = "scaladoc"
override def usageMsg = (
createUsageMsg("where possible scaladoc", explain = false)(x => x.isStandard && settings.isScaladocSpecific(x.name)) +
"\n\nStandard scalac options also available:" +
optionsMessage(x => x.isStandard && !settings.isScaladocSpecific(x.name))
)
}
def main(args: Array[String]): Unit = {
System.exit(if (process(args)) 0 else 1)
}
}
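// Editor's note: illustrative usage, not part of the original file. The front end can also be
// driven programmatically through `process`, e.g.
//
//   ScalaDoc.process(Array("-d", "target/api", "src/main/scala/Foo.scala"))
//
// where the output directory and source path are placeholders; the call returns false if any
// errors were reported.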
| scala/scala | src/scaladoc/scala/tools/nsc/ScalaDoc.scala | Scala | apache-2.0 | 3,725 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.schedulers
import java.util.concurrent._
/** A mixin for adapting for the Java `ThreadPoolExecutor` implementation
* to report errors using the default thread exception handler.
*/
private[schedulers] trait AdaptedThreadPoolExecutorMixin { self: ThreadPoolExecutor =>
def reportFailure(t: Throwable): Unit
override def afterExecute(r: Runnable, t: Throwable): Unit = {
self.afterExecute(r, t)
var exception: Throwable = t
if ((exception eq null) && r.isInstanceOf[Future[_]]) {
try {
val future = r.asInstanceOf[Future[_]]
if (future.isDone) future.get()
} catch {
case ex: ExecutionException =>
exception = ex.getCause
case _: InterruptedException =>
// ignore/reset
Thread.currentThread().interrupt()
case _: CancellationException =>
() // ignore
}
}
if (exception ne null) reportFailure(exception)
}
}
| alexandru/monifu | monix-execution/jvm/src/main/scala/monix/execution/schedulers/AdaptedThreadPoolExecutorMixin.scala | Scala | apache-2.0 | 1,637 |
package imgdetect
import org.opencv.core.Mat
import org.opencv.core.MatOfKeyPoint
import org.opencv.features2d.FeatureDetector
import org.opencv.features2d.DescriptorExtractor
import org.opencv.highgui.Highgui
import org.opencv.features2d.DescriptorMatcher
import org.opencv.core.MatOfDMatch
import org.opencv.features2d.Features2d
import scala.collection.JavaConverters._
import imgretrieve.DiskImageKeeper
import java.io.File
import scala.collection.mutable.HashMap
import scala.util.Try
import scala.collection.mutable.ArrayBuffer
class LogoDetector(trainLogos: Array[String]) extends Detector {
val LOGODIR = "/train-logos/"
val MATCH_THRESHOLD = 0.33 // TODO: for testing purposes
private val keeper = new DiskImageKeeper
private val trainImgMap = new HashMap[String, Array[String]]
trainLogos.foreach { logo =>
val logoDir = new File(getClass.getResource(LOGODIR + logo).getPath)
val logoImgs = logoDir.listFiles.map(_.getAbsolutePath)
trainImgMap.put(logo, logoImgs)
}
def detect(imageLink: String): Array[String] = {
val imFile = keeper.getImage(imageLink)
val result = ArrayBuffer[String]()
if (imFile.isSuccess) {
trainLogos.foreach { logo =>
val trainImgs = trainImgMap.get(logo)
trainImgs match {
case None => System.err.println(s"Cannot find logo in Logo Map.")
case Some(x) => {
val score = detectScore(imFile.get.getAbsolutePath, x)
println(s"Score with ${logo} for ${imageLink}: ${score}")
if (score < MATCH_THRESHOLD)
result.append(logo)
}
}
}
} else {
System.err.println(s"Failed to load image ${imageLink}")
}
return result.toArray
}
def detectScore(imgQuery: String, trainImgs: Array[String]): Double = {
val queryImage = Highgui.imread(imgQuery)
val (leftKeyPoints, leftDescriptors) = detectAndExtract(queryImage)
val trainList: java.util.List[Mat] = new java.util.ArrayList()
trainImgs.foreach { img =>
val imgMat = Highgui.imread(img)
val (imgKeyPoints, imgDescriptors) = detectAndExtract(imgMat)
trainList.add(imgDescriptors)
}
val matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE)
val dmatches = new MatOfDMatch
matcher.add(trainList)
matcher.train()
Try(matcher.`match`(leftDescriptors, dmatches)).getOrElse {
System.err.println("Error matching images (Size stuff)");
return -1
}
//dmatches.toList().asScala.sortWith((e1, e2) => e1.distance < e2.distance).take(200).foreach(println)
val distances = dmatches.toArray().map(x => x.distance)
val count = distances.length
val mean = distances.sum / count
mean
}
def detectAndExtract(mat: Mat) = {
val keyPoints = new MatOfKeyPoint
val detector = FeatureDetector.create(FeatureDetector.SURF)
detector.detect(mat, keyPoints)
val sorted = keyPoints.toArray.sortBy(_.response).reverse.take(50)
val bestKeyPoints: MatOfKeyPoint = new MatOfKeyPoint(sorted: _*)
val extractor = DescriptorExtractor.create(DescriptorExtractor.SURF)
val descriptors = new Mat
extractor.compute(mat, bestKeyPoints, descriptors)
//println(s"${descriptors.rows} descriptors were extracted, each with dimension ${descriptors.cols}")
(bestKeyPoints, descriptors)
}
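  // Editor's note: illustrative usage sketch, not part of the original file. A caller must load
  // the OpenCV native bindings once, e.g.
  //   System.loadLibrary(org.opencv.core.Core.NATIVE_LIBRARY_NAME)
  // and then run detection against logo names that exist under the /train-logos/ resources:
  //   val detector = new LogoDetector(Array("nike", "adidas"))    // names are placeholders
  //   val matches = detector.detect("http://example.com/image.jpg")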
} | flaviusanton/logo-detection | src/main/scala/imgdetect/LogoDetector.scala | Scala | mit | 3,360 |
/*
* BuildUtil.scala
*
* Updated: Dec 3, 2014
*
* Copyright (c) 2014, CodeMettle
*/
import sbt.{CrossVersion, ModuleID}
object BuildUtil {
implicit class ExcludeModId(val u: ModuleID) extends AnyVal {
def excludeCross(group: String, art: String, scalaVersion: String) = {
val suff = CrossVersion partialVersion scalaVersion match {
case Some((2, 10)) => "2.10"
case Some((2, 11)) => "2.11"
case _ => sys.error("excludeCross needs updating")
}
u.exclude(group, s"${art}_$suff")
}
}
}
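// Editor's note: illustrative sketch, not part of the original build definition. It shows how
// `excludeCross` might be applied to a dependency; the group/artifact/version strings and the
// object name are placeholders.
object BuildUtilUsageSketch {
  import sbt._
  import BuildUtil._
  def exampleDependency(scalaVersion: String): ModuleID =
    ("com.example" %% "some-lib" % "1.0.0")
      .excludeCross("com.example", "excluded-artifact", scalaVersion)
}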
| CodeMettle/akka-snmp4j | project/BuildUtil.scala | Scala | apache-2.0 | 601 |
package io.buoyant.namer.fs
import com.twitter.finagle.{Addr, Address, Path, Stack}
import com.twitter.finagle.addr.WeightedAddress
import com.twitter.finagle.util.LoadService
import io.buoyant.config.Parser
import io.buoyant.config.types.Directory
import io.buoyant.namer.{NamerConfig, NamerInitializer, NamerTestUtil}
import java.io.{File, PrintWriter}
import java.nio.file.Paths
import org.scalatest.FunSuite
import org.scalatest.concurrent.{Eventually, IntegrationPatience}
import org.scalatest.time.{Millis, Span}
import scala.sys.process._
class FsTest extends FunSuite with NamerTestUtil with Eventually with IntegrationPatience {
test("sanity") {
// ensure it doesn't totally blowup
val _ = FsConfig(Directory(Paths.get("."))).newNamer(Stack.Params.empty)
}
test("service registration") {
assert(LoadService[NamerInitializer]().exists(_.isInstanceOf[FsInitializer]))
}
test("parse config") {
val dir = new File("mktemp -d -t disco.XXX".!!.stripLineEnd)
try {
val yaml = s"""
|kind: io.l5d.fs
|rootDir: ${dir.getAbsolutePath}
""".stripMargin
val mapper = Parser.objectMapper(yaml, Iterable(Seq(FsInitializer)))
val fs = mapper.readValue[NamerConfig](yaml).asInstanceOf[FsConfig]
assert(fs.rootDir.path == dir.toPath)
} finally {
val _ = Seq("rm", "-rf", dir.getPath).!
}
}
test("id is bound name") {
val path = Path.read("/#/io.l5d.fs/default")
val dir = new File("mktemp -d -t disco.XXX".!!.stripLineEnd)
try {
val default = new File(dir, "default")
val writer = new PrintWriter(default)
writer.println("127.0.0.1 8080")
writer.close()
val yaml = s"""
|kind: io.l5d.fs
|rootDir: ${dir.getAbsolutePath}
""".stripMargin
val mapper = Parser.objectMapper(yaml, Iterable(Seq(FsInitializer)))
val fs = mapper.readValue[NamerConfig](yaml)
val namer = fs.mk(Stack.Params.empty)
assertBoundIdAutobinds(namer, path, fs.prefix)
} finally {
val _ = Seq("rm", "-rf", dir.getPath).!
}
}
test("supports weights") {
val path = Path.read("/#/io.l5d.fs/default")
val dir = new File("mktemp -d -t disco.XXX".!!.stripLineEnd)
try {
val default = new File(dir, "default")
val writer = new PrintWriter(default)
writer.println("127.0.0.1 8080")
writer.println("127.0.0.1 8081 * 0.23")
writer.close()
val yaml = s"""
|kind: io.l5d.fs
|rootDir: ${dir.getAbsolutePath}
""".stripMargin
val mapper = Parser.objectMapper(yaml, Iterable(Seq(FsInitializer)))
val fs = mapper.readValue[NamerConfig](yaml)
val namer = fs.mk(Stack.Params.empty)
eventually {
val bound = lookupBound(namer, path.drop(fs.prefix.size))
assert(bound.size == 1)
assert(bound.head.addr.sample() == Addr.Bound(
Address("127.0.0.1", 8080),
WeightedAddress(Address("127.0.0.1", 8081), 0.23)
))
}
} finally {
val _ = Seq("rm", "-rf", dir.getPath).!
}
}
}
| denverwilliams/linkerd | namer/fs/src/test/scala/io/buoyant/namer/fs/FsTest.scala | Scala | apache-2.0 | 3,140 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.tables
import com.datastax.driver.core.utils.UUIDs
import com.websudos.phantom.builder.query.InsertQuery
import com.websudos.phantom.dsl._
import org.joda.time.DateTime
case class TimeSeriesRecord(
id: UUID,
name: String,
timestamp: DateTime
)
case class TimeUUIDRecord(
user: UUID,
id: UUID,
name: String,
timestamp: DateTime
)
sealed class TimeSeriesTable extends CassandraTable[ConcreteTimeSeriesTable, TimeSeriesRecord] {
object id extends UUIDColumn(this) with PartitionKey[UUID]
object name extends StringColumn(this)
object timestamp extends DateTimeColumn(this) with ClusteringOrder[DateTime] with Descending {
override val name = "unixTimestamp"
}
def fromRow(row: Row): TimeSeriesRecord = {
TimeSeriesRecord(
id(row),
name(row),
timestamp(row)
)
}
}
abstract class ConcreteTimeSeriesTable extends TimeSeriesTable with RootConnector
sealed class TimeUUIDTable extends CassandraTable[ConcreteTimeUUIDTable, TimeUUIDRecord] {
object user extends UUIDColumn(this) with PartitionKey[UUID]
object id extends TimeUUIDColumn(this) with ClusteringOrder[UUID] with Descending
object name extends StringColumn(this)
def fromRow(row: Row): TimeUUIDRecord = {
TimeUUIDRecord(
user(row),
id(row),
name(row),
new DateTime(UUIDs.unixTimestamp(id(row)))
)
}
}
abstract class ConcreteTimeUUIDTable extends TimeUUIDTable with RootConnector {
def store(rec: TimeUUIDRecord): InsertQuery.Default[ConcreteTimeUUIDTable, TimeUUIDRecord] = {
insert
.value(_.user, rec.user)
.value(_.id, rec.id)
.value(_.name, rec.name)
}
}
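// Editor's note: illustrative sketch, not part of the original file. With the implicit session
// and keyspace that RootConnector provides in a wired-up database, a record could be inserted
// along the lines of
//
//   table.store(TimeUUIDRecord(userId, UUIDs.timeBased(), "name", DateTime.now())).future()
//
// where `table`, `userId` and "name" are placeholders.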
| levinson/phantom | phantom-dsl/src/test/scala/com/websudos/phantom/tables/TimeSeriesTable.scala | Scala | bsd-2-clause | 3,169 |
package com.twitter.finagle.mux
import com.twitter.conversions.time._
import com.twitter.finagle._
import com.twitter.finagle.mux.Message._
import com.twitter.finagle.mux.lease.exp.{Lessee, Lessor}
import com.twitter.finagle.netty3.{ChannelBufferBuf, BufChannelBuffer}
import com.twitter.finagle.stats.InMemoryStatsReceiver
import com.twitter.finagle.tracing._
import com.twitter.io.Buf
import com.twitter.util.{Await, Future, Promise, Duration, Closable, Time}
import java.io.{PrintWriter, StringWriter}
import org.jboss.netty.buffer.ChannelBuffers
import org.junit.runner.RunWith
import org.scalatest.concurrent.{IntegrationPatience, Eventually}
import org.scalatest.junit.{AssertionsForJUnit, JUnitRunner}
import org.scalatest.{BeforeAndAfter, FunSuite}
@RunWith(classOf[JUnitRunner])
class EndToEndTest extends FunSuite
with Eventually with IntegrationPatience with BeforeAndAfter with AssertionsForJUnit {
var saveBase: Dtab = Dtab.empty
before {
saveBase = Dtab.base
Dtab.base = Dtab.read("/foo=>/bar; /baz=>/biz")
}
after {
Dtab.base = saveBase
}
test("Discard request properly sent") {
@volatile var handled = false
val p = Promise[Response]()
p.setInterruptHandler { case t: Throwable =>
handled = true
}
val server = Mux.serve("localhost:*", Service.mk[Request, Response](_ => p))
val client = Mux.newService(server)
val f = client(Request(Path.empty, Buf.Empty))
assert(!f.isDefined)
assert(!p.isDefined)
f.raise(new Exception())
eventually { assert(handled) }
}
test("Dtab propagation") {
val server = Mux.serve("localhost:*", Service.mk[Request, Response] { _ =>
val stringer = new StringWriter
val printer = new PrintWriter(stringer)
Dtab.local.print(printer)
Future.value(Response(Buf.Utf8(stringer.toString)))
})
val client = Mux.newService(server)
Dtab.unwind {
Dtab.local ++= Dtab.read("/foo=>/bar; /web=>/$/inet/twitter.com/80")
for (n <- 0 until 2) {
val rsp = Await.result(client(Request(Path.empty, Buf.Empty)), 30.seconds)
val Buf.Utf8(str) = rsp.body
assert(str === "Dtab(2)\n\t/foo => /bar\n\t/web => /$/inet/twitter.com/80\n")
}
}
}
test("(no) Dtab propagation") {
val server = Mux.serve("localhost:*", Service.mk[Request, Response] { _ =>
val buf = ChannelBuffers.buffer(4)
buf.writeInt(Dtab.local.size)
Future.value(Response(ChannelBufferBuf.Owned(buf)))
})
val client = Mux.newService(server)
val payload = Await.result(client(Request.empty), 30.seconds).body
val cb = BufChannelBuffer(payload)
assert(cb.readableBytes() === 4)
assert(cb.readInt() === 0)
}
def assertAnnotationsInOrder(tracer: Seq[Record], annos: Seq[Annotation]) {
assert(tracer.collect { case Record(_, _, ann, _) if annos.contains(ann) => ann } === annos)
}
test("trace propagation") {
val tracer = new BufferingTracer
var count: Int = 0
var client: Service[Request, Response] = null
val server = Mux.server
.configured(param.Tracer(tracer))
.configured(param.Label("theServer"))
.serve("localhost:*", new Service[Request, Response] {
def apply(req: Request) = {
count += 1
if (count >= 1) Future.value(Response(req.body))
else client(req)
}
})
client = Mux.client
.configured(param.Tracer(tracer))
.configured(param.Label("theClient"))
.newService(server)
Await.result(client(Request.empty), 30.seconds)
assertAnnotationsInOrder(tracer.toSeq, Seq(
Annotation.ServiceName("theClient"),
Annotation.ClientSend(),
Annotation.BinaryAnnotation("clnt/mux/enabled", true),
Annotation.ServiceName("theServer"),
Annotation.ServerRecv(),
Annotation.BinaryAnnotation("srv/mux/enabled", true),
Annotation.ServerSend(),
Annotation.ClientRecv()
))
}
test("responds to lease") {
Time.withCurrentTimeFrozen { ctl =>
class FakeLessor extends Lessor {
var list: List[Lessee] = Nil
def register(lessee: Lessee): Unit = {
list ::= lessee
}
def unregister(lessee: Lessee): Unit = ()
def observe(d: Duration): Unit = ()
def observeArrival(): Unit = ()
}
val lessor = new FakeLessor
val server = Mux.server
.configured(Lessor.Param(lessor))
.serve("localhost:*", new Service[mux.Request, mux.Response] {
def apply(req: Request) = ???
}
)
val sr = new InMemoryStatsReceiver
val factory = Mux.client.configured(param.Stats(sr)).newClient(server)
val fclient = factory()
eventually { assert(fclient.isDefined) }
val Some((_, available)) = sr.gauges.find {
case (_ +: Seq("loadbalancer", "available"), value) => true
case _ => false
}
val Some((_, leaseDuration)) = sr.gauges.find {
case (_ +: Seq("current_lease_ms"), value) => true
case _ => false
}
val leaseCtr: () => Int = { () =>
val Some((_, ctr)) = sr.counters.find {
case (_ +: Seq("leased"), value) => true
case _ => false
}
ctr
}
def format(duration: Duration): Float = duration.inMilliseconds.toFloat
eventually { assert(leaseDuration() === format(Time.Top - Time.now)) }
eventually { assert(available() === 1) }
lessor.list.foreach(_.issue(Tlease.MinLease))
eventually { assert(leaseCtr() === 1) }
ctl.advance(2.seconds) // must advance time to re-lease and expire
eventually { assert(leaseDuration() === format(Tlease.MinLease - 2.seconds)) }
eventually { assert(available() === 0) }
lessor.list.foreach(_.issue(Tlease.MaxLease))
eventually { assert(leaseCtr() === 2) }
eventually { assert(leaseDuration() === format(Tlease.MaxLease)) }
eventually { assert(available() === 1) }
Closable.sequence(Await.result(fclient), server, factory).close()
}
}
}
| jamescway/finagle | finagle-mux/src/test/scala/com/twitter/finagle/mux/EndToEndTest.scala | Scala | apache-2.0 | 6,066 |