diff --git a/CHANGELOG.md b/CHANGELOG.md index f2335f5d9d..ffb54d4d8e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,10 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.20.0] - 2020-06-30 +### Added +- Partial Windows support (see known issues in `docs/install.md`) + ## [0.19.0] - 2020-06-28 ### Changed - Improving Windows support by removing Hadoop FS dependencies diff --git a/core.coordinator/src/main/resources/log4j2.xml b/core.coordinator/src/main/resources/log4j2.xml index 0cd48a58ad..50d8e21b0b 100644 --- a/core.coordinator/src/main/resources/log4j2.xml +++ b/core.coordinator/src/main/resources/log4j2.xml @@ -11,7 +11,7 @@ - + diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/Kamu.scala b/core.coordinator/src/main/scala/dev/kamu/cli/Kamu.scala index 7f04ed43e9..492304fad9 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/Kamu.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/Kamu.scala @@ -19,12 +19,14 @@ import dev.kamu.cli.metadata.MetadataRepository import dev.kamu.cli.output._ import dev.kamu.core.utils.fs._ import dev.kamu.core.utils.{Clock, DockerClient} -import org.apache.logging.log4j.Level +import org.apache.logging.log4j.{Level, LogManager} class Kamu( config: KamuConfig, systemClock: Clock ) { + val logger = LogManager.getLogger(getClass.getName) + val workspaceLayout = WorkspaceLayout( kamuRootDir = config.kamuRoot, metadataDir = config.kamuRoot / "datasets", @@ -32,6 +34,8 @@ class Kamu( localVolumeDir = config.localVolume ).toAbsolute + logger.debug("Workspace root: {}", workspaceLayout.kamuRootDir) + val metadataRepository = new MetadataRepository(workspaceLayout, systemClock) diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/KamuApp.scala b/core.coordinator/src/main/scala/dev/kamu/cli/KamuApp.scala index b0b0a62f92..5a53480c63 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/KamuApp.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/KamuApp.scala @@ -32,7 +32,7 @@ object KamuApp extends App { val cliArgs = new CliArgs(args) Configurator.setLevel( - getClass.getPackage.getName, + "dev.kamu", if (cliArgs.debug()) Level.ALL else cliArgs.logLevel() ) diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/commands/SQLServerCommand.scala b/core.coordinator/src/main/scala/dev/kamu/cli/commands/SQLServerCommand.scala index 064f33322e..7c6b136bca 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/commands/SQLServerCommand.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/commands/SQLServerCommand.scala @@ -36,12 +36,15 @@ class SQLServerCommand( // TODO: Avoid Thrift exception when testing the port val hostPort = livyProcess.waitForHostPort(containerPort, 15 seconds) - logger.info(s"Server is running at: jdbc:hive2://localhost:$hostPort") + logger.info( + s"Server is running at: jdbc:hive2://${dockerClient.getDockerHost}:${hostPort}" + ) try { livyProcess.join() } finally { - livyProcess.kill() + livyProcess.stop() + livyProcess.join() } } } diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/commands/SQLShellCommand.scala b/core.coordinator/src/main/scala/dev/kamu/cli/commands/SQLShellCommand.scala index c7e7fdc32a..b83514f7cf 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/commands/SQLShellCommand.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/commands/SQLShellCommand.scala @@ -112,13 +112,15 @@ class 
SQLShellCommand( ) try { - val hostPort = livyProcess.waitForHostPort(containerPort, 15 seconds) - val livyUrl = URI.create(s"jdbc:hive2://localhost:$hostPort") + val hostPort = livyProcess.waitForHostPort(containerPort, 60 seconds) + val livyUrl = + URI.create(s"jdbc:hive2://${dockerClient.getDockerHost}:${hostPort}") logger.debug(s"Resolved Livy URL: $livyUrl") body(livyUrl) } finally { - livyProcess.kill() + livyProcess.stop() + livyProcess.join() } } } diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/external/DockerImages.scala b/core.coordinator/src/main/scala/dev/kamu/cli/external/DockerImages.scala index f8f5ab80ec..ea9c3e9a9d 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/external/DockerImages.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/external/DockerImages.scala @@ -9,8 +9,8 @@ package dev.kamu.cli.external object DockerImages { - val SPARK = "kamudata/engine-spark:0.6.0" - val FLINK = "kamudata/engine-flink:0.4.0" + val SPARK = "kamudata/engine-spark:0.7.0" + val FLINK = "kamudata/engine-flink:0.5.0" val LIVY = SPARK val JUPYTER = "kamudata/jupyter-uber:0.0.1" diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/external/JupyterDockerProcessBuilder.scala b/core.coordinator/src/main/scala/dev/kamu/cli/external/JupyterDockerProcessBuilder.scala index ab2343e47a..dbb69742e2 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/external/JupyterDockerProcessBuilder.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/external/JupyterDockerProcessBuilder.scala @@ -17,7 +17,8 @@ import dev.kamu.core.utils.{ DockerClient, DockerProcess, DockerProcessBuilder, - DockerRunArgs + DockerRunArgs, + OS } import org.apache.logging.log4j.LogManager @@ -57,24 +58,25 @@ class JupyterDockerProcessBuilder( // TODO: avoid this by setting up correct user inside the container def chown(): Unit = { - logger.debug("Fixing file ownership") - - val unix = new com.sun.security.auth.module.UnixSystem() - val shellCommand = Seq( - "chown", - "-R", - s"${unix.getUid}:${unix.getGid}", - "/opt/workdir" - ) + if (!OS.isWindows) { + logger.debug("Fixing file ownership") + + val shellCommand = Seq( + "chown", + "-R", + s"${OS.uid}:${OS.gid}", + "/opt/workdir" + ) - dockerClient.runShell( - DockerRunArgs( - image = runArgs.image, - volumeMap = - Map(Paths.get("").toAbsolutePath -> Paths.get("/opt/workdir")) - ), - shellCommand - ) + dockerClient.runShell( + DockerRunArgs( + image = runArgs.image, + volumeMap = + Map(Paths.get("").toAbsolutePath -> Paths.get("/opt/workdir")) + ), + shellCommand + ) + } } } @@ -139,23 +141,26 @@ class JupyterDockerProcess( } def openBrowserWhenReady(): Unit = { - if (Desktop.isDesktopSupported && Desktop.getDesktop.isSupported( - Desktop.Action.BROWSE - )) { - val browserOpenerThread = new Thread { - override def run(): Unit = { - val token = waitForToken() - - val hostPort = getHostPort(80).get - val uri = URI.create(s"http://localhost:$hostPort/?token=$token") - - logger.info(s"Opening in browser: $uri") - Desktop.getDesktop.browse(uri) - } - } + if (!Desktop.isDesktopSupported || + !Desktop.getDesktop.isSupported(Desktop.Action.BROWSE)) + return + + val browserOpenerThread = new Thread { + override def run(): Unit = { + val token = waitForToken() + + val hostPort = getHostPort(80).get + val uri = URI.create( + s"http://${dockerClient.getDockerHost}:$hostPort/?token=$token" + ) - browserOpenerThread.setDaemon(true) - browserOpenerThread.start() + logger.info(s"Opening in browser: $uri") + Desktop.getDesktop.browse(uri) + } } + + 
browserOpenerThread.setDaemon(true) + browserOpenerThread.start() + } } diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/external/NotebookRunnerDocker.scala b/core.coordinator/src/main/scala/dev/kamu/cli/external/NotebookRunnerDocker.scala index 4100ac3dfb..4065e35f05 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/external/NotebookRunnerDocker.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/external/NotebookRunnerDocker.scala @@ -41,10 +41,14 @@ class NotebookRunnerDocker( var jupyterProcess: JupyterDockerProcess = null def stopAll(): Unit = { - if (livyProcess != null) - livyProcess.kill() - if (jupyterProcess != null) - jupyterProcess.kill() + if (livyProcess != null) { + livyProcess.stop() + livyProcess.join() + } + if (jupyterProcess != null) { + jupyterProcess.stop() + jupyterProcess.join() + } } Signal.handle(new Signal("INT"), new SignalHandler { diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/ingest/IngestService.scala b/core.coordinator/src/main/scala/dev/kamu/cli/ingest/IngestService.scala index ca9d9d7b4e..23c7dcb062 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/ingest/IngestService.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/ingest/IngestService.scala @@ -283,13 +283,16 @@ class IngestService( prepDataPath: Path, vocabulary: DatasetVocabulary ): MetadataBlock = { + val layout = metadataRepository.getDatasetLayout(datasetID) + val request = IngestRequest( datasetID = datasetID, - source = source, - datasetLayout = metadataRepository.getDatasetLayout(datasetID), - dataToIngest = prepDataPath, + ingestPath = prepDataPath.toString, eventTime = eventTime, - datasetVocab = vocabulary + source = source, + datasetVocab = vocabulary, + dataDir = layout.dataDir.toString, + checkpointsDir = layout.checkpointsDir.toString ) val engine = diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/metadata/MetadataRepository.scala b/core.coordinator/src/main/scala/dev/kamu/cli/metadata/MetadataRepository.scala index c596abca01..0374d83697 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/metadata/MetadataRepository.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/metadata/MetadataRepository.scala @@ -226,7 +226,7 @@ class MetadataRepository( layout.checkpointsDir, layout.metadataDir, workspaceLayout.metadataDir.resolve(id.toString) - ).foreach(p => File(p).delete()) + ).foreach(p => File(p).delete(true)) } //////////////////////////////////////////////////////////////////////////// diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/transform/EngineUtils.scala b/core.coordinator/src/main/scala/dev/kamu/cli/transform/EngineUtils.scala new file mode 100644 index 0000000000..28a09ee07b --- /dev/null +++ b/core.coordinator/src/main/scala/dev/kamu/cli/transform/EngineUtils.scala @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2018 kamu.dev + * + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
+ */ + +package dev.kamu.cli.transform + +import java.nio.file.{Path, Paths} + +import dev.kamu.core.utils.OS + +trait EngineUtils { + ///////////////////////////////////////////////////////////////////////////// + // Path mappings between host and container + ///////////////////////////////////////////////////////////////////////////// + + protected val volumeDirInContainer: String = "/opt/engine/volume" + protected val inOutDirInContainer: String = "/opt/engine/in-out" + + protected def isSubPathOf(p: Path, parent: Path): Boolean = { + var pp = p.getParent + while (pp != null) { + if (pp == parent) + return true + pp = pp.getParent + } + false + } + + protected def toContainerPath(ps: String, volumePath: Path): String = { + val p = Paths.get(ps).normalize().toAbsolutePath + val rel = volumePath.relativize(p) + val x = if (!OS.isWindows) { + Paths.get(volumeDirInContainer).resolve(rel).toString + } else { + volumeDirInContainer + "/" + rel.toString.replace("\\", "/") + } + println(s"Mapped path $ps to $x") + x + } +} diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/transform/FlinkEngine.scala b/core.coordinator/src/main/scala/dev/kamu/cli/transform/FlinkEngine.scala index 63d84c5ab5..bc27452c38 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/transform/FlinkEngine.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/transform/FlinkEngine.scala @@ -26,13 +26,14 @@ import dev.kamu.core.manifests.infra.{ IngestResult } import dev.kamu.core.utils.fs._ -import dev.kamu.core.utils.Temp import dev.kamu.core.utils.{ DockerClient, DockerProcessBuilder, DockerRunArgs, ExecArgs, - IOHandlerPresets + IOHandlerPresets, + OS, + Temp } import org.slf4j.LoggerFactory @@ -41,9 +42,13 @@ class FlinkEngine( dockerClient: DockerClient, image: String = DockerImages.FLINK, networkName: String = "kamu-flink" -) extends Engine { +) extends Engine + with EngineUtils { private val logger = LoggerFactory.getLogger(getClass) + private val engineJarInContainer = + Paths.get("/opt/engine/bin/engine.flink.jar") + override def ingest(request: IngestRequest): IngestResult = { throw new NotImplementedError() } @@ -51,19 +56,26 @@ class FlinkEngine( override def executeQuery( request: ExecuteQueryRequest ): ExecuteQueryResult = { - val inOutDirInContainer = Paths.get("/opt/engine/in-out") - val engineJarInContainer = Paths.get("/opt/engine/bin/engine.flink.jar") - - val workspaceVolumes = - Seq(workspaceLayout.kamuRootDir, workspaceLayout.localVolumeDir) - .filter(p => File(p).exists) - .map(p => (p, p)) - .toMap + val workspaceVolumes = Map( + workspaceLayout.localVolumeDir -> Paths.get(volumeDirInContainer) + ) Temp.withRandomTempDir( "kamu-inout-" ) { inOutDir => - yaml.save(Manifest(request), inOutDir / "request.yaml") + val newRequest = + request.copy( + checkpointsDir = toContainerPath( + request.checkpointsDir, + workspaceLayout.localVolumeDir + ), + dataDirs = request.dataDirs.map { + case (k, v) => + (k, toContainerPath(v, workspaceLayout.localVolumeDir)) + } + ) + + yaml.save(Manifest(newRequest), inOutDir / "request.yaml") dockerClient.withNetwork(networkName) { @@ -81,7 +93,7 @@ class FlinkEngine( exposePorts = List(6123, 8081), network = Some(networkName), volumeMap = Map( - inOutDir -> inOutDirInContainer + inOutDir -> Paths.get(inOutDirInContainer) ) ++ workspaceVolumes ) ).run(Some(IOHandlerPresets.redirectOutputTagged("jobmanager: "))) @@ -104,7 +116,10 @@ class FlinkEngine( jobManager.waitForHostPort(8081, 15 seconds) val prevSavepoint = getPrevSavepoint(request) - val savepointArgs 
= prevSavepoint.map(p => s"-s $p").getOrElse("") + val savepointArgs = prevSavepoint + .map(p => toContainerPath(p.toString, workspaceLayout.localVolumeDir)) + .map(p => s"-s $p") + .getOrElse("") try { val exitCode = dockerClient @@ -127,23 +142,24 @@ class FlinkEngine( commitSavepoint(prevSavepoint) } finally { - logger.debug("Fixing file ownership") - - val unix = new com.sun.security.auth.module.UnixSystem() - val chownCmd = s"chown -R ${unix.getUid}:${unix.getGid} " + workspaceVolumes.values - .map(_.toUri.getPath) - .mkString(" ") - - dockerClient - .exec( - ExecArgs(), - jobManager.containerName, - Seq("bash", "-c", chownCmd) - ) - .! + if (!OS.isWindows) { + logger.debug("Fixing file ownership") + + dockerClient + .exec( + ExecArgs(), + jobManager.containerName, + Seq( + "bash", + "-c", + s"chown -R ${OS.uid}:${OS.gid} $volumeDirInContainer" + ) + ) + .! + } - taskManager.kill() - jobManager.kill() + taskManager.stop() + jobManager.stop() taskManager.join() jobManager.join() @@ -155,13 +171,12 @@ class FlinkEngine( } protected def getPrevSavepoint(request: ExecuteQueryRequest): Option[Path] = { - val checkpointsDir = - request.datasetLayouts(request.datasetID.toString).checkpointsDir + val checkpointsDir = File(request.checkpointsDir) - if (!File(checkpointsDir).exists) + if (!checkpointsDir.exists) return None - val allSavepoints = File(checkpointsDir).list + val allSavepoints = checkpointsDir.list .filter(_.isDirectory) .toList diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/transform/SparkEngine.scala b/core.coordinator/src/main/scala/dev/kamu/cli/transform/SparkEngine.scala index 389b280c38..4762ca12a7 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/transform/SparkEngine.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/transform/SparkEngine.scala @@ -11,22 +11,20 @@ package dev.kamu.cli.transform import java.io.OutputStream import java.nio.file.{Path, Paths} -import better.files.File import pureconfig.generic.auto._ import dev.kamu.core.manifests.parsing.pureconfig.yaml import dev.kamu.core.manifests.parsing.pureconfig.yaml.defaults._ import dev.kamu.cli.WorkspaceLayout import dev.kamu.cli.external.DockerImages -import dev.kamu.core.manifests.{FetchSourceKind, Manifest} +import dev.kamu.core.manifests.Manifest import dev.kamu.core.manifests.infra.{ ExecuteQueryRequest, ExecuteQueryResult, IngestRequest, IngestResult } -import dev.kamu.core.utils.{DockerClient, DockerRunArgs} +import dev.kamu.core.utils.{DockerClient, DockerRunArgs, OS, Temp} import dev.kamu.core.utils.fs._ -import dev.kamu.core.utils.Temp import org.apache.commons.io.IOUtils import org.apache.logging.log4j.{Level, LogManager, Logger} @@ -35,31 +33,28 @@ class SparkEngine( logLevel: Level, dockerClient: DockerClient, image: String = DockerImages.SPARK -) extends Engine { - protected var logger: Logger = LogManager.getLogger(getClass.getName) +) extends Engine + with EngineUtils { + protected val logger: Logger = LogManager.getLogger(getClass.getName) override def ingest(request: IngestRequest): IngestResult = { Temp.withRandomTempDir( "kamu-inout-" ) { inOutDir => - yaml.save(Manifest(request), inOutDir / "request.yaml") - - // TODO: Account for missing files - val extraMounts = request.source.fetch match { - case furl: FetchSourceKind.Url => - furl.url.getScheme match { - case "file" | null => List(Paths.get(furl.url)) - case _ => List.empty - } - case glob: FetchSourceKind.FilesGlob => - List(glob.path.getParent) - case _ => - throw new RuntimeException( - s"Unsupported fetch kind: 
${request.source.fetch}" - ) - } + val newRequest = request.copy( + ingestPath = toContainerPath( + request.ingestPath, + workspaceLayout.localVolumeDir + ), + dataDir = toContainerPath( + request.dataDir, + workspaceLayout.localVolumeDir + ) + ) + + yaml.save(Manifest(newRequest), inOutDir / "request.yaml") - submit("dev.kamu.engine.spark.ingest.IngestApp", inOutDir, extraMounts) + submit("dev.kamu.engine.spark.ingest.IngestApp", inOutDir) yaml.load[Manifest[IngestResult]](inOutDir / "result.yaml").content } @@ -71,43 +66,39 @@ class SparkEngine( Temp.withRandomTempDir( "kamu-inout-" ) { inOutDir => - yaml.save(Manifest(request), inOutDir / "request.yaml") + val newRequest = + request.copy( + checkpointsDir = toContainerPath( + request.checkpointsDir, + workspaceLayout.localVolumeDir + ), + dataDirs = request.dataDirs.map { + case (k, v) => + (k, toContainerPath(v, workspaceLayout.localVolumeDir)) + } + ) - submit( - "dev.kamu.engine.spark.transform.TransformApp", - inOutDir, - Seq.empty - ) + yaml.save(Manifest(newRequest), inOutDir / "request.yaml") + + submit("dev.kamu.engine.spark.transform.TransformApp", inOutDir) yaml.load[Manifest[ExecuteQueryResult]](inOutDir / "result.yaml").content } } - protected def submit( - appClass: String, - inOutDir: Path, - extraMounts: Seq[Path] - ): Unit = { - val inOutDirInContainer = Paths.get("/opt/engine/in-out") - val engineJarInContainer = Paths.get("/opt/engine/bin/engine.spark.jar") - + protected def submit(appClass: String, inOutDir: Path): Unit = { Temp.withTempFile( "kamu-logging-cfg-", writeLog4jConfig ) { loggingConfigPath => val workspaceVolumes = - Seq(workspaceLayout.kamuRootDir, workspaceLayout.localVolumeDir) - .filter(p => File(p).exists) - .map(p => (p, p)) - .toMap + Map(workspaceLayout.localVolumeDir -> Paths.get(volumeDirInContainer)) val appVolumes = Map( loggingConfigPath -> Paths.get("/opt/spark/conf/log4j.properties"), - inOutDir -> inOutDirInContainer + inOutDir -> Paths.get(inOutDirInContainer) ) - val extraVolumes = extraMounts.map(p => (p, p)).toMap - val submitArgs = List( "/opt/spark/bin/spark-submit", "--master=local[4]", @@ -115,7 +106,7 @@ class SparkEngine( "--conf", "spark.sql.warehouse.dir=/opt/spark-warehouse", s"--class=$appClass", - engineJarInContainer.toUri.getPath + "/opt/engine/bin/engine.spark.jar" ) logger.info("Starting Spark job") @@ -124,28 +115,30 @@ class SparkEngine( dockerClient.runShell( DockerRunArgs( image = image, - volumeMap = workspaceVolumes ++ appVolumes ++ extraVolumes + volumeMap = workspaceVolumes ++ appVolumes ), submitArgs ) } finally { - // TODO: avoid this by setting up correct user inside the container - logger.debug("Fixing file ownership") - - val unix = new com.sun.security.auth.module.UnixSystem() - val chownArgs = Seq( - "chown", - "-R", - s"${unix.getUid}:${unix.getGid}" - ) ++ workspaceVolumes.values.map(_.toUri.getPath) + if (!OS.isWindows) { + // TODO: avoid this by setting up correct user inside the container + logger.debug("Fixing file ownership") + + val chownArgs = Seq( + "chown", + "-R", + s"${OS.uid}:${OS.gid}", + volumeDirInContainer + ) - dockerClient.runShell( - DockerRunArgs( - image = image, - volumeMap = workspaceVolumes - ), - chownArgs - ) + dockerClient.runShell( + DockerRunArgs( + image = image, + volumeMap = workspaceVolumes + ), + chownArgs + ) + } } } } @@ -166,4 +159,5 @@ class SparkEngine( outputStream.close() configStream.close() } + } diff --git a/core.coordinator/src/main/scala/dev/kamu/cli/transform/TransformService.scala 
b/core.coordinator/src/main/scala/dev/kamu/cli/transform/TransformService.scala index 0d07152cac..e6d7741cac 100644 --- a/core.coordinator/src/main/scala/dev/kamu/cli/transform/TransformService.scala +++ b/core.coordinator/src/main/scala/dev/kamu/cli/transform/TransformService.scala @@ -82,9 +82,17 @@ class TransformService( id => (id.toString, metadataRepository.getDatasetVocabulary(id)) ) .toMap, - datasetLayouts = allDatasets - .map(i => (i.toString, metadataRepository.getDatasetLayout(i))) - .toMap + dataDirs = allDatasets + .map( + i => + ( + i.toString, + metadataRepository.getDatasetLayout(i).dataDir.toString + ) + ) + .toMap, + checkpointsDir = + metadataRepository.getDatasetLayout(datasetID).checkpointsDir.toString ) val engine = engineFactory.getEngine(batch.source.transformEngine) diff --git a/core.coordinator/src/test/scala/dev/kamu/cli/MetadataRepositorySpec.scala b/core.coordinator/src/test/scala/dev/kamu/cli/MetadataRepositorySpec.scala index febcdc29cd..d1a976cf7d 100644 --- a/core.coordinator/src/test/scala/dev/kamu/cli/MetadataRepositorySpec.scala +++ b/core.coordinator/src/test/scala/dev/kamu/cli/MetadataRepositorySpec.scala @@ -110,7 +110,7 @@ class MetadataRepositorySpec extends FunSuite with Matchers with KamuTestBase { actual shouldEqual expected } finally { // stop the server - testHttpServer.kill(testServerName) + testHttpServer.stop(testServerName) } } } diff --git a/core.manifests b/core.manifests index 56938115c0..38f11f91ad 160000 --- a/core.manifests +++ b/core.manifests @@ -1 +1 @@ -Subproject commit 56938115c010349a4e689eedbde2324527f793b5 +Subproject commit 38f11f91ad19eb229a52bf6f575686f18d62e575 diff --git a/core.utils b/core.utils index af7a18f669..56907c73a1 160000 --- a/core.utils +++ b/core.utils @@ -1 +1 @@ -Subproject commit af7a18f66904dab116fe7b053abf4b6572ff67d7 +Subproject commit 56907c73a192333ad709b3e99dc2ed1677893914 diff --git a/docs/install.md b/docs/install.md index 76669872ff..5972065baf 100644 --- a/docs/install.md +++ b/docs/install.md @@ -4,7 +4,10 @@ - [General Information](#general-information) - [A Note on Security](#a-note-on-security) - [Supported Platforms](#supported-platforms) + - [Linux](#linux) - [MacOS X via Homebrew](#macos-x-via-homebrew) + - [Windows](#windows) + - [Known issues](#known-issues) - [Other platforms](#other-platforms) - [Installing shell completions](#installing-shell-completions) @@ -32,6 +35,10 @@ If there is anything else we can do to make you feel confident in using this tool. ## Supported Platforms +### Linux + +We don't have packages for various Linux flavors yet, but the project is being developed on Linux and supports it well. Simply install `docker` and `jre`, download the binary, `chmod +x` it, and link it into your preferred location on your `PATH`. + ### MacOS X via Homebrew Install [Docker for Mac](https://docs.docker.com/docker-for-mac/install/) and make sure it's running by executing some simple command like `docker ps`. @@ -49,6 +56,31 @@ brew install kamu kamu version ``` +### Windows + +Windows is partially supported; see [known issues](#known-issues). + +You should already have Java, but check `java -version` just in case. + +Install Docker for Windows. If you are using Docker Toolbox backed by VirtualBox, it's a good idea to give Docker's VM more CPU and RAM. Make sure that you can run `docker ps` successfully. + +Download the `kamu` binary and save it as `kamu.jar`. You should already be able to run it as `java -jar kamu.jar`. 
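+
+A quick way to confirm everything is wired up is to check each prerequisite in turn. This is a minimal sanity check, assuming `kamu.jar` is in your current directory:
+
+```sh
+java -version                # a JRE should be installed
+docker ps                    # the Docker daemon should be reachable
+java -jar kamu.jar version   # the kamu CLI itself should start
+```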
+ +If you decide to use `cygwin` as your shell, you can create the following launcher script and add it to your `PATH`: + +```sh +#!/bin/bash + +java -jar `cygpath -w /home/me/kamu/bin/kamu.jar` "$@" +``` + +#### Known issues +- Docker is quite slow under VirtualBox +- Default `cmd` shell doesn't understand ANSI colors +- SQL shell history doesn't work +- SQL shell arrow keys don't work +- SQL shell input/output formatting is sometimes off + ### Other platforms In case you didn't find your OS/distribution in this list, you can try downloading the `kamu` binary from our [releases page](https://github.com/kamu-data/kamu-cli/releases). It should run on most systems capable of running Java and Docker.
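
The Linux steps above generalize to most Unix-like systems. As a minimal sketch, assuming you saved the release binary as `kamu` in the current directory and want it available as `/usr/local/bin/kamu`:

```sh
chmod +x kamu                                # make the downloaded binary executable
sudo ln -s "$PWD/kamu" /usr/local/bin/kamu   # link it onto your PATH
kamu version                                 # sanity check
```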