spark {
  # spark.master will be passed to each job's JobContext
  master = "yarn-client"

  jobserver {
    port = 8090
    jar-store-rootdir = /mnt/tmp/spark-jobserver/jars
    jobdao = spark.jobserver.io.JobFileDAO
    filedao {
      rootdir = /mnt/tmp/spark-jobserver/filedao/data
    }
  }

  # predefined Spark contexts
  contexts {
    ml-context {
      num-cpu-cores = 20    # Number of cores to allocate. Required.
      memory-per-node = 20G # Executor memory per node, -Xmx style eg 512m, 1G, etc.
      driver-memory = 8G
      driver-core = 2
    }
  }

  # universal context configuration. These settings can be overridden, see README.md
  context-settings {
    num-cpu-cores = 10    # Number of cores to allocate. Required.
    memory-per-node = 8g  # Executor memory per node, -Xmx style eg 512m, 1G, etc.
    #spark.executor.instances = 2

    # If you wish to pass any settings directly to the sparkConf as-is, add them here in passthrough,
    # such as hadoop connection settings that don't use the "spark." prefix
    passthrough {
      #es.nodes = "192.1.1.1"
    }
  }

  # This needs to match SPARK_HOME for cluster SparkContexts to be created successfully
  home = "/usr/lib/spark"
}
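As a minimal sketch of how this configuration gets exercised, a job submitted to the predefined ml-context implements spark-jobserver's SparkJob trait. The example below assumes the pre-0.7 job API (consistent with the JobFileDAO used above); the object name WordCountJob and the "input.string" parameter are illustrative, not part of the configuration itself.

import com.typesafe.config.Config
import org.apache.spark.SparkContext
import spark.jobserver.{SparkJob, SparkJobInvalid, SparkJobValid, SparkJobValidation}

import scala.util.Try

// Illustrative job: counts the words passed in the "input.string" job parameter.
object WordCountJob extends SparkJob {

  // Reject the job early if the required parameter is missing.
  override def validate(sc: SparkContext, config: Config): SparkJobValidation =
    Try(config.getString("input.string"))
      .map(_ => SparkJobValid)
      .getOrElse(SparkJobInvalid("missing input.string parameter"))

  // Runs on the SparkContext that the job server created from ml-context.
  override def runJob(sc: SparkContext, config: Config): Any =
    sc.parallelize(config.getString("input.string").split(" ").toSeq).countByValue()
}

The job jar would then be uploaded to the server's /jars/<appName> endpoint on port 8090, and the job started with a POST to /jobs?appName=<appName>&classPath=WordCountJob&context=ml-context so that it runs in the predefined context rather than an ad hoc one built from context-settings.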