# default settings for Hadoop test
#test.disable.local.opensearch=true

# OpenSearch
#opensearch.nodes=localhost
#opensearch.nodes=http://84f306619beadeb16048ae2d4d652ac5.eu-west-1.aws.found.io:9200/
# by default this is set dynamically based on the port used by the embedded OpenSearch instance
#opensearch.port=9200
#opensearch.port=9500
opensearch.batch.size.bytes=1kb
#opensearch.nodes.client.only=true
#opensearch.nodes.wan.only=true

# Minimal Security
opensearch.net.http.auth.user=admin_user
opensearch.net.http.auth.pass=admin-password

# put pressure on the bulk API
opensearch.batch.size.entries=3
opensearch.batch.write.retry.wait=1s

mapred.max.split.size=134217728
#mapreduce.local.map.tasks.maximum=10

# MapReduce
fs.default.name=file:///
mapred.job.tracker=local

# Locally Deployed MR
#fs.defaultFS=hdfs://localhost:9000
#mapreduce.framework.name=yarn

# Hive - only works if HDFS is set up as well
hive=local
# Locally Deployed Hive
#hive=jdbc:hive2://localhost:10000/

#fs.default.name=hdfs://sandbox:8020
#mapred.job.tracker=sandbox:50300

#opensearch.net.proxy.http.host=localhost
#opensearch.net.proxy.http.port=8080
#opensearch.net.proxy.http.user=test
#opensearch.net.proxy.http.pass=test

# Spark props
spark.master=local
spark.ui.enabled=false
spark.sql.warehouse.dir=/tmp/spark
spark.serializer=org.apache.spark.serializer.KryoSerializer
spark.executor.extraJavaOptions=-XX:MaxPermSize=256m
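
# Example override (sketch, with placeholder values): to run the tests against
# an external cluster instead of the embedded instance, uncomment and adapt the
# lines below. The host, user, and password here are hypothetical, not real
# endpoints or credentials; the property keys match the ones used above.
#opensearch.nodes=https://remote-opensearch.example.com:9200
#opensearch.nodes.wan.only=true
#opensearch.net.http.auth.user=test_user
#opensearch.net.http.auth.pass=test_password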