scala
$ wget https://downloads.lightbend.com/scala/2.12.11/scala-2.12.11.tgz
$ tar -zxvf scala-2.12.11.tgz -C /usr/local
$ su hadoop
$ cd
$ vim ~/.bashrc
# scala
export SCALA_HOME=/usr/local/scala-2.12.11
export PATH=$PATH:$SCALA_HOME/bin
$ source ~/.bashrc
$ exit
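To confirm the installation, you can check the version on the command line (the reported version should be 2.12.11):

$ scala -version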
spark
$ wget https://mirrors.tuna.tsinghua.edu.cn/apache/spark/spark-2.4.6/spark-2.4.6-bin-without-hadoop.tgz
$ tar -zxvf spark-2.4.6-bin-without-hadoop.tgz -C /data
$ mv /data/spark-2.4.6-bin-without-hadoop/ /data/spark
$ chown -R hadoop.hadoop /data/spark/
$ su hadoop
Spark configuration files
$ cd /data/spark/conf
$ cp spark-env.sh.template spark-env.sh
$ cp spark-defaults.conf.template spark-defaults.conf
$ cp slaves.template slaves
spark-env.sh
$ vim spark-env.sh
export JAVA_HOME=/usr/local/jdk1.8.0_231
export SPARK_MASTER_PORT=7077
export SPARK_MASTER_WEBUI_PORT=18088
export SPARK_WORKER_WEBUI_PORT=18081
export SPARK_WORKER_CORES=2
export SPARK_WORKER_MEMORY=4096m
LD_LIBRARY_PATH=/data/hadoop/lib/native
SPARK_DIST_CLASSPATH=$(hadoop classpath)
export SPARK_MASTER_HOST
export SPARK_MASTER_PORT
export SPARK_WORKER_CORES
export SPARK_WORKER_MEMORY
export LD_LIBRARY_PATH
export SPARK_DIST_CLASSPATH
export SPARK_WORKER_INSTANCES
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export SPARK_HOME=/data/spark
export SPARK_WORKER_DIR=/data/spark/work
export SPARK_PID_DIR=/tmp
export SPARK_JAR=/data/spark/jars/*.jar
export PATH=$SPARK_HOME/bin:$PATH
export SPARK_CLASSPATH=$SPARK_CLASSPATH:/data/spark/jars/mysql-connector-java-5.1.49-bin.jar
export SPARK_DAEMON_JAVA_OPTS="-Dspark.deploy.recoveryMode=ZOOKEEPER -Dspark.deploy.zookeeper.url=192.168.233.17:2181,192.168.233.238:2181,192.168.233.157:2181 -Dspark.deploy.zookeeper.dir=/spark"
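The SPARK_DAEMON_JAVA_OPTS line is what enables master high availability: both masters register with the ZooKeeper quorum listed above, and the standby master takes over if the active one fails. This is why a standby master is started on the backup node later in this guide.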
spark-defaults.conf
$ vim spark-defaults.conf
spark.master                     spark://192.168.233.65:7077,192.168.233.94:7077
spark.eventLog.enabled           true
spark.eventLog.dir               hdfs://hadoop-test-cluster/logs
spark.serializer                 org.apache.spark.serializer.KryoSerializer
spark.driver.memory              1g
spark.executor.memory            2g
spark.executor.extraJavaOptions  -XX:+PrintGCDetails -Dkey=value -Dnumbers="one two three"

The history server options use shell syntax, so they belong in spark-env.sh rather than spark-defaults.conf:

export SPARK_HISTORY_OPTS="-Dspark.history.ui.port=18080 -Dspark.history.retainedApplications=30 -Dspark.history.fs.logDirectory=hdfs://hadoop-test-cluster/logs"
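With SPARK_HISTORY_OPTS in place, the history server can be started on one node once the HDFS log directory exists (it is created below with hdfs dfs -mkdir /logs). This is an optional step using Spark's standard sbin script:

$ /data/spark/sbin/start-history-server.sh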
slaves
$ vim slaves
192.168.233.17
192.168.233.238
192.168.233.157
Configure the environment on all nodes
$ vim ~/.bashrc
# spark
export SPARK_HOME=/data/spark
export PATH=$SPARK_HOME/bin:$PATH
$ source ~/.bashrc
$ hdfs dfs -mkdir /logs
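You can confirm the event-log directory was created (the exact listing depends on what else is already in the HDFS root):

$ hdfs dfs -ls /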
Sync the configuration files to all Spark nodes (for example with rsync, as sketched below)
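One way to do this, assuming Spark is already unpacked under /data/spark on every node and the hadoop user can SSH between them, is to push the conf directory to each of the other nodes:

$ for node in 192.168.233.94 192.168.233.17 192.168.233.238 192.168.233.157; do rsync -av /data/spark/conf/ hadoop@$node:/data/spark/conf/; done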
Start the master (and workers) on the primary node
$ /data/spark/sbin/start-all.sh
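If everything came up, jps (a standard JDK tool) should show a Master process on this node and a Worker process on each node listed in slaves:

$ jps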
Start the standby master on the backup node
$ /data/spark/sbin/start-master.sh
Test
$ spark-shell --master spark://192.168.233.65:7077 --executor-memory 500m --total-executor-cores 1
Setting default log level to "WARN".
To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
Spark context Web UI available at http://hadoop-test-2:4040
Spark context available as 'sc' (master = spark://192.168.233.65:7077, app id = app-20200618155948-0001).
Spark session available as 'spark'.
Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/ '_/
   /___/ .__/\_,_/_/ /_/\_\   version 2.4.6
      /_/

Using Scala version 2.11.12 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_231)
Type in expressions to have them evaluated.
Type :help for more information.

scala>
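As a quick functional check, you can run a small job from the scala> prompt. sc is the SparkContext the shell already created, so this distributes a trivial computation across the cluster:

scala> sc.parallelize(1 to 1000).reduce(_ + _)   // should return 500500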
Access the Web UI
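With the ports configured above, the master Web UI should be reachable at http://192.168.233.65:18088, the standby master at http://192.168.233.94:18088 (it should report STANDBY status while the primary is alive), the history server at port 18080, and the UI of the running spark-shell application at http://hadoop-test-2:4040 as printed in the output above.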