
Add Kubernetes deployment scripts and manifests for HDFS (IME-, Lustre-, and NFS-backed) and for the HiBench benchmark pods

demo.masouros committed 4 years ago
commit 8c301bcb1f

+ 26 - 0
kube/hdfs/deploy.sh

@@ -0,0 +1,26 @@
+#!/bin/bash
+
+if [ -z "$1" ]
+then
+    for i in manifests/*nn*
+    do
+        echo "Deploying $i..."
+        kubectl create -f "$i"
+        sleep 2s
+    done
+
+    sleep 10s
+
+    for i in manifests/*dn*
+    do
+        echo "Deploying $i..."
+        kubectl create -f "$i"
+        sleep 2s
+    done
+elif [ "$1" == "clean" ]
+then
+    kubectl delete -f manifests
+fi
+
+
+
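Usage note: with no argument the script brings up the NameNode manifests first, waits, and then the DataNodes; with clean it deletes everything under manifests/. A minimal invocation, assuming it is run from kube/hdfs/ and that the iccs-hibench namespace referenced by the manifests already exists:

    cd kube/hdfs
    ./deploy.sh          # namenodes first, then datanodes
    ./deploy.sh clean    # delete every object defined under manifests/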

+ 58 - 0
kube/hdfs/manifests/ime-dn.yaml

@@ -0,0 +1,58 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: hdfs-datanode-ime
+  namespace: iccs-hibench
+  labels:
+    app: hdfs-datanode-ime
+spec:
+  ports:
+  - port: 50010
+    name: fs
+  clusterIP: None
+  selector:
+    app: hdfs-datanode-ime
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: hdfs-datanode-ime
+  namespace: iccs-hibench
+spec:
+  selector:
+    matchLabels:
+      app: hdfs-datanode-ime
+  serviceName: "hdfs-datanode-ime"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: hdfs-datanode-ime
+    spec:
+      containers:
+        - name: datanode
+          image: uhopper/hadoop-datanode:2.7.2
+          volumeMounts:
+          - name: dataset-ime
+            mountPath: /hadoop/dfs/data
+          env:
+            - name: CORE_CONF_fs_defaultFS
+              value: hdfs://hdfs-namenode-ime.iccs-hibench:8020
+          ports:
+          - containerPort: 50010
+            name: fs
+      restartPolicy: Always
+      volumes:
+      - name: dataset-ime
+        hostPath:
+          path: /ime/ime_masourod/hibench-hdfs
+          type: Directory
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: In
+                values:
+                - ns64
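A quick check after applying this manifest, to confirm the DataNode pod is Running and was pinned to ns64 by the node affinity rule (plain kubectl, names taken from the manifest above):

    kubectl -n iccs-hibench get pods -l app=hdfs-datanode-ime -o wide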

+ 56 - 0
kube/hdfs/manifests/ime-nn.yaml

@@ -0,0 +1,56 @@
+# A headless service to create DNS records.
+apiVersion: v1
+kind: Service
+metadata:
+  name: hdfs-namenode-ime
+  namespace: iccs-hibench
+  labels:
+    app: hdfs-namenode-ime
+spec:
+  ports:
+  - port: 8020
+    name: fs
+  - port: 50070
+    name: http
+  clusterIP: None
+  selector:
+    app: hdfs-namenode-ime
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: hdfs-namenode-ime
+  namespace: iccs-hibench
+spec:
+  selector:
+    matchLabels:
+      app: hdfs-namenode-ime
+  serviceName: "hdfs-namenode-ime"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: hdfs-namenode-ime
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: hdfs-namenode-ime
+          image: uhopper/hadoop-namenode:2.7.2
+          env:
+            - name: CLUSTER_NAME
+              value: hdfs-k8s
+          ports:
+          - containerPort: 8020
+            name: fs
+          - containerPort: 50070
+            name: http
+      restartPolicy: Always
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: In
+                values:
+                - ns64
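Since the service is headless (clusterIP: None), the NameNode ports are only reachable inside the cluster; a hedged way to inspect it from a workstation, using the pod name the StatefulSet produces (the second command assumes the hdfs binary is on the PATH in the uhopper image):

    kubectl -n iccs-hibench port-forward hdfs-namenode-ime-0 50070:50070
    # then open http://localhost:50070 for the NameNode status page
    kubectl -n iccs-hibench exec hdfs-namenode-ime-0 -- hdfs dfsadmin -report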

+ 58 - 0
kube/hdfs/manifests/lustre-dn.yaml

@@ -0,0 +1,58 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: hdfs-datanode-lustre
+  namespace: iccs-hibench
+  labels:
+    app: hdfs-datanode-lustre
+spec:
+  ports:
+  - port: 50010
+    name: fs
+  clusterIP: None
+  selector:
+    app: hdfs-datanode-lustre
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: hdfs-datanode-lustre
+  namespace: iccs-hibench
+spec:
+  selector:
+    matchLabels:
+      app: hdfs-datanode-lustre
+  serviceName: "hdfs-datanode-lustre"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: hdfs-datanode-lustre
+    spec:
+      containers:
+        - name: datanode
+          image: uhopper/hadoop-datanode:2.7.2
+          volumeMounts:
+          - name: dataset-lustre
+            mountPath: /hadoop/dfs/data
+          env:
+            - name: CORE_CONF_fs_defaultFS
+              value: hdfs://hdfs-namenode-lustre.iccs-hibench:8020
+          ports:
+          - containerPort: 50010
+            name: fs
+      restartPolicy: Always
+      volumes:
+      - name: dataset-lustre
+        hostPath:
+          path: /fslustre/evolve-data/hibench-hdfs
+          type: Directory
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: In
+                values:
+                - ns51

+ 56 - 0
kube/hdfs/manifests/lustre-nn.yaml

@@ -0,0 +1,56 @@
+# A headless service to create DNS records.
+apiVersion: v1
+kind: Service
+metadata:
+  name: hdfs-namenode-lustre
+  namespace: iccs-hibench
+  labels:
+    app: hdfs-namenode-lustre
+spec:
+  ports:
+  - port: 8020
+    name: fs
+  - port: 50070
+    name: http
+  clusterIP: None
+  selector:
+    app: hdfs-namenode-lustre
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: hdfs-namenode-lustre
+  namespace: iccs-hibench
+spec:
+  selector:
+    matchLabels:
+      app: hdfs-namenode-lustre
+  serviceName: "hdfs-namenode-lustre"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: hdfs-namenode-lustre
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: hdfs-namenode-lustre
+          image: uhopper/hadoop-namenode:2.7.2
+          env:
+            - name: CLUSTER_NAME
+              value: hdfs-k8s
+          ports:
+          - containerPort: 8020
+            name: fs
+          - containerPort: 50070
+            name: http
+      restartPolicy: Always
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: In
+                values:
+                - ns51

+ 58 - 0
kube/hdfs/manifests/nfs-dn.yaml

@@ -0,0 +1,58 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: hdfs-datanode-nfs
+  namespace: iccs-hibench
+  labels:
+    app: hdfs-datanode-nfs
+spec:
+  ports:
+  - port: 50010
+    name: fs
+  clusterIP: None
+  selector:
+    app: hdfs-datanode-nfs
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: hdfs-datanode-nfs
+  namespace: iccs-hibench
+spec:
+  selector:
+    matchLabels:
+      app: hdfs-datanode-nfs
+  serviceName: "hdfs-datanode-nfs"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: hdfs-datanode-nfs
+    spec:
+      containers:
+        - name: datanode
+          image: uhopper/hadoop-datanode:2.7.2
+          volumeMounts:
+          - name: dataset-nfs
+            mountPath: /hadoop/dfs/data
+          env:
+            - name: CORE_CONF_fs_defaultFS
+              value: hdfs://hdfs-namenode-nfs.iccs-hibench:8020
+          ports:
+          - containerPort: 50010
+            name: fs
+      restartPolicy: Always
+      volumes:
+      - name: dataset-nfs
+        hostPath:
+          path: /home_nfs/home_masourod/hibench-hdfs
+          type: Directory
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: In
+                values:
+                - ns50

+ 56 - 0
kube/hdfs/manifests/nfs-nn.yaml

@@ -0,0 +1,56 @@
+# A headless service to create DNS records.
+apiVersion: v1
+kind: Service
+metadata:
+  name: hdfs-namenode-nfs
+  namespace: iccs-hibench
+  labels:
+    app: hdfs-namenode-nfs
+spec:
+  ports:
+  - port: 8020
+    name: fs
+  - port: 50070
+    name: http
+  clusterIP: None
+  selector:
+    app: hdfs-namenode-nfs
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: hdfs-namenode-nfs
+  namespace: iccs-hibench
+spec:
+  selector:
+    matchLabels:
+      app: hdfs-namenode-nfs
+  serviceName: "hdfs-namenode-nfs"
+  replicas: 1
+  template:
+    metadata:
+      labels:
+        app: hdfs-namenode-nfs
+    spec:
+      terminationGracePeriodSeconds: 0
+      containers:
+        - name: hdfs-namenode-nfs
+          image: uhopper/hadoop-namenode:2.7.2
+          env:
+            - name: CLUSTER_NAME
+              value: hdfs-k8s
+          ports:
+          - containerPort: 8020
+            name: fs
+          - containerPort: 50070
+            name: http
+      restartPolicy: Always
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.io/hostname
+                operator: In
+                values:
+                - ns50

+ 24 - 0
kube/hibench/conf/core-site-ime.xml

@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+    <configuration>
+        <property>
+            <name>fs.default.name</name>
+            <value>hdfs://hdfs-namenode-ime.iccs-hibench:8020</value>
+        </property>
+    </configuration>
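With this file mounted as core-site.xml in a client container (as the HiBench manifests below do), unqualified HDFS paths resolve against the IME-backed NameNode. A short sketch, assuming the Hadoop binaries are on the PATH inside that container:

    hdfs dfs -mkdir -p /HiBench
    hdfs dfs -ls /                                              # uses the default FS from core-site.xml
    hdfs dfs -ls hdfs://hdfs-namenode-ime.iccs-hibench:8020/    # equivalent, fully qualified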

+ 24 - 0
kube/hibench/conf/core-site-lustre.xml

@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+    <configuration>
+        <property>
+            <name>fs.default.name</name>
+            <value>hdfs://hdfs-namenode-lustre.iccs-hibench:8020</value>
+        </property>
+    </configuration>

+ 24 - 0
kube/hibench/conf/core-site-nfs.xml

@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+    <configuration>
+        <property>
+            <name>fs.default.name</name>
+            <value>hdfs://hdfs-namenode-nfs.iccs-hibench:8020</value>
+        </property>
+    </configuration>

+ 14 - 0
kube/hibench/conf/hadoop-ime.conf

@@ -0,0 +1,14 @@
+# Hadoop home
+hibench.hadoop.home /usr/local/hadoop
+
+# The path of hadoop executable
+hibench.hadoop.executable ${hibench.hadoop.home}/bin/hadoop
+
+# Hadoop configuration directory
+hibench.hadoop.configure.dir ${hibench.hadoop.home}/etc/hadoop
+
+# The root HDFS path to store HiBench data
+hibench.hdfs.master hdfs://hdfs-namenode-ime.iccs-hibench:8020
+
+# Hadoop release provider. Supported value: apache, cdh5, hdp
+hibench.hadoop.release apache

+ 14 - 0
kube/hibench/conf/hadoop-lustre.conf

@@ -0,0 +1,14 @@
+# Hadoop home
+hibench.hadoop.home /usr/local/hadoop
+
+# The path of hadoop executable
+hibench.hadoop.executable ${hibench.hadoop.home}/bin/hadoop
+
+# Hadoop configuration directory
+hibench.hadoop.configure.dir ${hibench.hadoop.home}/etc/hadoop
+
+# The root HDFS path to store HiBench data
+hibench.hdfs.master hdfs://hdfs-namenode-lustre.iccs-hibench:8020
+
+# Hadoop release provider. Supported value: apache, cdh5, hdp
+hibench.hadoop.release apache

+ 14 - 0
kube/hibench/conf/hadoop-nfs.conf

@@ -0,0 +1,14 @@
+# Hadoop home
+hibench.hadoop.home /usr/local/hadoop
+
+# The path of hadoop executable
+hibench.hadoop.executable ${hibench.hadoop.home}/bin/hadoop
+
+# Hadoop configuration directory
+hibench.hadoop.configure.dir ${hibench.hadoop.home}/etc/hadoop
+
+# The root HDFS path to store HiBench data
+hibench.hdfs.master hdfs://hdfs-namenode-nfs.iccs-hibench:8020
+
+# Hadoop release provider. Supported value: apache, cdh5, hdp
+hibench.hadoop.release apache

+ 46 - 0
kube/hibench/conf/spark.conf

@@ -0,0 +1,46 @@
+# Spark home
+hibench.spark.home      /spark
+
+# Spark master
+#   standalone mode: spark://xxx:7077
+#   YARN mode: yarn-client
+hibench.spark.master    k8s://https://172.9.0.240:6443
+
+# executor number and cores when running on Yarn
+hibench.yarn.executor.num     1
+hibench.yarn.executor.cores   4
+
+# executor and driver memory in standalone & YARN mode
+spark.executor.memory  4g
+spark.driver.memory    4g
+
+# set spark parallelism property according to hibench's parallelism value
+spark.default.parallelism     ${hibench.default.map.parallelism}
+
+# set spark sql's default shuffle partitions according to hibench's parallelism value
+spark.sql.shuffle.partitions  ${hibench.default.shuffle.parallelism}
+
+#======================================================
+# Spark Streaming
+#======================================================
+# Spark Streaming batch interval in milliseconds (default: 100)
+hibench.streambench.spark.batchInterval          100
+
+# Number of nodes that will receive kafka input (default: 4)
+hibench.streambench.spark.receiverNumber        4
+
+# Indicate RDD storage level. (default: 2)
+# 0 = StorageLevel.MEMORY_ONLY
+# 1 = StorageLevel.MEMORY_AND_DISK_SER
+# other = StorageLevel.MEMORY_AND_DISK_SER_2
+hibench.streambench.spark.storageLevel 2
+
+# Indicate whether to test the new write-ahead log feature (default: false)
+hibench.streambench.spark.enableWAL false
+
+# If enableWAL is true, the HDFS path used to checkpoint the streaming context must be specified; if false, it can be empty (default: /var/tmp)
+hibench.streambench.spark.checkpointPath /var/tmp
+
+# Whether to use the direct approach or not (default: true)
+hibench.streambench.spark.useDirectMode true
+
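The k8s:// master URL means HiBench's run scripts end up invoking spark-submit in Kubernetes mode. A hedged sketch of the kind of submission this configuration implies; the container image, the application jar, and any service account are placeholders rather than part of this commit, and additional spark.kubernetes.* options are usually needed:

    /spark/bin/spark-submit \
      --master k8s://https://172.9.0.240:6443 \
      --deploy-mode cluster \
      --conf spark.executor.memory=4g \
      --conf spark.driver.memory=4g \
      --conf spark.kubernetes.namespace=iccs-hibench \
      --conf spark.kubernetes.container.image=<spark-image> \
      <application-jar>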

+ 33 - 0
kube/hibench/deploy.sh

@@ -0,0 +1,33 @@
+#!/bin/bash
+
+if [ -z "$1" ]
+then
+    for i in conf/*
+    do
+        name=$(basename "$i" | awk -F'.' '{print $1}')
+        echo "Creating configmap $name..."
+        kubectl -n iccs-hibench create configmap "$name" --from-file "$i"
+    done
+    echo "Creating configmap kube-config"
+    kubectl -n iccs-hibench create configmap kube-config --from-file ~/.kube/config
+    sleep 2
+    for i in manifests/*
+    do
+        echo "Deploying $i..."
+        kubectl -n iccs-hibench create -f "$i"
+        sleep 2
+    done
+
+elif [ "$1" == "clean" ]
+then
+    kubectl delete -n iccs-hibench -f manifests
+    for i in conf/*
+    do
+        name=$(basename "$i" | awk -F'.' '{print $1}')
+        kubectl -n iccs-hibench delete configmap "$name"
+    done
+    kubectl -n iccs-hibench delete configmap kube-config
+fi
+
+
+
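Usage mirrors the HDFS script: with no argument it turns each file under conf/ into a configmap named after the file's basename (core-site-nfs.xml becomes core-site-nfs), publishes the local kubeconfig as kube-config, and then applies the manifests; with clean it removes all of them. Assuming it is run from kube/hibench/:

    cd kube/hibench
    ./deploy.sh          # configmaps from conf/, then the HiBench pods
    ./deploy.sh clean    # delete the pods and the configmaps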

+ 52 - 0
kube/hibench/manifests/hibench-ime.yaml

@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: hibench-master-ime
+  labels:
+    app: hibench-master-ime
+spec:
+    containers:
+    - name: hibench-master-ime
+      image: pl4tinum/hibench:spark-3.0-hadoop-2.7-scala-2.12
+      imagePullPolicy: Always
+      command:
+      - sleep
+      - "3600"
+      volumeMounts:
+      - name: core-site
+        mountPath: /usr/local/hadoop/etc/hadoop/core-site.xml
+        subPath: core-site.xml
+      - name: hadoop-conf
+        mountPath: /HiBench/conf/hadoop.conf
+        subPath: hadoop.conf
+      - name: spark-conf
+        mountPath: /HiBench/conf/spark.conf
+        subPath: spark.conf
+    volumes:
+    - name: core-site
+      configMap:
+        name: core-site-ime
+        items:
+        - key: core-site-ime.xml
+          path: core-site.xml
+    - name: hadoop-conf
+      configMap:
+        name: hadoop-ime
+        items:
+        - key: hadoop-ime.conf
+          path: hadoop.conf       
+    - name: spark-conf
+      configMap:
+        name: spark
+        items:
+        - key: spark.conf
+          path: spark.conf      
+    affinity:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+          - matchExpressions:
+            - key: kubernetes.io/hostname
+              operator: In
+              values:
+              - ns65

+ 52 - 0
kube/hibench/manifests/hibench-lustre.yaml

@@ -0,0 +1,52 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: hibench-master-lustre
+  labels:
+    app: hibench-master-lustre
+spec:
+    containers:
+    - name: hibench-master-lustre
+      image: pl4tinum/hibench:spark-3.0-hadoop-2.7-scala-2.12
+      imagePullPolicy: Always
+      command:
+      - sleep
+      - "3600"
+      volumeMounts:
+      - name: core-site
+        mountPath: /usr/local/hadoop/etc/hadoop/core-site.xml
+        subPath: core-site.xml
+      - name: hadoop-conf
+        mountPath: /HiBench/conf/hadoop.conf
+        subPath: hadoop.conf
+      - name: spark-conf
+        mountPath: /HiBench/conf/spark.conf
+        subPath: spark.conf
+    volumes:
+    - name: core-site
+      configMap:
+        name: core-site-lustre
+        items:
+        - key: core-site-lustre.xml
+          path: core-site.xml
+    - name: hadoop-conf
+      configMap:
+        name: hadoop-lustre
+        items:
+        - key: hadoop-lustre.conf
+          path: hadoop.conf       
+    - name: spark-conf
+      configMap:
+        name: spark
+        items:
+        - key: spark.conf
+          path: spark.conf      
+    affinity:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+          - matchExpressions:
+            - key: kubernetes.io/hostname
+              operator: In
+              values:
+              - ns65

+ 74 - 0
kube/hibench/manifests/hibench-nfs.yaml

@@ -0,0 +1,74 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: hibench-master-nfs-svc
+spec:
+  clusterIP: None
+  selector:
+    app: hibench-master-nfs
+  ports:
+    - protocol: TCP
+      port: 5000
+      targetPort: 3000
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: hibench-master-nfs
+  labels:
+    app: hibench-master-nfs
+spec:
+    containers:
+    - name: hibench-master-nfs
+      image: pl4tinum/hibench:spark-3.0-hadoop-2.7-scala-2.12
+      imagePullPolicy: Always
+      command:
+      - sleep
+      - "36000000"
+      volumeMounts:
+      - name: core-site
+        mountPath: /usr/local/hadoop/etc/hadoop/core-site.xml
+        subPath: core-site.xml
+      - name: hadoop-conf
+        mountPath: /HiBench/conf/hadoop.conf
+        subPath: hadoop.conf
+      - name: spark-conf
+        mountPath: /HiBench/conf/spark.conf
+        subPath: spark.conf
+      - name: kube-conf
+        mountPath: /root/.kube/config
+        subPath: config
+    volumes:
+    - name: core-site
+      configMap:
+        name: core-site-nfs
+        items:
+        - key: core-site-nfs.xml
+          path: core-site.xml
+    - name: hadoop-conf
+      configMap:
+        name: hadoop-nfs
+        items:
+        - key: hadoop-nfs.conf
+          path: hadoop.conf       
+    - name: spark-conf
+      configMap:
+        name: spark
+        items:
+        - key: spark.conf
+          path: spark.conf      
+    - name: kube-conf
+      configMap:
+        name: kube-config
+        items:
+        - key: config
+          path: config
+    affinity:
+      nodeAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+          nodeSelectorTerms:
+          - matchExpressions:
+            - key: kubernetes.io/hostname
+              operator: In
+              values:
+              - ns65
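Once the pod is Running, HiBench workloads are launched from inside it. The workload paths below follow the upstream HiBench layout and are an assumption about the pl4tinum/hibench image rather than something defined in this commit:

    kubectl -n iccs-hibench exec -it hibench-master-nfs -- bash
    # inside the container:
    cd /HiBench
    bin/workloads/micro/wordcount/prepare/prepare.sh
    bin/workloads/micro/wordcount/spark/run.sh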