
k8s-java-api: Creating a MySQL Cluster

I. Two ways to mount PVCs:

1. You can create the PVCs by hand, but each name has to match what the StatefulSet expects: <claim name>-<pod name> (with multiple replicas, the pod name is the StatefulSet name plus an ordinal starting from 0, e.g. mysql-0, so the claim becomes data-mysql-0). A minimal Java sketch of this appears after this list.

2. Or you can let volumeClaimTemplates create the PVCs automatically. I won't go into that here; there is plenty of material about it online.
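For option 1, here is a rough sketch of creating such a PVC with the official Kubernetes Java client (io.kubernetes:client-java). The namespace "default" and the exact method signature (taken from the 11.x generated API; newer releases rename some model classes and use a fluent .execute() style) are assumptions; the claim name data-mysql-0 and the storage class managed-nfs-storage come from the StatefulSet further down.

import io.kubernetes.client.custom.Quantity;
import io.kubernetes.client.openapi.ApiClient;
import io.kubernetes.client.openapi.Configuration;
import io.kubernetes.client.openapi.apis.CoreV1Api;
import io.kubernetes.client.openapi.models.V1ObjectMeta;
import io.kubernetes.client.openapi.models.V1PersistentVolumeClaim;
import io.kubernetes.client.openapi.models.V1PersistentVolumeClaimSpec;
import io.kubernetes.client.openapi.models.V1ResourceRequirements;
import io.kubernetes.client.util.Config;

import java.util.Collections;

public class CreatePvcManually {
    public static void main(String[] args) throws Exception {
        ApiClient client = Config.defaultClient();   // reads ~/.kube/config
        Configuration.setDefaultApiClient(client);
        CoreV1Api core = new CoreV1Api();

        // For volumeClaimTemplate "data" and pod "mysql-0",
        // the StatefulSet controller expects a PVC named "data-mysql-0".
        V1PersistentVolumeClaim pvc = new V1PersistentVolumeClaim()
                .metadata(new V1ObjectMeta().name("data-mysql-0").namespace("default"))
                .spec(new V1PersistentVolumeClaimSpec()
                        .accessModes(Collections.singletonList("ReadWriteOnce"))
                        .storageClassName("managed-nfs-storage")
                        .resources(new V1ResourceRequirements()
                                .requests(Collections.singletonMap("storage",
                                        Quantity.fromString("10Gi")))));

        // Signature from client-java 11.x (pretty, dryRun, fieldManager);
        // other client versions differ slightly.
        core.createNamespacedPersistentVolumeClaim("default", pvc, null, null, null);
    }
}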

II. Points to note

I created this MySQL cluster from Java by calling the apiserver, and the biggest problem I hit was that the embedded scripts would not execute. Here is how I worked around it:

1. Paste the script into Notepad++ and convert it: Edit -> EOL Conversion -> Unix format.

2. Paste the converted script straight into the command field and it will run. (The same normalization can also be done in Java, as shown below.)
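The root cause is Windows CRLF line endings inside the embedded shell scripts. If you assemble the manifests in Java instead of pasting by hand, you can normalize the endings in code before the script text goes into the container's command list. A minimal sketch (plain JDK only; the init-mysql.sh file name is just an illustration):

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

public class ScriptLineEndings {
    public static void main(String[] args) throws Exception {
        // Hypothetical script file; point this at wherever you keep the init script.
        String script = new String(
                Files.readAllBytes(Paths.get("init-mysql.sh")), StandardCharsets.UTF_8);

        // Windows editors save CRLF ("\r\n"); the stray "\r" makes bash inside the
        // container fail, which is exactly the "script does not execute" symptom.
        String unixScript = script.replace("\r\n", "\n").replace("\r", "\n");

        // unixScript can now be used as the last element of the container's
        // command list, e.g. ["bash", "-c", unixScript].
        System.out.println(unixScript);
    }
}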

Below are the YAML files involved:

Of course, the manifests from the official documentation can be used as-is:

Run a Replicated Stateful Application | Kubernetes: https://kubernetes.io/docs/tasks/run-application/run-replicated-stateful-application/

configmap:

apiVersion: v1
kind: ConfigMap
metadata:
  name: mysql
  labels:
    app: mysql
data:
  master.cnf: |
    # Apply this config only on the master.
    [mysqld]
    log-bin
  slave.cnf: |
    # Apply this config only on slaves.
    [mysqld]
    super-read-only

service:

# Headless service for stable DNS entries of StatefulSet members.
apiVersion: v1
kind: Service
metadata:
  name: mysql
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  clusterIP: None
  selector:
    app: mysql
---
# Client service for connecting to any MySQL instance for reads.
# For writes, you must instead connect to the master: mysql-0.mysql.
apiVersion: v1
kind: Service
metadata:
  name: mysql-read
  labels:
    app: mysql
spec:
  ports:
  - name: mysql
    port: 3306
  selector:
    app: mysql

statefulset:

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  serviceName: mysql
  replicas: 3
  template:
    metadata:
      labels:
        app: mysql
    spec:
      initContainers:
      - name: init-mysql
        image: mysql:5.7
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Generate mysql server-id from pod ordinal index.
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          echo [mysqld] > /mnt/conf.d/server-id.cnf
          # Add an offset to avoid reserved server-id=0 value.
          echo server-id=$((100 + $ordinal)) >> /mnt/conf.d/server-id.cnf
          # Copy appropriate conf.d files from config-map to emptyDir.
          if [[ $ordinal -eq 0 ]]; then
            cp /mnt/config-map/master.cnf /mnt/conf.d/
          else
            cp /mnt/config-map/slave.cnf /mnt/conf.d/
          fi
        volumeMounts:
        - name: conf
          mountPath: /mnt/conf.d
        - name: config-map
          mountPath: /mnt/config-map
      - name: clone-mysql
        image: ist0ne/xtrabackup
        command:
        - bash
        - "-c"
        - |
          set -ex
          # Skip the clone if data already exists.
          [[ -d /var/lib/mysql/mysql ]] && exit 0
          # Skip the clone on master (ordinal index 0).
          [[ `hostname` =~ -([0-9]+)$ ]] || exit 1
          ordinal=${BASH_REMATCH[1]}
          [[ $ordinal -eq 0 ]] && exit 0
          # Clone data from previous peer.
          ncat --recv-only mysql-$(($ordinal-1)).mysql 3307 | xbstream -x -C /var/lib/mysql
          # Prepare the backup.
          xtrabackup --prepare --target-dir=/var/lib/mysql
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
      containers:
      - name: mysql
        image: mysql:5.7
        env:
        - name: MYSQL_ALLOW_EMPTY_PASSWORD
          value: "1"
        ports:
        - name: mysql
          containerPort: 3306
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 500m
            memory: 1Gi
        livenessProbe:
          exec:
            command: ["mysqladmin", "ping"]
          initialDelaySeconds: 30
          periodSeconds: 10
          timeoutSeconds: 5
        readinessProbe:
          exec:
            # Check we can execute queries over TCP (skip-networking is off).
            command: ["mysql", "-h", "127.0.0.1", "-e", "SELECT 1"]
          initialDelaySeconds: 5
          periodSeconds: 2
          timeoutSeconds: 1
      - name: xtrabackup
        image: ist0ne/xtrabackup
        ports:
        - name: xtrabackup
          containerPort: 3307
        command:
        - bash
        - "-c"
        - |
          set -ex
          cd /var/lib/mysql

          # Determine binlog position of cloned data, if any.
          if [[ -f xtrabackup_slave_info && "x$(<xtrabackup_slave_info)" != "x" ]]; then
            # XtraBackup already generated a partial "CHANGE MASTER TO" query
            # because we're cloning from an existing slave. (Need to remove the tailing semicolon!)
            cat xtrabackup_slave_info | sed -E 's/;$//g' > change_master_to.sql.in
            # Ignore xtrabackup_binlog_info in this case (it's useless).
            rm -f xtrabackup_slave_info xtrabackup_binlog_info
          elif [[ -f xtrabackup_binlog_info ]]; then
            # We're cloning directly from master. Parse binlog position.
            [[ `cat xtrabackup_binlog_info` =~ ^(.*?)[[:space:]]+(.*?)$ ]] || exit 1
            rm -f xtrabackup_binlog_info xtrabackup_slave_info
            echo "CHANGE MASTER TO MASTER_LOG_FILE='${BASH_REMATCH[1]}',\
                  MASTER_LOG_POS=${BASH_REMATCH[2]}" > change_master_to.sql.in
          fi

          # Check if we need to complete a clone by starting replication.
          if [[ -f change_master_to.sql.in ]]; then
            echo "Waiting for mysqld to be ready (accepting connections)"
            until mysql -h 127.0.0.1 -e "SELECT 1"; do sleep 1; done

            echo "Initializing replication from clone position"
            mysql -h 127.0.0.1 \
                  -e "$(<change_master_to.sql.in), \
                          MASTER_HOST='mysql-0.mysql', \
                          MASTER_USER='root', \
                          MASTER_PASSWORD='', \
                          MASTER_CONNECT_RETRY=10; \
                        START SLAVE;" || exit 1
            # In case of container restart, attempt this at-most-once.
            mv change_master_to.sql.in change_master_to.sql.orig
          fi

          # Start a server to send backups when requested by peers.
          exec ncat --listen --keep-open --send-only --max-conns=1 3307 -c \
            "xtrabackup --backup --slave-info --stream=xbstream --host=127.0.0.1 --user=root"
        volumeMounts:
        - name: data
          mountPath: /var/lib/mysql
          subPath: mysql
        - name: conf
          mountPath: /etc/mysql/conf.d
        resources:
          requests:
            cpu: 100m
            memory: 100Mi
      volumes:
      - name: conf
        emptyDir: {}
      - name: config-map
        configMap:
          name: mysql
  volumeClaimTemplates:
  - metadata:
      name: data
    spec:
      accessModes: ["ReadWriteOnce"]
      storageClassName: "managed-nfs-storage"
      resources:
        requests:
          storage: 10Gi
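To actually create the cluster through the apiserver from Java, the manifests above can be loaded and submitted with the official client's Yaml helper. A rough sketch, assuming the YAML is saved to the hypothetical file names used below, the default namespace, and client-java 11.x generated signatures (newer releases add parameters or use a fluent .execute() style instead):

import io.kubernetes.client.openapi.ApiClient;
import io.kubernetes.client.openapi.Configuration;
import io.kubernetes.client.openapi.apis.AppsV1Api;
import io.kubernetes.client.openapi.apis.CoreV1Api;
import io.kubernetes.client.openapi.models.V1ConfigMap;
import io.kubernetes.client.openapi.models.V1Service;
import io.kubernetes.client.openapi.models.V1StatefulSet;
import io.kubernetes.client.util.Config;
import io.kubernetes.client.util.Yaml;

import java.io.File;

public class CreateMysqlCluster {
    public static void main(String[] args) throws Exception {
        ApiClient client = Config.defaultClient();   // reads ~/.kube/config
        Configuration.setDefaultApiClient(client);
        CoreV1Api core = new CoreV1Api();
        AppsV1Api apps = new AppsV1Api();
        String ns = "default";

        // File names are placeholders; point them at wherever the manifests above are saved.
        V1ConfigMap cm = (V1ConfigMap) Yaml.load(new File("mysql-configmap.yaml"));
        core.createNamespacedConfigMap(ns, cm, null, null, null);

        // The service file contains two documents (mysql and mysql-read), so load them all.
        for (Object obj : Yaml.loadAll(new File("mysql-services.yaml"))) {
            core.createNamespacedService(ns, (V1Service) obj, null, null, null);
        }

        // The embedded shell scripts must already have Unix (LF) line endings,
        // as described in section II above.
        V1StatefulSet sts = (V1StatefulSet) Yaml.load(new File("mysql-statefulset.yaml"));
        apps.createNamespacedStatefulSet(ns, sts, null, null, null);
    }
}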
