chore(tikv): add headless services, storage for PDs, ConfigMap, etc.

add configmap for tikv
add headless services for tikv and pd
add storage for pd
fix issues with the StatefulSets
This commit is contained in:
technofab 2024-04-09 20:42:24 +02:00
parent 5bb1c11c31
commit b343866134
5 changed files with 144 additions and 41 deletions

View file

@ -0,0 +1,15 @@
# Renders the TiKV configuration (values.tikv.config) to TOML and exposes it
# as a ConfigMap so the TiKV StatefulSet can mount it as tikv.toml.
{
values,
pkgs,
...
}: let
# pkgs.formats.toml provides a generator that serializes a Nix attrset to TOML.
tomlFormat = pkgs.formats.toml {};
in {
kubernetes.resources = {
configMaps."${values.uniqueName}-config" = {
data = {
# NOTE(review): builtins.readFile on a derivation output forces
# import-from-derivation (IFD) at eval time — confirm this is intended.
"tikv.toml" = builtins.readFile (tomlFormat.generate "tikv.toml" values.tikv.config);
};
};
};
}

View file

@ -1,5 +1,6 @@
{...}: { {...}: {
imports = [ imports = [
./configMap.nix
./statefulSet.nix ./statefulSet.nix
./service.nix ./service.nix
]; ];

View file

@ -1,21 +1,52 @@
{values, ...}: { {values, ...}: {
kubernetes.resources = { kubernetes.resources = {
services."${values.uniqueName}-pd" = { services = {
metadata.annotations."service.alpha.kubernetes.io/tolerate-unready-endpoints" = "true"; /*
spec = { PD HEADLESS SERVICE
*/
"${values.uniqueName}-pd".spec = {
selector.app = "${values.uniqueName}-pd"; selector.app = "${values.uniqueName}-pd";
ports = [ ports = [
{ {
name = "pd-server"; name = "pd-server";
port = values.pd.service.client_port; port = values.pd.service.port;
} }
{ {
name = "peer"; name = "peer";
port = values.pd.service.peer_port; port = values.pd.service.peer_port;
} }
]; ];
type = values.pd.service.type; type = "ClusterIP";
clusterIP = "None"; clusterIP = "None";
publishNotReadyAddresses = true;
};
/*
TIKV HEADLESS SERVICE
*/
# Headless service over the TiKV pods ("<uniqueName>-peer"): gives each pod a
# stable per-pod DNS name usable as an advertise address.
"${values.uniqueName}-peer".spec = {
# Selects the TiKV pods (labelled app = uniqueName).
selector.app = "${values.uniqueName}";
ports = [
{
name = "peer";
port = values.tikv.service.port;
}
];
# clusterIP = "None" makes the service headless (per-pod DNS A records).
type = "ClusterIP";
clusterIP = "None";
# Publish pod addresses before readiness so peers can discover each other
# during initial cluster bootstrap.
publishNotReadyAddresses = true;
};
/*
CLUSTER SERVICE
*/
"${values.uniqueName}".spec = {
selector.app = "${values.uniqueName}-pd";
ports = [
{
name = "server";
port = values.pd.service.port;
}
];
type = values.pd.service.type;
}; };
}; };
}; };

View file

@ -1,10 +1,14 @@
{values, ...}: { {values, ...}: {
kubernetes.resources = { kubernetes.resources = {
/*
Placement Driver
*/
statefulSets."${values.uniqueName}-pd".spec = { statefulSets."${values.uniqueName}-pd".spec = {
replicas = values.pd.replicaCount; replicas = values.pd.replicaCount;
selector.matchLabels.name = "${values.uniqueName}-pd"; selector.matchLabels.name = "${values.uniqueName}-pd";
serviceName = "${values.uniqueName}-pd"; serviceName = "${values.uniqueName}-pd";
updateStrategy.type = "RollingUpdate"; updateStrategy.type = "RollingUpdate";
podManagementPolicy = "Parallel";
template = { template = {
metadata.labels = rec { metadata.labels = rec {
name = "${values.uniqueName}-pd"; name = "${values.uniqueName}-pd";
@ -16,85 +20,121 @@
imagePullPolicy = values.pd.image.pullPolicy; imagePullPolicy = values.pd.image.pullPolicy;
env = [ env = [
{ {
name = "INITIAL_CLUSTER_SIZE"; name = "HEADLESS_SERVICE_NAME";
value = "${builtins.toString values.pd.replicaCount}";
}
{
name = "SET_NAME";
value = "${values.uniqueName}-pd"; value = "${values.uniqueName}-pd";
} }
{ {
name = "MY_POD_IP"; name = "NAMESPACE";
valueFrom.fieldRef.fieldPath = "status.podIP"; valueFrom.fieldRef.fieldPath = "metadata.namespace";
} }
]; ];
ports = { ports = {
"pd-server".containerPort = values.pd.service.client_port; "pd-server".containerPort = values.pd.service.port;
"peer".containerPort = values.pd.service.peer_port; "peer".containerPort = values.pd.service.peer_port;
}; };
command = [ command = [
"/bin/sh" "/bin/sh"
"-ec" "-ecx"
'' ''
HOSTNAME=$(hostname)
PEERS="" PEERS=""
for i in $(seq 0 $((${builtins.toString values.pd.replicaCount} - 1))); do
for i in $(seq 0 $((''${INITIAL_CLUSTER_SIZE} - 1))); do PEERS="''${PEERS}''${PEERS:+,}''${HEADLESS_SERVICE_NAME}-''${i}=http://''${HEADLESS_SERVICE_NAME}-''${i}.''${HEADLESS_SERVICE_NAME}.''${NAMESPACE}.svc:${builtins.toString values.pd.service.peer_port}"
PEERS="''${PEERS}''${PEERS:+,}''${SET_NAME}-''${i}=http://''${SET_NAME}-''${i}.''${SET_NAME}:${builtins.toString values.pd.service.peer_port}"
done done
/pd-server --name=''${HOSTNAME} \ /pd-server --name=''${HOSTNAME} \
--client-urls=http://0.0.0.0:${builtins.toString values.pd.service.client_port} \ --client-urls=http://0.0.0.0:${builtins.toString values.pd.service.port} \
--advertise-client-urls=http://''${HOSTNAME}.''${HEADLESS_SERVICE_NAME}.''${NAMESPACE}.svc:${builtins.toString values.pd.service.port} \
--peer-urls=http://0.0.0.0:${builtins.toString values.pd.service.peer_port} \ --peer-urls=http://0.0.0.0:${builtins.toString values.pd.service.peer_port} \
--advertise-client-urls=http://$(MY_POD_IP):${builtins.toString values.pd.service.client_port} \ --advertise-peer-urls=http://''${HOSTNAME}.''${HEADLESS_SERVICE_NAME}.''${NAMESPACE}.svc:${builtins.toString values.pd.service.peer_port} \
--advertise-peer-urls=http://''${HOSTNAME}.''${SET_NAME}:${builtins.toString values.pd.service.peer_port} \ --data-dir /var/lib/pd \
--initial-cluster ''${PEERS} --initial-cluster ''${PEERS}
'' ''
]; ];
# Persist PD data; the mount's `name` must match the volumeClaimTemplate
# metadata.name for the per-replica PVC to be attached here.
volumeMounts."data" = {
name = "${values.uniqueName}-pd-data";
mountPath = "/var/lib/pd";
};
}; };
}; };
}; };
# One PersistentVolumeClaim per PD replica, sized by values.pd.storage.
volumeClaimTemplates = [
{
# Must match the `name` used in the pd container's volumeMounts.
metadata.name = "${values.uniqueName}-pd-data";
spec = {
accessModes = ["ReadWriteOnce"];
resources.requests.storage = values.pd.storage;
};
}
];
}; };
/*
TiKV
*/
statefulSets."${values.uniqueName}".spec = { statefulSets."${values.uniqueName}".spec = {
replicas = values.tikv.replicaCount; replicas = values.tikv.replicaCount;
selector.matchLabels.name = "${values.uniqueName}"; selector.matchLabels.name = "${values.uniqueName}";
serviceName = "${values.uniqueName}"; serviceName = "${values.uniqueName}-peer";
updateStrategy.type = "RollingUpdate"; updateStrategy.type = "RollingUpdate";
podManagementPolicy = "Parallel";
template = { template = {
metadata.labels.name = "${values.uniqueName}"; metadata.labels = rec {
name = "${values.uniqueName}";
app = name;
};
spec = { spec = {
# Block TiKV startup until the PD service accepts TCP connections.
# Fixes two defects in the previous version: the command only *echoed* the
# nc invocation (a no-op check), and it referenced the removed
# `values.pd.service.client_port` option (renamed to `port` in this commit).
initContainers."check-pd-port" = {
image = "busybox";
# busybox nc takes "host port" (not host:port); -w 1 bounds each probe to 1s.
command = [
"sh"
"-c"
"until nc -w 1 ${values.uniqueName}-pd ${builtins.toString values.pd.service.port}; do echo 'waiting for pd'; sleep 2; done"
];
};
containers."tikv" = { containers."tikv" = {
image = "${values.tikv.image.repository}:${values.tikv.image.tag}"; image = "${values.tikv.image.repository}:${values.tikv.image.tag}";
imagePullPolicy = values.tikv.image.pullPolicy; imagePullPolicy = values.tikv.image.pullPolicy;
env = [ env = [
{ {
name = "MY_POD_IP"; name = "HEADLESS_SERVICE_NAME";
valueFrom.fieldRef.fieldPath = "status.podIP"; value = "${values.uniqueName}-peer";
}
{
name = "NAMESPACE";
valueFrom.fieldRef.fieldPath = "metadata.namespace";
} }
]; ];
ports."client".containerPort = values.tikv.service.client_port; ports."server".containerPort = values.tikv.service.port;
command = [ command = [
"/bin/sh" "/bin/sh"
"-ecx" "-ecx"
'' ''
/tikv-server \ /tikv-server \
--addr="0.0.0.0:${builtins.toString values.tikv.service.client_port}" \ --addr="0.0.0.0:${builtins.toString values.tikv.service.port}" \
--advertise-addr="$(MY_POD_IP):${builtins.toString values.tikv.service.client_port}" \ --advertise-addr="''${HOSTNAME}.''${HEADLESS_SERVICE_NAME}.''${NAMESPACE}.svc:${builtins.toString values.tikv.service.port}" \
--data-dir="/data/tikv" \ --status-addr=0.0.0.0:${builtins.toString values.tikv.service.status_port} \
--pd="${values.uniqueName}-pd:${builtins.toString values.pd.service.client_port}" --advertise-status-addr="''${HOSTNAME}.''${HEADLESS_SERVICE_NAME}.''${NAMESPACE}.svc:${builtins.toString values.tikv.service.status_port}" \
--data-dir="/var/lib/tikv" \
--capacity=0 \
--config=/etc/tikv/tikv.toml \
--pd="http://${values.uniqueName}-pd:${builtins.toString values.pd.service.port}"
'' ''
]; ];
volumeMounts."data" = { volumeMounts = {
name = "${values.uniqueName}-data"; "data" = {
mountPath = "/data"; name = "${values.uniqueName}-data";
mountPath = "/var/lib/tikv";
};
"config" = {
name = "config";
mountPath = "/etc/tikv";
readOnly = true;
};
}; };
# TODO: liveness and readiness probes # TODO: liveness and readiness probes
}; };
# Mount source for the TiKV config: the ConfigMap generated in configMap.nix,
# exposing only the rendered tikv.toml key.
volumes."config".configMap = {
# 420 decimal == 0644 octal (owner rw, world-readable).
defaultMode = 420;
items = [
{
key = "tikv.toml";
path = "tikv.toml";
}
];
name = "${values.uniqueName}-config";
};
}; };
}; };
volumeClaimTemplates = [ volumeClaimTemplates = [

View file

@ -19,7 +19,7 @@ with lib; {
}; };
tag = mkOption { tag = mkOption {
type = types.str; type = types.str;
default = "latest"; default = "v7.1.0";
}; };
pullPolicy = mkOption { pullPolicy = mkOption {
type = types.str; type = types.str;
@ -31,7 +31,7 @@ with lib; {
type = types.int; type = types.int;
default = 2380; default = 2380;
}; };
client_port = mkOption { port = mkOption {
type = types.int; type = types.int;
default = 2379; default = 2379;
}; };
@ -40,6 +40,10 @@ with lib; {
default = "ClusterIP"; default = "ClusterIP";
}; };
}; };
# Size of the per-replica PersistentVolumeClaim backing PD data.
storage = mkOption {
type = types.str;
default = "5G";
};
}; };
tikv = utils.mkNestedOption { tikv = utils.mkNestedOption {
replicaCount = mkOption { replicaCount = mkOption {
@ -53,7 +57,7 @@ with lib; {
}; };
tag = mkOption { tag = mkOption {
type = types.str; type = types.str;
default = "latest"; default = "v7.1.0";
}; };
pullPolicy = mkOption { pullPolicy = mkOption {
type = types.str; type = types.str;
@ -61,10 +65,14 @@ with lib; {
}; };
}; };
service = utils.mkNestedOption { service = utils.mkNestedOption {
client_port = mkOption { port = mkOption {
type = types.int; type = types.int;
default = 20160; default = 20160;
}; };
# TiKV status port, passed to --status-addr / --advertise-status-addr.
status_port = mkOption {
type = types.int;
default = 20180;
};
type = mkOption { type = mkOption {
type = types.str; type = types.str;
default = "ClusterIP"; default = "ClusterIP";
@ -74,6 +82,14 @@ with lib; {
type = types.str; type = types.str;
default = "5G"; default = "5G";
}; };
# Freeform TiKV configuration; rendered to tikv.toml by configMap.nix.
config = mkOption {
type = types.attrs;
default = {
# Keep file-descriptor usage low — suited to small/dev deployments.
raftdb.max-open-files = 256;
rocksdb.max-open-files = 256;
# Don't reserve disk headroom; suited to space-constrained environments.
storage.reserve-space = "0MB";
};
};
}; };
# internal # internal