chore(tikv): add headless services, storage for PDs, ConfigMap, etc.

add ConfigMap for TiKV (tikv.toml rendered from values.tikv.config)
add headless services for TiKV and PD
add persistent storage for PD
fix issues with the StatefulSets: advertise stable per-pod DNS names instead of pod IPs, start pods in parallel, drop the broken check-pd-port init container, and pin image tags to v7.1.0
technofab 2024-04-09 20:42:24 +02:00
parent 5bb1c11c31
commit b343866134
5 changed files with 144 additions and 41 deletions

configMap.nix

@@ -0,0 +1,15 @@
+{
+  values,
+  pkgs,
+  ...
+}: let
+  tomlFormat = pkgs.formats.toml {};
+in {
+  kubernetes.resources = {
+    configMaps."${values.uniqueName}-config" = {
+      data = {
+        "tikv.toml" = builtins.readFile (tomlFormat.generate "tikv.toml" values.tikv.config);
+      };
+    };
+  };
+}
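A note on the ConfigMap above: pkgs.formats.toml turns the values.tikv.config attrset into an on-disk TOML file, and readFile inlines its text into the ConfigMap. A minimal standalone sketch of what the default config (declared in the options file below) renders to; the plain nixpkgs import is an assumption, everything else comes from this commit:

let
  pkgs = import <nixpkgs> {};
  tomlFormat = pkgs.formats.toml {};
  # Default config as declared in the options file below.
  config = {
    raftdb.max-open-files = 256;
    rocksdb.max-open-files = 256;
    storage.reserve-space = "0MB";
  };
in
  # generate writes a store file; readFile inlines its text, roughly:
  #   [raftdb]
  #   max-open-files = 256
  #   [rocksdb]
  #   max-open-files = 256
  #   [storage]
  #   reserve-space = "0MB"
  builtins.readFile (tomlFormat.generate "tikv.toml" config)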

default.nix

@@ -1,5 +1,6 @@
 {...}: {
   imports = [
+    ./configMap.nix
     ./statefulSet.nix
     ./service.nix
   ];

service.nix

@@ -1,21 +1,52 @@
 {values, ...}: {
   kubernetes.resources = {
-    services."${values.uniqueName}-pd" = {
-      metadata.annotations."service.alpha.kubernetes.io/tolerate-unready-endpoints" = "true";
-      spec = {
+    services = {
+      /*
+      PD HEADLESS SERVICE
+      */
+      "${values.uniqueName}-pd".spec = {
         selector.app = "${values.uniqueName}-pd";
         ports = [
           {
             name = "pd-server";
-            port = values.pd.service.client_port;
+            port = values.pd.service.port;
           }
           {
             name = "peer";
             port = values.pd.service.peer_port;
           }
         ];
-        type = values.pd.service.type;
+        type = "ClusterIP";
         clusterIP = "None";
+        publishNotReadyAddresses = true;
       };
+      /*
+      TIKV HEADLESS SERVICE
+      */
+      "${values.uniqueName}-peer".spec = {
+        selector.app = "${values.uniqueName}";
+        ports = [
+          {
+            name = "peer";
+            port = values.tikv.service.port;
+          }
+        ];
+        type = "ClusterIP";
+        clusterIP = "None";
+        publishNotReadyAddresses = true;
+      };
+      /*
+      CLUSTER SERVICE
+      */
+      "${values.uniqueName}".spec = {
+        selector.app = "${values.uniqueName}-pd";
+        ports = [
+          {
+            name = "server";
+            port = values.pd.service.port;
+          }
+        ];
+        type = values.pd.service.type;
+      };
     };
   };
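A note on the two headless services: clusterIP = "None" together with publishNotReadyAddresses = true gives every StatefulSet pod a stable DNS record of the form <pod>.<service>.<namespace>.svc, resolvable even before the pod passes readiness, which is what lets the PD peers find each other during bootstrap. A hypothetical helper to illustrate the naming; uniqueName "tikv" and namespace "default" are assumptions, not values from this repo:

let
  # Hypothetical: DNS name of replica i of a StatefulSet governed
  # by a headless service named svc in namespace ns.
  podDns = set: svc: ns: i: "${set}-${toString i}.${svc}.${ns}.svc";
in
  builtins.genList (podDns "tikv-pd" "tikv-pd" "default") 3
# => [ "tikv-pd-0.tikv-pd.default.svc"
#      "tikv-pd-1.tikv-pd.default.svc"
#      "tikv-pd-2.tikv-pd.default.svc" ]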

statefulSet.nix

@@ -1,10 +1,14 @@
 {values, ...}: {
   kubernetes.resources = {
+    /*
+    Placement Driver
+    */
     statefulSets."${values.uniqueName}-pd".spec = {
       replicas = values.pd.replicaCount;
       selector.matchLabels.name = "${values.uniqueName}-pd";
       serviceName = "${values.uniqueName}-pd";
       updateStrategy.type = "RollingUpdate";
+      podManagementPolicy = "Parallel";
       template = {
         metadata.labels = rec {
           name = "${values.uniqueName}-pd";
@@ -16,85 +20,121 @@
             imagePullPolicy = values.pd.image.pullPolicy;
             env = [
               {
-                name = "INITIAL_CLUSTER_SIZE";
-                value = "${builtins.toString values.pd.replicaCount}";
-              }
-              {
-                name = "SET_NAME";
+                name = "HEADLESS_SERVICE_NAME";
                 value = "${values.uniqueName}-pd";
               }
               {
-                name = "MY_POD_IP";
-                valueFrom.fieldRef.fieldPath = "status.podIP";
+                name = "NAMESPACE";
+                valueFrom.fieldRef.fieldPath = "metadata.namespace";
               }
             ];
             ports = {
-              "pd-server".containerPort = values.pd.service.client_port;
+              "pd-server".containerPort = values.pd.service.port;
               "peer".containerPort = values.pd.service.peer_port;
             };
             command = [
               "/bin/sh"
-              "-ec"
+              "-ecx"
               ''
                 HOSTNAME=$(hostname)
                 PEERS=""
-                for i in $(seq 0 $((''${INITIAL_CLUSTER_SIZE} - 1))); do
-                  PEERS="''${PEERS}''${PEERS:+,}''${SET_NAME}-''${i}=http://''${SET_NAME}-''${i}.''${SET_NAME}:${builtins.toString values.pd.service.peer_port}"
+                for i in $(seq 0 $((${builtins.toString values.pd.replicaCount} - 1))); do
+                  PEERS="''${PEERS}''${PEERS:+,}''${HEADLESS_SERVICE_NAME}-''${i}=http://''${HEADLESS_SERVICE_NAME}-''${i}.''${HEADLESS_SERVICE_NAME}.''${NAMESPACE}.svc:${builtins.toString values.pd.service.peer_port}"
                 done
                 /pd-server --name=''${HOSTNAME} \
-                  --client-urls=http://0.0.0.0:${builtins.toString values.pd.service.client_port} \
+                  --client-urls=http://0.0.0.0:${builtins.toString values.pd.service.port} \
+                  --advertise-client-urls=http://''${HOSTNAME}.''${HEADLESS_SERVICE_NAME}.''${NAMESPACE}.svc:${builtins.toString values.pd.service.port} \
                   --peer-urls=http://0.0.0.0:${builtins.toString values.pd.service.peer_port} \
-                  --advertise-client-urls=http://$(MY_POD_IP):${builtins.toString values.pd.service.client_port} \
-                  --advertise-peer-urls=http://''${HOSTNAME}.''${SET_NAME}:${builtins.toString values.pd.service.peer_port} \
+                  --advertise-peer-urls=http://''${HOSTNAME}.''${HEADLESS_SERVICE_NAME}.''${NAMESPACE}.svc:${builtins.toString values.pd.service.peer_port} \
                   --data-dir /var/lib/pd \
                   --initial-cluster ''${PEERS}
               ''
             ];
             volumeMounts."data" = {
+              name = "${values.uniqueName}-pd-data";
               mountPath = "/var/lib/pd";
             };
           };
         };
       };
+      volumeClaimTemplates = [
+        {
+          metadata.name = "${values.uniqueName}-pd-data";
+          spec = {
+            accessModes = ["ReadWriteOnce"];
+            resources.requests.storage = values.pd.storage;
+          };
+        }
+      ];
     };
+    /*
+    TiKV
+    */
     statefulSets."${values.uniqueName}".spec = {
       replicas = values.tikv.replicaCount;
       selector.matchLabels.name = "${values.uniqueName}";
-      serviceName = "${values.uniqueName}";
+      serviceName = "${values.uniqueName}-peer";
       updateStrategy.type = "RollingUpdate";
+      podManagementPolicy = "Parallel";
       template = {
-        metadata.labels.name = "${values.uniqueName}";
-        spec = {
-          initContainers."check-pd-port" = {
-            image = "busybox";
-            command = ["sh" "-c" "echo STATUS nc -w 1 ${values.uniqueName}-pd:${builtins.toString values.pd.service.client_port}"];
-          };
+        metadata.labels = rec {
+          name = "${values.uniqueName}";
+          app = name;
+        };
         spec = {
           containers."tikv" = {
             image = "${values.tikv.image.repository}:${values.tikv.image.tag}";
             imagePullPolicy = values.tikv.image.pullPolicy;
             env = [
               {
-                name = "MY_POD_IP";
-                valueFrom.fieldRef.fieldPath = "status.podIP";
+                name = "HEADLESS_SERVICE_NAME";
+                value = "${values.uniqueName}-peer";
               }
+              {
+                name = "NAMESPACE";
+                valueFrom.fieldRef.fieldPath = "metadata.namespace";
+              }
             ];
-            ports."client".containerPort = values.tikv.service.client_port;
+            ports."server".containerPort = values.tikv.service.port;
             command = [
               "/bin/sh"
               "-ecx"
               ''
                 /tikv-server \
-                  --addr="0.0.0.0:${builtins.toString values.tikv.service.client_port}" \
-                  --advertise-addr="$(MY_POD_IP):${builtins.toString values.tikv.service.client_port}" \
-                  --data-dir="/data/tikv" \
-                  --pd="${values.uniqueName}-pd:${builtins.toString values.pd.service.client_port}"
+                  --addr="0.0.0.0:${builtins.toString values.tikv.service.port}" \
+                  --advertise-addr="''${HOSTNAME}.''${HEADLESS_SERVICE_NAME}.''${NAMESPACE}.svc:${builtins.toString values.tikv.service.port}" \
+                  --status-addr=0.0.0.0:${builtins.toString values.tikv.service.status_port} \
+                  --advertise-status-addr="''${HOSTNAME}.''${HEADLESS_SERVICE_NAME}.''${NAMESPACE}.svc:${builtins.toString values.tikv.service.status_port}" \
+                  --data-dir="/var/lib/tikv" \
+                  --capacity=0 \
+                  --config=/etc/tikv/tikv.toml \
+                  --pd="http://${values.uniqueName}-pd:${builtins.toString values.pd.service.port}"
               ''
             ];
-            volumeMounts."data" = {
+            volumeMounts = {
+              "data" = {
                 name = "${values.uniqueName}-data";
-                mountPath = "/data";
+                mountPath = "/var/lib/tikv";
               };
+              "config" = {
+                name = "config";
+                mountPath = "/etc/tikv";
+                readOnly = true;
+              };
+            };
+            # TODO: liveness and readiness probes
           };
+          volumes."config".configMap = {
+            defaultMode = 420;
+            items = [
+              {
+                key = "tikv.toml";
+                path = "tikv.toml";
+              }
+            ];
+            name = "${values.uniqueName}-config";
+          };
         };
       };
       volumeClaimTemplates = [
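On the PD bootstrap above: the shell loop assembles the --initial-cluster peer list from the headless-service DNS names at container start. The same string written out in Nix for three replicas; uniqueName "tikv", namespace "default", and the default peer port 2380 are assumptions:

let
  lib = (import <nixpkgs> {}).lib;
  # Assumed: uniqueName "tikv", namespace "default", peer port 2380.
  peer = i: "tikv-pd-${toString i}=http://tikv-pd-${toString i}.tikv-pd.default.svc:2380";
in
  lib.concatMapStringsSep "," peer (lib.range 0 2)
# => "tikv-pd-0=http://tikv-pd-0.tikv-pd.default.svc:2380,
#     tikv-pd-1=http://tikv-pd-1.tikv-pd.default.svc:2380,
#     tikv-pd-2=http://tikv-pd-2.tikv-pd.default.svc:2380"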
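And for the probe TODO in the TiKV container, a possible first pass, not part of this commit: TiKV serves HTTP on its status port, so the sketch below probes that; the /status path and the timings are assumptions:

containers."tikv" = {
  # Assumption: GET /status on the status port returns 200 once TiKV is up.
  readinessProbe = {
    httpGet = {
      path = "/status";
      port = 20180; # tikv.service.status_port default
    };
    initialDelaySeconds = 10;
  };
  livenessProbe = {
    tcpSocket.port = 20160; # tikv.service.port default
    initialDelaySeconds = 30;
  };
};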

View file

@@ -19,7 +19,7 @@ with lib; {
      };
      tag = mkOption {
        type = types.str;
-        default = "latest";
+        default = "v7.1.0";
      };
      pullPolicy = mkOption {
        type = types.str;
@@ -31,7 +31,7 @@
        type = types.int;
        default = 2380;
      };
-      client_port = mkOption {
+      port = mkOption {
        type = types.int;
        default = 2379;
      };
@@ -40,6 +40,10 @@
        default = "ClusterIP";
      };
    };
+    storage = mkOption {
+      type = types.str;
+      default = "5G";
+    };
  };
  tikv = utils.mkNestedOption {
    replicaCount = mkOption {
@@ -53,7 +57,7 @@
    };
    tag = mkOption {
      type = types.str;
-      default = "latest";
+      default = "v7.1.0";
    };
    pullPolicy = mkOption {
      type = types.str;
@@ -61,10 +65,14 @@
    };
  };
  service = utils.mkNestedOption {
-    client_port = mkOption {
+    port = mkOption {
      type = types.int;
      default = 20160;
    };
+    status_port = mkOption {
+      type = types.int;
+      default = 20180;
+    };
    type = mkOption {
      type = types.str;
      default = "ClusterIP";
@@ -74,6 +82,14 @@
      type = types.str;
      default = "5G";
    };
+    config = mkOption {
+      type = types.attrs;
+      default = {
+        raftdb.max-open-files = 256;
+        rocksdb.max-open-files = 256;
+        storage.reserve-space = "0MB";
+      };
+    };
  };
  # internal
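With the new config option, the rendered tikv.toml can be tuned per deployment instead of patching the module. A sketch of a consumer-side override; how these options are surfaced to consumers (and merged into values) is assumed here, only the option names and defaults come from the diff above:

{
  # Hypothetical overrides; merged over the defaults above and rendered
  # into tikv.toml by the ConfigMap.
  tikv.config = {
    rocksdb.max-open-files = 1024;
    storage.reserve-space = "1GB";
  };
}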