feat: add tikv

technofab 2024-04-07 11:27:30 +00:00
parent 0872fd859f
commit cfe321c0ab
5 changed files with 223 additions and 0 deletions

@@ -24,6 +24,7 @@
mosquitto = utils.mkNixlet ./nixlets/mosquitto;
attic = utils.mkNixlet ./nixlets/attic;
postgres = utils.mkNixlet ./nixlets/postgres;
tikv = utils.mkNixlet ./nixlets/tikv;
};
};
perSystem = {

nixlets/tikv/default.nix (new file, 6 lines)

@@ -0,0 +1,6 @@
{...}: {
imports = [
./statefulSet.nix
./service.nix
];
}

nixlets/tikv/service.nix (new file, 22 lines)

@@ -0,0 +1,22 @@
{values, ...}: {
kubernetes.resources = {
services."${values.uniqueName}-pd" = {
metadata.annotations."service.alpha.kubernetes.io/tolerate-unready-endpoints" = "true";
spec = {
selector.app = "${values.uniqueName}-pd";
ports = [
{
name = "pd-server";
port = values.pd.service.client_port;
}
{
name = "peer";
port = values.pd.service.peer_port;
}
];
type = values.pd.service.type;
clusterIP = "None";
};
};
};
}
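Two details in this Service matter for the StatefulSet that follows: clusterIP = "None" makes it headless, so every PD pod gets a stable DNS record of the form <pod>.<service>, and the tolerate-unready-endpoints annotation publishes those records before the pods pass readiness checks, which the initial cluster bootstrap depends on. A minimal eval-time sketch of the resulting names, using "demo-tikv" as a hypothetical uniqueName:

# sketch only: per-pod DNS names from a headless Service; "demo-tikv"
# stands in for values.uniqueName, 3 for pd.replicaCount
let service = "demo-tikv-pd"; replicas = 3;
in builtins.genList (i: "${service}-${toString i}.${service}") replicas
# => [ "demo-tikv-pd-0.demo-tikv-pd" "demo-tikv-pd-1.demo-tikv-pd" "demo-tikv-pd-2.demo-tikv-pd" ]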

nixlets/tikv/statefulSet.nix (new file, 108 lines)

@@ -0,0 +1,108 @@
{values, ...}: {
kubernetes.resources = {
statefulSets."${values.uniqueName}-pd".spec = {
replicas = values.pd.replicaCount;
selector.matchLabels.name = "${values.uniqueName}-pd";
serviceName = "${values.uniqueName}-pd";
updateStrategy.type = "RollingUpdate";
template = {
metadata.labels.name = "${values.uniqueName}-pd";
spec = {
containers."pd" = {
image = "${values.pd.image.repository}:${values.pd.image.tag}";
imagePullPolicy = values.pd.image.pullPolicy;
env = [
{
name = "INITIAL_CLUSTER_SIZE";
value = "${builtins.toString values.pd.replicaCount}";
}
{
name = "SET_NAME";
value = "${values.uniqueName}-pd";
}
{
name = "MY_POD_IP";
valueFrom.fieldRef.fieldPath = "status.podIP";
}
];
ports = {
"pd-server".containerPort = values.pd.service.client_port;
"peer".containerPort = values.pd.service.peer_port;
};
command = [
"/bin/sh"
"-ec"
''
HOSTNAME=$(hostname)
PEERS=""
for i in $(seq 0 $((''${INITIAL_CLUSTER_SIZE} - 1))); do
PEERS="''${PEERS}''${PEERS:+,}''${SET_NAME}-''${i}=http://''${SET_NAME}-''${i}.''${SET_NAME}:${builtins.toString values.pd.service.peer_port}"
done
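# note: $(MY_POD_IP) is substituted by Kubernetes from the container
# env, not by the shell; the ''${...} forms are shell expansions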
/pd-server --name=''${HOSTNAME} \
--client-urls=http://0.0.0.0:${builtins.toString values.pd.service.client_port} \
--peer-urls=http://0.0.0.0:${builtins.toString values.pd.service.peer_port} \
--advertise-client-urls=http://$(MY_POD_IP):${builtins.toString values.pd.service.client_port} \
--advertise-peer-urls=http://''${HOSTNAME}.''${SET_NAME}:${builtins.toString values.pd.service.peer_port} \
--initial-cluster ''${PEERS}
''
];
};
};
};
};
statefulSets."${values.uniqueName}".spec = {
replicas = values.tikv.replicaCount;
selector.matchLabels.name = "${values.uniqueName}";
serviceName = "${values.uniqueName}";
updateStrategy.type = "RollingUpdate";
template = {
metadata.labels.name = "${values.uniqueName}";
spec = {
initContainers."check-pd-port" = {
image = "busybox";
# block until PD's client port accepts TCP connections; busybox nc
# takes host and port as separate arguments
command = ["sh" "-c" "until nc -w 1 ${values.uniqueName}-pd ${builtins.toString values.pd.service.client_port}; do echo waiting for pd; sleep 2; done"];
};
containers."tikv" = {
image = "${values.tikv.image.repository}:${values.tikv.image.tag}";
imagePullPolicy = values.tikv.image.pullPolicy;
env = [
{
name = "MY_POD_IP";
valueFrom.fieldRef.fieldPath = "status.podIP";
}
];
ports."client".containerPort = values.tikv.service.client_port;
command = [
"/bin/sh"
"-ecx"
''
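# $(MY_POD_IP) is again substituted by Kubernetes; tikv registers
# itself with the pd cluster through the headless service name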
/tikv-server \
--addr="0.0.0.0:${builtins.toString values.tikv.service.client_port}" \
--advertise-addr="$(MY_POD_IP):${builtins.toString values.tikv.service.client_port}" \
--data-dir="/data/tikv" \
--pd="${values.uniqueName}-pd:${builtins.toString values.pd.service.client_port}"
''
];
volumeMounts."data" = {
name = "${values.uniqueName}-data";
mountPath = "/data";
};
# TODO: liveness and readiness probes
};
};
};
volumeClaimTemplates = [
{
metadata.name = "${values.uniqueName}-data";
spec = {
accessModes = ["ReadWriteOnce"];
resources.requests.storage = values.tikv.storage;
};
}
];
};
};
}
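For reference, the peer list that the shell loop above builds at runtime can be reproduced at Nix eval time; a sketch with hypothetical stand-ins (uniqueName "demo-tikv", the default peer port 2380, three replicas):

# eval-time sketch of the PEERS string; every name and number here is
# a hypothetical stand-in for the module's actual values
let set = "demo-tikv-pd";
peers = map (i: "${set}-${toString i}=http://${set}-${toString i}.${set}:2380") [0 1 2];
in builtins.concatStringsSep "," peers
# => "demo-tikv-pd-0=http://demo-tikv-pd-0.demo-tikv-pd:2380,demo-tikv-pd-1=http://demo-tikv-pd-1.demo-tikv-pd:2380,demo-tikv-pd-2=http://demo-tikv-pd-2.demo-tikv-pd:2380"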

nixlets/tikv/values.nix (new file, 86 lines)

@@ -0,0 +1,86 @@
{
lib,
utils,
project,
...
}:
with lib; {
# for some basic values see https://github.com/helm/examples/blob/4888ba8fb8180dd0c36d1e84c1fcafc6efd81532/charts/hello-world/values.yaml
options = {
pd = utils.mkNestedOption {
replicaCount = mkOption {
type = types.int;
default = 3;
};
image = utils.mkNestedOption {
repository = mkOption {
type = types.str;
default = "pingcap/pd";
};
tag = mkOption {
type = types.str;
default = "latest";
};
pullPolicy = mkOption {
type = types.str;
default = "IfNotPresent";
};
};
service = utils.mkNestedOption {
peer_port = mkOption {
type = types.int;
default = 2380;
};
client_port = mkOption {
type = types.int;
default = 2379;
};
type = mkOption {
type = types.str;
default = "ClusterIP";
};
};
};
tikv = utils.mkNestedOption {
replicaCount = mkOption {
type = types.int;
default = 3;
};
image = utils.mkNestedOption {
repository = mkOption {
type = types.str;
default = "pingcap/tikv";
};
tag = mkOption {
type = types.str;
default = "latest";
};
pullPolicy = mkOption {
type = types.str;
default = "IfNotPresent";
};
};
service = utils.mkNestedOption {
client_port = mkOption {
type = types.int;
default = 20160;
};
type = mkOption {
type = types.str;
default = "ClusterIP";
};
};
storage = mkOption {
type = types.str;
default = "5G";
};
};
# internal
uniqueName = mkOption {
internal = true;
type = types.str;
default = "${project}-tikv";
};
};
}
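A hypothetical set of overrides for this nixlet, to show how the option tree reads from the consumer side (the attribute paths follow values.nix; how the framework feeds them back in as `values` is outside this commit):

# hypothetical overrides; attribute names follow the options above
{
pd.replicaCount = 5;
tikv = {
image.tag = "v7.5.0"; # assumption: pin a concrete release over "latest"
storage = "20G";
};
}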