add logging to the operator
Some checks failed
Build images / Run e2e tests (push) Failing after 1m57s
Build images / Run lint test (push) Failing after 7m27s
Lint / Run on Ubuntu (push) Failing after 6m47s
Build images / Run unit test (push) Failing after 8m53s
Build images / Build docker image (push) Has been skipped
E2E Tests / Run on Ubuntu (push) Failing after 1m50s
Tests / Run on Ubuntu (push) Failing after 3m1s
add logging
@@ -2,6 +2,9 @@ package controller

import (
    "context"

    batch "k8s.io/api/batch/v1"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    kclient "sigs.k8s.io/controller-runtime/pkg/client"

    locustCluster "git.lilpenguins.com/crichardson/locust-operator/api/v1alpha1"
    "github.com/go-logr/logr"
@@ -17,3 +20,212 @@ func (r *LocustClusterReconciler) CreateLeaderService(log logr.Logger, locustClu
    log.Info("Creating service for leader")
    return nil
}

// BuildLeaderJob assembles the batch Job that runs the Locust leader for the
// given LocustCluster, layering overrides from the CRD spec on top of the
// operator defaults.
func (r *LocustClusterReconciler) BuildLeaderJob(ctx context.Context, log logr.Logger, locustCluster *locustCluster.LocustCluster) (*batch.Job, error) {
    ttlSecondsAfterFinished := int32(60)

    var envVars []v1.EnvVar

    // Start from the operator defaults, then apply any resource overrides set
    // in the CRD spec.
    cpuMax := LeaderMaxCPU
    cpuMin := LeaderMinCPU
    memMax := LeaderMaxMemory
    memMin := LeaderMinMemory

    if len(locustCluster.Spec.Leader.Resources.MaxCpu) > 0 {
        cpuMax = locustCluster.Spec.Leader.Resources.MaxCpu
    }
    if len(locustCluster.Spec.Leader.Resources.MinCpu) > 0 {
        cpuMin = locustCluster.Spec.Leader.Resources.MinCpu
    }
    if len(locustCluster.Spec.Leader.Resources.MaxMem) > 0 {
        memMax = locustCluster.Spec.Leader.Resources.MaxMem
    }
    if len(locustCluster.Spec.Leader.Resources.MinMem) > 0 {
        memMin = locustCluster.Spec.Leader.Resources.MinMem
    }

    // Fall back to the default test file when none is set in the spec.
    testFile := LeaderTestFile
    if len(locustCluster.Spec.Leader.TestFile) > 0 {
        testFile = locustCluster.Spec.Leader.TestFile
    }
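    // Note: the quantity strings resolved above are fed to resource.MustParse
    // further down, which panics on malformed values, so CRD validation
    // should constrain these fields.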
    scenarioEnv := v1.EnvVar{
        Name:  "LOCUST_SCENARIO_NAME",
        Value: locustCluster.Spec.Worker.ScenarioName,
    }
    envVars = append(envVars, scenarioEnv)

    if isMosaicDefined(locustCluster) {
        mosaicEnv := v1.EnvVar{
            Name: "LOCUST_MOSAIC_API_KEY",
            ValueFrom: &v1.EnvVarSource{
                SecretKeyRef: &v1.SecretKeySelector{
                    LocalObjectReference: v1.LocalObjectReference{Name: "mosaic-api-key"},
                    Key:                  "api_key",
                },
            },
        }
        mosaicTag := v1.EnvVar{
            Name:  "LOCUST_PROMETHEUS_TAG",
            Value: locustCluster.Spec.Metrics.Mosaic.PrometheusTag,
        }
        grafanaEndpoint := GrafanaEndpoint
        if len(locustCluster.Spec.Metrics.Mosaic.GrafanaEndpoint) > 0 {
            grafanaEndpoint = locustCluster.Spec.Metrics.Mosaic.GrafanaEndpoint
        }
        grafanaEndpointEnv := v1.EnvVar{
            Name:  "LOCUST_GRAFANA_ENDPOINT",
            Value: grafanaEndpoint,
        }
        groupEmail := v1.EnvVar{
            Name:  "LOCUST_GROUP_EMAIL",
            Value: locustCluster.Spec.Metrics.Mosaic.GroupEmail,
        }
        envVars = append(envVars, mosaicEnv, mosaicTag, grafanaEndpointEnv, groupEmail)
    }
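    // The "mosaic-api-key" Secret must already exist in the cluster's
    // namespace; a missing key leaves the leader pod stuck in
    // CreateContainerConfigError.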
    // Add endpoint env vars to the container env vars, if any exist.
    // NOTE: endpointsEnvVars is assumed to be built earlier in the
    // reconciler; its construction is not part of this hunk.
    if len(endpointsEnvVars) != 0 {
        envVars = append(envVars, endpointsEnvVars...)
    }

    // Add secrets env vars to the container if secrets are specified in the
    // Locust spec.
    if len(locustCluster.Spec.Secrets) > 0 {
        secretVars := r.BuildSecretsEnvVars(locustCluster)
        envVars = append(envVars, secretVars...)
    }
    if len(locustCluster.Spec.Leader.Download.Bucket) > 0 &&
        len(locustCluster.Spec.Leader.Download.Filename) > 0 {
        bucketEnv := v1.EnvVar{
            Name:  "LOCUST_S3_BUCKET",
            Value: locustCluster.Spec.Leader.Download.Bucket,
        }
        fileEnv := v1.EnvVar{
            Name:  "LOCUST_S3_FILENAME",
            Value: locustCluster.Spec.Leader.Download.Filename,
        }
        awsKey := v1.EnvVar{
            Name: "AWS_ACCESS_KEY_ID",
            ValueFrom: &v1.EnvVarSource{
                SecretKeyRef: &v1.SecretKeySelector{
                    LocalObjectReference: v1.LocalObjectReference{Name: "loadtest-blob"},
                    Key:                  "blob_key",
                },
            },
        }
        awsSecret := v1.EnvVar{
            Name: "AWS_SECRET_ACCESS_KEY",
            ValueFrom: &v1.EnvVarSource{
                SecretKeyRef: &v1.SecretKeySelector{
                    LocalObjectReference: v1.LocalObjectReference{Name: "loadtest-blob"},
                    Key:                  "blob_secret",
                },
            },
        }
        envVars = append(envVars, bucketEnv, fileEnv, awsKey, awsSecret)
    }
    // Allow the user to override the leader Docker image via the CRD.
    leaderImage := LeaderContainer
    if locustCluster.Spec.Leader.Image != "" {
        leaderImage = locustCluster.Spec.Leader.Image
    }

    envSource, err := r.BuildTestRunEnvVars(ctx, locustCluster)
    if err != nil {
        return nil, err
    }
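    // Illustrative addition (not in the original diff), in keeping with this
    // commit's "add logging" intent: record the resolved inputs at debug level.
    log.V(1).Info("building leader job", "image", leaderImage, "testFile", testFile)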
    podSpec := v1.PodSpec{
        // Jobs only accept "Never" or "OnFailure"; "Never" keeps failed
        // leader pods around for inspection.
        RestartPolicy: v1.RestartPolicyNever,
        Containers: []v1.Container{
            {
                Name:    locustCluster.Name + "-leader",
                Image:   leaderImage,
                Command: []string{"locust", "-f", testFile, "--master"},
                Ports: []v1.ContainerPort{
                    {
                        Name:          LeaderPortNameWeb,
                        ContainerPort: LeaderPortWeb,
                        Protocol:      LeaderPortProtocol,
                    },
                    {
                        Name:          LeaderPortNameLocust1,
                        ContainerPort: LeaderPortListener1,
                        Protocol:      LeaderPortProtocol,
                    },
                    {
                        Name:          LeaderPortNameLocust2,
                        ContainerPort: LeaderPortListener2,
                        Protocol:      LeaderPortProtocol,
                    },
                },
                Env:     envVars,
                EnvFrom: envSource,
                Resources: v1.ResourceRequirements{
                    Limits: v1.ResourceList{
                        v1.ResourceCPU:    resource.MustParse(cpuMax),
                        v1.ResourceMemory: resource.MustParse(memMax),
                    },
                    Requests: v1.ResourceList{
                        v1.ResourceCPU:    resource.MustParse(cpuMin),
                        v1.ResourceMemory: resource.MustParse(memMin),
                    },
                },
                ImagePullPolicy: v1.PullAlways,
            },
        },
    }
    // If a "<locust name>-settings" ConfigMap exists, mount it so its data
    // can be attached to the report email. A Get error (including NotFound)
    // simply skips the mount.
    settingsConfigMap := &v1.ConfigMap{}

    endpointKey := kclient.ObjectKey{
        Name:      locustCluster.Name + "-settings",
        Namespace: locustCluster.Namespace,
    }

    err = r.Get(ctx, endpointKey, settingsConfigMap)
    if err == nil {
        // Add the ConfigMap as a volume to the pod.
        podSpec.Volumes = []v1.Volume{
            {
                Name: "config",
                VolumeSource: v1.VolumeSource{
                    ConfigMap: &v1.ConfigMapVolumeSource{
                        LocalObjectReference: v1.LocalObjectReference{Name: locustCluster.Name + "-settings"},
                    },
                },
            },
        }
        // Mount the volume into the container.
        podSpec.Containers[0].VolumeMounts = []v1.VolumeMount{
            {
                Name:      "config",
                MountPath: LeaderBuildParamConfig,
            },
        }
    }
    // Use the default priority class (p4) unless the CRD specifies one.
    if len(locustCluster.Spec.Leader.PriorityClass) > 0 {
        podSpec.PriorityClassName = locustCluster.Spec.Leader.PriorityClass
    }
    return &batch.Job{
        TypeMeta:   metav1.TypeMeta{},
        ObjectMeta: BuildObjectMeta(locustCluster, "leader"),
        Spec: batch.JobSpec{
            Template: v1.PodTemplateSpec{
                ObjectMeta: BuildPodObjectMeta(locustCluster, "leader"),
                Spec:       podSpec,
            },
            TTLSecondsAfterFinished: &ttlSecondsAfterFinished,
        },
        Status: batch.JobStatus{},
    }, nil
}
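Not part of this commit: a minimal sketch of how a reconcile pass might consume BuildLeaderJob, assuming a kubebuilder-style reconciler that embeds client.Client and exposes a Scheme field, and using controllerutil from sigs.k8s.io/controller-runtime/pkg/controller/controllerutil. The helper name reconcileLeaderJob is hypothetical.

func (r *LocustClusterReconciler) reconcileLeaderJob(ctx context.Context, log logr.Logger, lc *locustCluster.LocustCluster) error {
    job, err := r.BuildLeaderJob(ctx, log, lc)
    if err != nil {
        return err
    }
    // Owning the Job means it is garbage-collected with its LocustCluster.
    if err := controllerutil.SetControllerReference(lc, job, r.Scheme); err != nil {
        return err
    }
    log.Info("Creating leader job", "job", job.Name)
    return r.Create(ctx, job)
}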