add logging to the operator
Some checks failed
Build images / Run e2e tests (push) Failing after 1m57s
Build images / Run lint test (push) Failing after 7m27s
Lint / Run on Ubuntu (push) Failing after 6m47s
Build images / Run unit test (push) Failing after 8m53s
Build images / Build docker image (push) Has been skipped
E2E Tests / Run on Ubuntu (push) Failing after 1m50s
Tests / Run on Ubuntu (push) Failing after 3m1s

add logging
This commit is contained in:
Chris Richardson
2025-07-02 12:48:45 -04:00
parent d9a945efd4
commit 4d9663fe60
7 changed files with 719 additions and 31 deletions

View File

@@ -24,12 +24,89 @@ import (
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// LocustClusterSpec defines the desired state of LocustCluster.
type LocustClusterSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
// Important: Run "make" to regenerate code after modifying this file
// Foo is an example field of LocustCluster. Edit locustcluster_types.go to remove/update
Foo string `json:"foo,omitempty"`
// ResourcesSpec holds optional CPU/memory bounds for a pod. Values are
// Kubernetes quantity strings (e.g. "500m", "1Gi"); the controller feeds
// them to resource.MustParse, so an invalid quantity panics there.
type ResourcesSpec struct {
	MaxCpu string `json:"maxCpu,omitempty" protobuf:"bytes,1,name=maxCpu"`
	MinCpu string `json:"minCpu,omitempty" protobuf:"bytes,2,name=minCpu"`
	MaxMem string `json:"maxMem,omitempty" protobuf:"bytes,3,name=maxMem"`
	MinMem string `json:"minMem,omitempty" protobuf:"bytes,4,name=minMem"`
}
// LeaderSpec configures the Locust leader (master) job.
type LeaderSpec struct {
	// TestFile is the locustfile the leader runs (passed to `locust -f`).
	// NOTE(review): the JSON tag is "leaderFile", not "testFile" — kept
	// as-is because it is the published CRD field name.
	TestFile string `json:"leaderFile,omitempty" protobuf:"bytes,1,name=leaderFile"`
	// PriorityClass, when set, becomes the leader pod's priorityClassName.
	PriorityClass string `json:"priorityClass,omitempty" protobuf:"bytes,2,name=priorityClass"`
	// Download optionally names an S3 bucket/object to fetch the locustfile from.
	Download LeaderDownloadSpec `json:"download,omitempty" protobuf:"bytes,3,rep,name=download"`
	// Resources overrides the operator's default leader CPU/memory bounds.
	Resources ResourcesSpec `json:"resources,omitempty" protobuf:"bytes,4,rep,name=resources"`
	// Image overrides the default leader container image.
	Image string `json:"image,omitempty" protobuf:"bytes,5,name=image"`
}
// LeaderDownloadSpec points at an S3 object holding the locustfile; the
// controller exports it to the leader container as LOCUST_S3_BUCKET /
// LOCUST_S3_FILENAME.
type LeaderDownloadSpec struct {
	// Bucket is the S3 bucket name (required).
	Bucket string `json:"bucket" protobuf:"bytes,1,name=bucket"`
	// Filename is the object key within the bucket (required).
	Filename string `json:"filename" protobuf:"bytes,2,name=filename"`
}
// WorkerEnv is a single name/value environment-variable entry.
type WorkerEnv struct {
	Name  string `json:"name" protobuf:"bytes,1,name=name"`
	Value string `json:"value" protobuf:"bytes,2,name=value"`
}
// TestsSpecs describes one test run: its name, duration, environment
// settings and an optional launch-command override.
type TestsSpecs struct {
	// TestName identifies this test (required).
	TestName string `json:"testName" protobuf:"bytes,1,name=testName"`
	// RunTime is the test duration — presumably a Locust --run-time style
	// string ("10m"); confirm against the worker container.
	RunTime string `json:"runTime" protobuf:"bytes,2,name=runTime"`
	// TestSettings are name/value pairs for the test environment
	// (serialized under the JSON key "env").
	TestSettings []WorkerEnv `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
	// TestCommand, when set, overrides the command used to run the test.
	TestCommand []string `json:"testCommand,omitempty" protobuf:"bytes,4,name=testCommand"`
}
// WorkerSpec configures the Locust worker pods and the tests they run.
type WorkerSpec struct {
	// HatchRate is presumably the Locust spawn rate (users started per
	// second) — confirm against the worker container's usage.
	HatchRate int `json:"hatchRate" protobuf:"varint,1,name=hatchRate"`
	// TestContainer is the container image used for worker pods.
	TestContainer string `json:"testContainer" protobuf:"bytes,2,name=testContainer"`
	// ScenarioName is exported to the leader as LOCUST_SCENARIO_NAME.
	ScenarioName string `json:"scenarioName" protobuf:"bytes,3,name=scenarioName"`
	// TestLanguage is the language of the test suite — semantics not
	// visible here; confirm with the worker image.
	TestLanguage string `json:"testLanguage" protobuf:"bytes,4,name=testLanguage"`
	// TestPath is the path to the tests inside the container — confirm.
	TestPath string `json:"testPath" protobuf:"bytes,5,name=testPath"`
	// Tests lists the test runs to execute (required, at least per the CRD).
	Tests []TestsSpecs `json:"tests" protobuf:"bytes,6,rep,name=tests"`
	// Clients is the number of simulated Locust clients.
	Clients int `json:"clients" protobuf:"varint,7,name=clients"`
	// Nodes is the number of worker pods.
	Nodes int `json:"nodes" protobuf:"varint,8,name=nodes"`
	// PriorityClass, when set, becomes the worker pods' priorityClassName.
	PriorityClass string `json:"priorityClass,omitempty" protobuf:"bytes,9,name=priorityClass"`
	// Resources overrides the default worker CPU/memory bounds.
	Resources ResourcesSpec `json:"resources,omitempty" protobuf:"bytes,10,rep,name=resources"`
}
type MosaicSpec struct {
WorkSpace string `json:"workSpace" protobuf:"bytes,1,name=workSpace"`
NameSpace string `json:"nameSpace" protobuf:"bytes,2,name=nameSpace"`
PublishingEndpoint string `json:"publishing_endpoint,omitempty" protobuf:"bytes,3,name=publishingEndpoint"`
Mtls bool `json:"mtls,omitempty" protobuf:"bool,4,name=mtls"`
PrometheusTag string `json:"prometheusTag" protobuf:"bytes,3,name=prometheusTag"`
GroupEmail string `json:"groupEmail" protobuf:"bytes,4,name=groupEmail"`
GrafanaEndpoint string `json:"grafanaEndpoint,omitempty" protobuf:"bytes,5,name=grafanaEndpoint"`
}
// MetricsSpec groups the supported metrics-publishing backends; Mosaic is
// currently the only one.
type MetricsSpec struct {
	Mosaic MosaicSpec `json:"mosaic,omitempty" protobuf:"bytes,1,rep,name=mosaic"`
}
// InfrastructureSpec identifies an endpoint and, optionally, where its
// Prometheus metrics are scraped.
type InfrastructureSpec struct {
	// Endpoint is the target address (required per the CRD).
	Endpoint string `json:"endpoint" protobuf:"bytes,1,name=endpoint"`
	// PrometheusJobName is the scrape job name — confirm consumer.
	PrometheusJobName string `json:"prometheusJobName,omitempty" protobuf:"bytes,2,name=prometheusJobName"`
	// PrometheusPort is the scrape port.
	PrometheusPort int `json:"prometheusPort,omitempty" protobuf:"varint,3,name=prometheusPort"`
	// PrometheusEndpoint is the scrape path/URL.
	PrometheusEndpoint string `json:"prometheusEndpoint,omitempty" protobuf:"bytes,4,name=prometheusEndpoint"`
}
// SecretsKeys names a single key within a Kubernetes Secret.
// NOTE(review): plural type name kept — it is referenced by the generated
// deepcopy code and the SecretsSpec field.
type SecretsKeys struct {
	SecretKey string `json:"secretKey" protobuf:"bytes,1,name=secretKey"`
}
// SecretsSpec selects keys from an existing Kubernetes Secret; the
// controller turns them into env vars via BuildSecretsEnvVars.
type SecretsSpec struct {
	// SecretName is the name of the Secret object (required).
	SecretName string `json:"secretName" protobuf:"bytes,1,name=secretName"`
	// SecretsKeys lists the keys to expose from that Secret (required).
	SecretsKeys []SecretsKeys `json:"secretKeys" protobuf:"bytes,2,name=secretKeys"`
}
// LocustClusterSpec defines the desired state of LocustCluster
type LocustClusterSpec struct {
	// Leader configures the Locust master job (optional; operator defaults apply).
	Leader LeaderSpec `json:"leader,omitempty" protobuf:"bytes,1,rep,name=leader"`
	// Worker configures the worker pods and tests (required per the CRD).
	Worker WorkerSpec `json:"worker" protobuf:"bytes,2,rep,name=worker"`
	// Metrics configures metrics/report publishing (required per the CRD).
	Metrics MetricsSpec `json:"metrics" protobuf:"bytes,3,rep,name=metrics"`
	// Infrastructure optionally lists endpoints and their Prometheus scrape targets.
	Infrastructure []InfrastructureSpec `json:"infrastructure,omitempty" protobuf:"bytes,4,rep,name=infrastructure"`
	// Secrets optionally lists Secret keys to expose to the test containers.
	Secrets []SecretsSpec `json:"secrets,omitempty" protobuf:"bytes,5,rep,name=secrets"`
}
// LocustClusterStatus defines the observed state of LocustCluster.

View File

@@ -24,12 +24,59 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// NOTE(review): the DeepCopy* functions below match controller-gen's
// autogenerated output; change the API types and regenerate (`make`)
// rather than editing these by hand — TODO confirm the codegen target.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *InfrastructureSpec) DeepCopyInto(out *InfrastructureSpec) {
	// All fields are scalars, so a flat copy is a deep copy.
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InfrastructureSpec.
func (in *InfrastructureSpec) DeepCopy() *InfrastructureSpec {
	if in == nil {
		return nil
	}
	out := new(InfrastructureSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaderDownloadSpec) DeepCopyInto(out *LeaderDownloadSpec) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderDownloadSpec.
func (in *LeaderDownloadSpec) DeepCopy() *LeaderDownloadSpec {
	if in == nil {
		return nil
	}
	out := new(LeaderDownloadSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LeaderSpec) DeepCopyInto(out *LeaderSpec) {
	*out = *in
	// Download and Resources are value structs with no reference fields.
	out.Download = in.Download
	out.Resources = in.Resources
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaderSpec.
func (in *LeaderSpec) DeepCopy() *LeaderSpec {
	if in == nil {
		return nil
	}
	out := new(LeaderSpec)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocustCluster) DeepCopyInto(out *LocustCluster) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	// Spec holds slices (Infrastructure, Secrets, Worker.Tests) and must be
	// deep-copied; the previous shallow `out.Spec = in.Spec` on the line
	// before was redundant — its result was immediately overwritten here.
	in.Spec.DeepCopyInto(&out.Spec)
	out.Status = in.Status
}
@@ -86,6 +133,21 @@ func (in *LocustClusterList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *LocustClusterSpec) DeepCopyInto(out *LocustClusterSpec) {
	*out = *in
	// Leader and Metrics contain no reference fields; flat copies suffice.
	out.Leader = in.Leader
	in.Worker.DeepCopyInto(&out.Worker)
	out.Metrics = in.Metrics
	if in.Infrastructure != nil {
		in, out := &in.Infrastructure, &out.Infrastructure
		// InfrastructureSpec elements are all scalars, so copy() is enough.
		*out = make([]InfrastructureSpec, len(*in))
		copy(*out, *in)
	}
	if in.Secrets != nil {
		in, out := &in.Secrets, &out.Secrets
		*out = make([]SecretsSpec, len(*in))
		// Each SecretsSpec holds a nested slice and needs its own deep copy.
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocustClusterSpec.
@@ -112,3 +174,147 @@ func (in *LocustClusterStatus) DeepCopy() *LocustClusterStatus {
in.DeepCopyInto(out)
return out
}
// NOTE(review): the DeepCopy* functions below match controller-gen's
// autogenerated output; change the API types and regenerate (`make`)
// rather than editing these by hand — TODO confirm the codegen target.

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MetricsSpec) DeepCopyInto(out *MetricsSpec) {
	*out = *in
	// MosaicSpec has only scalar fields; a flat copy suffices.
	out.Mosaic = in.Mosaic
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricsSpec.
func (in *MetricsSpec) DeepCopy() *MetricsSpec {
	if in == nil {
		return nil
	}
	out := new(MetricsSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MosaicSpec) DeepCopyInto(out *MosaicSpec) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MosaicSpec.
func (in *MosaicSpec) DeepCopy() *MosaicSpec {
	if in == nil {
		return nil
	}
	out := new(MosaicSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourcesSpec) DeepCopyInto(out *ResourcesSpec) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesSpec.
func (in *ResourcesSpec) DeepCopy() *ResourcesSpec {
	if in == nil {
		return nil
	}
	out := new(ResourcesSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretsKeys) DeepCopyInto(out *SecretsKeys) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsKeys.
func (in *SecretsKeys) DeepCopy() *SecretsKeys {
	if in == nil {
		return nil
	}
	out := new(SecretsKeys)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SecretsSpec) DeepCopyInto(out *SecretsSpec) {
	*out = *in
	if in.SecretsKeys != nil {
		in, out := &in.SecretsKeys, &out.SecretsKeys
		// SecretsKeys elements are scalar-only, so copy() is a deep copy.
		*out = make([]SecretsKeys, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretsSpec.
func (in *SecretsSpec) DeepCopy() *SecretsSpec {
	if in == nil {
		return nil
	}
	out := new(SecretsSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TestsSpecs) DeepCopyInto(out *TestsSpecs) {
	*out = *in
	if in.TestSettings != nil {
		in, out := &in.TestSettings, &out.TestSettings
		*out = make([]WorkerEnv, len(*in))
		copy(*out, *in)
	}
	if in.TestCommand != nil {
		in, out := &in.TestCommand, &out.TestCommand
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestsSpecs.
func (in *TestsSpecs) DeepCopy() *TestsSpecs {
	if in == nil {
		return nil
	}
	out := new(TestsSpecs)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkerEnv) DeepCopyInto(out *WorkerEnv) {
	*out = *in
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerEnv.
func (in *WorkerEnv) DeepCopy() *WorkerEnv {
	if in == nil {
		return nil
	}
	out := new(WorkerEnv)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WorkerSpec) DeepCopyInto(out *WorkerSpec) {
	*out = *in
	if in.Tests != nil {
		in, out := &in.Tests, &out.Tests
		*out = make([]TestsSpecs, len(*in))
		// TestsSpecs holds nested slices; each element needs its own deep copy.
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	out.Resources = in.Resources
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkerSpec.
func (in *WorkerSpec) DeepCopy() *WorkerSpec {
	if in == nil {
		return nil
	}
	out := new(WorkerSpec)
	in.DeepCopyInto(out)
	return out
}

43
cmd/logging.go Normal file
View File

@@ -0,0 +1,43 @@
package main
import (
"log"
"os"
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"gopkg.in/natefinch/lumberjack.v2"
)
// CreateFileLogger builds a logr.Logger that writes JSON-encoded records to
// fileName (rotated by lumberjack: 1000 MB per file, 3 backups, 90 days)
// and, in parallel, console-encoded records to stdout. logLevel gates both
// sinks; stack traces are attached at Error level and above.
//
// NOTE: on failure to create or close the log file this calls log.Fatal and
// terminates the process, so it is only suitable for startup paths.
func CreateFileLogger(fileName string, logLevel zapcore.LevelEnabler) logr.Logger {
	// Pre-create the file so it exists with 0644 permissions. lumberjack
	// manages its own handle from then on, so close ours right away instead
	// of holding it open (via defer) for the rest of the function, and
	// surface any Close error rather than discarding it.
	file, err := os.OpenFile(fileName, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatal(err)
	}
	if err := file.Close(); err != nil {
		log.Fatal(err)
	}

	config := zap.NewProductionEncoderConfig()
	config.EncodeTime = zapcore.ISO8601TimeEncoder

	fileWriter := zapcore.AddSync(&lumberjack.Logger{
		Filename:   fileName,
		MaxSize:    1000, // MB
		MaxBackups: 3,
		MaxAge:     90, // days
		Compress:   false,
	})

	// Tee the same records to the rotating file (JSON) and stdout (console).
	core := zapcore.NewTee(
		zapcore.NewCore(zapcore.NewJSONEncoder(config), fileWriter, logLevel),
		zapcore.NewCore(zapcore.NewConsoleEncoder(config), zapcore.AddSync(os.Stdout), logLevel),
	)
	logger := zap.New(core, zap.AddCaller(), zap.AddStacktrace(zapcore.ErrorLevel))
	return zapr.NewLogger(logger)
}

View File

@@ -24,15 +24,15 @@ import (
// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
// to ensure that exec-entrypoint and run can make use of them.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"go.uber.org/zap"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/certwatcher"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
//"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/metrics/filters"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
"sigs.k8s.io/controller-runtime/pkg/webhook"
@@ -56,6 +56,7 @@ func init() {
// nolint:gocyclo
func main() {
var logFilePath string
var metricsAddr string
var metricsCertPath, metricsCertName, metricsCertKey string
var webhookCertPath, webhookCertName, webhookCertKey string
@@ -81,13 +82,19 @@ func main() {
flag.StringVar(&metricsCertKey, "metrics-cert-key", "tls.key", "The name of the metrics server key file.")
flag.BoolVar(&enableHTTP2, "enable-http2", false,
"If set, HTTP/2 will be enabled for the metrics and webhook servers")
opts := zap.Options{
Development: true,
}
opts.BindFlags(flag.CommandLine)
flag.StringVar(&logFilePath, "log-file", "/var/log/etcd-operator", "The log file to write logs to.")
/*
opts := zap.Options{
Development: true,
}
opts.BindFlags(flag.CommandLine)
*/
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
logger := CreateFileLogger(logFilePath, zap.DebugLevel)
//ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
ctrl.SetLogger(logger)
// if the enable-http2 flag is false (the default), http/2 should be disabled
// due to its vulnerabilities. More specifically, disabling http/2 will

View File

@@ -37,12 +37,167 @@ spec:
metadata:
type: object
spec:
description: LocustClusterSpec defines the desired state of LocustCluster.
description: LocustClusterSpec defines the desired state of LocustCluster
properties:
foo:
description: Foo is an example field of LocustCluster. Edit locustcluster_types.go
to remove/update
type: string
infrastructure:
items:
properties:
endpoint:
type: string
prometheusEndpoint:
type: string
prometheusJobName:
type: string
prometheusPort:
type: integer
required:
- endpoint
type: object
type: array
leader:
properties:
download:
properties:
bucket:
type: string
filename:
type: string
required:
- bucket
- filename
type: object
image:
type: string
leaderFile:
type: string
priorityClass:
type: string
resources:
properties:
maxCpu:
type: string
maxMem:
type: string
minCpu:
type: string
minMem:
type: string
type: object
type: object
metrics:
properties:
mosaic:
properties:
grafanaEndpoint:
type: string
groupEmail:
type: string
mtls:
type: boolean
nameSpace:
type: string
prometheusTag:
type: string
publishing_endpoint:
type: string
workSpace:
type: string
required:
- groupEmail
- nameSpace
- prometheusTag
- workSpace
type: object
type: object
secrets:
items:
properties:
secretKeys:
items:
properties:
secretKey:
type: string
required:
- secretKey
type: object
type: array
secretName:
type: string
required:
- secretKeys
- secretName
type: object
type: array
worker:
properties:
clients:
type: integer
hatchRate:
type: integer
nodes:
type: integer
priorityClass:
type: string
resources:
properties:
maxCpu:
type: string
maxMem:
type: string
minCpu:
type: string
minMem:
type: string
type: object
scenarioName:
type: string
testContainer:
type: string
testLanguage:
type: string
testPath:
type: string
tests:
items:
properties:
env:
items:
properties:
name:
type: string
value:
type: string
required:
- name
- value
type: object
type: array
runTime:
type: string
testCommand:
items:
type: string
type: array
testName:
type: string
required:
- runTime
- testName
type: object
type: array
required:
- clients
- hatchRate
- nodes
- scenarioName
- testContainer
- testLanguage
- testPath
- tests
type: object
required:
- metrics
- worker
type: object
status:
description: LocustClusterStatus defines the observed state of LocustCluster.

View File

@@ -30,15 +30,3 @@ rules:
- get
- patch
- update
- apiGroups:
- ""
resources:
- pods
- services
- endpoints
- persistentvolumeclaims
- events
- configmaps
- secrets
verbs:
- '*'

View File

@@ -2,6 +2,9 @@ package controller
import (
"context"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
locustCluster "git.lilpenguins.com/crichardson/locust-operator/api/v1alpha1"
"github.com/go-logr/logr"
@@ -17,3 +20,212 @@ func (r *LocustClusterReconciler) CreateLeaderService(log logr.Logger, locustClu
log.Info("Creating service for leader")
return nil
}
// BuildLeaderJob assembles the Kubernetes Job that runs the Locust leader
// (master) process for the given LocustCluster.
//
// It resolves CPU/memory bounds, the locustfile and the container image
// from operator defaults with per-CR overrides, builds the container
// environment (scenario name, optional Mosaic metrics settings, optional
// S3 download credentials, user-declared secrets), optionally mounts the
// "<name>-settings" ConfigMap, and returns the Job ready for creation.
// The Job cleans itself up 60s after finishing via TTLSecondsAfterFinished.
//
// NOTE(review): the previous signature declared no return values and no
// ctx parameter, which did not match the body (it used ctx and returned
// (*batch.Job, error)); the signature now reflects the body.
func (r *LocustClusterReconciler) BuildLeaderJob(ctx context.Context, log logr.Logger, locustCluster *locustCluster.LocustCluster) (*batch.Job, error) {
	ttlSecondsAfterFinished := int32(60)
	var envVars []v1.EnvVar

	// Operator defaults, each individually overridable from the CR.
	cpuMax := LeaderMaxCPU
	cpuMin := LeaderMinCPU
	memMax := LeaderMaxMemory
	memMin := LeaderMinMemory
	if len(locustCluster.Spec.Leader.Resources.MaxCpu) > 0 {
		cpuMax = locustCluster.Spec.Leader.Resources.MaxCpu
	}
	if len(locustCluster.Spec.Leader.Resources.MinCpu) > 0 {
		cpuMin = locustCluster.Spec.Leader.Resources.MinCpu
	}
	if len(locustCluster.Spec.Leader.Resources.MaxMem) > 0 {
		memMax = locustCluster.Spec.Leader.Resources.MaxMem
	}
	if len(locustCluster.Spec.Leader.Resources.MinMem) > 0 {
		memMin = locustCluster.Spec.Leader.Resources.MinMem
	}

	// Fall back to the default locustfile when the CR does not name one.
	testFile := LeaderTestFile
	if len(locustCluster.Spec.Leader.TestFile) > 0 {
		testFile = locustCluster.Spec.Leader.TestFile
	}

	scenarioEnv := v1.EnvVar{
		Name:  "LOCUST_SCENARIO_NAME",
		Value: locustCluster.Spec.Worker.ScenarioName,
	}
	envVars = append(envVars, scenarioEnv)

	if isMosaicDefined(locustCluster) {
		// Mosaic publishing: the API key comes from the pre-provisioned
		// "mosaic-api-key" Secret; everything else comes from the CR.
		mosaicEnv := v1.EnvVar{
			Name: "LOCUST_MOSAIC_API_KEY",
			ValueFrom: &v1.EnvVarSource{
				SecretKeyRef: &v1.SecretKeySelector{
					LocalObjectReference: v1.LocalObjectReference{Name: "mosaic-api-key"},
					Key:                  "api_key",
				},
			},
		}
		mosaicTag := v1.EnvVar{
			Name:  "LOCUST_PROMETHEUS_TAG",
			Value: locustCluster.Spec.Metrics.Mosaic.PrometheusTag,
		}
		grafanaEndpoint := GrafanaEndpoint
		if len(locustCluster.Spec.Metrics.Mosaic.GrafanaEndpoint) > 0 {
			grafanaEndpoint = locustCluster.Spec.Metrics.Mosaic.GrafanaEndpoint
		}
		grafanaEndpointEnv := v1.EnvVar{
			Name:  "LOCUST_GRAFANA_ENDPOINT",
			Value: grafanaEndpoint,
		}
		groupEmail := v1.EnvVar{
			Name:  "LOCUST_GROUP_EMAIL",
			Value: locustCluster.Spec.Metrics.Mosaic.GroupEmail,
		}
		envVars = append(envVars, mosaicEnv, mosaicTag, grafanaEndpointEnv, groupEmail)
	}

	// Add endpoint env vars to container env vars if they exist.
	// NOTE(review): endpointsEnvVars is not defined in this function —
	// presumably populated elsewhere in this file from Spec.Infrastructure;
	// confirm before relying on it.
	if len(endpointsEnvVars) != 0 {
		envVars = append(envVars, endpointsEnvVars...)
	}

	// User-declared secrets from the CR become env vars.
	if len(locustCluster.Spec.Secrets) > 0 {
		secretVars := r.BuildSecretsEnvVars(locustCluster)
		envVars = append(envVars, secretVars...)
	}

	// When an S3 bucket+file is given, pass the location plus credentials
	// (from the pre-provisioned "loadtest-blob" Secret) to the container.
	if len(locustCluster.Spec.Leader.Download.Bucket) > 0 &&
		len(locustCluster.Spec.Leader.Download.Filename) > 0 {
		bucketEnv := v1.EnvVar{
			Name:  "LOCUST_S3_BUCKET",
			Value: locustCluster.Spec.Leader.Download.Bucket,
		}
		fileEnv := v1.EnvVar{
			Name:  "LOCUST_S3_FILENAME",
			Value: locustCluster.Spec.Leader.Download.Filename,
		}
		awsKey := v1.EnvVar{
			Name: "AWS_ACCESS_KEY_ID",
			ValueFrom: &v1.EnvVarSource{
				SecretKeyRef: &v1.SecretKeySelector{
					LocalObjectReference: v1.LocalObjectReference{Name: "loadtest-blob"},
					Key:                  "blob_key",
				},
			},
		}
		awsSecret := v1.EnvVar{
			Name: "AWS_SECRET_ACCESS_KEY",
			ValueFrom: &v1.EnvVarSource{
				SecretKeyRef: &v1.SecretKeySelector{
					LocalObjectReference: v1.LocalObjectReference{Name: "loadtest-blob"},
					Key:                  "blob_secret",
				},
			},
		}
		envVars = append(envVars, bucketEnv, fileEnv, awsKey, awsSecret)
	}

	// Allow the user to override the leader docker image from the CRD.
	leaderImage := LeaderContainer
	if locustCluster.Spec.Leader.Image != "" {
		leaderImage = locustCluster.Spec.Leader.Image
	}

	envSource, err := r.BuildTestRunEnvVars(ctx, locustCluster)
	if err != nil {
		return nil, err
	}

	podSpec := v1.PodSpec{
		RestartPolicy: "Never",
		Containers: []v1.Container{
			{
				Name:    locustCluster.Name + "-leader",
				Image:   leaderImage,
				Command: []string{"locust", "-f", testFile, "--master"},
				Ports: []v1.ContainerPort{
					{
						Name:          LeaderPortNameWeb,
						ContainerPort: LeaderPortWeb,
						Protocol:      LeaderPortProtocol,
					},
					{
						Name:          LeaderPortNameLocust1,
						ContainerPort: LeaderPortListener1,
						Protocol:      LeaderPortProtocol,
					},
					{
						Name:          LeaderPortNameLocust2,
						ContainerPort: LeaderPortListener2,
						Protocol:      LeaderPortProtocol,
					},
				},
				Env:     envVars,
				EnvFrom: envSource,
				Resources: v1.ResourceRequirements{
					Limits: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse(cpuMax),
						v1.ResourceMemory: resource.MustParse(memMax),
					},
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse(cpuMin),
						v1.ResourceMemory: resource.MustParse(memMin),
					},
				},
				ImagePullPolicy: "Always",
			},
		},
	}

	// If a "<locust name>-settings" ConfigMap exists, mount it so its data
	// can be pulled and attached to the report email.
	settingsConfigMap := &v1.ConfigMap{}
	endpointKey := kclient.ObjectKey{
		Name:      locustCluster.Name + "-settings",
		Namespace: locustCluster.Namespace,
	}
	err = r.Get(ctx, endpointKey, settingsConfigMap)
	if err == nil {
		// Add the ConfigMap as a volume to the pod.
		podSpec.Volumes = []v1.Volume{
			{
				Name: "config",
				VolumeSource: v1.VolumeSource{
					ConfigMap: &v1.ConfigMapVolumeSource{
						LocalObjectReference: v1.LocalObjectReference{Name: locustCluster.Name + "-settings"},
					},
				},
			},
		}
		// Mount the volume into the leader container.
		podSpec.Containers[0].VolumeMounts = []v1.VolumeMount{
			{
				Name:      "config",
				MountPath: LeaderBuildParamConfig,
			},
		}
	}

	// The scheduler's default priority applies unless the CR names a class.
	// (The old comment claimed a "p4" default was set here; no such default
	// exists in this function.)
	if len(locustCluster.Spec.Leader.PriorityClass) > 0 {
		podSpec.PriorityClassName = locustCluster.Spec.Leader.PriorityClass
	}

	return &batch.Job{
		TypeMeta:   metav1.TypeMeta{},
		ObjectMeta: BuildObjectMeta(locustCluster, "leader"),
		Spec: batch.JobSpec{
			Template: v1.PodTemplateSpec{
				ObjectMeta: BuildPodObjectMeta(locustCluster, "leader"),
				Spec:       podSpec,
			},
			TTLSecondsAfterFinished: &ttlSecondsAfterFinished,
		},
	}, nil
}