Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 9 additions & 5 deletions images/router/haproxy/conf/haproxy-config.template
Original file line number Diff line number Diff line change
Expand Up @@ -570,6 +570,10 @@ backend openshift_default
passed through to the backend pod by just looking at the TCP headers.
*/}}
{{- range $cfgIdx, $cfg := .State }}

{{- /* Common configurations for any backend or server */}}
{{- $health_check_interval := clipHAProxyTimeoutValue (firstMatch $timeSpecPattern (index $cfg.Annotations "router.openshift.io/haproxy.health.check.interval") (env "ROUTER_BACKEND_CHECK_INTERVAL") "5000ms") }}

{{- if matchValues (print $cfg.TLSTermination) "" "edge" "reencrypt" }}

# Plain http backend or backend with TLS terminated at the edge or a
Expand Down Expand Up @@ -755,7 +759,7 @@ backend {{ genBackendNamePrefix $cfg.TLSTermination }}:{{ $cfgIdx }}
{{- end }}
{{- end }}{{/* end type specific options*/}}

{{- if and (not $endpoint.NoHealthCheck) (gt $cfg.ActiveEndpoints 1) }} check inter {{ clipHAProxyTimeoutValue (firstMatch $timeSpecPattern (index $cfg.Annotations "router.openshift.io/haproxy.health.check.interval") (env "ROUTER_BACKEND_CHECK_INTERVAL") "5000ms") }}
{{- if and (not $endpoint.NoHealthCheck) (gt $cfg.ActiveEndpoints 1) }} check inter {{ $health_check_interval }}
{{- end }}{{/* end else no health check */}}
{{- with $podMaxConn := index $cfg.Annotations "haproxy.router.openshift.io/pod-concurrent-connections" }}
{{- if (isInteger (index $cfg.Annotations "haproxy.router.openshift.io/pod-concurrent-connections")) }} maxconn {{$podMaxConn }} {{- end }}
Expand All @@ -770,7 +774,7 @@ backend {{ genBackendNamePrefix $cfg.TLSTermination }}:{{ $cfgIdx }}
{{- if (eq $cfg.TLSTermination "reencrypt") }}
dynamic-cookie-key {{ $cfg.RoutingKeyName }}
{{- range $idx, $serverName := $dynamicConfigManager.GenerateDynamicServerNames $cfgIdx }}
server {{ $serverName }} 172.4.0.4:8765 weight 0 ssl disabled check inter {{ clipHAProxyTimeoutValue (firstMatch $timeSpecPattern (index $cfg.Annotations "router.openshift.io/haproxy.health.check.interval") (env "ROUTER_BACKEND_CHECK_INTERVAL") "5000ms") }}
server {{ $serverName }} 172.4.0.4:8765 weight 0 ssl disabled check inter {{ $health_check_interval }}
{{- if gt (len (index $cfg.Certificates (printf "%s_pod" $cfg.Host)).Contents) 0 }} verify required ca-file {{ $workingDir }}/router/cacerts/{{$cfgIdx }}.pem
{{- else }}
{{- if not (isTrue $router_disable_http2) }} alpn h2,http/1.1
Expand All @@ -797,7 +801,7 @@ backend {{ genBackendNamePrefix $cfg.TLSTermination }}:{{ $cfgIdx }}
{{- with $name := $dynamicConfigManager.ServerTemplateName $cfgIdx }}
{{- with $size := $dynamicConfigManager.ServerTemplateSize $cfgIdx }}
dynamic-cookie-key {{ $cfg.RoutingKeyName }}
server-template {{ $name }}- 1-{{ $size }} 172.4.0.4:8765 check disabled
server-template {{ $name }}- 1-{{ $size }} 172.4.0.4:8765 check inter {{ $health_check_interval }} disabled
{{- end }}
{{- end }}
{{- end }}
Expand Down Expand Up @@ -851,7 +855,7 @@ backend {{ genBackendNamePrefix $cfg.TLSTermination }}:{{ $cfgIdx }}
{{- with $serviceUnit := index $.ServiceUnits $serviceUnitName }}
{{- range $idx, $endpoint := processEndpointsForAlias $cfg $serviceUnit (env "ROUTER_BACKEND_PROCESS_ENDPOINTS" "") }}
server {{ $endpoint.ID }} {{ $endpoint.IP }}:{{ $endpoint.Port }} weight {{ $weight }}
{{- if and (not $endpoint.NoHealthCheck) (gt $cfg.ActiveEndpoints 1) }} check inter {{ clipHAProxyTimeoutValue (firstMatch $timeSpecPattern (index $cfg.Annotations "router.openshift.io/haproxy.health.check.interval") (env "ROUTER_BACKEND_CHECK_INTERVAL") "5000ms") }}
{{- if and (not $endpoint.NoHealthCheck) (gt $cfg.ActiveEndpoints 1) }} check inter {{ $health_check_interval }}
{{- end }}{{/* end else no health check */}}
{{- with $podMaxConn := index $cfg.Annotations "haproxy.router.openshift.io/pod-concurrent-connections" }}
{{- if (isInteger (index $cfg.Annotations "haproxy.router.openshift.io/pod-concurrent-connections")) }} maxconn {{$podMaxConn }} {{- end }}
Expand All @@ -866,7 +870,7 @@ backend {{ genBackendNamePrefix $cfg.TLSTermination }}:{{ $cfgIdx }}
{{- with $name := $dynamicConfigManager.ServerTemplateName $cfgIdx }}
{{- with $size := $dynamicConfigManager.ServerTemplateSize $cfgIdx }}
dynamic-cookie-key {{ $cfg.RoutingKeyName }}
server-template {{ $name }}- 1-{{ $size }} 172.4.0.4:8765 check disabled
server-template {{ $name }}- 1-{{ $size }} 172.4.0.4:8765 check inter {{ $health_check_interval }} disabled
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Did you consider adding a unit test for this? I know unit testing for DCM is not comprehensive yet, but it seems like it would fit in TestConfigTemplate in pkg/router/router_test.go:

		"server-template with default health check interval": {
			mustCreateWithConfig{
				mustCreateEndpointSlices: []mustCreateEndpointSlice{
					{
						name:        "servicest2",
						serviceName: "servicest2",
						addresses:   []string{"1.1.1.1"},
					},
				},
				mustCreateRoute: mustCreateRoute{
					name:              "st2",
					host:              "st2example.com",
					targetServiceName: "servicest2",
					weight:            1,
					time:              start,
				},
				mustMatchConfig: mustMatchConfig{
					section:     "backend",
					sectionName: insecureBackendName(h.namespace, "st2"),
					attribute:   "server-template",
					value:       "inter 5000ms", // Default value
				},
			},
		},
    "server-template with custom health check interval": {                                                                                                                                      
        mustCreateWithConfig{                                                                                                                                                                 
                mustCreateEndpointSlices: []mustCreateEndpointSlice{                                                                                                                          
                        {                                                                                                                                                                     
                                name:        "servicest1",                                                                                                                                    
                                serviceName: "servicest1",                                                                                                                                    
                                addresses:   []string{"1.1.1.1"},  // Single endpoint initially                                                                                               
                        },                                                                                                                                                                    
                },                                                                                                                                                                            
                mustCreateRoute: mustCreateRoute{                                                                                                                                             
                        name:              "st1",                                                                                                                                             
                        host:              "st1example.com",                                                                                                                                  
                        targetServiceName: "servicest1",                                                                                                                                      
                        weight:            1,                                                                                                                                                 
                        time:              start,                                                                                                                                             
                        annotations: map[string]string{                                                                                                                                       
                                "router.openshift.io/haproxy.health.check.interval": "15s",                                                                                                   
                        },                                                                                                                                                                    
                },                                                                                                                                                                            
                mustMatchConfig: mustMatchConfig{                                                                                                                                             
                        section:     "backend",                                                                                                                                               
                        sectionName: insecureBackendName(h.namespace, "st1"),                                                                                                                 
                        attribute:   "server-template",                                                                                                                                       
                        value:       "inter 15s",                                                                                                                                             
                },                                                                                                                                                                            
        },                                                                                                                                                                                    
  },

Then support for ServerTemplate would need to be added to matchConfig:

	case []haproxyconfparsertypes.ServerTemplate:
		if m.fullMatch {
			for _, a := range data {
				params := ""
				for _, p := range a.Params {
					params += " " + p.String()
				}
				fullValue := a.Prefix + " " + a.NumOrRange + " " + a.Fqdn + params
				if fullValue == m.value {
					contains = true
					break
				}
			}
		} else {
			for _, a := range data {
				for _, b := range a.Params {
					contains = contains || b.String() == m.value
				}
			}
		}

If you have unit testing planned in future work - feel free to ignore.

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Your suggestion makes sense. Things may change in a way that makes the test completely outdated, but until that happens the functionality is better covered.

Also, this was an opportunity to better understand how router tests the template, and this was a good experience, since I just used your first snippet, understood its intent, ran, it failed, I debugged, fixed it, and only after that I realized I just read half of your message. Btw I've just implemented the "not fullMatch" branch on my change. Learning every day 😅

{{- end }}
{{- end }}
{{- end }}
Expand Down
71 changes: 71 additions & 0 deletions pkg/router/router_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -931,6 +931,57 @@ func TestConfigTemplate(t *testing.T) {
},
},
},
"server-template with default health check interval": {
mustCreateWithConfig{
mustCreateEndpointSlices: []mustCreateEndpointSlice{
{
name: "servicest2",
serviceName: "servicest2",
addresses: []string{"1.1.1.1"},
},
},
mustCreateRoute: mustCreateRoute{
name: "st2",
host: "st2example.com",
targetServiceName: "servicest2",
weight: 1,
time: start,
},
mustMatchConfig: mustMatchConfig{
section: "backend",
sectionName: insecureBackendName(h.namespace, "st2"),
attribute: "server-template",
value: "inter 5000ms", // Default value
},
},
},
"server-template with custom health check interval": {
mustCreateWithConfig{
mustCreateEndpointSlices: []mustCreateEndpointSlice{
{
name: "servicest1",
serviceName: "servicest1",
addresses: []string{"1.1.1.1"}, // Single endpoint initially
},
},
mustCreateRoute: mustCreateRoute{
name: "st1",
host: "st1example.com",
targetServiceName: "servicest1",
weight: 1,
time: start,
annotations: map[string]string{
"router.openshift.io/haproxy.health.check.interval": "15s",
},
},
mustMatchConfig: mustMatchConfig{
section: "backend",
sectionName: insecureBackendName(h.namespace, "st1"),
attribute: "server-template",
value: "inter 15s",
},
},
},
}

defer cleanUpRoutes(t)
Expand Down Expand Up @@ -1227,6 +1278,26 @@ func matchConfig(m mustMatchConfig, parser haproxyconfparser.Parser) error {
}
}
}
case []haproxyconfparsertypes.ServerTemplate:
if m.fullMatch {
for _, a := range data {
params := ""
for _, p := range a.Params {
params += " " + p.String()
}
fullValue := a.Prefix + " " + a.NumOrRange + " " + a.Fqdn + params
if fullValue == m.value {
contains = true
break
}
}
} else {
for _, a := range data {
for _, b := range a.Params {
contains = contains || b.String() == m.value
}
}
}
}

if !contains && !m.notFound {
Expand Down
37 changes: 37 additions & 0 deletions pkg/router/template/configmanager/haproxy/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -489,6 +489,43 @@ func (cm *haproxyConfigManager) ReplaceRouteEndpoints(id templaterouter.ServiceA
return err
}

// Backends having only one server have their server's health check turned off; health check
// is only enabled when there are two or more endpoints in the backend.
//
// Currently we don't support enabling or disabling health checks, so we are:
//
// * Checking whether we are adding endpoints; and
// * Checking if the current state of the backend is just one static endpoint.
//
// If both of the above matches, we cannot dynamically update and return from here.
// This behavior should be improved via https://issues.redhat.com/browse/NE-2496
// using `del server` and `add server` API calls.
//
// This is the expected layout from the running HAProxy in case there is only one static
// backend server:
//
// # be_id be_name srv_id srv_name srv_addr ... srv_port ...
// 20 be_http:default:route 1 pod:app-848554c7d4-mbhsf:app::10.128.0.78:8000 10.128.0.78 ... 8000 ...
// 20 be_http:default:route 2 _dynamic-pod-1 172.4.0.4 ... 8765 ...
// 20 be_http:default:route 3 _dynamic-pod-2 172.4.0.4 ... 8765 ...
// ... other "_dynamic-pod-NN"

// Check whether there are more newEndpoints than oldEndpoints; if so, we are adding new ones.
if len(newEndpoints) > len(oldEndpoints) {
var staticCount int
// We cannot infer that the first ones are the static ones, since this list is built
// from a hashmap, so the order is not deterministic. We need to iterate over all of them.
for _, s := range servers {
// if not a dynamic backend server, count as static
if !isDynamicBackendServer(s) {
staticCount++
}
}
if staticCount == 1 {
return fmt.Errorf("single endpoint on backend %s, need to reload to enable health check", backendName)
}
}

log.V(4).Info("processing endpoint changes", "deleted", deletedEndpoints, "modified", modifiedEndpoints)

// First process the deleted endpoints and update the servers we
Expand Down