Completed until Part 4
parent cfa74ce963
commit 845ae9500c
@@ -1,63 +0,0 @@
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: public-gateway
  namespace: default
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "*"
#        - "*.filterhome.xyz"
#        - "filterhome.xyz"
#
#        - "filterhome.duckdns.org"
#        - "*.filterhome.duckdns.org"
      tls:
        httpsRedirect: true
    - port:
        number: 443
        name: https
        protocol: HTTPS
      hosts:
        - "*.filterhome.xyz"
        - "filterhome.xyz"

        - "filterhome.duckdns.org"
        - "*.filterhome.duckdns.org"
      tls:
        mode: SIMPLE
        credentialName: filterhome-domain-cert-public
---
#apiVersion: networking.istio.io/v1alpha3
#kind: Gateway
#metadata:
#  name: home-gateway
#spec:
#  selector:
#    istio: ingressgateway
#  servers:
#    - port:
#        number: 80
#        name: http
#        protocol: HTTP
#      hosts:
#        - "*.filter.home"
#        - "filter.home"
# #      tls:
# #        httpsRedirect: true
##    - port:
##        number: 443
##        name: secure-http
##        protocol: HTTPS
##      hosts:
##        - "*.filter.home"
##        - "filter.home"
##      tls:
##        mode: SIMPLE
##        credentialName: filterhome-domain-cert-public
@@ -0,0 +1,6 @@
#apiVersion: v1
#kind: Namespace
#metadata:
#  name: istio-ingress
#  labels:
#    istio-injection: "enabled"
@@ -0,0 +1,82 @@
apiVersion: v1
kind: Service
metadata:
  name: istio-public-ingress
  namespace: istio-system
  labels:
    app: istio-public-ingress
spec:
  type: LoadBalancer
  selector:
    istio: public-ingress
    app: istio-public-ingress
  ports:
    - port: 80
      name: http
    - port: 443
      name: https
  loadBalancerIP: 192.168.1.80
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: istio-public-ingress
  namespace: istio-system
  labels:
    app: istio-public-ingress
spec:
  selector:
    matchLabels:
      istio: public-ingress
      app: istio-public-ingress
  template:
    metadata:
      annotations:
        # Select the gateway injection template (rather than the default sidecar template)
        inject.istio.io/templates: gateway
      labels:
        # Set a unique label for the gateway. This is required to ensure Gateways can select this workload
        istio: public-ingress
        # Enable gateway injection. If connecting to a revisioned control plane, replace with "istio.io/rev: revision-name"
        sidecar.istio.io/inject: "true"
        app: istio-public-ingress
    spec:
      # Allow binding to all ports (such as 80 and 443)
      securityContext:
        sysctls:
          - name: net.ipv4.ip_unprivileged_port_start
            value: "0"
      containers:
        - name: istio-proxy
          image: auto # The image will automatically update each time the pod starts.
          # Drop all privileges, allowing to run as non-root
          securityContext:
            capabilities:
              drop:
                - ALL
            runAsUser: 1337
            runAsGroup: 1337
---
# Set up roles to allow reading credentials for TLS
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: istio-public-ingress-sds
  namespace: istio-system
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "watch", "list"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: istio-public-ingress-sds
  namespace: istio-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: istio-public-ingress-sds
subjects:
  - kind: ServiceAccount
    name: default
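If gateway injection worked, a pod carrying the `istio=public-ingress` label should come up shortly after the apply; a quick sanity check (a minimal sketch, the label comes from the Deployment above):

```shell
# The pod is injected from the "gateway" template, so the only
# container listed should be istio-proxy
kubectl get pods -n istio-system -l istio=public-ingress -o wide
```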
17 Migrations/Forget_Traefik_2023/P2_Ingress_Public_LB/Gateway.yaml Executable file
@@ -0,0 +1,17 @@
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: http-to-https
  namespace: default
spec:
  selector:
    istio: public-ingress
  servers:
    - port:
        number: 80
        name: http2
        protocol: HTTP2
      hosts:
        - "*"
      tls:
        httpsRedirect: true
@@ -2,10 +2,9 @@ apiVersion: networking.istio.io/v1alpha3
 kind: DestinationRule
 metadata:
   name: filebrowser
-# namespace: default
   namespace: external
 spec:
-  host: "filebrowser.filterhome.xyz"
+  host: filebrowser.external.svc.cluster.local
   trafficPolicy:
     tls:
       mode: SIMPLE
@@ -2,13 +2,10 @@ apiVersion: networking.istio.io/v1alpha3
 kind: ServiceEntry
 metadata:
   name: filebrowser-se
-# namespace: default
   namespace: external
 spec:
   hosts:
-    - filebrowser.filterhome.xyz
-    - filebrowser.filterhome.duckdns.org
-    - filebrowser.filter.home
+    - filebrowser.external.svc.cluster.local
   location: MESH_INTERNAL
   ports:
     - number: 443
@@ -2,7 +2,6 @@ apiVersion: networking.istio.io/v1alpha3
 kind: VirtualService
 metadata:
   name: filebrowser-vs
-# namespace: default
   namespace: external
 spec:
   hosts:
@@ -14,10 +13,6 @@ spec:
   http:
     - route:
         - destination:
-            host: filebrowser.filterhome.xyz
+            host: filebrowser.external.svc.cluster.local
             port:
               number: 443
-#            headers:
-#              request:
-#                set:
-#                  HOST: "filebrowser.filterhome.xyz"
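To confirm a DR/SE/VS trio like this one registered in the mesh, Envoy's view can be inspected from the ingress pod (a sketch; substitute the pod name from your own `kubectl get pods -n istio-system` output):

```shell
# Dump the clusters Envoy knows about, filtered to the filebrowser entries
istioctl proxy-config cluster -n istio-system <istio-public-ingress-pod> | grep filebrowser
```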
22 Migrations/Forget_Traefik_2023/P3_External_Services/Gateway.yaml Executable file
@@ -0,0 +1,22 @@
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: public-gateway
  namespace: default
spec:
  selector:
    istio: public-ingress
  servers:
    - port:
        number: 443
        name: https
        protocol: HTTPS
      hosts:
        - "*.filterhome.xyz"
        - "filterhome.xyz"

        - "filterhome.duckdns.org"
        - "*.filterhome.duckdns.org"
      tls:
        mode: SIMPLE
        credentialName: filterhome-domain-cert-public
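The VirtualServices below attach to this gateway through their `gateways:` field, which the hunks don't show; as a rough sketch of the wiring (the `default/public-gateway` reference assumes the Gateway stays in `default`):

```yaml
# Fragment of a VirtualService bound to the public gateway (illustrative)
spec:
  hosts:
    - gitea.filterhome.xyz
  gateways:
    - default/public-gateway  # <namespace>/<gateway-name>
```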
@@ -2,10 +2,9 @@ apiVersion: networking.istio.io/v1alpha3
 kind: DestinationRule
 metadata:
   name: gitea
-# namespace: default
   namespace: external
 spec:
-  host: "gitea.filterhome.xyz"
+  host: gitea.external.svc.cluster.local
   trafficPolicy:
     tls:
       mode: SIMPLE
@@ -2,13 +2,10 @@ apiVersion: networking.istio.io/v1alpha3
 kind: ServiceEntry
 metadata:
   name: gitea-se
-# namespace: default
   namespace: external
 spec:
   hosts:
-    - gitea.filterhome.xyz
-    - gitea.filterhome.duckdns.org
-    - gitea.filter.home
+    - gitea.external.svc.cluster.local
   location: MESH_INTERNAL
   ports:
     - number: 443
@@ -2,7 +2,6 @@ apiVersion: networking.istio.io/v1alpha3
 kind: VirtualService
 metadata:
   name: gitea-vs
-# namespace: default
   namespace: external
 spec:
   hosts:
@@ -14,10 +13,6 @@ spec:
   http:
     - route:
         - destination:
-            host: gitea.filterhome.xyz
+            host: gitea.external.svc.cluster.local
             port:
               number: 443
-#            headers:
-#              request:
-#                set:
-#                  HOST: "gitea.filterhome.xyz"
@@ -6,4 +6,5 @@ metadata:
 spec:
   address: 192.168.1.3
   labels:
-  host: srv
+    host: srv
+#  location: home
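For context, the full `WorkloadEntry` this hunk belongs to looks roughly like the sketch below, reconstructed from the diff and from the `srv-host` name that appears in the Part 3 apply output; treat the exact shape as an assumption:

```yaml
apiVersion: networking.istio.io/v1alpha3
kind: WorkloadEntry
metadata:
  name: srv-host         # name taken from the `kubectl apply` output in Part 3
  namespace: external    # assumed, matching the other P3 resources
spec:
  address: 192.168.1.3   # the media SRV host
  labels:
    host: srv            # label the ServiceEntries select on
#  location: home
```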
@@ -2,10 +2,9 @@ apiVersion: networking.istio.io/v1alpha3
 kind: DestinationRule
 metadata:
   name: jelly
-# namespace: default
   namespace: external
 spec:
-  host: "jelly.filterhome.xyz"
+  host: jelly.external.svc.cluster.local
   trafficPolicy:
     tls:
       mode: SIMPLE
@@ -2,13 +2,10 @@ apiVersion: networking.istio.io/v1alpha3
 kind: ServiceEntry
 metadata:
   name: jelly-se
-# namespace: default
   namespace: external
 spec:
   hosts:
-    - jelly.filterhome.xyz
-    - jelly.filterhome.duckdns.org
-    - jelly.filter.home
+    - jelly.external.svc.cluster.local
   location: MESH_INTERNAL
   ports:
     - number: 443
@@ -2,7 +2,6 @@ apiVersion: networking.istio.io/v1alpha3
 kind: VirtualService
 metadata:
   name: jelly-vs
-# namespace: default
   namespace: external
 spec:
   hosts:
@@ -14,10 +13,6 @@ spec:
   http:
     - route:
         - destination:
-            host: jelly.filterhome.xyz
+            host: jelly.external.svc.cluster.local
             port:
               number: 443
-#            headers:
-#              request:
-#                set:
-#                  HOST: "jelly.filterhome.xyz"
@@ -2,10 +2,9 @@ apiVersion: networking.istio.io/v1alpha3
 kind: DestinationRule
 metadata:
   name: tube
-# namespace: default
   namespace: external
 spec:
-  host: "tube.filterhome.xyz"
+  host: tube.external.svc.cluster.local
   trafficPolicy:
     tls:
       mode: SIMPLE
@@ -2,13 +2,10 @@ apiVersion: networking.istio.io/v1alpha3
 kind: ServiceEntry
 metadata:
   name: tube-se
-# namespace: default
   namespace: external
 spec:
   hosts:
-    - tube.filterhome.xyz
-    - tube.filterhome.duckdns.org
-    - tube.filter.home
+    - tube.external.svc.cluster.local
   location: MESH_INTERNAL
   ports:
     - number: 443
@@ -2,7 +2,6 @@ apiVersion: networking.istio.io/v1alpha3
 kind: VirtualService
 metadata:
   name: tube-vs
-# namespace: default
   namespace: external
 spec:
   hosts:
@@ -14,10 +13,6 @@ spec:
   http:
     - route:
         - destination:
-            host: tube.filterhome.xyz
+            host: tube.external.svc.cluster.local
             port:
               number: 443
-#            headers:
-#              request:
-#                set:
-#                  HOST: "tube.filterhome.xyz"
@@ -2,12 +2,13 @@ apiVersion: cert-manager.io/v1
 kind: ClusterIssuer
 metadata:
   name: letsencrypt-public
 # namespace: istio-ingress
+  namespace: istio-system
 spec:
   acme:
     # The ACME server URL
-    # server: https://acme-staging-v02.api.letsencrypt.org/directory # Testing
-    server: https://acme-v02.api.letsencrypt.org/directory # Prod
+    server: https://acme-staging-v02.api.letsencrypt.org/directory # Testing
+    # server: https://acme-v02.api.letsencrypt.org/directory # Prod
     # Email address used for ACME registration
     email: filter.oriol@gmail.com
     # Name of a secret used to store the ACME account private key
@@ -23,11 +24,13 @@ apiVersion: cert-manager.io/v1
 kind: Certificate
 metadata:
   name: filterhome-domain-cert-public
-# namespace: istio-ingress
+  namespace: istio-system
+# namespace: istio-ingress
 spec:
   secretName: filterhome-domain-cert-public
-  duration: 48h # 90d
-  renewBefore: 2h # 15d
+  duration: 2160h # 90d
+  renewBefore: 360h # 15d
   isCA: false
   privateKey:
     algorithm: RSA
@@ -51,6 +54,9 @@ spec:
 
   # Tube
   - "tube.filterhome.xyz"
 
+#  # hello
+#  - "hello.filterhome.xyz"
   issuerRef:
     name: letsencrypt-public
     kind: ClusterIssuer
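Before relying on the switch to the staging server, it's worth confirming the issuer actually registered its ACME account; a minimal check (cert-manager exposes a READY column on its resources):

```shell
# The ACME account must be registered before certificates can be requested
kubectl get clusterissuer letsencrypt-public
kubectl describe clusterissuer letsencrypt-public | tail -n 20
```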
@@ -86,6 +86,8 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r
 
 - Filebrowser
 
+- qBitTorrent (accessible locally)
+
 ## Limitations
 
 - Limited hardware
@@ -129,27 +131,18 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r
 
 [//]: # (> Finished /July/2023)
 
+> Completed 24/July/2023
+
-- [x] Deploy Certificate Manager on the current `Kluster` with let's encrypt certificate provisioning.
+- [x] Deploy an Istio Ingress Gateway to allow access to the "core" services.
 
-- [ ] Deploy an Istio Ingress Gateway to allow access to the "core" services.
-
-- [ ] Deploy an Istio Egress Gateway to allow egress towards the "core" services.
-
 - [x] Update the router `Virtual Service Port Mapping` to set the Istio Ingress deployed as the new Public Ingress.
 
 - [x] Ensure the Certificates are being provisioned.
 
 #### Rollback plan
 
 - Delete the deployed configurations.
 - Update the router `Virtual Service Port Mapping` to set back the Traefik Ingress from the Pi4 host.
 
 ### Part 3
 
+> Completed 24/July/2023
+
 - [x] Configure Istio to route traffic towards Jellyfin and Filebrowser services.
 
-- [ ] Backup the Istio/Kubernetes configurations used.
+- [x] Backup the Istio/Kubernetes configurations used.
 
 #### Rollback plan
@@ -157,6 +150,59 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r

### Part 4

> Completed 24/July/2023

- [x] Deploy Certificate Manager on the current `Kluster` with let's encrypt certificate provisioning.

- [x] Update the router `NAT - Virtual Servers` to set the Istio Ingress deployed as the new Public Ingress.

- [x] Ensure the Certificates are being provisioned.

> **Note**:\
> - https://istio.io/latest/docs/setup/additional-setup/config-profiles/
> - https://istio.io/latest/docs/setup/additional-setup/customize-installation/

#### Rollback plan

- Delete the deployed configurations.
- Update the router `Virtual Service Port Mapping` to set back the Traefik Ingress from the Pi4 host.

### Part 5

- [ ] Deploy an Istio Egress Gateway to allow egress towards the "core" local services.

#### Rollback plan

- At most, delete the deployed configurations.

### Part 6

- Deploy an Ingress LB for local thingies.
- Update local DNS records accordingly.

#### Rollback plan

- At most, delete the deployed configurations.

### Part 7

- Deploy a Certificate Authority service locally (on the SRV host).

> **Notes**:\
> - https://www.reddit.com/r/selfhosted/comments/owplv5/any_self_hosted_certificate_authority/ \
> - https://github.com/minio/certgen \
> - https://github.com/rabbitmq/tls-gen \
> - https://smallstep.com/blog/private-acme-server/ \
> - https://hub.docker.com/r/smallstep/step-ca

#### Rollback plan

- At most, delete the deployed configurations.

### Part 8

- [ ] Explore Pi4 Storage options.

- [ ] Consider Storage options for the OrangePi5.
@@ -165,13 +211,21 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r
 
 - Return the acquired drives to Amazon?
 
-### Part 5
+### Part 9
 
 - [ ] ~~Wipe~~ (**don't wipe**, just use a different drive) and recreate the current `Kluster`, this time using the Pi4 as a _master_ and the 2 Orange Pi5 as _slaves_ (this will require updating the DNS/DHCP local services).
+- [ ] Set the CM configuration to use the "Production" `let's encrypt CA`, aka. non-dev.
+- [ ] Deploy Istio security.
 
 > **Note**:\
 > I can make a new cluster on the Pi4 and remove the taint that prevents scheduling pods on that node, deploy everything inside (as well as a LB with the same exact IP as the current one), proceed to stop the Orange Pi 5s, then "reformat" the OPi5s with a new distro, install stuff, etc., and join them to the cluster running on the Pi4.
 
+> **Notes:**\
+> https://istio.io/latest/docs/setup/platform-setup/prerequisites/ \
+> https://istio.io/latest/docs/ops/deployment/requirements/
 
 ### Part 10
 
 - [ ] Update the `Current Setup` documentation with the new container and architecture rearrangement.
 
 - [ ] Deploy NFS service on the `media SRV` host.
@@ -180,7 +234,7 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r
 
 - [ ] Update the `Current Setup` documentation with the new container and architecture rearrangement.
 
-### Part 6
+### Part 11
 
 - Set wildcard certificates.
 
@@ -194,6 +248,10 @@ Current Issue? For X and y, I need to wait for a while for the DNS provider to r
 
 - Run the old migrated services back on the `media SRV` host.
 
 ### Extras?
 
+#### Horizontal Pod Autoscaling for the Istio LBs.
+
 # Execution
 
 ## Part 1
@@ -829,25 +887,283 @@ x-frame-options: DENY
 x-xss-protection: 1; mode=block
 ```
 
-## Part 2 & 3
+## Part 2

### Deploy

```shell
kubectl apply -f P2_Ingress_Public_LB
```

```text
service/istio-public-ingress created
deployment.apps/istio-public-ingress created
role.rbac.authorization.k8s.io/istio-public-ingress-sds created
rolebinding.rbac.authorization.k8s.io/istio-public-ingress-sds created
gateway.networking.istio.io/http-to-https created
```

### Check

We can see the LB running with the IP `192.168.1.80`.

> **Disclaimer:**\
> I use MetalLB to assist on the external IP provisioning for the service(s) from the Kubernetes cluster.

```shell
kubectl get svc -owide -n istio-system -l app=istio-public-ingress
```

```text
NAME                   TYPE           CLUSTER-IP       EXTERNAL-IP    PORT(S)                      AGE   SELECTOR
istio-public-ingress   LoadBalancer   10.104.158.106   192.168.1.80   80:32029/TCP,443:32644/TCP   87s   app=istio-public-ingress,istio=public-ingress
```

### Test HTTP to HTTPS redirect

```shell
curl gitea.filterhome.xyz -I
```

```text
HTTP/1.1 301 Moved Permanently
location: https://gitea.filterhome.xyz/
date: Mon, 24 Jul 2023 22:54:10 GMT
server: istio-envoy
transfer-encoding: chunked
```

**I am HERE right now**

## Part 3

### Deploy

```shell
kubectl apply -f P3_External_Services -R
```

```text
namespace/external created
destinationrule.networking.istio.io/filebrowser created
serviceentry.networking.istio.io/filebrowser-se created
virtualservice.networking.istio.io/filebrowser-vs created
gateway.networking.istio.io/public-gateway created
destinationrule.networking.istio.io/gitea created
serviceentry.networking.istio.io/gitea-se created
virtualservice.networking.istio.io/gitea-vs created
workloadentry.networking.istio.io/srv-host created
destinationrule.networking.istio.io/jelly created
serviceentry.networking.istio.io/jelly-se created
virtualservice.networking.istio.io/jelly-vs created
destinationrule.networking.istio.io/tube created
serviceentry.networking.istio.io/tube-se created
virtualservice.networking.istio.io/tube-vs created
```

Since the services expect a certificate, we can't fully test them yet; we can, however, test that incoming HTTP traffic on port 80 gets redirected to HTTPS.

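A minimal check of that redirect against one of the newly routed hosts (any of the four would do, assuming public DNS already points at the LB):

```shell
# Expect a 301 with a location: https://... header, served by istio-envoy
curl -I http://jelly.filterhome.xyz
```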
## Part 4

### Router configuration

I have set the Istio-deployed LB as the ingress target on my home router.

### Deploy CM

```shell
kubectl apply -f P4_Certificate_Manager
```

```text
clusterissuer.cert-manager.io/letsencrypt-public created
certificate.cert-manager.io/filterhome-domain-cert-public created
```

### Check CM

#### Check Certificate status

```shell
kubectl get -n istio-system certificate filterhome-domain-cert-public -o jsonpath='{.metadata.name}{"\t"}{.status.conditions[].reason}{"\t"}{.status.conditions[].message}{"\n"}'
```

```text
filterhome-domain-cert-public	DoesNotExist	Issuing certificate as Secret does not exist
```

#### Check Certificate events

```shell
kubectl get events -n istio-system --field-selector involvedObject.name=filterhome-domain-cert-public,involvedObject.kind=Certificate --sort-by=.metadata.creationTimestamp --watch
```

```text
LAST SEEN   TYPE     REASON      OBJECT                                       MESSAGE
4s          Normal   Issuing     certificate/filterhome-domain-cert-public   Issuing certificate as Secret does not exist
0s          Normal   Generated   certificate/filterhome-domain-cert-public   Stored new private key in temporary Secret resource "filterhome-domain-cert-public-v5ss4"
1s          Normal   Requested   certificate/filterhome-domain-cert-public   Created new CertificateRequest resource "filterhome-domain-cert-public-s4vvt"
0s          Normal   Issuing     certificate/filterhome-domain-cert-public   The certificate has been successfully issued
```

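Once the last event fires, the TLS secret the Gateway references should exist:

```shell
# The secret name matches both the Certificate's secretName and the
# Gateway's credentialName
kubectl get secret filterhome-domain-cert-public -n istio-system
```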
Once the certificates are provisioned, access the services, whether from the CLI or the browser; in my case I used the browser.

Since (for the moment) I am using the testing `Let's Encrypt` CA service, the certificates are flagged as untrusted, as if they were self-signed.

```shell
curl gitea.filterhome.xyz
```

```text
curl: (60) SSL certificate problem: unable to get local issuer certificate
More details here: https://curl.se/docs/sslcerts.html

curl failed to verify the legitimacy of the server and therefore could not
establish a secure connection to it. To learn more about this situation and
how to fix it, please visit the web page mentioned above.
```

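For poking at the services while the staging CA is still configured, certificate verification can be skipped (testing only; this disables the authentication TLS provides):

```shell
# -k / --insecure accepts the untrusted staging certificate
curl -k -I https://gitea.filterhome.xyz
```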
### Part 5

Egress Gateway

#### I am here <----

---

### "Last", change the ingress from the Pi4 Traefik ingress to the Istio Public Ingress

This has to be performed on the router, so I'll have to access the GUI of the ISP router and select the new IP address.

### Check the Certificate Manager to confirm that it's generating the certificates

### Update DNS records

## Difficulties

The Certificate Manager must be located in the same namespace as the `istiod` service; the location of the LB is not taken into account.

## no healthy upstream

During the "testing of configurations", I created the `DestinationRule` for Jelly in the namespace `default` instead of `external`.

*In theory,* I did clean up everything before going to sleep, to avoid, you know, "issues".

In this scenario, it seems I later set the `DR` (DestinationRule) to the `external` namespace, so when I did a general cleanup (`kubectl delete -f P3etc`) the old resource wasn't deleted, and it seems I overlooked it.

### How did the troubleshooting process go?

#### Check yaml

First of all, I checked the `.yaml` files that I had created for the `external` namespace.

I also used the `diff` command to compare, line by line, the resources of the services that worked against the `jelly` Istio resources.

Everything looked correct.

#### Check the destination server

I checked the logs on the external resource's reverse proxy, which manages the ingress traffic and forwards it to the respective container.

What did I find:

- **NOTHING**

Not a single trace of traffic when I was accessing through `jelly.filterhome.xyz`.

Weird, right?

What happens when I try to access `gitea.filterhome.xyz`?

- I get into the service without any issue through the browser.
- There are traffic logs.

Conclusion?

<ins>**The traffic doesn't reach the destination server.**</ins>

#### Using mock services

I modified the `Gitea` service, adding a `HOST` header to artificially match the external server's reverse proxy rules.

```yaml
http:
  - route:
      - destination:
          headers:
            request:
              set:
                HOST: "jelly.filterhome.xyz"
  ...
```

> **Note:**\
> I didn't need to modify anything else, since the requests were all going to the same destination server.

### Istio cluster service fqdn

```shell
istioctl proxy-config cluster -n istio-system istio-public-ingress-85b86998fc-spksq | grep filterhome.xyz
```

<pre>filebrowser.<span style="color:#FF7F7F"><b>filterhome.xyz</b></span>   443   -   outbound   EDS   filebrowser.external
gitea.<span style="color:#FF7F7F"><b>filterhome.xyz</b></span>         443   -   outbound   EDS   gitea.external
jelly.<span style="color:#FF7F7F"><b>filterhome.xyz</b></span>         443   -   outbound   EDS   jelly.default
tube.<span style="color:#FF7F7F"><b>filterhome.xyz</b></span>          443   -   outbound   EDS   tube.external</pre>

Alright, we got the output, but why does it say `jelly.default`? All the other entries maintain the format `$SERVICE.external`; could it be ...?

```shell
kubectl get dr -n default
```

```text
NAME    HOST                   AGE
jelly   jelly.filterhome.xyz   7h32m
```

Sure enough, I did miss removing that resource. But... if I missed that, **what else** did I miss?

I should check for the following Istio objects then:

- DestinationRule
- VirtualService
- ServiceEntry

```shell
kubectl get dr,vs,se -n default
```

```text
NAME                                        HOST                   AGE
destinationrule.networking.istio.io/jelly   jelly.filterhome.xyz   7h35m

NAME                                        HOSTS                                                                          LOCATION        RESOLUTION   AGE
serviceentry.networking.istio.io/jelly-se   ["jelly.filterhome.xyz","jelly.filterhome.duckdns.org","jelly.filter.home"]   MESH_INTERNAL   NONE         7h35m
```

In the end, it turns out I hadn't deleted either of these two resources (the DestinationRule and the ServiceEntry).

This called for renaming the configuration files, so this doesn't happen _again_ (until it does).
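
The cleanup itself is just removing the leftovers from `default`, with the names taken from the outputs above:

```shell
# Delete the stale copies the general cleanup missed
kubectl delete destinationrule jelly -n default
kubectl delete serviceentry jelly-se -n default
```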