Reputation: 43
I have run a pod called kube-nginx on the control plane and forwarded a port. And I am a freshman in Kubernetes.
[root@k8smaster ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.4", GitCommit:"e87da0bd6e03ec3fea7933c4b5263d151aafd07c", GitTreeState:"clean", BuildDate:"2021-02-18T16:12:00Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"20", GitVersion:"v1.20.4", GitCommit:"e87da0bd6e03ec3fea7933c4b5263d151aafd07c", GitTreeState:"clean", BuildDate:"2021-02-18T16:03:00Z", GoVersion:"go1.15.8", Compiler:"gc", Platform:"linux/amd64"}
[root@k8smaster ~]# cat /opt/pod.yaml
apiVersion: v1
kind: Pod
metadata:
  name: kube-nginx
spec:
  containers:
  - name: nginx
    image: nginx
[root@k8smaster ~]# kubectl get pod
NAME         READY   STATUS    RESTARTS   AGE
kube-nginx   1/1     Running   0          38m
[root@k8smaster ~]# kubectl port-forward pod/kube-nginx 10086:80
Forwarding from 127.0.0.1:10086 -> 80
Forwarding from [::1]:10086 -> 80
I can access the pod on localhost, but I cannot access the pod from the external IP.
[root@k8smaster opt]# ip a | awk '/^2: ens33/,/^3: docker/{print $2}' | sed -n '3P'
192.168.80.201/24
[root@k8smaster opt]# nc -vz localhost 10086
Ncat: Version 7.50 ( https://nmap.org/ncat )
Ncat: Connected to ::1:10086.
Ncat: 0 bytes sent, 0 bytes received in 0.01 seconds.
From the other host I can ping k8smaster, but I cannot reach the pod on it.
[root@k8snode01 opt]# ping -c 3 192.168.80.201
PING 192.168.80.201 (192.168.80.201) 56(84) bytes of data.
64 bytes from 192.168.80.201: icmp_seq=1 ttl=64 time=0.367 ms
64 bytes from 192.168.80.201: icmp_seq=2 ttl=64 time=0.215 ms
64 bytes from 192.168.80.201: icmp_seq=3 ttl=64 time=0.248 ms
--- 192.168.80.201 ping statistics ---
3 packets transmitted, 3 received, 0% packet loss, time 2001ms
rtt min/avg/max/mdev = 0.215/0.276/0.367/0.068 ms
[root@k8snode01 opt]# ip a | awk '/^2: ens33/,/^3: docker/{print $2}' | sed -n '3p'
192.168.80.202/24
[root@k8snode01 opt]# nc -nvz 192.168.80.201 10086
Ncat: Version 7.50 ( https://nmap.org/ncat )
Ncat: No route to host.
And the routing table shows as follows:
[root@k8snode01 opt]# route -n
Kernel IP routing table
Destination     Gateway         Genmask          Flags Metric Ref    Use Iface
0.0.0.0         192.168.80.2    0.0.0.0          UG    100    0        0 ens33
172.17.0.0      0.0.0.0         255.255.0.0      U     0      0        0 docker0
192.168.16.128  192.168.80.201  255.255.255.192  UG    0      0        0 ens33
192.168.80.0    0.0.0.0         255.255.255.0    U     100    0        0 ens33
The firewall settings:
[root@k8smaster ~]# iptables-save | egrep -w -- INPUT
:INPUT ACCEPT [78784:15851659]
-A INPUT -j INPUT_direct
:INPUT ACCEPT [0:0]
:INPUT ACCEPT [61299:11943644]
-A INPUT -j INPUT_direct
:INPUT ACCEPT [0:0]
:cali-INPUT - [0:0]
-A INPUT -m comment --comment "cali:Cz_u1IQiXIMmKD4c" -j cali-INPUT
-A INPUT -m conntrack --ctstate NEW -m comment --comment "kubernetes externally-visible service portals" -j KUBE-EXTERNAL-SERVICES
-A INPUT -j KUBE-FIREWALL
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -j INPUT_direct
-A INPUT -j INPUT_ZONES_SOURCE
-A INPUT -j INPUT_ZONES
-A INPUT -m conntrack --ctstate INVALID -j DROP
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A cali-INPUT -p udp -m comment --comment "cali:w7ud0UgQSEi_zKuQ" -m comment --comment "Allow VXLAN packets from whitelisted hosts" -m multiport --dports 4789 -m set --match-set cali40all-vxlan-net src -m addrtype --dst-type LOCAL -j ACCEPT
-A cali-INPUT -p udp -m comment --comment "cali:4cgmbdWsLmozYhJh" -m comment --comment "Drop VXLAN packets from non-whitelisted hosts" -m multiport --dports 4789 -m addrtype --dst-type LOCAL -j DROP
-A cali-INPUT -i cali+ -m comment --comment "cali:t45BUBhpu3Wsmi1_" -g cali-wl-to-host
-A cali-INPUT -m comment --comment "cali:NOmsycyknYZaGOFf" -m mark --mark 0x10000/0x10000 -j ACCEPT
-A cali-INPUT -m comment --comment "cali:Or0B7eoenKO2p8Bf" -j MARK --set-xmark 0x0/0xf0000
-A cali-INPUT -m comment --comment "cali:AmIfvPGG2lYUK6mj" -j cali-from-host-endpoint
-A cali-INPUT -m comment --comment "cali:79fWWn1SpufdO7SE" -m comment --comment "Host endpoint policy accepted packet." -m mark --mark 0x10000/0x10000 -j ACCEPT
[root@k8smaster ~]# firewall-cmd --list-all
trusted (active)
  target: ACCEPT
  icmp-block-inversion: no
  interfaces: ens33
  sources:
  services:
  ports:
  protocols:
  masquerade: no
  forward-ports:
  source-ports:
  icmp-blocks:
  rich rules:
How do I troubleshoot this?
Upvotes: 1
Views: 4243
Reputation: 9905
Bearing in mind the following statement:
And I am a freshman in Kubernetes.
I think there are some topics that should be addressed: Kubernetes Services and $ kubectl port-forward.
Kubernetes as a platform has specific objects to accomplish certain things/tasks. You've already created a Pod named kube-nginx which spawned on one of the Nodes.
To expose Pods (like your kube-nginx) to internal/external sources you will need to use a Service. You can read more about it in the official documentation:
Service
An abstract way to expose an application running on a set of Pods as a network service. With Kubernetes you don't need to modify your application to use an unfamiliar service discovery mechanism. Kubernetes gives Pods their own IP addresses and a single DNS name for a set of Pods, and can load-balance across them.
-- Kubernetes.io: Docs: Concepts: Services networking: Service
Your Pod definition is correct and it will spawn a single Pod with the nginx image, but Kubernetes won't be able to expose it to external sources (apart from $ kubectl port-forward, which I will address later) due to the lack of a certain field in your manifest.
The minimal example that should be exposable with a Service:
apiVersion: v1
kind: Pod
metadata:
  name: kube-nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx
I also think that you should be using other Kubernetes objects, for example a Deployment.
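A minimal Deployment equivalent of your Pod could look roughly like this (my own sketch; the name and single replica are only illustrative):
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kube-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx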
Also, you haven't added anything to your manifest to force the Pod to be scheduled on k8smaster; it could be scheduled on k8snode01. You can check on which Node your Pod is running with:
$ kubectl get pods/kube-nginx -o wide
NAME READY STATUS RESTARTS AGE IP --> NODE <-- NOMINATED NODE READINESS GATES
kube-nginx 1/1 Running 0 84m 10.32.2.7 --> raven-6k6m <-- <none> <none>
See the NODE column.
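If you really do want the Pod on the control-plane node, one hedged sketch (assuming the node really is named k8smaster and that you accept scheduling onto the control plane despite its taint) is:
apiVersion: v1
kind: Pod
metadata:
  name: kube-nginx
  labels:
    app: nginx
spec:
  nodeName: k8smaster   # places the Pod directly on this node, bypassing the scheduler
  tolerations:
  - key: node-role.kubernetes.io/master   # control-plane taint as set by kubeadm on v1.20
    operator: Exists
    effect: NoSchedule
  containers:
  - name: nginx
    image: nginx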
$ kubectl port-forward ...
Coming to the kubectl port-forward pod/kube-nginx 10086:80 command: this is a guess, but I think your interpretation is that it will permanently expose the kube-nginx Pod on port 10086 on the host that ran the command. Yes, it will expose your Pod, but in this setup only on localhost, and only for as long as the command keeps running. To expose your workload you should be using the earlier mentioned Services. If you do want to expose your Pod with $ kubectl port-forward, you should use the command as user @Phillipe pointed out:
kubectl port-forward --address 0.0.0.0 pod/kube-nginx 10086:80
The --address 0.0.0.0 flag will allow incoming connections from your LAN (assuming no firewall drops the requests).
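If firewalld turns out to be dropping the traffic (your iptables output does show a final REJECT rule in INPUT, which typically produces the "No route to host" message you saw), a sketch of opening the forwarded port on k8smaster and re-testing from k8snode01 could be:
# on k8smaster: open the forwarded port (runtime + permanent); the port number comes from your port-forward
firewall-cmd --add-port=10086/tcp
firewall-cmd --add-port=10086/tcp --permanent
# on k8snode01: check again
nc -nvz 192.168.80.201 10086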
Citing another answer from Stackoverflow.com:
kubectl port-forward makes a specific Kubernetes API request. That means the system running it needs access to the API server, and any traffic will get tunneled over a single HTTP connection.
Having this is really useful for debugging (if one specific pod is acting up you can connect to it directly; in a microservice environment you can talk to a back-end service you wouldn't otherwise expose) but it's not an alternative to setting up service objects. When I've worked with kubectl port-forward it's been visibly slower than connecting to a pod via a service, and I've seen the command just stop after a couple of minutes. Again these aren't big problems for debugging, but they're not what I'd want for a production system.
-- Stackoverflow.com: Answer: How kubectl port-forward works
Assuming that your cluster is configured correctly (there are no steps in the question about the cluster provisioning process), you could use the following example to expose your kube-nginx Pod to external sources.
apiVersion: v1
kind: Pod
metadata:
  name: kube-nginx
  labels:
    app: nginx
spec:
  containers:
  - name: nginx
    image: nginx
---
apiVersion: v1
kind: Service
metadata:
  name: kube-nginx-service
spec:
  selector:
    app: nginx        # <-- this needs to match the .metadata.labels field in the Pod
  type: NodePort
  ports:
  - protocol: TCP
    port: 80          # <-- port inside of the cluster
    targetPort: 80    # <-- port that the app is listening on
    nodePort: 30080   # <-- from 30000 to 32767 on the node (can be changed)
With this example you will spawn a Pod with the nginx image and allow connections on port 30080 on each Node to reach the Pod (e.g. $ curl 192.168.0.114:30080).
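To try it, you could apply the manifest and then hit the NodePort from the other machine; the file path below is only an example:
kubectl apply -f /opt/nginx-with-service.yaml
kubectl get service kube-nginx-service
# from k8snode01 (or any host that can reach a node's IP):
curl http://192.168.80.201:30080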
A Service of type LoadBalancer, with its External IP allocation, is also possible, but due to the lack of information on the Kubernetes setup it is hard to point to the right solution (on-premises: MetalLB; otherwise consult your provider's documentation).
I also encourage you to check the official Kubernetes documentation for more references and best practices.
Upvotes: 2
Reputation: 86
What you can do is attach a Service of type LoadBalancer, like below:
kind: Service
apiVersion: v1
metadata:
  name: kube-nginx
  namespace: <your namespace>
  labels:
    app: kube-nginx
  annotations:
    service.beta.kubernetes.io/aws-load-balancer-ssl-cert: "your certificate"
    service.beta.kubernetes.io/aws-load-balancer-ssl-negotiation-policy: "ELBSecurityPolicy-TLS-1-2-2017-01"
    service.beta.kubernetes.io/aws-load-balancer-backend-protocol: "tcp"
    service.beta.kubernetes.io/aws-load-balancer-ssl-ports: "443"
    service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: '*'
    service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: '3600'
spec:
  type: LoadBalancer
  selector:
    app: kube-nginx
  ports:
  - name: http
    port: 80
    targetPort: proxy
  - name: https
    port: 443
    targetPort: http
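After applying this (the annotations above assume an AWS ELB), you would typically wait for the cloud provider to provision the load balancer and then read its address from the Service; the file name and namespace below are placeholders:
kubectl apply -f kube-nginx-lb.yaml
kubectl get service kube-nginx -n <your namespace> -w   # wait for the EXTERNAL-IP / hostname to appear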
Upvotes: 0
Reputation: 26850
To listen on port 10086 on all addresses, forwarding to port 80 in the pod:
kubectl port-forward --address 0.0.0.0 pod/kube-nginx 10086:80
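With that running on k8smaster, the same check that failed before should then succeed from the other host (assuming no firewall rejects it), for example:
# on k8snode01
nc -nvz 192.168.80.201 10086
curl http://192.168.80.201:10086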
Upvotes: 4