Jellyfin
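All of the manifests below are placed in the jellyfin namespace. If that namespace does not already exist in your cluster, a minimal manifest along these lines (a sketch, not part of the original set) creates it first:

apiVersion: v1
kind: Namespace
metadata:
  name: jellyfin # < namespace used by every manifest on this page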
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: jellyfin
  name: jellyfin
  namespace: jellyfin
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      app: jellyfin
  template:
    metadata:
      labels:
        app: jellyfin
    spec:
      volumes:
        - name: nfs-jellyfin-config
          persistentVolumeClaim:
            claimName: jellyfin-pvc-config
        - name: nfs-jellyfin-data
          persistentVolumeClaim:
            claimName: jellyfin-pvc-data
      runtimeClassName: nvidia
      containers:
        - env:
            - name: JELLYFIN_PublishedServerUrl
              value: 10.200.1.212 # < the IP address of your Jellyfin server (see the service yaml)
            - name: PGID
              value: "\x36\x35\x35\x34\x31" # < ASCII codes for '65541'
            - name: PUID
              value: "\x31\x30\x34\x34" # < ASCII codes for '1044'
            - name: TZ
              value: Europe/Amsterdam
          securityContext:
            privileged: true # < the container must run privileged inside the pod; required for hardware acceleration
          image: ghcr.io/linuxserver/jellyfin
          imagePullPolicy: Always
          name: jellyfin
          ports:
            - containerPort: 8096
              name: http-tcp
              protocol: TCP
            - containerPort: 8920
              name: https-tcp
              protocol: TCP
            - containerPort: 1900
              name: dlna-udp
              protocol: UDP
            - containerPort: 7359
              name: discovery-udp
              protocol: UDP
          # resources: {} # < use this empty block instead if you do not pass a GPU through
          resources:
            limits:
              nvidia.com/gpu: 1
          stdin: true
          tty: true
          volumeMounts:
            - mountPath: /config
              name: nfs-jellyfin-config
            - mountPath: /data
              name: nfs-jellyfin-data
      dnsPolicy: ClusterFirst
      restartPolicy: Always
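The deployment above references runtimeClassName: nvidia for GPU-accelerated transcoding, but that RuntimeClass is not defined in these manifests. Assuming the NVIDIA container toolkit and device plugin are already installed on the GPU node, a minimal sketch could look like this:

apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: nvidia # < must match runtimeClassName in the deployment above
handler: nvidia # < the runtime handler that the NVIDIA container toolkit configures in containerd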
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jellyfin-pvc-config # < name of the persistent volume claim ("pvc")
  namespace: jellyfin # < namespace where to place the pvc
spec:
  storageClassName: "default"
  volumeName: jellyfin-pv-nfs-config # < the pv it will "claim" for storage (defined in the PersistentVolume yaml further down)
  accessModes:
    - ReadWriteMany # < multiple pods can write to the storage. Same value as the pv
  resources:
    requests:
      storage: 1Gi # < how much storage the pvc claims from the pv
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jellyfin-pvc-data
  namespace: jellyfin
spec:
  storageClassName: "default"
  volumeName: jellyfin-pv-nfs-data
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 300Gi
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jellyfin-pv-nfs-config # < name of the persistent volume ("pv") in kubernetes
  namespace: jellyfin # < namespace where to place the pv
spec:
  capacity:
    storage: 1Gi # < max. size we reserve for the pv
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany # < multiple pods can write to the storage
  persistentVolumeReclaimPolicy: Retain # < the volume is kept after the claim is released and must be reclaimed manually
  storageClassName: default
  hostPath:
    path: /opt/local-path-provisioner/jellyfin
  # nfs:
  #   path: /volume1/kubedate/jellyfin/config # < name of your NFS share with subfolder
  #   server: 10.200.1.200 # < IP address of your NFS server
  #   readOnly: false
---
apiVersion: v1
kind: PersistentVolume
metadata:
  name: jellyfin-pv-nfs-data
  namespace: jellyfin
spec:
  capacity:
    storage: 300Gi # < max. size we reserve for the pv. A bigger value than the config volume
  volumeMode: Filesystem
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain
  # nfs:
  #   path: /volume1/kubedate/jellyfin/data
  #   server: 10.200.1.200
  #   readOnly: false
  storageClassName: default
  hostPath:
    path: /opt/local-path-provisioner/jellyfin
---
kind: Service
apiVersion: v1
metadata:
  name: jellyfin-udp # < name of the service
  namespace: jellyfin # < namespace where to place the service
  annotations:
    metallb.universe.tf/allow-shared-ip: jellyfin # < annotation that lets the services share one IP; must be the same name as in the TCP service yaml
spec:
  selector:
    app: jellyfin # < reference to the deployment (connects the service with the deployment)
  ports:
    - port: 1900 # < port to open on the outside on the server
      targetPort: 1900 # < target port on the pod to pass traffic through to
      name: dlna-udp # < reference name for the port in the deployment yaml
      protocol: UDP
    - port: 7359
      targetPort: 7359
      name: discovery-udp
      protocol: UDP
  type: LoadBalancer
  loadBalancerIP: 192.168.1.212 # < IP to access your Jellyfin server. Should be one from the MetalLB range and the same as in the TCP yaml
  sessionAffinity: ClientIP # < necessary for multi-replica deployments
---
kind: Service
apiVersion: v1
metadata:
  name: jellyfin-tcp # < name of the service
  namespace: jellyfin # < namespace where to place the service
  annotations:
    metallb.universe.tf/allow-shared-ip: jellyfin # < annotation that lets the services share one IP; must be the same name as in the UDP service yaml
spec:
  selector:
    app: jellyfin # < reference to the deployment (connects the service with the deployment)
  ports:
    - port: 8096 # < port to open on the outside on the server
      targetPort: 8096 # < target port on the pod to pass traffic through to
      name: http-tcp # < reference name for the port in the deployment yaml
      protocol: TCP
    - port: 8920
      targetPort: 8920
      name: https-tcp
      protocol: TCP
  type: LoadBalancer
  loadBalancerIP: 192.168.1.212 # < IP to access your Jellyfin server. Should be one from the MetalLB range and the same as in the UDP yaml
  sessionAffinity: ClientIP # < necessary for multi-replica deployments
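If you keep each manifest in its own file, a small kustomization.yaml lets you apply the whole stack with a single kubectl apply -k .; the file names below are only an example of how the manifests might be split:

apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # hypothetical file names; adjust them to however you split the manifests
  - namespace.yaml
  - jellyfin-pv.yaml
  - jellyfin-pvc.yaml
  - jellyfin-deployment.yaml
  - jellyfin-service.yaml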