OpenShift Login and Configuration
| 
1 
2 
3 
4 
5 
6 
7 
8 
9 
10 
11 
12 
13 
14 | #login with a useroc login https://192.168.99.100:8443 -u developer -p developer#login as system adminoc login -u system:admin#User Informationoc whoami#View your configurationoc config view#Update the current context to have users login to the desired namespace:oc config set-context `oc config current-context` --namespace= | 
Basic Commands
| 
1 
2 
3 
4 
5 
6 
7 
8 
9 
10 
11 
12 
13 
14 
15 
16 
17 
18 
19 
20 
21 
22 
23 
24 
25 
26 
27 
28 
29 
30 
31 
32 
33 
34 
35 
36 
37 
38 
39 
40 
41 
42 
43 
44 
45 | #Use specific templateoc new-app https://github.com/name/project--template=#New app from a different branchoc new-app --name=html-dev nginx:1.10~https://github.com/joe-speedboat/openshift.html.devops.git#mybranch#Create objects from a file:oc create -f myobject.yaml -n #Create or merge objects from fileoc apply -f myobject.yaml -n #Update existing objectoc patch svc mysvc --typemerge --patch '{"spec":{"ports":[{"port": 8080, "targetPort": 5000 }]}}'#Monitor Pod statuswatchoc get pods#show labelsoc get pods --show-labels  #Gather information on a project's pod deployment with node information $ oc get pods -o wide#Hide inactive Podsoc get pods --show-all=false#Display all resources  oc get all,secret,configmap#Get the Openshift Console Addressoc get -n openshift-console route console#Get the Pod name from the Selector and rsh in itPOD=$(oc get pods -l app=myapp -o name)oc rsh -n $POD#exec single command in podoc exec$POD $COMMAND#Copy file from myrunning-pod-2 path in the current location oc rsyncmyrunning-pod-2:/tmp/LogginData_20180717220510.json .#Read resource schema dococ explain dc | 
Image Streams
| 
1 
2 
3 
4 
5 
6 
7 
# List available image streams in the openshift project
oc get is -n openshift
# Import an image from an external registry
oc import-image --from=registry.access.redhat.com/jboss-amq-6/amq62-openshift -n openshift jboss-amq-62:1.3 --confirm
# List available image streams and templates
oc new-app --list
WildFly application example
| 
1 
2 
# Create the WildFly image stream, build the sample app from source, and expose it
oc create -f https://raw.githubusercontent.com/wildfly/wildfly-s2i/wf-18.0/imagestreams/wildfly-centos7.json
oc new-app wildfly~https://github.com/fmarchioni/ocpdemos --context-dir=wildfly-basic --name=wildfly-basic
oc expose svc/wildfly-basic
Create app from a Project with Dockerfile
| 
1 
2 
3 
4 
5 
6 
7 
8 
# Binary build from a local directory containing a Dockerfile
oc new-build --binary --name=mywildfly -l app=mywildfly
oc patch bc/mywildfly -p '{"spec":{"strategy":{"dockerStrategy":{"dockerfilePath":"Dockerfile"}}}}'
oc start-build mywildfly --from-dir=. --follow
oc new-app --image-stream=mywildfly
oc expose svc/mywildfly
Nodes
| 
1 
2 
3 
4 
5 
6 
7 
8 
9 
10 
11 
12 
13 
14 
15 
16 
# Get nodes list
oc get nodes
# Check on which node your pods are running
oc get pods -o wide
# Schedule an application to run on another node
oc patch dc myapp -p '{"spec":{"template":{"spec":{"nodeSelector":{"kubernetes.io/hostname": "ip-10-0-0-74.acme.compute.internal"}}}}}'
# List all pods which are running on a node
oc adm manage-node node1.local --list-pods
# Add a label to a node
oc label node node1.local mylabel=myvalue
# Remove a label from a node (trailing '-' deletes the label)
oc label node node1.local mylabel-
Storage
| 
1 
2 
3 
4 
5 
6 
7 
8 
9 
# Create a PersistentVolumeClaim (also updates the DeploymentConfig to include
# the PV and attach a volume mount at the specified mount path)
oc set volume dc/file-uploader --add --name=my-shared-storage \
  -t pvc --claim-mode=ReadWriteMany --claim-size=1Gi \
  --claim-name=my-shared-storage --claim-class=ocs-storagecluster-cephfs \
  --mount-path=/opt/app-root/src/uploaded \
  -n my-shared-storage
# List storage classes
oc -n openshift-storage get sc
Build
| 
1 
2 
3 
4 
5 
6 
7 
8 | #Manual build from source  oc start-build ruby-ex#Stop a build that is in progress   oc cancel-build #Changing the log level of a build:oc setenvbc/my-build-nameBUILD_LOGLEVEL=[1-5] | 
Deployment
| 
1 
2 
3 
4 
5 
6 
7 
8 
9 
10 
11 
12 
13 
14 
15 
16 
17 
# Manual deployment
oc rollout latest ruby-ex
# Pause automatic deployment rollout
oc rollout pause dc "$DEPLOYMENT"
# Resume automatic deployment rollout
oc rollout resume dc "$DEPLOYMENT"
# Define resource requests and limits in a Deployment
oc set resources deployment nginx --limits=cpu=200m,memory=512Mi --requests=cpu=100m,memory=256Mi
# Define livenessProbe and readinessProbe in a DeploymentConfig
oc set probe dc/nginx --readiness --get-url=http://:8080/healthz --initial-delay-seconds=10
oc set probe dc/nginx --liveness --get-url=http://:8080/healthz --initial-delay-seconds=10
# Define a Horizontal Pod Autoscaler (hpa)
oc autoscale dc "$DC_NAME" --max=4 --cpu-percent=10
Routes
| 
1 
2 
3 
4 
# Create a route
oc expose service ruby-ex
# Read the route host attribute
oc get route my-route -o jsonpath --template="{.spec.host}"
Services
| 
1 
2 
3 
4 
# Make a service idle; when the service is next accessed the pods
# will automatically boot up again
oc idle ruby-ex
# Read a service IP
oc get services rook-ceph-mon-a --template='{{.spec.clusterIP}}'
Clean up resources
| 
1 
2 
3 
4 
5 
6 
7 
8 
9 
10 
11 
12 
# Delete all resources
oc delete all --all
# Delete resources for one specific app
oc delete services -l app=ruby-ex
oc delete all -l app=ruby-ex
# Clean up old docker images on nodes:
# keep up to three tag revisions, and keep resources (images, image
# streams and pods) younger than sixty minutes
oc adm prune images --keep-tag-revisions=3 --keep-younger-than=60m
# Prune every image that exceeds defined limits
oc adm prune images --prune-over-size-limit
Troubleshooting
| 
1 
2 
3 
4 
5 
6 
7 
8 
9 
10 
11 
12 
13 
14 
15 
16 
17 
# Check status of the current project
oc status
# Get events for a project, oldest first
oc get events --sort-by='{.lastTimestamp}'
# Get the logs of the myrunning-pod-2-fdthn pod
oc logs myrunning-pod-2-fdthn
# Follow the logs of the myrunning-pod-2-fdthn pod
oc logs -f myrunning-pod-2-fdthn
# Tail the logs of the myrunning-pod-2-fdthn pod
oc logs myrunning-pod-2-fdthn --tail=50
# Check the integrated Docker registry logs
# ({xxxxx} is a placeholder for the registry pod's suffix)
oc logs docker-registry-n-{xxxxx} -n default | less
# Run cluster diagnostics
oc adm diagnostics
Security
| 
1 
2 
3 
4 
# Create a secret from the CLI and mount it as a volume to a deployment config
oc create secret generic oia-secret --from-literal=username=myuser --from-literal=password=mypassword
oc set volume dc/myapp --add --name=secret-volume --mount-path=/opt/app-root/ --secret-name=oia-secret
Manage user roles
| 
1 
2 
# Grant the admin role to user 'oia' in the 'python' project
oc adm policy add-role-to-user admin oia -n python
# Grant the cluster-reader cluster role to a service account
oc adm policy add-cluster-role-to-user cluster-reader system:serviceaccount:monitoring:default
# Allow the default service account to run containers as any UID
oc adm policy add-scc-to-user anyuid -z default
Misc commands
| 
1 
2 
3 
4 
5 
6 
7 
8 
9 
10 
11 
12 
13 
14 
15 
16 
17 
18 
19 
20 
21 
22 
# Manage node state (mark a node unschedulable)
# NOTE(review): the original line was garbled ("oc adm manage node false");
# presumably it meant the following — confirm against your oc version
oc adm manage-node <node> --schedulable=false
# List installed operators
oc get csv
# Export the IS, BC, DC and SVC as a template
oc export is,bc,dc,svc --as-template=app.yaml
# Show the current OpenShift context in the shell prompt
function ps1() {
  export PS1='[\u@\h($(oc whoami -c 2>/dev/null|cut -d/ -f3,1)) \W]\$ '
}
# Backup openshift objects: export every object in every namespace to a yaml file
oc get all --all-namespaces --no-headers=true | awk '{print $1","$2}' | while read -r obj
do
  NS=$(echo "$obj" | cut -d, -f1)
  OBJ=$(echo "$obj" | cut -d, -f2)
  FILE=$(echo "$obj" | sed 's/\//-/g;s/,/-/g')
  echo "$NS" "$OBJ" "$FILE"
  oc export -n "$NS" "$OBJ" -o yaml > "$FILE".yml
done
 
 
No comments:
Post a Comment