parted /dev/sda 'print free'
ipmi reset root/admin password
ipmitool user list 1
ipmitool user set password 2
Password for user 2:
Password for user 2:
To setup new user:
ipmitool user set name 3 admin
ipmitool user set password 3
Enable access:
ipmitool channel setaccess 1 3 link=on ipmi=on callin=on privilege=4
ipmi restart
ipmitool mc reset cold
nginx request time latency
log_format time '$remote_addr - $remote_user [$time_local] '
'"$request" $status $body_bytes_sent '
'"$http_referer" "$http_user_agent" '
'$request_time $upstream_response_time $pipe';
access_log /var/log/nginx/access.log time;
dumps tasks that are in uninterruptible (blocked) state
echo w >/proc/sysrq-trigger
dmesg
[4680785.899872] Show Blocked State
[4680785.900063] task taskaddr stack pid father veid
[4680785.900223] lxc-start D ffff8802eed94640 0 74111 68892 113 0x00000084
[4680785.900228] ffff88008d5b3c18 0000000000000086 0000000000000000 ffff8804212a74c8
[4680785.900233] ffff88008d5b3b98 ffffffff811e374c 00109c9dbec7d9ab ffff8804212a7400
[4680785.900237] ffff8804212a74c8 0000000000000000 0000000216c01c50 00000000000004b4
[4680785.900241] Call Trace:
[4680785.900248] [
[4680785.900253] [
[4680785.900258] [
[4680785.900262] [
[4680785.900267] [
[4680785.900270] [
[4680785.900274] [
[4680785.900278] [
[4680785.900281] [
[4680785.900284] [
[4680785.900288] [
[4680785.900291] [
[4680785.900294] [
[4680785.900298] [
[4680785.900301] [
[4680785.900304] [
[4680785.900308] [
[4680785.900312] [
[4680785.900315] [
[4680785.900319] [
[4680785.900324] [
openvz find process by container id
ps ax | awk '{print $1}' | xargs vzpid | grep CTID
lxc-start: lxc_start.c: main: 290 Executing '/sbin/init' with no configuration file may crash the host
lxc-start -n centos -F
Make sure the name of the container you are trying to start is correct.
openvz vzctl enter save bash history
This also helps detect login/access on your OpenVZ from node
echo "HISTFILE=~/.bash_history" >> /root/.bashrc
glusterfs inside openvz
Enable Fuse:
modprobe fuse
vzctl set CID --devices c:10:229:rw --save
vzctl exec CID mknod /dev/fuse c 10 229
vzctl set CID --capability sys_admin:on --save
Server 1:
yum update
yum install epel-release
yum install centos-release-gluster41
yum -y install glusterfs-server
systemctl enable glusterd.service
systemctl start glusterd.service
systemctl status glusterd.service
* glusterd.service - GlusterFS, a clustered file-system server
Loaded: loaded (/usr/lib/systemd/system/glusterd.service; enabled; vendor preset: disabled)
Active: active (running) since Sat 2018-09-01 20:15:28 UTC; 9h ago
Main PID: 1854 (glusterd)
CGroup: /system.slice/glusterd.service
|-1854 /usr/sbin/glusterd -p /var/run/glusterd.pid --log-level INFO
|-1986 /usr/sbin/glusterfsd -s data1 --volfile-id datavol.data1.data -p /var/run/gluster/vols/datavol/data1-data.pid -S /var/run/gluster/52b652b9976d3fea.soc...
|-2008 /usr/sbin/glusterfsd -s data2 --volfile-id datavol.vit2.data -p /var/run/gluster/vols/datavol/data2-data.pid -S /var/run/gluster/8d0878ede60c3f54.soc...
`-2031 /usr/sbin/glusterfs -s localhost --volfile-id gluster/glustershd -p /var/run/gluster/glustershd/glustershd.pid -l /var/log/glusterfs/glustershd.log..
glusterfsd -V
glusterfs 4.1.3
Server 1:
gluster peer probe data2
Server 2:
gluster peer probe data1
gluster peer status
Number of Peers: 1
Hostname: xx.xx.xx.xx
Uuid: xxxxxxx-xxxx-xxxx-xxxxx-xxxxxxxxxx
State: Peer in Cluster (Connected)
gluster volume create datavol replica 2 transport tcp data1:/data data2:/data force
volume create: datavol: success: please start the volume to access data
gluster volume start datavol
Because it's a TCP replica, you can check:
netstat -tap | grep glusterfsd
tcp 0 0 0.0.0.0:49152 0.0.0.0:* LISTEN 1986/glusterfsd
tcp 0 0 0.0.0.0:49153 0.0.0.0:* LISTEN 2008/glusterfsd
or
Some protection:
gluster volume set datavol auth.allow xx.xx.xx.* # 192.168.100.*
gluster volume info
Volume Name: testvol
Type: Replicate
Volume ID: xxxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxxx
Status: Started
Snapshot Count: 0
Number of Bricks: 1 x 2 = 2
Transport-type: tcp
Bricks:
Brick1: data1:/data
Brick2: data2:/data
Options Reconfigured:
auth.allow: xx.xx.xx.*
transport.address-family: inet
nfs.disable: on
performance.client-io-threads: off
Mount data on server or client:
yum -y install glusterfs-client
mkdir /var/data
mount.glusterfs data1:/datavol /var/data
Save on boot:
vi /etc/rc.local
mount.glusterfs data1:/datavol /var/data
Mount failed. Please check the log file for more details glusterfs
tail -50 /var/log/glusterfs/mnt-data-volume.log
Fix was:
lsmod | grep fuse
modprobe fuse
Error: Package: kmod-kvdo vdo ( Requires: kernel dm_put_device)
Error: Package: kmod-kvdo-6.1.0.181-17.el7_5.x86_64 (updates)
Requires: kernel(dm_unregister_target) = 0x35ba4186
Installed: kernel-3.10.0-862.el7.x86_64 (@anaconda)
kernel(dm_unregister_target) = 0x62b8c739
Installed: vzkernel-3.10.0-862.9.1.vz7.63.3.x86_64 (@openvz-os)
kernel(dm_unregister_target) = 0x97f3d3e0
Available: kernel-debug-3.10.0-862.el7.x86_64 (base)
kernel(dm_unregister_target) = 0x9fea9142
Available: kernel-debug-3.10.0-862.2.3.el7.x86_64 (updates)
kernel(dm_unregister_target) = 0x9fea9142
Available: kernel-debug-3.10.0-862.3.2.el7.x86_64 (updates)
kernel(dm_unregister_target) = 0x9fea9142
Available: kernel-debug-3.10.0-862.3.3.el7.x86_64 (updates)
kernel(dm_unregister_target) = 0x9fea9142
Available: kernel-debug-3.10.0-862.6.3.el7.x86_64 (updates)
kernel(dm_unregister_target) = 0xe573fa0d
Available: kernel-debug-3.10.0-862.9.1.el7.x86_64 (updates)
kernel(dm_unregister_target) = 0xe573fa0d
Available: kernel-debug-3.10.0-862.11.6.el7.x86_64 (updates)
kernel(dm_unregister_target) = 0xe573fa0d
Available: vzkernel-debug-3.10.0-862.9.1.vz7.63.3.x86_64 (openvz-os)
kernel(dm_unregister_target) = 0x854b76de
You could try using --skip-broken to work around the problem
You could try running: rpm -Va --nofiles --nodigest
Fix:
yum --disablerepo=* --enablerepo=base install vdo kmod-kvdo
centos 7 openvz
yum install epel-release
yum install prlctl prl-disp-service vzkernel
reboot
systemctl status vz
Kickstart install Centos 7 using virt-install
sudo qemu-img create -f qcow2 /var/lib/libvirt/images/centos7.qcow2 15G
sudo iptables -t nat -A POSTROUTING -j MASQUERADE
or
firewall-cmd --zone=external --add-masquerade --permanent
firewall-cmd --reload
This helps avoid errors like: dracut-initqueue[688]: Warning: unknown network kickstart URL: ...
mkdir kick && cd kick
vim centos7.cfg
#version=RHEL7
install
auth --enableshadow --passalgo=sha512
repo --name="EPEL" --baseurl=http://dl.fedoraproject.org/pub/epel/7/x86_64
eula --agreed
reboot
url --url="http://mirror.litnet.lt/centos/7/os/x86_64/"
firstboot --enable
ignoredisk --only-use=vda
keyboard --vckeymap=en --xlayouts='en'
lang en_US.UTF-8
network --bootproto=dhcp --device=enp0s3 --noipv6 --activate
network --hostname=centos7.test.local
rootpw mypassword
services --enabled=NetworkManager,sshd,chronyd
timezone Europe/Vilnius --isUtc --ntpservers=0.centos.pool.ntp.org,1.centos.pool.ntp.org,2.centos.pool.ntp.org,3.centos.pool.ntp.org
user --groups=wheel --homedir=/home/monit --name=monit --password=password --iscrypted --gecos="monit"
bootloader --location=mbr --boot-drive=vda
autopart --type=lvm
zerombr
clearpart --all --drives=vda
selinux --permissive
%packages
@base
@core
chrony
yum-cron
%end
python -m SimpleHTTPServer 1111
Serving HTTP on 0.0.0.0 port 1111 ...
or
python3 -m http.server 1111
Serving HTTP on 0.0.0.0 port 1111 (http://0.0.0.0:1111/) ...
sudo virt-install --connect=qemu:///system --network=bridge:virbr0 --extra-args="ks=http://192.168.0.101:1111/centos7.cfg console=tty0 console=ttyS0,115200" --name=centos7 --disk /var/lib/libvirt/images/centos7.qcow2,size=15,device=disk,bus=virtio,format=qcow2 --ram 1500 --vcpus=1 --check-cpu --accelerate --hvm --location=http://mirror.litnet.lt/centos/7/os/x86_64/ --nographics
CentOS Linux 7 (Core)
Kernel 3.10.0-862.el7.x86_64 on an x86_64
centos7 login:
union file systems overlay
mkdir -v lowerdir upperdir workdir overlay
sudo tree
.
├── lowerdir
├── overlay
├── upperdir
└── workdir
lowerdir layer (read only, base layer)
overlay layer (main view)
work layer (diff view, diff layer )
sudo mount -t overlay -o lowerdir=lowerdir,upperdir=upperdir,workdir=workdir overlay overlay
echo test > lowerdir/test
sudo tree
.
├── lowerdir
│ └── test
├── overlay
│ └── test
├── upperdir
└── workdir
└── work
echo 'some new data' >> overlay/test
sudo tree
.
├── lowerdir
│ └── test
├── overlay
│ └── test
├── upperdir
│ └── test
└── workdir
└── work
This is the principle of how Docker works.
ERROR Host does not support any virtualization options
sudo dnf install qemu-kvm