diff --git a/.env b/.env new file mode 100644 index 000000000..af98a9d3b --- /dev/null +++ b/.env @@ -0,0 +1,2 @@ +TAG=1.0 +TAG_DEVEL=1.1 diff --git a/.gitignore b/.gitignore index a6c8bbf73..d5f8fc794 100644 --- a/.gitignore +++ b/.gitignore @@ -57,3 +57,5 @@ csv/ #Wizard disable install/.wizard + +known_hosts diff --git a/CHANGELOG.md b/CHANGELOG.md index ade9ccb4b..e2817a134 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,10 @@ All notable changes to this project will be documented in this file. +## [1.1.0-rc1] - 2019-02-03 + +This is the first release candidate for the 1.1.0 version. The changelog is going to be updated after the release (approximately in a week [2019-02-10]). Check the [pull request](https://github.com/isard-vdi/isard/pull/94) for more information. + ## [1.0.1] - 2018-12-27 ### Fixed diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..38ccd3e86 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,29 @@ +# Contributing + +## New feature + +1. Fork the `isard-vdi/isard` repository +2. Clone **your** Isard fork and move into it (if you already have your fork cloned, make sure you have the latest changes: `git fetch upstream`) +3. Add the upstream remote: `git remote add upstream https://github.com/isard-vdi/isard` + +1. Initialize Git Flow: `git flow init` +2. Create the feature: `git flow feature start <feature name>` +3. Work on the feature and commit it +4. Publish the feature branch: `git flow feature publish <feature name>` +5. Create a pull request from the `<your username>/isard` `feature/<feature name>` branch to the `isard-vdi/isard` `develop` branch + + + +## New release + +1. Clone the `isard-vdi/isard` repository +2. Create the release: `git flow release start X.X.X` +3. Publish the release branch: `git flow release publish X.X.X` +4. Create a pull request from the `isard-vdi/isard` `release/X.X.X` branch to `isard-vdi/isard` `master` +5. Update the Changelog, the `docker-compose.yml` file... +6. Merge the release to master +7. Create a new release on GitHub using the Changelog for the version as the description +8. Pull the changes to the local `isard-vdi/isard` clone +9. Change to the new version tag: `git checkout X.X.X` +10. Build the Docker images and push them to Docker Hub + diff --git a/README.md b/README.md index 81c7bb411..e24f4004f 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,9 @@ # Isard**VDI** +IsardVDI Logo + +[![](https://img.shields.io/github/release/isard-vdi/isard.svg)](https://github.com/isard-vdi/isard/releases) [![](https://img.shields.io/badge/docker--compose-ready-blue.svg)](https://github.com/isard-vdi/isard/blob/master/docker-compose.yml) [![](https://img.shields.io/badge/docs-latest-brightgreen.svg)](https://isardvdi.readthedocs.io/en/latest/) [![](https://img.shields.io/badge/license-AGPL%20v3.0-brightgreen.svg)](https://github.com/isard-vdi/isard/blob/master/LICENSE) + Open Source KVM Virtual Desktops based on KVM Linux and dockers. - Engine that monitors hypervisors and domains (desktops) @@ -47,7 +51,7 @@ It will create a template from that desktop as it was now. You can create as man In Updates menu you will have access to different resources you can download from our IsardVDI updates server. -![Main admin screen](docs/images/main.png?raw=true "Main admin") +![Main admin screen](https://isardvdi.readthedocs.io/en/latest/images/main.png) ## Documentation @@ -67,3 +71,8 @@ Go to [IsardVDI Project website](http://www.isardvdi.com/) ### Support/Contact Please send us an email to info@isardvdi.com if you have any questions or fill in an issue.
+ +### Social Networks +Mastodon: [@isard@fosstodon.org](https://fosstodon.org/@isard) +Twitter: [@isard_vdi](https://twitter.com/isard_vdi) + diff --git a/build-docker-images.sh b/build-docker-images.sh new file mode 100755 index 000000000..c67af32ba --- /dev/null +++ b/build-docker-images.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# Check that the version number was provided +if [ -z "$1" ]; then + echo "You need to specify an IsardVDI version! e.g. '1.1.0'" + exit 1 +fi + +if [ "$1" = "-f" ]; then + force=1 + if [ -z "$2" ]; then + echo "You need to specify an IsardVDI version with the -f option! e.g. '1.1.0'" + exit 1 + fi + version=$2 +else + force=0 + version=$1 +fi + +MAJOR=${version:0:1} +MINOR=${version:0:3} +PATCH=$version + +# If a command fails, the whole script is going to stop +set -e + +# Checkout to the specified version tag +if [ "$force" = 1 ]; then + git checkout "$version" > /dev/null +fi + +# Array containing all the images to build +images=( + #alpine-pandas + #grafana + nginx + hypervisor + app +) + +# Build all the images and tag them correctly +for image in "${images[@]}"; do + echo -e "\n\n\n" + echo "Building $image" + echo -e "\n\n\n" + cmd="docker build -f dockers/$image/Dockerfile -t isard/$image:latest -t isard/$image:$MAJOR -t isard/$image:$MINOR -t isard/$image:$PATCH ." + echo $cmd + $cmd done + diff --git a/docker-compose.yml b/docker-compose.yml index b6bad79b2..ec4899757 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,97 +1,73 @@ -version: '2' +version: "3.5" services: -# Will take long time to build, better get dockerhub already build -# isard-alpine-pandas: -# image: isard/alpine-pandas:1.0.0 -# build: -# context: . -# dockerfile: dockers/alpine-pandas/Dockerfile - isard-database: - restart: always - image: rethinkdb - hostname: isard-database - volumes: - - "/opt/isard/database:/data" - expose: - - "28015" - networks: - main: - aliases: - - rethinkdb + isard-database: + container_name: isard-database + volumes: + - "/opt/isard/database:/data" + - "/etc/localtime:/etc/localtime:ro" + networks: + - isard_network + image: rethinkdb + restart: unless-stopped + logging: + driver: none - isard-nginx: - restart: always - image: isard/nginx:1.0.0 - build: - context: . - dockerfile: dockers/nginx/Dockerfile - ports: - - "80:80" - - "443:443" - volumes: - - "/opt/isard/certs/default:/etc/nginx/external" - - "/opt/isard/logs/nginx:/var/log/nginx" - hostname: isard-nginx - links: - - "isard-app" - networks: - main: - aliases: - - isard-nginx - - isard-hypervisor: - restart: always - image: isard/hypervisor:1.0.0 - build: - context: .
- dockerfile: dockers/hypervisor/Dockerfile - hostname: isard-hypervisor - ports: - - "5900-5949:5900-5949" - - "55900-55949:55900-55949" - expose: - - "22" - privileged: true - volumes: - - "sshkeys:/root/.ssh" - - "/opt/isard:/isard" - - "/opt/isard/certs/default:/etc/pki/libvirt-spice" - networks: - main: - aliases: - - isard-hypervisor - command: /usr/bin/supervisord -c /etc/supervisord.conf + isard-nginx: + container_name: isard-nginx + volumes: + - "/opt/isard/certs/default:/etc/nginx/external" + - "/opt/isard/logs/nginx:/var/log/nginx" + - "/etc/localtime:/etc/localtime:ro" + ports: + - "80:80" + - "443:443" + networks: + - isard_network + image: isard/nginx:1.1.0-rc1 + restart: unless-stopped + depends_on: + - isard-app + + isard-hypervisor: + container_name: isard-hypervisor + volumes: + - "sshkeys:/root/.ssh" + - "/opt/isard:/isard" + - "/opt/isard/certs/default:/etc/pki/libvirt-spice" + - "/etc/localtime:/etc/localtime:ro" + ports: + - "5900-5949:5900-5949" + - "55900-55949:55900-55949" + networks: + - isard_network + image: isard/hypervisor:1.1.0-rc1 + privileged: true + restart: unless-stopped - isard-app: - restart: always - image: isard/app:1.0.0 - build: - context: . - dockerfile: dockers/app/Dockerfile - links: - - "isard-database" - - "isard-hypervisor" - hostname: isard-app - volumes: - - "sshkeys:/root/.ssh" - - "/opt/isard/certs:/certs" - - "/opt/isard/logs:/isard/logs" - - "/opt/isard/database/wizard:/isard/install/wizard" - - "/opt/isard/backups:/isard/backups" - - "/opt/isard/uploads:/isard/uploads" - expose: - - "5000" - environment: - PYTHONUNBUFFERED: 0 - extra_hosts: - - "isard-engine:127.0.0.1" - networks: - main: - aliases: - - isard-app - command: /usr/bin/supervisord -c /etc/supervisord.conf + isard-app: + container_name: isard-app + volumes: + - "sshkeys:/root/.ssh" + - "/opt/isard/certs:/certs" + - "/opt/isard/logs:/isard/logs" + - "/opt/isard/database/wizard:/isard/install/wizard" + - "/opt/isard/backups:/isard/backups" + - "/opt/isard/uploads:/isard/uploads" + - "/etc/localtime:/etc/localtime:ro" + extra_hosts: + - "isard-engine:127.0.0.1" + networks: + - isard_network + image: isard/app:1.1.0-rc1 + restart: unless-stopped + depends_on: + - isard-database + - isard-hypervisor -networks: - main: volumes: - sshkeys: + sshkeys: + +networks: + isard_network: + external: false + name: isard_network diff --git a/dockers/alpine-pandas/Dockerfile b/dockers/alpine-pandas/Dockerfile index f4352dd68..d2dfa00ef 100644 --- a/dockers/alpine-pandas/Dockerfile +++ b/dockers/alpine-pandas/Dockerfile @@ -1,7 +1,6 @@ -FROM alpine:latest +FROM alpine:3.8 MAINTAINER isard -RUN echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories RUN apk update RUN apk add --no-cache python3 && \ python3 -m ensurepip && \ diff --git a/dockers/app/Dockerfile b/dockers/app/Dockerfile index 40713db9f..08158fb17 100644 --- a/dockers/app/Dockerfile +++ b/dockers/app/Dockerfile @@ -1,30 +1,27 @@ -FROM isard/alpine-pandas:1.0.0 +FROM isard/alpine-pandas:latest MAINTAINER isard -RUN apk add --no-cache git yarn py3-libvirt py3-paramiko py3-lxml py3-xmltodict py3-pexpect py3-openssl py3-bcrypt py3-gevent py3-flask py3-flask-login py3-netaddr py3-requests curl - -RUN mkdir /isard -ADD ./src /isard +RUN apk add --no-cache bash yarn py3-libvirt py3-paramiko py3-lxml py3-pexpect py3-openssl py3-bcrypt py3-gevent py3-flask py3-netaddr py3-requests curl openssh-client COPY dockers/app/requirements.pip3 /requirements.pip3 RUN pip3 install --no-cache-dir -r requirements.pip3 -RUN 
mv /isard/isard.conf.docker /isard/isard.conf - RUN mkdir -p /root/.ssh RUN echo "Host isard-hypervisor \ StrictHostKeyChecking no" >/root/.ssh/config RUN chmod 600 /root/.ssh/config -RUN apk add --update bash -RUN apk add yarn -RUN apk add openssh-client - -RUN apk add supervisor +RUN apk add --no-cache supervisor RUN mkdir -p /var/log/supervisor COPY dockers/app/supervisord.conf /etc/supervisord.conf +EXPOSE 5000 + COPY dockers/app/certs.sh / -CMD /usr/bin/supervisord -c /etc/supervisord.conf -#CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"] -#CMD ["sh", "/init.sh"] +COPY dockers/app/add-hypervisor.sh / + +RUN mkdir /isard +ADD ./src /isard +RUN mv /isard/isard.conf.docker /isard/isard.conf + +CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"] diff --git a/dockers/app/add-hypervisor.sh b/dockers/app/add-hypervisor.sh new file mode 100755 index 000000000..24428f73e --- /dev/null +++ b/dockers/app/add-hypervisor.sh @@ -0,0 +1,57 @@ +if [[ -z $HYPERVISOR || -z $PASSWORD ]] +then + echo "You should add environment variables:" + echo " docker exec -e HYPERVISOR= -e PASSWORD= isard_isard-app_1 bash -c '/add-hypervisor.sh'" + echo "Optional parameters: USER (default is root), PORT (default is 22)" + echo "" + echo "Please run it again setting environment variables" + exit 1 +fi + +if [[ -z $PORT ]] +then + PORT=22 +fi + +if [[ -z $USER ]] +then + USER=root +fi + +apk add sshpass +if [ -f /NEWHYPER ] +then + rm /NEWHYPER +fi +sed -i '/'"$HYPERVISOR"'/d' /root/.ssh/known_hosts +echo "Trying to ssh into $HYPERVISOR..." +ssh-keyscan -p $PORT $HYPERVISOR > /NEWHYPER +if [ ! -s /NEWHYPER ] +then + echo "Hypervisor $HYPERVISOR:$PORT could not be reached. Aborting" + exit 1 +else + cat /NEWHYPER >> /root/.ssh/known_hosts + sshpass -p "$PASSWORD" ssh-copy-id -p $PORT $USER@"$HYPERVISOR" + if [ $? -ne 0 ] + then + sed -i '/'"$HYPERVISOR"'/d' /root/.ssh/known_hosts + echo "Can't access $USER@$HYPERVISOR:$PORT. Aborting" + exit 1 + fi +fi + +echo "Hypervisor ssh access granted." +virsh -c qemu+ssh://"$USER"@"$HYPERVISOR":"$PORT"/system quit +if [ $? -ne 0 ] +then + echo "Can't access libvirtd daemon. Please ensure that libvirt daemon is running in $USER@$HYPERVISOR:$PORT. Aborting" + sed -i '/'"$HYPERVISOR"'/d' /root/.ssh/known_hosts + exit 1 +fi + + +echo "Access to $USER@$HYPERVISOR:$PORT granted and found libvirtd service running." +echo "Now you can create this hypervisor in IsardVDI web interface." + + diff --git a/dockers/app/certs.sh b/dockers/app/certs.sh index 61f85acf8..da668a23f 100755 --- a/dockers/app/certs.sh +++ b/dockers/app/certs.sh @@ -1,50 +1,20 @@ #!/bin/bash -public_key="/root/.ssh/authorized_keys" -if [ -f "$public_key" ] -then - echo "$public_key found, so not generating new ones." -else - echo "$public_key not found, generating new ones." - cat /dev/zero | ssh-keygen -q -N "" - mv /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys - - #ssh-keyscan isard-hypervisor > /tmp/known_hosts - #DIFF=$(diff /root/.ssh/know_hosts /tmp/known_hosts) - #if [ "$DIFF" != "" ] - #then - # echo "The HYPERVISOR key has been regenerated" - # rm /root/.ssh/known_hosts - - echo "Scanning isard-hypervisor key..." - ssh-keyscan isard-hypervisor > /root/.ssh/known_hosts - while [ ! -s /root/.ssh/known_hosts ] - do - sleep .5 - echo "Waiting for isard-hypervisor to be online..." - ssh-keyscan isard-hypervisor > /root/.ssh/known_hosts - done - echo "isard-hypervisor online..." 
- - #fi - ######## Only on development - ####echo -e "isard\nisard" | (passwd --stdin root) - echo -e "isard\nisard" | passwd root - ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' - #ssh-keygen -t rsa -f /etc/ssh/ssh_host_rsa_key -N '' - #/usr/sbin/sshd - ######## +# Remove all isard-hypervisor lines from known_hosts +sed -i '/isard-hypervisor/d' /root/.ssh/known_hosts +# If no id_rsa.pub key yet, create new one +auth_keys="/root/.ssh/id_rsa.pub" +if [ -f "$auth_keys" ] +then + echo "$auth_keys found, so not generating new ones." +else + echo "$auth_keys not found, generating new ones." + cat /dev/zero | ssh-keygen -q -N "" + #Copy new host key to authorized_keys (so isard-hypervisor can get it also) + cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys fi -#!/bin/bash -#~ cd /isard - -#~ echo "Waiting for isard-hypervisor to be online" -#~ while [ ! -e /libvirt/libvirt-admin-sock ] -#~ do - #~ sleep 2 -#~ done -#~ echo "isard-hypervisor online, starting engine..." -#~ python3 /isard/run_engine.py - +# Now scan for isard-hypervisor for 10 seconds (should be more than enough) +echo "Scanning isard-hypervisor key..." +ssh-keyscan -T 10 isard-hypervisor > /root/.ssh/known_hosts diff --git a/dockers/app/requirements.pip3 b/dockers/app/requirements.pip3 index e59852427..e9e5f2db0 100644 --- a/dockers/app/requirements.pip3 +++ b/dockers/app/requirements.pip3 @@ -5,3 +5,6 @@ rethinkdb==2.3.0.post6 pynpm==0.1.1 graphyte==1.4 pem==18.2.0 +Flask-Login==0.4.1 +xmltodict==0.11.0 + diff --git a/dockers/app/supervisord.conf b/dockers/app/supervisord.conf index bb0efefab..94ac83d38 100644 --- a/dockers/app/supervisord.conf +++ b/dockers/app/supervisord.conf @@ -1,4 +1,5 @@ [supervisord] +user=root nodaemon=true logfile=/dev/stdout loglevel=error @@ -14,10 +15,8 @@ stdout_logfile=/isard/logs/certs.log stderr_logfile=/isard/logs/certs-error.log [program:webapp] -user=root directory=/isard command=python3 run_webapp.py -#1>/isard/logs/webapp.log 2>/isard/logs/webapp-error.log autostart=true autorestart=true startsecs=2 @@ -26,11 +25,8 @@ stdout_logfile=/isard/logs/webapp.log stderr_logfile=/isard/logs/webapp-error.log [program:engine] -user=root directory=/isard -command=sh -c "virsh -c qemu+ssh://isard-hypervisor/system quit && python3 run_engine.py" -# 1>/isard/logs/engine.log 2>/isard/logs/engine-error.log" -#command=python3 run_engine.py +command=python3 run_engine.py autostart=true autorestart=false startsecs=2 diff --git a/dockers/app_devel/Dockerfile b/dockers/app_devel/Dockerfile deleted file mode 100644 index e60e95adb..000000000 --- a/dockers/app_devel/Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -FROM isard/alpine-pandas:latest -MAINTAINER isard - -RUN apk add --no-cache git yarn py3-libvirt py3-paramiko py3-lxml py3-xmltodict py3-pexpect py3-openssl py3-bcrypt py3-gevent py3-flask py3-flask-login py3-netaddr py3-requests curl - -######## only devel ######## -#RUN mkdir /isard -#ADD ./src /isard -############################ - -######## only devel ######## -COPY dockers/app_devel/requirements.pip3 /requirements.pip3 -############################ - -RUN pip3 install --no-cache-dir -r requirements.pip3 - -######## only devel ######## -RUN pip3 install ipython pytest -#not run in devel -#RUN mv /isard/isard.conf.docker /isard/isard.conf -############################ - -RUN mkdir -p /root/.ssh -RUN echo "Host isard-hypervisor \ - StrictHostKeyChecking no" >/root/.ssh/config -RUN chmod 600 /root/.ssh/config - -RUN apk add --update bash -RUN apk add yarn -RUN apk add openssh-client - -######## only devel 
######## -RUN apk add vim openssh -############################ - -RUN apk add supervisor -RUN mkdir -p /var/log/supervisor - -######## only devel ######## -COPY dockers/app_devel/supervisord.conf /etc/supervisord.conf -############################ - -COPY dockers/app/certs.sh / -CMD /usr/bin/supervisord -c /etc/supervisord.conf -#CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"] -#CMD ["sh", "/init.sh"] \ No newline at end of file diff --git a/dockers/app_devel/requirements.pip3 b/dockers/app_devel/requirements.pip3 deleted file mode 100644 index e59852427..000000000 --- a/dockers/app_devel/requirements.pip3 +++ /dev/null @@ -1,7 +0,0 @@ -APScheduler==3.3.1 -Flask-SocketIO==2.8.6 -iniparse==0.4 -rethinkdb==2.3.0.post6 -pynpm==0.1.1 -graphyte==1.4 -pem==18.2.0 diff --git a/dockers/app_devel/supervisord.conf b/dockers/app_devel/supervisord.conf deleted file mode 100644 index 55cd2b8de..000000000 --- a/dockers/app_devel/supervisord.conf +++ /dev/null @@ -1,35 +0,0 @@ -[supervisord] -nodaemon=true -logfile=/dev/stdout -loglevel=error -logfile_maxbytes=0 - -[program:certs] -command=sh /certs.sh -autostart=true -autorestart=false -startsecs=0 -priority=1 -stdout_logfile=/isard/logs/certs.log -stderr_logfile=/isard/logs/certs-error.log - -[program:webapp] -directory=/isard -command=python3 run_webapp.py 1>/isard/logs/webapp.log 2>/isard/logs/webapp-error.log -autostart=true -autorestart=true -startsecs=2 -priority=10 -stdout_logfile=/isard/logs/webapp-supervisord.log -stderr_logfile=/isard/logs/webapp-supervisord-error.log - -[program:engine] -directory=/isard -command=sh -c "sleep 15 && python3 run_engine.py 1>/isard/logs/engine.log 2>/isard/logs/engine-error.log" -#command=python3 run_engine.py -autostart=true -autorestart=true -startsecs=10 -priority=5 -stdout_logfile=/isard/logs/engine-supervisord.log -stderr_logfile=/isard/logs/engine-supervisord-error.log diff --git a/dockers/devel-debug.yml b/dockers/devel-debug.yml deleted file mode 100644 index a80681312..000000000 --- a/dockers/devel-debug.yml +++ /dev/null @@ -1,103 +0,0 @@ -version: '2' -services: - isard-alpine-pandas: - image: isard/alpine-pandas:1.0.0 -# Will take long time to build -# build: -# context: . -# dockerfile: dockers/alpine-pandas/Dockerfile - isard-database: - restart: always - image: rethinkdb - hostname: isard-database - volumes: - - "/opt/isard/src/database:/data" - ##### - only devel - ############################ - ports: - - "8080:8080" - ################################################# - expose: - - "28015" - networks: - main: - aliases: - - rethinkdb - - isard-nginx: - restart: always - image: isard/nginx:1.0.1b - build: - context: . - dockerfile: dockers/nginx/Dockerfile - ports: - - "80:80" - - "443:443" - volumes: - - "/opt/isard/certs/default:/etc/nginx/external" - hostname: isard-nginx - links: - - "isard-app" - networks: - main: - aliases: - - isard-nginx - - isard-hypervisor: - restart: always - image: isard/hypervisor:1.0.1b - build: - context: . - dockerfile: dockers/hypervisor/Dockerfile - hostname: isard-hypervisor - ports: - - "5900-5949:5900-5949" - - "55900-55949:55900-55949" - expose: - - "22" - privileged: true - volumes: - - "sshkeys:/root/.ssh" - - "/opt/isard:/isard" - - "/opt/isard/certs/default:/etc/pki/libvirt-spice" - networks: - main: - aliases: - - isard-hypervisor - command: /usr/bin/supervisord -c /etc/supervisord.conf - - isard-app: - restart: always - image: isard/app:1.0.0 - build: - context: . 
- dockerfile: dockers/app_devel/Dockerfile - links: - - "isard-database" - - "isard-hypervisor" - hostname: isard-app - volumes: - ##### - only devel - ############################ - - "/opt/isard_devel/src:/isard" - - "/opt/ipython_profile_default:/root/.ipython/profile_default" - ################################################# - - "sshkeys:/root/.ssh" - - "/opt/isard/certs:/certs" - - "/opt/isard/logs:/isard/logs" - - "/opt/isard/database/wizard:/isard/install/wizard" - - expose: - - "5000" - environment: - PYTHONUNBUFFERED: 0 - extra_hosts: - - "isard-engine:127.0.0.1" - networks: - main: - aliases: - - isard-app - command: /usr/bin/supervisord -c /etc/supervisord.conf - -networks: - main: -volumes: - sshkeys: diff --git a/dockers/hypervisor/Dockerfile b/dockers/hypervisor/Dockerfile index b04c340ed..dd1668cf8 100644 --- a/dockers/hypervisor/Dockerfile +++ b/dockers/hypervisor/Dockerfile @@ -3,10 +3,12 @@ MAINTAINER isard RUN pip3 uninstall pandas pytz python-dateutil six -y -RUN apk add qemu-system-x86_64 libvirt netcat-openbsd libvirt-daemon dbus polkit qemu-img +RUN apk --no-cache add qemu-system-x86_64 libvirt netcat-openbsd libvirt-daemon dbus polkit qemu-img RUN ln -s /usr/bin/qemu-system-x86_64 /usr/bin/qemu-kvm RUN apk add openssh curl bash RUN ssh-keygen -A +ADD dockers/hypervisor/reset-hyper.sh / +RUN chmod 744 reset-hyper.sh RUN echo "root:isard" |chpasswd RUN sed -i 's|[#]*PermitRootLogin prohibit-password|PermitRootLogin yes|g' /etc/ssh/sshd_config @@ -23,33 +25,18 @@ echo 'spice_tls = 1' >> /etc/libvirt/qemu.conf && \ echo 'spice_tls_x509_cert_dir = "/etc/pki/libvirt-spice"' >> /etc/libvirt/qemu.conf RUN mkdir -p /etc/pki/libvirt-spice -# Add default network -#ADD dockers/hypervisor/customlibvirtpost.sh /customlibvirtpost.sh -#RUN chmod a+x /customlibvirtpost.sh -#ADD dockers/hypervisor/network.xml /network.xml - -#RUN mkdir /root/.ssh - -#spice-html5 proxy -#RUN mkdir -p /var/www/html/spice -#COPY html5/html /var/www/html/spice - -ADD dockers/hypervisor/requirements.pip3 / RUN apk add --no-cache --virtual .build_deps build-base python3-dev -#libffi-dev openssl-dev -RUN pip3 install websockify +RUN pip3 install --no-cache-dir websockify==0.8.0 RUN apk del .build_deps ADD dockers/hypervisor/start_proxy.py / EXPOSE 22 -EXPOSE 16509 EXPOSE 5900-5950 -EXPOSE 5700-5750 -EXPOSE 55900-55900 +EXPOSE 55900-55950 VOLUME ["/isard" ] -RUN apk add supervisor +RUN apk add --no-cache supervisor RUN mkdir -p /var/log/supervisor COPY dockers/hypervisor/supervisord.conf /etc/supervisord.conf CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"] diff --git a/dockers/hypervisor/reset-hyper.sh b/dockers/hypervisor/reset-hyper.sh new file mode 100644 index 000000000..cd607bbaf --- /dev/null +++ b/dockers/hypervisor/reset-hyper.sh @@ -0,0 +1,13 @@ +if [[ -z $PASSWORD ]] +then + echo "Usage:" + echo " docker exec -e PASSWORD= isard_isard-generic-hyper_1 bash -c '/reset-hyper.sh'" + exit 1 +fi + +/bin/rm -v /etc/ssh/ssh_host_* +ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa +ssh-keygen -f /etc/ssh/ssh_host_dsa_key -N '' -t dsa +echo "root:$PASSWORD" |chpasswd +pkill -9 sshd +echo "You can add this new hypervisor in IsardVDI with PORT=2022 and your new root password" diff --git a/dockers/nginx/nginx.conf b/dockers/nginx/nginx.conf index facbdf22f..08ec7897b 100644 --- a/dockers/nginx/nginx.conf +++ b/dockers/nginx/nginx.conf @@ -29,7 +29,7 @@ http { #gzip on; upstream isard-fe { - server isard-app:5000 fail_timeout=0; + server isard-app:5000 max_fails=5 fail_timeout=2s; } server { 
diff --git a/docs/images/list b/docs/images/list deleted file mode 100644 index 8b1378917..000000000 --- a/docs/images/list +++ /dev/null @@ -1 +0,0 @@ - diff --git a/docs/images/main.png b/docs/images/main.png deleted file mode 100644 index a1b417cf5..000000000 Binary files a/docs/images/main.png and /dev/null differ diff --git a/docs/index.md b/docs/index.md deleted file mode 100644 index 044f93669..000000000 --- a/docs/index.md +++ /dev/null @@ -1,59 +0,0 @@ -# Isard**VDI** - -Open Source VDI deployment based on KVM Linux. - -## What is it - -A quick and real time web interface to manage your virtual desktops. - -Bring it up: - -``` -git clone https://github.com/isard-vdi/isard -cd isard -docker-compose up -d -``` - -Connect with browser to the server and follow the wizard. You are ready -to test virtual desktops: - -- Start **demo desktops** and connect to it using your browser and spice or -vnc protocol. Nothing to be installed, but already secured with certificates. -- Install virt-viewer and connect to it using the spice client. **Sound -and USB** transparent plug will be available. - -Download new precreated desktops, isos and lots of resources from the **Updates** menu. - -Create your own desktop using isos downloaded from Updates or **Media** -menu option. When you finish installing the operating system and -applications create a **Template** and decide which users or categories -you want to be able to create a desktop identical to that template. Thanks to the **incremental disk creation** all this can be done within -minutes. - -Don't get tied to an 'stand-alone' installation in one server. You can -add more hypervisors to your **pool** and let IsardVDI decide where to -start each desktop. Each hypervisor needs only the KVM/qemu and libvirt -packages and SSH access. You should keep the storage shared between -those hypervisors. - -We currenly manage a **large IsardVDI infrastructure** at Escola del -Treball in Barcelona. 3K students and teachers have IsardVDI available -from our self-made pacemaker dual nas cluster and six hypervisors, -ranging from top level intel server dual core mainboards to gigabyte -gaming ones. - -We have experience in different **thin clients** that we use to lower renovation and -consumption costs at classrooms. - -[IsardVDI Project website](http://www.isardvdi.com/) - -### Authors -+ Josep Maria Viñolas Auquer -+ Alberto Larraz Dalmases - -### Contributors -+ Daniel Criado Casas -+ Néfix Estrada - -### Support/Contact -Please send us an email to info@isardvdi.com if you have any questions or fill in an issue. diff --git a/docs/install.md b/docs/install.md deleted file mode 100644 index dd9f07500..000000000 --- a/docs/install.md +++ /dev/null @@ -1,9 +0,0 @@ -# Installation - -## On a linux system - -We have tested on debian, ubuntu and fedora latests versions - -## Using docker-compose - -You can get a fully working IsardVDI installation within minutes with docker. 
diff --git a/docs/install/docker-compose.md b/docs/install/docker-compose.md deleted file mode 100644 index ef768612b..000000000 --- a/docs/install/docker-compose.md +++ /dev/null @@ -1,6 +0,0 @@ - Install docker: https://docs.docker.com/engine/installation/ - Install docker-compose: https://docs.docker.com/compose/install/ - Clone the repository: git clone https://github.com/isard-vdi/isard.git - cd isard && docker-compose up - -You should be able to access your IsardVDI through https://localhost diff --git a/docs/install/linux.md b/docs/install/linux.md deleted file mode 100644 index 0e38ad4d5..000000000 --- a/docs/install/linux.md +++ /dev/null @@ -1,91 +0,0 @@ -# IsardVDI installation on FEDORA 25 - -## Install OS - -Minimal Fedora 25 install -sudo dnf update -y - -## Clone IsardVDI repository - -``` -sudo dnf install git -git clone https://github.com/isard-vdi/isard.git -``` - -## Install IsardVDI requirements - -``` -cd isard/install/ -sudo dnf install wget gcc redhat-rpm-config python3-devel openldap-devel openssl-devel libvirt-python3 npm -sudo pip3 install -r requirements.pip3 -``` - -``` -sudo npm -g install bower -bower install -``` - -``` -sudo wget http://download.rethinkdb.com/centos/7/`uname -m`/rethinkdb.repo -O /etc/yum.repos.d/rethinkdb.repo -sudo dnf install -y rethinkdb -sudo cp /etc/rethinkdb/default.conf.sample /etc/rethinkdb/instances.d/default.conf -sudo systemctl daemon-reload -sudo systemctl start rethinkdb -``` - -## Selinux and Firewalld -For testing purposes, just disable both till next reboot: - -``` -sudo setenforce 0 -sudo systemctl stop firewalld -``` - -**Do not disable them in production!, please follow nginx.md and selinux.md documentation** - -## Run the application - -``` -cd .. -./run.sh -``` - -You can browse to your computer port 5000 -Default user is 'admin' and password 'isard' - - -# KNOWN ISSUES - -## IsardVDI engine can't contact hypervisor(s) - -### ssh authentication fail when connect: Server 'vdesktop6.escoladeltreball.org' not found in known_hosts - -You should generate int your IsardVDI machine your ssh key and copy it to the hypervisor(s): -``` -ssh-keygen -ssh-copy-id root@ -``` - - -Now you should be able to connect to frontend through http://localhost:5000 -Default user is admin and password isard. - - -# In hypervisors we need - - -in Fedora or centos: -``` -dnf -y install openssh-server qemu-kvm libguestfs-tools -``` - -Check that you can connect to the hypervisor using ssh root@ - -NOTE: Service sshd on hypervisor(s) should use ssh-rsa keys. Please check **/etc/ssh/sshd_config** on hypervisor that you have only **HostKey /etc/ssh/ssh_host_rsa_key** option active - -## IsardVDI does not start - -+ Check that you have rethinkdb database running: **systemctl status rethinkdb** -+ Check that rethinkdb tcp port 28015 it is open: **netstat -tulpn | grep 28015** -+ Check that there are no error logs on output. 
- diff --git a/extras/app-devel/Dockerfile b/extras/app-devel/Dockerfile new file mode 100644 index 000000000..b9980b831 --- /dev/null +++ b/extras/app-devel/Dockerfile @@ -0,0 +1,37 @@ +FROM isard/alpine-pandas:latest +MAINTAINER isard + +RUN apk add --no-cache bash yarn py3-libvirt py3-paramiko py3-lxml py3-pexpect py3-openssl py3-bcrypt py3-gevent py3-flask py3-netaddr py3-requests curl openssh-client + +######## only devel ######## +RUN apk add --no-cache git vim openssh +############################ + +COPY dockers/app/requirements.pip3 /requirements.pip3 +RUN pip3 install --no-cache-dir -r requirements.pip3 + +######## only devel ######## +RUN pip3 install ipython pytest +############################ + +RUN mkdir -p /root/.ssh +RUN echo "Host isard-hypervisor \ + StrictHostKeyChecking no" >/root/.ssh/config +RUN chmod 600 /root/.ssh/config + +RUN apk add --no-cache supervisor +RUN mkdir -p /var/log/supervisor +COPY dockers/app/supervisord.conf /etc/supervisord.conf + +EXPOSE 5000 + +COPY dockers/app/certs.sh / +COPY dockers/app/add-hypervisor.sh / + +######## not in devel ######## +# RUN mkdir /isard +# ADD ./src /isard +# RUN mv /isard/isard.conf.docker /isard/isard.conf +############################ + +CMD ["/usr/bin/supervisord", "-c", "/etc/supervisord.conf"] diff --git a/extras/app-devel/README b/extras/app-devel/README new file mode 100644 index 000000000..71810ed9e --- /dev/null +++ b/extras/app-devel/README @@ -0,0 +1,83 @@ +The local isard repo must be linked at this path: + + ln -s /your_isard_repo_dev_path /opt/isard_devel/ + +Set the devel version used to tag the image: + + echo "TAG=1.0" > /opt/isard_devel/.env + echo "TAG_DEVEL=1.1" >> /opt/isard_devel/.env + +Activate debug log level by creating a file in src: + + touch src/LOG_LEVEL_DEBUG + +You need to create a symbolic link in the root of your local isard repo to debug and run: + + ln -s extras/app-devel/devel-debug.yml devel-debug.yml + +Then build the devel image and bring everything up with docker-compose: + + bash extras/app-devel/build-docker-images-devel.sh + docker-compose -f devel-debug.yml up + + +Then, if you want to debug with ipython: + + sudo docker exec -it isard-beto_isard-app_1 ipython3 + +The ipython profile history is saved outside the container in: + + /opt/ipython_profile_default + + +Old configuration to run the engine if you have problems with running threads: + + #command=sh -c "sleep 15 && python3 run_engine.py 1>/isard/logs/engine.log 2>/isard/logs/engine-error.log" + + +Useful commands for development: + + # stop all containers + docker stop $(docker ps -a -q) + + # destroy containers (down) + sudo docker-compose -f devel-debug.yml down + + # list all containers + sudo docker container list --all + + # list all images + sudo docker images + + # rm docker image + sudo docker image rm isard/app-devel:latest + + # show logs + sudo docker container logs isard-hypervisor + + # prune all + sudo docker system prune --all --force --volumes + + # delete all /opt/isard (virtual disks, databases, logs...) + sudo rm -rf /opt/isard + + +# DEBUG with PyCharm + +Run the webapp in another terminal: + + sudo docker exec -it isard-app python3 run_webapp.py + +To debug in PyCharm: + +''' +1. Define python interpreter +--- docker-compose with docker-compose file: devel-debug.yml + +2.
Debug with python interpreter: +--- name: docker-devel +--- python interpreter: remote python 3.X from docker-compose +--- script path: /home/beto/dev/isard-beto/src/run_engine.py +--- working directory: /your_devel_path/src +--- path mappings: /your_devel_path/src=/isard +''' diff --git a/extras/app-devel/build-docker-images-devel.sh b/extras/app-devel/build-docker-images-devel.sh new file mode 100755 index 000000000..d36a4fe59 --- /dev/null +++ b/extras/app-devel/build-docker-images-devel.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +# Check that the version number was provided +if [ -z "$1" ]; then + echo "You need to specify a IsardVDI version! e.g. '1.1.0'" + exit 1 +fi + +MAJOR=${1:0:1} +MINOR=${1:0:3} +PATCH=$1 + +# If a command fails, the whole script is going to stop +set -e + +# Checkout to the specified version tag +#git checkout $1 > /dev/null + +# Array containing all the images to build +images=( + #grafana + app-devel +) + +# Build all the images and tag them correctly +for image in "${images[@]}"; do + echo -e "\n\n\n" + echo "Building $image" + echo -e "\n\n\n" + docker build -f=extras/$image/Dockerfile -t isard/$image:latest -t isard/$image:$MAJOR -t isard/$image:$MINOR -t isard/$image:$PATCH . +done + diff --git a/extras/app-devel/delete-all-docker-img-cnt-net-vol.sh b/extras/app-devel/delete-all-docker-img-cnt-net-vol.sh new file mode 100755 index 000000000..409bb6a23 --- /dev/null +++ b/extras/app-devel/delete-all-docker-img-cnt-net-vol.sh @@ -0,0 +1,7 @@ +#!/bin/bash +docker rm -vf $(docker ps -a -q) +docker network prune --force +docker volume prune --force +docker rmi -f $(docker images -a -q) +rm -rf /opt/isard + diff --git a/extras/app-devel/devel-debug.yml b/extras/app-devel/devel-debug.yml new file mode 100644 index 000000000..056e28622 --- /dev/null +++ b/extras/app-devel/devel-debug.yml @@ -0,0 +1,103 @@ +version: "3.5" +services: + isard-database: + container_name: isard-database + volumes: + - "/opt/isard/database:/data" + networks: + - isard_network + ##### - only devel - ############################ + ports: + - "8080:8080" + expose: + - "28015" + ################################################# + image: rethinkdb + restart: unless-stopped + + isard-nginx: + container_name: isard-nginx + volumes: + - "/opt/isard/certs/default:/etc/nginx/external" + - "/opt/isard/logs/nginx:/var/log/nginx" + ports: + - "80:80" + - "443:443" + networks: + - isard_network + image: isard/nginx:1.1 + restart: "no" + depends_on: + - isard-app + + isard-hypervisor: + container_name: isard-hypervisor + volumes: + - "sshkeys:/root/.ssh" + - "/opt/isard:/isard" + - "/opt/isard/certs/default:/etc/pki/libvirt-spice" + ports: + - "5900-5949:5900-5949" + - "55900-55949:55900-55949" + networks: + - isard_network + image: isard/hypervisor:1.1 + privileged: true + ################ only for devel ############### + expose: + - "22" + ############################################### + restart: "no" + + isard-app: + container_name: isard-app + volumes: + - "sshkeys:/root/.ssh" + - "/opt/isard/certs:/certs" + - "/opt/isard/logs:/isard/logs" + - "/opt/isard/database/wizard:/isard/install/wizard" + - "/opt/isard/backups:/isard/backups" + - "/opt/isard/uploads:/isard/uploads" + + ##### - only devel - ############################ + - "/opt/isard_devel/src:/isard" + - "/opt/ipython_profile_default:/root/.ipython/profile_default" + ################################################# + + ########### - only devel ################# + expose: + - "5000" + ########################################## + extra_hosts: + - 
"isard-engine:127.0.0.1" + networks: + - isard_network + image: "isard/app-devel:${TAG_DEVEL}" + restart: "no" + depends_on: + - isard-database + - isard-hypervisor + - isard-grafana + + isard-grafana: + container_name: isard-grafana + volumes: + - "/opt/isard/grafana/grafana/data:/grafana/data" + - "/opt/isard/grafana/graphite/storage:/opt/graphite/storage" + - "/opt/isard/grafana/graphite/conf:/opt/graphite/conf" + ports: + - 3000:3000 + networks: + - isard_network + image: isard/grafana:1.1 + restart: "no" + logging: + driver: none + +volumes: + sshkeys: + +networks: + isard_network: + external: false + #name: isard_network diff --git a/extras/app-devel/devel-webapp.yml b/extras/app-devel/devel-webapp.yml new file mode 100644 index 000000000..a150d8464 --- /dev/null +++ b/extras/app-devel/devel-webapp.yml @@ -0,0 +1,74 @@ +version: "3.5" +services: + isard-database: + container_name: isard-database + volumes: + - "/opt/isard/database:/data" + - "/etc/localtime:/etc/localtime:ro" + ports: + - "8080:8080" + networks: + - isard_network + image: rethinkdb + restart: unless-stopped + + isard-nginx: + container_name: isard-nginx + volumes: + - "/opt/isard/certs/default:/etc/nginx/external" + - "/opt/isard/logs/nginx:/var/log/nginx" + - "/etc/localtime:/etc/localtime:ro" + ports: + - "80:80" + - "443:443" + networks: + - isard_network + image: isard/nginx:1.1 + restart: unless-stopped + depends_on: + - isard-app + + isard-hypervisor: + container_name: isard-hypervisor + volumes: + - "sshkeys:/root/.ssh" + - "/opt/isard:/isard" + - "/opt/isard/certs/default:/etc/pki/libvirt-spice" + - "/etc/localtime:/etc/localtime:ro" + ports: + - "5900-5949:5900-5949" + - "55900-55949:55900-55949" + networks: + - isard_network + image: isard/hypervisor:1.1 + privileged: true + restart: unless-stopped + + isard-app: + container_name: isard-app + volumes: + - "sshkeys:/root/.ssh" + - "../../src:/isard" + - "/opt/isard/certs:/certs" + - "/opt/isard/logs:/isard/logs" + - "/opt/isard/database/wizard:/isard/install/wizard" + - "/opt/isard/backups:/isard/backups" + - "/opt/isard/uploads:/isard/uploads" + extra_hosts: + - "isard-engine:127.0.0.1" + networks: + - isard_network + image: isard/app:1.1 + restart: unless-stopped + depends_on: + - isard-database + - isard-hypervisor + +volumes: + sshkeys: + +networks: + isard_network: + external: false + name: isard_network + diff --git a/extras/app-devel/run_web_app.sh b/extras/app-devel/run_web_app.sh new file mode 100755 index 000000000..a584aa105 --- /dev/null +++ b/extras/app-devel/run_web_app.sh @@ -0,0 +1,3 @@ +#!/bin/bash +sleep 10 +sudo docker exec -it isard-app python3 run_webapp.py \ No newline at end of file diff --git a/extras/grafana/Dockerfile b/extras/grafana/Dockerfile new file mode 100644 index 000000000..d8d408e52 --- /dev/null +++ b/extras/grafana/Dockerfile @@ -0,0 +1,72 @@ +FROM alpine:3.8 +# Based on https://github.com/SchweizerischeBundesbahnen/docker-graphite and https://github.com/orangesys/alpine-grafana + +# Install basic stuff =) +RUN apk add --no-cache \ + bash \ + ca-certificates \ + nginx \ + openssl \ + py2-pip \ + supervisor \ + tini \ + && pip install \ + supervisor-stdout \ + gunicorn + +# Install graphite +ENV GRAPHITE_ROOT /opt/graphite + +RUN apk add --no-cache \ + alpine-sdk \ + fontconfig \ + libffi \ + libffi-dev \ + python-dev \ + py-cairo \ + && export PYTHONPATH="/opt/graphite/lib/:/opt/graphite/webapp/" \ + && pip install https://github.com/graphite-project/whisper/tarball/master \ + && pip install 
https://github.com/graphite-project/carbon/tarball/master \ && pip install https://github.com/graphite-project/graphite-web/tarball/master \ && apk del \ alpine-sdk \ python-dev \ libffi-dev + +# Grafana +ENV GRAFANA_VERSION=5.4.3 + +RUN set -ex \ && addgroup -S grafana \ && adduser -S -G grafana grafana \ && apk add --no-cache libc6-compat ca-certificates su-exec \ && mkdir /tmp/setup \ && wget -P /tmp/setup http://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana-${GRAFANA_VERSION}.linux-amd64.tar.gz \ && tar -xzf /tmp/setup/grafana-$GRAFANA_VERSION.linux-amd64.tar.gz -C /tmp/setup --strip-components=1 \ && install -m 755 /tmp/setup/bin/grafana-server /usr/local/bin/ \ && install -m 755 /tmp/setup/bin/grafana-cli /usr/local/bin/ \ && mkdir -p /grafana/datasources /grafana/dashboards /grafana/data /grafana/logs /grafana/plugins /var/lib/grafana \ && cp -r /tmp/setup/public /grafana/public \ && chown -R grafana:grafana /grafana \ && ln -s /grafana/plugins /var/lib/grafana/plugins \ && grafana-cli plugins update-all \ && rm -rf /tmp/setup + +ADD grafana-defaults.ini /grafana/conf/defaults.ini + +EXPOSE 8080 +EXPOSE 3000 +EXPOSE 2003 +EXPOSE 2004 +EXPOSE 7002 + +VOLUME ["/opt/graphite/conf", "/opt/graphite/storage"] + +COPY run.sh /run.sh +COPY etc/ /etc/ +COPY data/ /grafana/data_init/ +COPY conf/ /opt/graphite/conf.example/ + +# Enable tiny init +ENTRYPOINT ["/sbin/tini", "--"] +CMD ["/bin/bash", "/run.sh"] diff --git a/extras/grafana/README.md b/extras/grafana/README.md new file mode 100644 index 000000000..db54c2cee --- /dev/null +++ b/extras/grafana/README.md @@ -0,0 +1,27 @@ +# Grafana + +This is an optional (but cool) extra that will bring up a carbon+graphite+grafana container plugged into your IsardVDI with predefined dashboards. + +## Installation + +``` +./build.sh 1.1.0 +docker-compose up -d +``` + +Connect to your IsardVDI server on port 3000 to access grafana dashboards. + +NOTE: Check that you have grafana enabled in the IsardVDI config menu. + +## Remote Grafana + +You can put your grafana in another server by building and running the remote yml there: + +``` +./build.sh 1.1.0 +docker-compose -f remote-grafana.yml up -d +``` + +## More info + +https://isardvdi.readthedocs.io/en/latest/ diff --git a/extras/grafana/build.sh b/extras/grafana/build.sh new file mode 100755 index 000000000..84498b6a9 --- /dev/null +++ b/extras/grafana/build.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# Check that the version number was provided +if [ -z "$1" ]; then + echo "You need to specify an IsardVDI version! e.g. '1.1.0'" + exit 1 +fi + +if [ "$1" = "-f" ]; then + force=1 + if [ -z "$2" ]; then + echo "You need to specify an IsardVDI version with the -f option! e.g. '1.1.0'" + exit 1 + fi + version=$2 +else + force=0 + version=$1 +fi + +MAJOR=${version:0:1} +MINOR=${version:0:3} +PATCH=$version + +# If a command fails, the whole script is going to stop +set -e + +# Checkout to the specified version tag +if [ "$force" = 1 ]; then + git checkout "$version" > /dev/null +fi + +# Array containing all the images to build +images=( + grafana +) + +# Build all the images and tag them correctly +for image in "${images[@]}"; do + echo -e "\n\n\n" + echo "Building $image" + echo -e "\n\n\n" + cmd="docker build -t isard/$image:latest -t isard/$image:$MAJOR -t isard/$image:$MINOR -t isard/$image:$PATCH ."
+ echo $cmd + $cmd +done + diff --git a/extras/grafana/conf/carbon.conf b/extras/grafana/conf/carbon.conf new file mode 100644 index 000000000..6463c7998 --- /dev/null +++ b/extras/grafana/conf/carbon.conf @@ -0,0 +1,71 @@ +[cache] +LOCAL_DATA_DIR = /opt/graphite/storage/whisper/ + +# Specify the user to drop privileges to +# If this is blank carbon runs as the user that invokes it +# This user must have write access to the local data directory +USER = + +# Limit the size of the cache to avoid swapping or becoming CPU bound. +# Sorting and serving cache queries gets more expensive as the cache grows. +# Use the value "inf" (infinity) for an unlimited cache size. +MAX_CACHE_SIZE = inf + +# Limits the number of whisper update_many() calls per second, which effectively +# means the number of write requests sent to the disk. This is intended to +# prevent over-utilizing the disk and thus starving the rest of the system. +# When the rate of required updates exceeds this, then carbon's caching will +# take effect and increase the overall throughput accordingly. +MAX_UPDATES_PER_SECOND = 1000 + +# Softly limits the number of whisper files that get created each minute. +# Setting this value low (like at 50) is a good way to ensure your graphite +# system will not be adversely impacted when a bunch of new metrics are +# sent to it. The trade off is that it will take much longer for those metrics' +# database files to all get created and thus longer until the data becomes usable. +# Setting this value high (like "inf" for infinity) will cause graphite to create +# the files quickly but at the risk of slowing I/O down considerably for a while. +MAX_CREATES_PER_MINUTE = inf + +LINE_RECEIVER_INTERFACE = 0.0.0.0 +LINE_RECEIVER_PORT = 2003 + +PICKLE_RECEIVER_INTERFACE = 0.0.0.0 +PICKLE_RECEIVER_PORT = 2004 + +CACHE_QUERY_INTERFACE = 0.0.0.0 +CACHE_QUERY_PORT = 7002 + +# By default, carbon-cache will log every whisper update and cache hit. This can be excessive and +# degrade performance if logging on the same volume as the whisper data is stored. +LOG_UPDATES = False +LOG_CACHE_HITS = False +ENABLE_LOGROTATION = True +LOG_LISTENER_CONNECTIONS = False + +# Enable AMQP if you want to receive metrics using an amqp broker +# ENABLE_AMQP = False + +# Verbose means a line will be logged for every metric received +# useful for testing +# AMQP_VERBOSE = False + +# AMQP_HOST = localhost +# AMQP_PORT = 5672 +# AMQP_VHOST = / +# AMQP_USER = guest +# AMQP_PASSWORD = guest +# AMQP_EXCHANGE = graphite + +# Patterns for all of the metrics this machine will store. Read more at +# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings +# +# Example: store all sales, linux servers, and utilization metrics +# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization +# +# Example: store everything +# BIND_PATTERNS = # + +# NOTE: you cannot run both a cache and a relay on the same server +# with the default configuration, you have to specify distinct +# interfaces and ports for the listeners.
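The carbon.conf above enables carbon's plaintext line receiver on port 2003, which accepts one `metric.path value unix-timestamp` triple per line. A quick smoke test is to push a single datapoint by hand; this is a hedged sketch, not part of the diff, and it assumes port 2003 of the isard-grafana container is reachable from the host (the stock compose file only publishes port 3000, so you may need to map 2003 yourself):

```
# Send one hypothetical datapoint to carbon's plaintext line receiver.
# isard.smoketest.value is an arbitrary metric path chosen for this test.
echo "isard.smoketest.value 42 $(date +%s)" | nc -w 2 localhost 2003
```

If the datapoint arrives, the metric should show up under the graphite data source in grafana within one 10s retention interval (see storage-schemas.conf below).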
diff --git a/extras/grafana/conf/local_settings.py b/extras/grafana/conf/local_settings.py new file mode 100644 index 000000000..42240a20d --- /dev/null +++ b/extras/grafana/conf/local_settings.py @@ -0,0 +1,45 @@ +# Edit this file to override the default graphite settings, do not edit settings.py + +# Turn on debugging and restart apache if you ever see an "Internal Server Error" page +#DEBUG = True + +# Set your local timezone (django will try to figure this out automatically) +TIME_ZONE = 'Europe/Zurich' + +# Secret key for django +SECRET_KEY = '%%SECRET_KEY%%' + +# Setting MEMCACHE_HOSTS to be empty will turn off use of memcached entirely +#MEMCACHE_HOSTS = ['127.0.0.1:11211'] + +# Sometimes you need to do a lot of rendering work but cannot share your storage mount +#REMOTE_RENDERING = True +#RENDERING_HOSTS = ['fastserver01','fastserver02'] +#LOG_RENDERING_PERFORMANCE = True +#LOG_CACHE_PERFORMANCE = True + +# If you've got more than one backend server they should all be listed here +#CLUSTER_SERVERS = [] + +# Override this if you need to provide documentation specific to your graphite deployment +#DOCUMENTATION_URL = "http://wiki.mycompany.com/graphite" + +# Enable email-related features +#SMTP_SERVER = "mail.mycompany.com" + +# LDAP / ActiveDirectory authentication setup +#USE_LDAP_AUTH = True +#LDAP_SERVER = "ldap.mycompany.com" +#LDAP_PORT = 389 +#LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com" +#LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com" +#LDAP_BASE_PASS = "readonly_account_password" +#LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)" + +# If sqlite won't cut it, configure your real database here (don't forget to run manage.py syncdb!) +#DATABASE_ENGINE = 'mysql' # or 'postgres' +#DATABASE_NAME = 'graphite' +#DATABASE_USER = 'graphite' +#DATABASE_PASSWORD = 'graphite-is-awesome' +#DATABASE_HOST = 'mysql.mycompany.com' +#DATABASE_PORT = '3306' diff --git a/extras/grafana/conf/storage-aggregation.conf b/extras/grafana/conf/storage-aggregation.conf new file mode 100644 index 000000000..bc5e1db03 --- /dev/null +++ b/extras/grafana/conf/storage-aggregation.conf @@ -0,0 +1,29 @@ +[min] +pattern = \.lower$ +xFilesFactor = 0.1 +aggregationMethod = min + +[max] +pattern = \.upper(_\d+)?$ +xFilesFactor = 0.1 +aggregationMethod = max + +[sum] +pattern = \.sum$ +xFilesFactor = 0 +aggregationMethod = sum + +[count] +pattern = \.count$ +xFilesFactor = 0 +aggregationMethod = sum + +[count_legacy] +pattern = ^stats_counts.* +xFilesFactor = 0 +aggregationMethod = sum + +[default_average] +pattern = .* +xFilesFactor = 0.3 +aggregationMethod = average diff --git a/extras/grafana/conf/storage-schemas.conf b/extras/grafana/conf/storage-schemas.conf new file mode 100644 index 000000000..ac7cf369e --- /dev/null +++ b/extras/grafana/conf/storage-schemas.conf @@ -0,0 +1,3 @@ +[default] +pattern = .* +retentions = 10s:7d diff --git a/extras/grafana/data/grafana.db b/extras/grafana/data/grafana.db new file mode 100644 index 000000000..610043fe9 Binary files /dev/null and b/extras/grafana/data/grafana.db differ diff --git a/docs/about/license.md b/extras/grafana/data/log/.gitkeep similarity index 100% rename from docs/about/license.md rename to extras/grafana/data/log/.gitkeep diff --git a/docs/quick-start/first.md b/extras/grafana/data/plugins/.gitkeep similarity index 100% rename from docs/quick-start/first.md rename to extras/grafana/data/plugins/.gitkeep diff --git a/extras/grafana/data/png/.gitkeep b/extras/grafana/data/png/.gitkeep new file mode 
100644 index 000000000..e69de29bb diff --git a/extras/grafana/data/sessions/.gitkeep b/extras/grafana/data/sessions/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/extras/grafana/docker-compose.yml b/extras/grafana/docker-compose.yml new file mode 100644 index 000000000..ef2fff286 --- /dev/null +++ b/extras/grafana/docker-compose.yml @@ -0,0 +1,26 @@ +version: "3.5" +services: + isard-grafana: + container_name: isard-grafana + volumes: + - "/opt/isard/grafana/grafana/data:/grafana/data" + - "/opt/isard/grafana/graphite/storage:/opt/graphite/storage" + - "/opt/isard/grafana/graphite/conf:/opt/graphite/conf" + ports: + - target: 3000 + published: 3000 + protocol: tcp + mode: host + networks: + - isard_network + image: isard/grafana:1.1 + restart: unless-stopped + logging: + driver: none + #~ depends_on: + #~ - isard-app + +networks: + isard_network: + external: false + name: isard_network diff --git a/extras/grafana/etc/nginx/conf.d/graphite b/extras/grafana/etc/nginx/conf.d/graphite new file mode 100644 index 000000000..a1dd1b0e7 --- /dev/null +++ b/extras/grafana/etc/nginx/conf.d/graphite @@ -0,0 +1,37 @@ +server { + listen 8080; + server_name graphite; + charset utf-8; + # Django admin media. + location /static/admin/ { + alias /usr/lib/python2.7/site-packages/django/contrib/admin/static/admin/; + } + + # Your project's static media. + location /static/ { + alias /opt/graphite/webapp/content/; + } + + # Finally, send all non-media requests to the Django server. + location / { + proxy_pass http://127.0.0.1:8000; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Server $host; + proxy_set_header X-Forwarded-Host $host; + proxy_set_header Host $host; + + client_max_body_size 10m; + client_body_buffer_size 128k; + + proxy_connect_timeout 90; + proxy_send_timeout 90; + proxy_read_timeout 90; + + proxy_buffer_size 4k; + proxy_buffers 4 32k; + proxy_busy_buffers_size 64k; + proxy_temp_file_write_size 64k; + } +} diff --git a/extras/grafana/etc/nginx/nginx.conf b/extras/grafana/etc/nginx/nginx.conf new file mode 100644 index 000000000..f2ab7f75c --- /dev/null +++ b/extras/grafana/etc/nginx/nginx.conf @@ -0,0 +1,20 @@ +worker_processes 1; +pid /var/run/nginx.pid; +daemon off; + +events { + worker_connections 1024; + use epoll; +} + +http { + include mime.types; + default_type application/octet-stream; + + sendfile on; + keepalive_timeout 65; + + gzip on; + + include /etc/nginx/conf.d/*; +} diff --git a/extras/grafana/etc/supervisor.d/carbon.ini b/extras/grafana/etc/supervisor.d/carbon.ini new file mode 100644 index 000000000..c3493b7e5 --- /dev/null +++ b/extras/grafana/etc/supervisor.d/carbon.ini @@ -0,0 +1,13 @@ +[program:carbon-cache] +autostart = true +autorestart = true +stdout_logfile=/grafana/logs/grafana-carbon.log +stderr_logfile=/grafana/logs/grafana-carbon-error.log +stdout_events_enabled = false +stderr_events_enabled = true +stdout_logfile_maxbytes = 1MB +stdout_logfile_backups = 0 +stderr_logfile_maxbytes = 1MB +stderr_logfile_backups = 0 + +command = /opt/graphite/bin/carbon-cache.py --pidfile /var/run/carbon-cache-a.pid --debug start diff --git a/extras/grafana/etc/supervisor.d/grafana.ini b/extras/grafana/etc/supervisor.d/grafana.ini new file mode 100644 index 000000000..3d701fd68 --- /dev/null +++ b/extras/grafana/etc/supervisor.d/grafana.ini @@ -0,0 +1,15 @@ +[program:grafana] +autostart = true +autorestart = true 
+#stdout_events_enabled = true +#stderr_events_enabled = true + +stdout_logfile=/grafana/logs/grafana.log +stderr_logfile=/grafana/logs/grafana-error.log +stdout_logfile_maxbytes = 1MB + +stdout_logfile_backups = 0 +stderr_logfile_maxbytes = 1MB +stderr_logfile_backups = 0 + +command = /usr/local/bin/grafana-server --homepath=/grafana >> /grafana/logs/grafana.log diff --git a/extras/grafana/etc/supervisor.d/gunicorn.ini b/extras/grafana/etc/supervisor.d/gunicorn.ini new file mode 100644 index 000000000..ba328835c --- /dev/null +++ b/extras/grafana/etc/supervisor.d/gunicorn.ini @@ -0,0 +1,15 @@ +[program:graphite-webapp] +autostart = true +autorestart = true +stdout_logfile=/grafana/logs/grafana-gunicorn.log +stderr_logfile=/grafana/logs/grafana-gunicorn-error.log +stdout_events_enabled = false +stderr_events_enabled = true +stdout_logfile_maxbytes = 1MB +stdout_logfile_backups = 0 +stderr_logfile_maxbytes = 1MB +stderr_logfile_backups = 0 + +directory = /opt/graphite/webapp +environment = PYTHONPATH='/opt/graphite/webapp' +command = /usr/bin/gunicorn -b127.0.0.1:8000 -w2 graphite.wsgi diff --git a/extras/grafana/etc/supervisor.d/nginx.ini b/extras/grafana/etc/supervisor.d/nginx.ini new file mode 100644 index 000000000..620ed0506 --- /dev/null +++ b/extras/grafana/etc/supervisor.d/nginx.ini @@ -0,0 +1,13 @@ +[program:nginx] +autostart = true +autorestart = true +stdout_logfile=/grafana/logs/grafana-nginx.log +stderr_logfile=/grafana/logs/grafana-nginx-error.log +stdout_events_enabled = false +stderr_events_enabled = true +stdout_logfile_maxbytes = 1MB +stdout_logfile_backups = 0 +stderr_logfile_maxbytes = 1MB +stderr_logfile_backups = 0 + +command = /usr/sbin/nginx -c /etc/nginx/nginx.conf diff --git a/extras/grafana/etc/supervisord.conf b/extras/grafana/etc/supervisord.conf new file mode 100644 index 000000000..81541080d --- /dev/null +++ b/extras/grafana/etc/supervisord.conf @@ -0,0 +1,26 @@ +[unix_http_server] +file=/run/supervisord.sock + +[supervisord] +user = root +nodaemon = true +logfile_maxbytes = 10MB +logfile_backups = 0 +pidfile = /tmp/supervisord.pid +logfile = /tmp/supervisord.log +environment = GRAPHITE_STORAGE_DIR='/opt/graphite/storage',GRAPHITE_CONF_DIR='/opt/graphite/conf' + +[rpcinterface:supervisor] +supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface + +[supervisorctl] +serverurl=unix:///run/supervisord.sock + +#[eventlistener:stdout] +#command = supervisor_stdout +#buffer_size = 100 +#events = PROCESS_LOG +#result_handler = supervisor_stdout:event_handler + +[include] +files = /etc/supervisor.d/*.ini diff --git a/extras/grafana/grafana-defaults.ini b/extras/grafana/grafana-defaults.ini new file mode 100644 index 000000000..9fd302452 --- /dev/null +++ b/extras/grafana/grafana-defaults.ini @@ -0,0 +1,253 @@ +##################### Grafana Configuration Defaults ##################### +# +# Do not modify this file in grafana installs +# + +app_mode = production + +#################################### Paths #################################### +[paths] +# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) +# +data = data +# +# Directory where grafana can store logs +# +logs = data/log +# +# Directory where grafana will automatically scan and look for plugins +# +plugins = data/plugins + +#################################### Server #################################### +[server] +# Protocol (http or https) +protocol = http + +# The ip address to bind to, empty will bind to all interfaces +http_addr = + +# 
The http port to use +http_port = 3000 + +# The public facing domain name used to access grafana from a browser +domain = localhost + +# Redirect to correct domain if host header does not match domain +# Prevents DNS rebinding attacks +enforce_domain = false + +# The full public facing url +root_url = %(protocol)s://%(domain)s:%(http_port)s/ + +# Log web requests +router_logging = false + +# the path relative working path +static_root_path = public + +# enable gzip +enable_gzip = false + +# https certs & key file +cert_file = +cert_key = + +#################################### Database #################################### +[database] +# Either "mysql", "postgres" or "sqlite3", it's your choice +type = sqlite3 +host = 127.0.0.1:3306 +name = grafana +user = root +password = + +# For "postgres" only, either "disable", "require" or "verify-full" +ssl_mode = disable + +# For "sqlite3" only, path relative to data_path setting +path = grafana.db + +#################################### Session #################################### +[session] +# Either "memory", "file", "redis", "mysql", "postgresql", default is "file" +provider = file + +# Provider config options +# memory: not have any config yet +# file: session dir path, is relative to grafana data_path +# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana` +# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable +# mysql: go-sql-driver/mysql dsn config string, e.g. `user:password@tcp(127.0.0.1)/database_name` + +provider_config = sessions + +# Session cookie name +cookie_name = grafana_sess + +# If you use session in https only, default is false +cookie_secure = false + +# Session life time, default is 86400 +session_life_time = 86400 + +#################################### Analytics #################################### +[analytics] +# Server reporting, sends usage counters to stats.grafana.org every 24 hours. +# No ip addresses are being tracked, only simple counters to track +# running instances, dashboard and error counts. It is very helpful to us. +# Change this option to false to disable reporting. 
+reporting_enabled = false + +# Google Analytics universal tracking code, only enabled if you specify an id here +google_analytics_ua_id = + +#################################### Security #################################### +[security] +# default admin user, created on startup +admin_user = admin + +# default admin password, can be changed before first start of grafana, or in profile settings +admin_password = admin + +# used for signing +secret_key = SW2YcwTIb9zpOOhoPsMm + +# Auto-login remember days +login_remember_days = 7 +cookie_username = grafana_user +cookie_remember_name = grafana_remember + +# disable gravatar profile images +disable_gravatar = false + +#################################### Users #################################### +[users] +# disable user signup / registration +allow_sign_up = false + +# Allow non admin users to create organizations +allow_org_create = true + +# Set to true to automatically assign new users to the default organization (id 1) +auto_assign_org = true + +# Default role new users will be automatically assigned (if disabled above is set to true) +auto_assign_org_role = Viewer + +# Default UI theme ("dark" or "light") +default_theme = dark + +#################################### Anonymous Auth ########################## +[auth.anonymous] +# enable anonymous access +enabled = true + +# specify organization name that should be used for unauthenticated users +org_name = Main Org. + +# specify role for unauthenticated users +org_role = Viewer + +#################################### Github Auth ########################## +[auth.github] +enabled = false +allow_sign_up = false +client_id = some_id +client_secret = some_secret +scopes = user:email +auth_url = https://github.com/login/oauth/authorize +token_url = https://github.com/login/oauth/access_token +api_url = https://api.github.com/user +team_ids = +allowed_domains = +allowed_organizations = + +#################################### Google Auth ########################## +[auth.google] +enabled = false +allow_sign_up = false +client_id = some_client_id +client_secret = some_client_secret +scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email +auth_url = https://accounts.google.com/o/oauth2/auth +token_url = https://accounts.google.com/o/oauth2/token +api_url = https://www.googleapis.com/oauth2/v1/userinfo +allowed_domains = + +#################################### Basic Auth ########################## +[auth.basic] +enabled = true + +#################################### Auth Proxy ########################## +[auth.proxy] +enabled = false +header_name = X-WEBAUTH-USER +header_property = username +auto_sign_up = true + +#################################### SMTP / Emailing ########################## +[smtp] +enabled = false +host = localhost:25 +user = +password = +cert_file = +key_file = +skip_verify = false +from_address = admin@grafana.localhost + +[emails] +welcome_email_on_sign_up = false +templates_pattern = emails/*.html + +#################################### Logging ########################## +[log] +# Either "console", "file", default is "console" +# Use comma to separate multiple modes, e.g. "console, file" +mode = console, file + +# Buffer length of channel, keep it as it is if you don't know what it is. 
+buffer_len = 10000 + +# Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace" +level = Info + +# For "console" mode only +[log.console] +level = + +# For "file" mode only +[log.file] +level = +# This enables automated log rotate(switch of following options), default is true +log_rotate = true + +# Max line number of single file, default is 1000000 +max_lines = 1000000 + +# Max size shift of single file, default is 28 means 1 << 28, 256MB +max_lines_shift = 28 + +# Segment log daily, default is true +daily_rotate = true + +# Expired days of log file(delete after max days), default is 7 +max_days = 7 + +#################################### AMPQ Event Publisher ########################## +[event_publisher] +enabled = false +rabbitmq_url = amqp://localhost/ +exchange = grafana_events + +#################################### Dashboard JSON files ########################## +[dashboards.json] +enabled = false +path = /var/lib/grafana/dashboards + +#################################### Internal Grafana Metrics ########################## +# Metrics available at HTTP API Url /api/metrics +[metrics] +enabled = false +interval_seconds = 10 diff --git a/extras/grafana/remote-grafana.yml b/extras/grafana/remote-grafana.yml new file mode 100644 index 000000000..878de7c50 --- /dev/null +++ b/extras/grafana/remote-grafana.yml @@ -0,0 +1,41 @@ +version: "3.5" +services: + isard-grafana: + container_name: isard-grafana + volumes: + - type: bind + source: /opt/isard/grafana/grafana/data + target: /grafana/data + read_only: false + #~ - type: bind + #~ source: /opt/isard/grafana/graphite/storage + #~ target: /opt/graphite/storage + #~ read_only: false + #~ - type: bind + #~ source: /opt/isard/grafana/graphite/conf + #~ target: /opt/graphite/conf + #~ read_only: false + ports: + - target: 3000 + published: 3000 + protocol: tcp + mode: host + - target: 8080 + published: 8081 + protocol: tcp + mode: host + - target: 2003 + published: 2003 + protocol: tcp + mode: host + - target: 2004 + published: 2004 + protocol: tcp + mode: host + - target: 7002 + published: 7002 + protocol: tcp + mode: host + image: isard/grafana:1.1 + restart: unless-stopped + diff --git a/extras/grafana/run.sh b/extras/grafana/run.sh new file mode 100644 index 000000000..645dd455c --- /dev/null +++ b/extras/grafana/run.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +mkdir -p /opt/graphite/storage/{ceres,lists,log/webapp,rrd,whisper} +cd /opt/graphite +if [ ! -f /opt/graphite/conf/local_settings.py ]; then + echo "Creating default config for graphite-web..." + cp /opt/graphite/conf.example/local_settings.py /opt/graphite/conf/local_settings.py + RANDOM_STRING=$(python -c 'import random; import string; print "".join([random.SystemRandom().choice(string.digits + string.letters) for i in range(100)])') + sed "s/%%SECRET_KEY%%/${RANDOM_STRING}/" -i /opt/graphite/conf/local_settings.py +fi + +if [ ! -L /opt/graphite/webapp/graphite/local_settings.py ]; then + echo "Creating symbolic link for local_settings.py in graphite-web..." + ln -s /opt/graphite/conf/local_settings.py /opt/graphite/webapp/graphite/local_settings.py +fi + +if [ ! -f /opt/graphite/conf/carbon.conf ]; then + echo "Creating default config for carbon..." + cp /opt/graphite/conf.example/carbon.conf /opt/graphite/conf/carbon.conf +fi + +if [ ! -f /opt/graphite/conf/storage-schemas.conf ]; then + echo "Creating default storage schema for carbon..." + cp /opt/graphite/conf.example/storage-schemas.conf /opt/graphite/conf/storage-schemas.conf +fi + +if [ ! 
-f /opt/graphite/conf/storage-aggregation.conf ]; then + echo "Creating default storage aggregation config for carbon..." + cp /opt/graphite/conf.example/storage-aggregation.conf /opt/graphite/conf/storage-aggregation.conf +fi + +if [ ! -f /opt/graphite/storage/graphite.db ]; then + echo "Creating database..." + PYTHONPATH=$GRAPHITE_ROOT/webapp django-admin.py migrate --settings=graphite.settings --run-syncdb --noinput + chown nginx:nginx /opt/graphite/storage/graphite.db + # Automagically create a Django superuser with default login + script="from django.contrib.auth.models import User; + +username = 'admin'; +password = 'admin'; +email = 'admin@example.com'; + +if User.objects.filter(username=username).count()==0: + User.objects.create_superuser(username, email, password); + print('Superuser created.'); +else: + print('Superuser creation skipped.'); + +" + printf "$script" | PYTHONPATH=$GRAPHITE_ROOT/webapp django-admin.py shell --settings=graphite.settings +fi + +## GRAFANA +if [ ! -f /grafana/data/grafana.db ]; then + echo "Creating default config for grafana" + cp -R /grafana/data_init/* /grafana/data/ + chown -R grafana:grafana /grafana +fi + +exec supervisord -c /etc/supervisord.conf + diff --git a/extras/remote-hyper/README.md b/extras/remote-hyper/README.md new file mode 100644 index 000000000..7ae214edc --- /dev/null +++ b/extras/remote-hyper/README.md @@ -0,0 +1,5 @@ +# IsardVDI Generic Hypervisor + +It brings up a remote hypervisor. Instructions can be found in the documentation: + +https://isardvdi.readthedocs.io/en/latest/admin/hypervisors/ diff --git a/extras/remote-hyper/docker-compose.yml b/extras/remote-hyper/docker-compose.yml new file mode 100644 index 000000000..0218a8395 --- /dev/null +++ b/extras/remote-hyper/docker-compose.yml @@ -0,0 +1,26 @@ +version: "3.2" +services: + isard-hypervisor: + volumes: + - type: volume + source: sshkeys + target: /root/.ssh + read_only: false + - type: bind + source: /opt/isard + target: /isard + read_only: false + - type: bind + source: /opt/isard/certs/default + target: /etc/pki/libvirt-spice + read_only: false + ports: + - "2022:22" + - "5900-5949:5900-5949" + - "55900-55949:55900-55949" + image: isard/hypervisor:1.0 + privileged: true + restart: unless-stopped + +volumes: + sshkeys:
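The compose file above publishes the hypervisor's sshd on port 2022, which is the channel the engine uses to reach libvirt on the remote host. A minimal connectivity check (a sketch, not part of this changeset) could look like this, assuming python-libvirt is installed, SSH keys have already been exchanged via the `sshkeys` volume, and `hyper.example.com` stands in for the real hostname:

```python
# Sketch: verify the remote hypervisor answers over qemu+ssh.
# 'hyper.example.com' is a placeholder; 2022 is the sshd port published above.
import libvirt

uri = 'qemu+ssh://root@hyper.example.com:2022/system'
try:
    conn = libvirt.open(uri)
    print('connected, libvirt version:', conn.getVersion())
    conn.close()
except libvirt.libvirtError as e:
    print('connection failed:', e)
```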
diff --git a/mkdocs.yml b/mkdocs.yml deleted file mode 100644 index 23f0a5506..000000000 --- a/mkdocs.yml +++ /dev/null @@ -1,60 +0,0 @@ -docs_dir: docs - -# Project information -site_name: IsardVDI -site_url: http://www.isardvdi.com -site_description: IsardVDI Open Source Virtual Desktops. -site_author: IsardVDI - -# Repository -repo_name: 'isard-vdi/isard' -repo_url: https://github.com/isard-vdi/isard - -# Copyright -copyright: Copyright © 2016 IsardVDI. - -# Documentation and theme -#theme: 'material' -theme: - name: "readthedocs" -strict: true - -# Options -extra: - logo: 'images/logo.svg' - palette: - primary: 'indigo' - accent: 'indigo' - font: - text: 'Roboto' - code: 'Roboto Mono' - social: - - type: 'github' - link: 'https://github.com/john-doe' - - type: 'twitter' - link: 'https://twitter.com/john-doe' - - type: 'linkedin' - link: 'https://de.linkedin.com/in/john-doe' - -# Google Analytics -#google_analytics: -# - 'UA-XXXXXXXX-X' -# - 'auto' - -# Extensions -markdown_extensions: - - admonition - - codehilite: - guess_lang: false - - toc: - permalink: true - -# TOC -pages: -- Introduction: index.md -- Installation: - - On linux: install/linux.md - - Using docker-compose: install/docker-compose.md -- Quick Start: quick-start/first.md -- About: about/license.md - diff --git a/src/engine/api/__init__.py b/src/engine/api/__init__.py index c96960259..2bf28d05f 100644 --- a/src/engine/api/__init__.py +++ b/src/engine/api/__init__.py @@ -12,7 +12,7 @@ api = Blueprint('api', __name__) app = current_app -from . import evaluate +#from . import evaluate def shutdown_server(): func = request.environ.get('werkzeug.server.shutdown') @@ -82,12 +82,11 @@ def stop_threads(): @api.route('/engine_restart', methods=['GET']) def engine_restart(): - app.m.stop_threads() while True: - alive, dead, not_defined = app.m.update_info_threads_engine() - if len(alive) == 0: + app.m.update_info_threads_engine() + if len(app.m.threads_info_main['alive']) == 0 and len(app.m.threads_info_hyps['alive']) == 0: action = {} action['type'] = 'stop' app.m.q.background.put(action) @@ -104,6 +103,33 @@ def engine_restart(): break return jsonify({'engine_restart':True}), 200 +@api.route('/grafana/restart', methods=['GET']) +def grafana_restart(): + app.m.t_grafana.restart_send_config = True + +@api.route('/engine/status') +def engine_status(): + '''all main threads are running''' + + pass + + +@api.route('/pool/<id_pool>/status') +def pool_status(id_pool): + '''hypervisors ready to start and create disks''' + pass + +@api.route('/grafana/reload') +def grafana_reload(): + '''changes in grafana parameters''' + pass + +@api.route('/engine/events/stop') +def stop_thread_event(): + app.m.t_events.stop = True + app.m.t_events.q_event_register.put({'type': 'del_hyp_to_receive_events', 'hyp_id': ''}) + + @api.route('/engine_info', methods=['GET']) def engine_info(): d_engine = {}
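The blueprint above adds several engine endpoints (`/engine_restart`, `/grafana/restart`, `/engine_info`, plus some stubs). A hedged sketch of driving them with `requests`; the base URL is an assumption and must point at wherever the engine Flask app is bound in your deployment:

```python
# Sketch only: exercise the engine API routes added above.
import requests

ENGINE_API = 'http://localhost:5555'  # placeholder base URL, adjust to your deployment

# /engine_info returns a JSON summary of the running engine (per the route above)
print(requests.get(f'{ENGINE_API}/engine_info').json())

# /engine_restart waits until all threads have stopped, then relaunches the engine
requests.get(f'{ENGINE_API}/engine_restart')
```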
diff --git a/src/engine/config.py b/src/engine/config.py index c85dba735..afb391f4e 100644 --- a/src/engine/config.py +++ b/src/engine/config.py @@ -27,6 +27,8 @@ # ~ sys.exit(0) config_exists=False +first_loop = True +fail_first_loop = False while not config_exists: try: rcfg = configparser.ConfigParser() @@ -34,21 +36,33 @@ RETHINK_HOST = rcfg.get('RETHINKDB', 'HOST') RETHINK_PORT = rcfg.get('RETHINKDB', 'PORT') RETHINK_DB = rcfg.get('RETHINKDB', 'DBNAME') + if fail_first_loop: + print('ENGINE STARTING, isard.conf accessed') config_exists=True except: - print('ENGINE START PENDING: Missing isard.conf file. Run webapp and access to http://localhost:5000 or https://localhost on dockers.') + if first_loop is True: + print('ENGINE START PENDING: Missing isard.conf file. Run webapp and access http://localhost:5000 or https://localhost on dockers.') + first_loop = False + fail_first_loop = True time.sleep(1) +first_loop = True +fail_first_loop = False table_exists=False while not table_exists: try: with r.connect(host=RETHINK_HOST, port=RETHINK_PORT) as conn: rconfig = r.db(RETHINK_DB).table('config').get(1).run(conn) - grafana= rconfig['grafana'] + #grafana= rconfig['engine']['grafana'] rconfig = rconfig['engine'] table_exists=True + if fail_first_loop: + print('ENGINE STARTING, database is online') except: - print('ENGINE START PENDING: Missing database isard. Run webapp and access to http://localhost:5000 or https://localhost on dockers.') + if first_loop is True: + print('ENGINE START PENDING: Missing database isard. Run webapp and access http://localhost:5000 or https://localhost on dockers.') + first_loop = False + fail_first_loop = True time.sleep(1) #print(rconfig) @@ -59,9 +73,9 @@ TEST_HYP_FAIL_INTERVAL = rconfig['intervals']['test_hyp_fail'] POLLING_INTERVAL_BACKGROUND = rconfig['intervals']['background_polling'] POLLING_INTERVAL_TRANSITIONAL_STATES = rconfig['intervals']['transitional_states_polling'] -GRAFANA = grafana +#GRAFANA = grafana -TRANSITIONAL_STATUS = ('Starting', 'Stopping') +TRANSITIONAL_STATUS = ('Starting', 'Stopping', 'Deleting') # CONFIG_DICT = {k: {l[0]:l[1] for l in c.items(k)} for k in c.sections()} @@ -93,12 +107,12 @@ 'TIMEOUTS':rconfig['timeouts'], 'REMOTEOPERATIONS':{ -'host_remote_disk_operatinos': 'vdesktop1.escoladeltreball.org', -'default_group_dir': '/vimet/groups/a' +'host_remote_disk_operatinos': 'localhost', +'default_group_dir': '/opt/isard/groups/' }, 'FERRARY':{ 'prefix': '__f_', -'dir_to_ferrary_disks': '/vimet/groups/ferrary' +'dir_to_ferrary_disks': '/opt/isard/groups/ferrary' } } diff --git a/src/engine/controllers/broom.py b/src/engine/controllers/broom.py index 896eafea0..b1c339ff6 100644 --- a/src/engine/controllers/broom.py +++ b/src/engine/controllers/broom.py @@ -37,9 +37,10 @@ def polling(self): interval += 0.1 if self.stop is True: break - if self.manager.check_actions_domains_enabled(): + if self.manager.check_actions_domains_enabled() is False: continue + l = get_domains_with_transitional_status() list_domains_without_hyp = [d for d in l if 'hyp_started' not in d.keys()] @@ -84,6 +85,8 @@ def polling(self): domain_id = d['id'] status = d['status'] hyp_started = d['hyp_started'] + if isinstance(hyp_started, bool): + continue if len(hyp_started) == 0: continue # TODO bug sometimes hyp_started not in hyps_domain_started keys... why? 
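The broom change above adds a type guard before the length check, since `hyp_started` can apparently hold either a hostname string or a boolean while no hypervisor has claimed the domain. A standalone sketch of that guard logic, under that assumption:

```python
# Sketch of the hyp_started guard introduced above: only a non-empty
# hostname string is usable as a key into hyps_domain_started.
def usable_hyp_started(hyp_started):
    if isinstance(hyp_started, bool):  # e.g. False: no hypervisor assigned yet
        return None
    if len(hyp_started) == 0:          # empty string: same situation
        return None
    return hyp_started

assert usable_hyp_started(False) is None
assert usable_hyp_started('') is None
assert usable_hyp_started('hyper1') == 'hyper1'  # 'hyper1' is a placeholder id
```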
diff --git a/src/engine/controllers/events_recolector.py b/src/engine/controllers/events_recolector.py index 8d2600054..82c268974 100644 --- a/src/engine/controllers/events_recolector.py +++ b/src/engine/controllers/events_recolector.py @@ -14,6 +14,8 @@ import sys import threading import time +import queue +import traceback import libvirt @@ -25,6 +27,9 @@ from engine.services.lib.functions import hostname_to_uri, get_tid from engine.services.log import * +TIMEOUT_QUEUE_REGISTER_EVENTS = 1 +NUM_TRY_REGISTER_EVENTS = 5 +SLEEP_BETWEEN_TRY_REGISTER_EVENTS = 1.0 # Reference: https://github.com/libvirt/libvirt-python/blob/master/examples/event-test.py from pprint import pprint @@ -61,6 +66,7 @@ def virEventLoopNativeStart(stop): ########################################################################## def domEventToString(event): + #from https://github.com/libvirt/libvirt-python/blob/master/examples/event-test.py domEventStrings = ("Defined", "Undefined", "Started", @@ -75,18 +81,26 @@ def domEventToString(event): def domDetailToString(event, detail): - domEventStrings = ( - ("Added", "Updated"), - ("Removed",), - ("Booted", "Migrated", "Restored", "Snapshot", "Wakeup"), - ("Paused", "Migrated", "IOError", "Watchdog", "Restored", "Snapshot", "API error"), - ("Unpaused", "Migrated", "Snapshot"), - ("Shutdown", "Destroyed", "Crashed", "Migrated", "Saved", "Failed", "Snapshot"), - ("Finished",), - ("Memory", "Disk"), - ("Panicked",), + # from https://github.com/libvirt/libvirt-python/blob/master/examples/event-test.py + DOM_EVENTS = ( + ("Defined", ("Added", "Updated", "Renamed", "Snapshot")), + ("Undefined", ("Removed", "Renamed")), + ("Started", ("Booted", "Migrated", "Restored", "Snapshot", "Wakeup")), + ("Suspended", ("Paused", "Migrated", "IOError", "Watchdog", "Restored", "Snapshot", "API error", "Postcopy", + "Postcopy failed")), + ("Resumed", ("Unpaused", "Migrated", "Snapshot", "Postcopy")), + ("Stopped", ("Shutdown", "Destroyed", "Crashed", "Migrated", "Saved", "Failed", "Snapshot", "Daemon")), + ("Shutdown", ("Finished", "On guest request", "On host request")), + ("PMSuspended", ("Memory", "Disk")), + ("Crashed", ("Panicked",)), ) - return domEventStrings[event][detail] + try: + return DOM_EVENTS[event][1][detail] + except Exception as e: + logs.status.error(f'Detail not defined in DOM_EVENTS. 
index_event:{event}, index_detail:{detail}') + logs.status.error(e) + return 'Detail undefined' + def blockJobTypeToString(type): @@ -432,7 +446,6 @@ def myDomainEventGraphicsCallbackRethink(conn, dom, phase, localAddr, remoteAddr class ThreadHypEvents(threading.Thread): def __init__(self, name, - dict_hyps, register_graphics_events=True ): threading.Thread.__init__(self) @@ -440,10 +453,11 @@ def __init__(self, name, self.stop = False self.stop_event_loop = [False] self.REGISTER_GRAPHICS_EVENTS = register_graphics_events - self.hyps = dict_hyps + self.hyps = {} # self.hostname = get_hyp_hostname_from_id(hyp_id) self.hyps_conn = dict() self.events_ids = dict() + self.q_event_register = queue.Queue() def run(self): # Close connection on exit (to test cleanup paths) @@ -459,29 +473,33 @@ def exit(): sys.exitfunc = exit - # self.r_status = RethinkHypEvent() - - while True: - if len(self.hyps) == 0: - if self.stop: - break - time.sleep(0.1) - else: - self.thread_event_loop = virEventLoopNativeStart(self.stop_event_loop) + self.thread_event_loop = virEventLoopNativeStart(self.stop_event_loop) - for hyp_id, hostname in self.hyps.items(): + # self.r_status = RethinkHypEvent() + while self.stop is not True: + try: + action = self.q_event_register.get(timeout=TIMEOUT_QUEUE_REGISTER_EVENTS) + if action['type'] in ['add_hyp_to_receive_events']: + hyp_id = action['hyp_id'] self.add_hyp_to_receive_events(hyp_id) - - while self.stop is not True: - time.sleep(0.1) - - if self.stop is True: - for hyp_id in list(self.hyps): - self.del_hyp_to_receive_events(hyp_id) - self.stop_event_loop[0] = True - while self.thread_event_loop.is_alive(): - pass - break + elif action['type'] in ['del_hyp_to_receive_events']: + hyp_id = action['hyp_id'] + self.del_hyp_to_receive_events(hyp_id) + elif action['type'] == 'stop_thread': + self.stop = True + else: + logs.status.error('action type {} not supported'.format(action['type'])) + except queue.Empty: + pass + except Exception as e: + log.error('Exception in ThreadHypEvents main loop: {}'.format(e)) + log.error('Action: {}'.format(pprint.pformat(action))) + log.error('Traceback: {}'.format(traceback.format_exc())) + return False + + self.stop_event_loop[0] = True + while self.thread_event_loop.is_alive(): + pass def add_hyp_to_receive_events(self, hyp_id): d_hyp_parameters = get_hyp_hostname_user_port_from_id(hyp_id) @@ -501,8 +519,17 @@ def add_hyp_to_receive_events(self, hyp_id): logs.status.error(e) if conn_ok is True: - self.events_ids[hyp_id] = self.register_events(self.hyps_conn[hyp_id]) - self.hyps[hyp_id] = hostname + for i in range(NUM_TRY_REGISTER_EVENTS): + # retry registering events up to NUM_TRY_REGISTER_EVENTS times + try: + self.events_ids[hyp_id] = self.register_events(self.hyps_conn[hyp_id]) + self.hyps[hyp_id] = hostname + break + except libvirt.libvirtError as e: + logs.status.error(f'Error when registering events, waiting {SLEEP_BETWEEN_TRY_REGISTER_EVENTS}s, try {i+1} of {NUM_TRY_REGISTER_EVENTS}') + logs.status.error(e) + time.sleep(SLEEP_BETWEEN_TRY_REGISTER_EVENTS) + def del_hyp_to_receive_events(self, hyp_id): self.unregister_events(self.hyps_conn[hyp_id], self.events_ids[hyp_id]) @@ -571,10 +598,10 @@ def unregister_events(self, hyp_libvirt_conn, cb_ids): hyp_libvirt_conn.unregisterCloseCallback() -def launch_thread_hyps_event(dict_hyps): +def launch_thread_hyps_event(): # t = threading.Thread(name= 'events',target=events_from_hyps, args=[list_hostnames]) - t = ThreadHypEvents(name='hyps_events', dict_hyps=dict_hyps) + t = ThreadHypEvents(name='hyps_events') t.daemon = True t.start() return t
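With this refactor, other threads no longer mutate `ThreadHypEvents.hyps` directly; registration goes through `q_event_register`, and `run()` consumes actions with a timeout so the loop can also honour `self.stop`. A usage sketch (the hypervisor id `hyper1` is a placeholder, and the import path assumes the repo layout `src/engine/controllers/events_recolector.py`):

```python
# Sketch: drive the events thread through its registration queue.
from engine.controllers.events_recolector import launch_thread_hyps_event

t_events = launch_thread_hyps_event()

# register a hypervisor for libvirt events
t_events.q_event_register.put({'type': 'add_hyp_to_receive_events', 'hyp_id': 'hyper1'})

# unregister it again
t_events.q_event_register.put({'type': 'del_hyp_to_receive_events', 'hyp_id': 'hyper1'})

# stop the thread from the queue itself
t_events.q_event_register.put({'type': 'stop_thread'})
```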
diff --git a/src/engine/controllers/ui_actions.py b/src/engine/controllers/ui_actions.py index 69a78b1f2..887d81674 100644 --- a/src/engine/controllers/ui_actions.py +++ b/src/engine/controllers/ui_actions.py @@ -20,7 +20,7 @@ update_domain_dict_hardware, remove_disk_template_created_list_in_domain, remove_dict_new_template_from_domain, \ create_disk_template_created_list_in_domain, get_pool_from_domain, get_domain, insert_domain, delete_domain, \ update_domain_status, get_domain_force_hyp, get_hypers_in_pool, get_domain_kind, get_if_delete_after_stop, \ - get_dict_from_item_in_table, update_domain_dict_create_dict + get_dict_from_item_in_table, update_domain_dict_create_dict, update_origin_and_parents_to_new_template from engine.services.lib.functions import exec_remote_list_of_cmds from engine.services.lib.qcow import create_cmd_disk_from_virtbuilder, get_host_long_operations_from_path from engine.services.lib.qcow import create_cmds_disk_from_base, create_cmds_delete_disk, get_path_to_disk, \ @@ -420,6 +420,14 @@ def create_template_in_db(self, id_domain): return False remove_disk_template_created_list_in_domain(id_domain) remove_dict_new_template_from_domain(id_domain) + if 'parents' in domain_dict.keys(): + domain_parents_chain_update = domain_dict['parents'].copy() + else: + domain_parents_chain_update = [] + + domain_parents_chain_update.append(template_id) + update_table_field('domains', id_domain, 'parents', domain_parents_chain_update) + update_origin_and_parents_to_new_template(id_domain,template_id) # update_table_field('domains', template_id, 'xml', xml_parsed, merge_dict=False) update_domain_status(status='Stopped', id_domain=template_id, @@ -742,6 +750,15 @@ def creating_and_test_xml_start(self, id_domain, creating_from_create_dict=False id_template = domain['create_dict']['origin'] template = get_domain(id_template) xml_from = template['xml'] + parents_chain = template.get('parents',[]) + domain.get('parents',[]) + # when creating a template from a domain, the domain would be inserted as a parent while the template is creating + # parents_chain must never contain id_domain as a parent + if id_domain in parents_chain: + for i in range(parents_chain.count(id_domain)): + parents_chain.remove(id_domain) + + update_table_field('domains', id_domain, 'parents', parents_chain) + elif xml_from_virt_install is True: xml_from = domain['xml_virt_install'] @@ -751,6 +768,7 @@ def creating_and_test_xml_start(self, id_domain, creating_from_create_dict=False update_table_field('domains', id_domain, 'xml', xml_from) + xml_raw = update_xml_from_dict_domain(id_domain) if xml_raw is False: update_domain_status(status='FailedCreatingDomain', @@ -842,7 +860,7 @@ def domain_from_template(self, old_path_disk = dict_domain_template['hardware']['disks'][0]['file'] old_path_dir = extract_dir_path(old_path_disk) - DEFAULT_GROUP_DIR = CONFIG_DICT['REMOTEOPERATIONS']['default_group_dir'] + #DEFAULT_GROUP_DIR = CONFIG_DICT['REMOTEOPERATIONS']['default_group_dir'] if path_to_disk_dir is None: path_to_disk_dir = DEFAULT_GROUP_DIR + '/' + \ diff --git a/src/engine/models/manager_hypervisors.py b/src/engine/models/manager_hypervisors.py index 51e3b46aa..787a23679 100644 --- a/src/engine/models/manager_hypervisors.py +++ b/src/engine/models/manager_hypervisors.py @@ -4,12 +4,11 @@ # License: AGPLv3 # coding=utf-8 -import pprint - import queue import threading from datetime import datetime from time import sleep +import pprint import rethinkdb as r @@ -28,12 +27,16 @@ set_unknown_domains_not_in_hyps, get_domain, remove_domain, 
update_domain_history_from_id_domain from engine.services.db.domains import update_domain_status, update_domain_start_after_created, update_domain_delete_after_stopped from engine.services.lib.functions import get_threads_running, get_tid, engine_restart +from engine.services.lib.qcow import test_hypers_disk_operations from engine.services.log import logs from engine.services.threads.download_thread import launch_thread_download_changes from engine.services.threads.threads import launch_try_hyps, set_domains_coherence, launch_thread_worker, \ launch_disk_operations_thread, \ launch_long_operations_thread from engine.services.lib.functions import clean_intermediate_status +from engine.services.threads.grafana_thread import GrafanaThread,launch_grafana_thread + +WAIT_HYP_ONLINE = 2.0 class ManagerHypervisors(object): """Main class that control and launch all threads. @@ -50,7 +53,7 @@ def __init__(self, launch_threads=True, with_status_threads=True, status_polling_interval=STATUS_POLLING_INTERVAL, test_hyp_fail_interval=TEST_HYP_FAIL_INTERVAL): - logs.main.info('MAIN PID: {}'.format(get_tid())) + logs.main.info('MAIN TID: {}'.format(get_tid())) self.time_between_polling = TIME_BETWEEN_POLLING self.polling_interval_background = POLLING_INTERVAL_BACKGROUND @@ -70,10 +73,15 @@ def __init__(self, launch_threads=True, with_status_threads=True, self.t_broom = None self.t_background = None self.t_downloads_changes = None + self.t_grafana = None self.quit = False + self.threads_info_main = {} + self.threads_info_hyps = {} + self.hypers_disk_operations_tested = [] + self.num_workers = 0 - self.threads_started = False + self.threads_main_started = False self.STATUS_POLLING_INTERVAL = status_polling_interval self.TEST_HYP_FAIL_INTERVAL = test_hyp_fail_interval @@ -88,13 +96,13 @@ def launch_thread_background_polling(self): self.t_background.start() def check_actions_domains_enabled(self): - if self.num_workers > 0 and self.threads_started is True: + if self.num_workers > 0 and self.threads_main_started is True: return True else: return False def update_info_threads_engine(self): - d = {} + d_mains = {} alive=[] dead=[] not_defined=[] @@ -106,19 +114,30 @@ def update_info_threads_engine(self): #thread not defined not_defined.append(name) + d_mains['alive']=alive + d_mains['dead']=dead + d_mains['not_defined']=not_defined + self.threads_info_main = d_mains.copy() + + d_hyps = {} + alive=[] + dead=[] + not_defined=[] for name in ['workers','status','disk_operations','long_operations']: for hyp,t in self.__getattribute__('t_'+name).items(): try: alive.append(name + '_' + hyp) if t.is_alive() else dead.append(name + '_' + hyp) except: not_defined.append(name) - pass - d['alive']=alive - d['dead']=dead - d['not_defined']=not_defined - update_table_field('engine', 'engine', 'threads', d) - return alive,dead,not_defined + d_hyps['alive']=alive + d_hyps['dead']=dead + d_hyps['not_defined']=not_defined + self.threads_info_hyps = d_hyps.copy() + update_table_field('engine', 'engine', 'threads_info_main', d_mains) + update_table_field('engine', 'engine', 'threads_info_hyps', d_hyps) + + return True def stop_threads(self): # events and broom @@ -143,6 +162,8 @@ def stop_threads(self): self.q_disk_operations + #self.t_downloads_changes.stop = True + # changes @@ -181,26 +202,39 @@ def launch_threads_disk_and_long_operations(self): self.manager.hypers_disk_operations = get_hypers_disk_operations() - for hyp_disk_operations in self.manager.hypers_disk_operations: + self.manager.hypers_disk_operations_tested = 
test_hypers_disk_operations(self.manager.hypers_disk_operations) + + for hyp_disk_operations in self.manager.hypers_disk_operations_tested: hyp_long_operations = hyp_disk_operations d = get_hyp_hostname_user_port_from_id(hyp_disk_operations) - self.manager.t_disk_operations[hyp_disk_operations], \ - self.manager.q_disk_operations[hyp_disk_operations] = launch_disk_operations_thread( - hyp_id=hyp_disk_operations, - hostname=d['hostname'], - user=d['user'], - port=d['port'] - ) - self.manager.t_long_operations[hyp_long_operations], \ - self.manager.q_long_operations[hyp_long_operations] = launch_long_operations_thread( - hyp_id=hyp_long_operations, - hostname=d['hostname'], - user=d['user'], - port=d['port'] - ) + if hyp_disk_operations not in self.manager.t_disk_operations.keys(): + self.manager.t_disk_operations[hyp_disk_operations], \ + self.manager.q_disk_operations[hyp_disk_operations] = launch_disk_operations_thread( + hyp_id=hyp_disk_operations, + hostname=d['hostname'], + user=d['user'], + port=d['port'] + ) + self.manager.t_long_operations[hyp_long_operations], \ + self.manager.q_long_operations[hyp_long_operations] = launch_long_operations_thread( + hyp_id=hyp_long_operations, + hostname=d['hostname'], + user=d['user'], + port=d['port'] + ) def test_hyps_and_start_threads(self): + """If the status of an enabled hypervisor is Error or Offline, + this function tries to connect and launch its threads. + If the hypervisor passes the connection test, its status changes to ReadyToStart, + then to StartingThreads just before the threads are launched; when the + threads are running the state is Online. The status sequence is: + (Offline,Error) => ReadyToStart => StartingThreads => (Online,Error)""" + + # DISK_OPERATIONS: launch threads if the disk operations test passed and they are not launched yet + self.launch_threads_disk_and_long_operations() + l_hyps_to_test = get_hyps_with_status(list_status=['Error', 'Offline'], empty=True) @@ -209,106 +243,177 @@ 'user': d['user'] if 'user' in d.keys() else 'root'} for d in l_hyps_to_test} + # TRY hypervisor connection and UPDATE hypervisors status + # update status: ReadyToStart if all ok launch_try_hyps(dict_hyps_to_test) + + # hyp_hostnames of hyps ready to start dict_hyps_ready = self.manager.dict_hyps_ready = get_hyps_ready_to_start() if len(dict_hyps_ready) > 0: logs.main.debug('hyps_ready_to_start: ' + pprint.pformat(dict_hyps_ready)) - #launch thread events + # launch events thread if it is None if self.manager.t_events is None: logs.main.info('launching hypervisor events thread') - self.manager.t_events = launch_thread_hyps_event(dict_hyps_ready) - else: - #if new hypervisor has added then add hypervisor to receive events - logs.main.info('hypervisors added to thread events') - logs.main.info(pprint.pformat(dict_hyps_ready)) - self.manager.t_events.hyps.update(dict_hyps_ready) - for hyp_id, hostname in self.manager.t_events.hyps.items(): - self.manager.t_events.add_hyp_to_receive_events(hyp_id) + self.manager.t_events = launch_thread_hyps_event() + # else: + # # if a new hypervisor has been added, then add it to receive events + # logs.main.info('hypervisors added to thread events') + # logs.main.info(pprint.pformat(dict_hyps_ready)) + # self.manager.t_events.hyps.update(dict_hyps_ready) + # for hyp_id, hostname in self.manager.t_events.hyps.items(): + # self.manager.t_events.add_hyp_to_receive_events(hyp_id) + set_unknown_domains_not_in_hyps(dict_hyps_ready.keys()) set_domains_coherence(dict_hyps_ready) pools = set() for hyp_id, hostname in 
dict_hyps_ready.items(): update_hyp_status(hyp_id, 'StartingThreads') - # start worker thread + + # launch worker thread self.manager.t_workers[hyp_id], self.manager.q.workers[hyp_id] = launch_thread_worker(hyp_id) + + # LAUNCH status thread if self.manager.with_status_threads is True: self.manager.t_status[hyp_id] = launch_thread_status(hyp_id, self.manager.STATUS_POLLING_INTERVAL) + # ADD hyp to receive_events + self.manager.t_events.q_event_register.put({'type': 'add_hyp_to_receive_events', 'hyp_id': hyp_id}) + # self.manager.launch_threads(hyp_id) # INFO TO DEVELOPER: IT REMAINS TO VERIFY THAT THE THREADS ARE ACTUALLY RUNNING # check that some variable is set to true in one of the threads update_hyp_status(hyp_id, 'Online') pools.update(get_pools_from_hyp(hyp_id)) + # if the hypervisor's pool is not defined in the manager, add it for id_pool in pools: if id_pool not in self.manager.pools.keys(): self.manager.pools[id_pool] = PoolHypervisors(id_pool, self.manager, len(dict_hyps_ready)) def run(self): self.tid = get_tid() - logs.main.info('starting thread: {} (TID {})'.format(self.name, self.tid)) + logs.main.info('starting thread background: {} (TID {})'.format(self.name, self.tid)) q = self.manager.q.background first_loop = True + pool_id = 'default' + # can't launch downloads if the download changes thread is not ready and hyps are not online + update_table_field('hypervisors_pools', pool_id, 'download_changes', 'Stopped') + # if domains have intermediate states (updating, download_aborting...) + # set them to Failed or Delete clean_intermediate_status() l_hyps_to_test = get_hyps_with_status(list_status=['Error', 'Offline'], empty=True) - while len(l_hyps_to_test) == 0: - logs.main.error('no hypervisor enable, waiting for one hypervisor') - sleep(0.5) - l_hyps_to_test = get_hyps_with_status(list_status=['Error', 'Offline'], empty=True) + # while len(l_hyps_to_test) == 0: + # logs.main.error('no hypervisor enabled, waiting for one hypervisor') + # sleep(0.5) + # l_hyps_to_test = get_hyps_with_status(list_status=['Error', 'Offline'], empty=True) while self.manager.quit is False: + #################################################################### + ### MAIN LOOP ###################################################### + # ONLY FOR DEBUG logs.main.debug('##### THREADS ##################') - get_threads_running() - self.manager.update_info_threads_engine() - - # DISK_OPERATIONS: - if len(self.manager.t_disk_operations) == 0: - self.launch_threads_disk_and_long_operations() - - # TEST HYPS AND START THREADS FROM RETHINK - self.test_hyps_and_start_threads() - - # LAUNCH CHANGES THREADS + threads_running = get_threads_running() + #pprint.pprint(threads_running) + #self.manager.update_info_threads_engine() + + # Threads that must always be running, with or without hypervisors: + # - changes_hyp + # - changes_domains + # - downloads_changes + # - broom + # - events + # - grafana + + # Threads that depend on hypervisor availability: + # - disk_operations + # - long_operations + # - for every hypervisor: + # - worker + # - status + + + # LAUNCH MAIN THREADS if first_loop is True: update_table_field('engine', 'engine', 'status_all_threads', 'Starting') + # launch changes_hyp thread self.manager.t_changes_hyps = self.manager.HypervisorChangesThread('changes_hyp', self.manager) self.manager.t_changes_hyps.daemon = True self.manager.t_changes_hyps.start() + # launch changes_domains thread self.manager.t_changes_domains = self.manager.DomainsChangesThread('changes_domains', self.manager) self.manager.t_changes_domains.daemon = 
True self.manager.t_changes_domains.start() + #launch downloads changes thread logs.main.debug('Launching Download Changes Thread') self.manager.t_downloads_changes = launch_thread_download_changes(self.manager) + #launch brom thread self.manager.t_broom = launch_thread_broom(self.manager) - first_loop = False + #launch events thread + logs.main.debug('launching hypervisor events thread') + self.manager.t_events = launch_thread_hyps_event() + + #launch grafana thread + logs.main.debug('launching grafana thread') + self.manager.t_grafana = launch_grafana_thread(self.manager.t_status) logs.main.info('THREADS LAUNCHED FROM BACKGROUND THREAD') update_table_field('engine', 'engine', 'status_all_threads', 'Starting') + while True: + #wait all sleep(0.1) - alive, dead, not_defined = self.manager.update_info_threads_engine() - pprint.pprint({'alive':alive, - 'dead':dead, - 'not_defined':not_defined}) - if len(not_defined) == 0 and len(dead) == 0: + self.manager.update_info_threads_engine() + + #if len(self.manager.threads_info_main['not_defined']) > 0 and len(self.manager.dict_hyps_ready) == 0: + if len(self.manager.threads_info_main['not_defined']) > 0 or len(self.manager.threads_info_main['dead']) > 0: + print('MAIN THREADS starting, wait a second extra') + sleep(1) + self.manager.update_info_threads_engine() + pprint.pprint(self.manager.threads_info_main) + #self.test_hyps_and_start_threads() + if len(self.manager.threads_info_main['not_defined']) == 0 and len(self.manager.threads_info_main['dead']) == 0: update_table_field('engine', 'engine', 'status_all_threads', 'Started') - self.manager.num_workers = len(self.manager.t_workers) - self.manager.threads_started = True + self.manager.threads_main_started = True break + # TEST HYPS AND START THREADS FOR HYPERVISORS + self.test_hyps_and_start_threads() + self.manager.num_workers = len(self.manager.t_workers) + + # Test hypervisor disk operations + # Create Test disk in hypervisor disk operations + if first_loop is True: + first_loop = False + # virtio_test_disk_relative_path = 'admin/admin/admin/virtio_testdisk.qcow2' + # ui.creating_test_disk(test_disk_relative_route=virtio_test_disk_relative_path) + + self.manager.update_info_threads_engine() + if len(self.manager.threads_info_hyps['not_defined']) > 0: + logs.main.error('something was wrong when launching threads for hypervisors, threads not defined') + logs.main.error(pprint.pformat(self.manager.threads_info_hyps)) + if len(self.manager.threads_info_hyps['dead']) > 0: + logs.main.error('something was wrong when launching threads for hypervisors, threads are dead') + logs.main.error(pprint.pformat(self.manager.threads_info_hyps)) + if len(self.manager.threads_info_hyps['dead']) == 0 and len(self.manager.threads_info_hyps['not_defined']) == 0: + pass + try: - action = q.get(timeout=self.manager.TEST_HYP_FAIL_INTERVAL) + if len(self.manager.t_workers) == 0: + timeout_queue = WAIT_HYP_ONLINE + else: + timeout_queue = TEST_HYP_FAIL_INTERVAL + action = q.get(timeout=timeout_queue) if action['type'] == 'stop': self.manager.quit = True logs.main.info('engine end') @@ -350,9 +455,7 @@ def run(self): 'hostname', 'hypervisors_pools', 'port', - 'user', - 'viewer_hostname', - 'viewer_nat_hostname').merge({'table': 'hypervisors'}).changes().\ + 'user').merge({'table': 'hypervisors'}).changes().\ union(r.table('engine').pluck('threads', 'status_all_threads').merge({'table': 'engine'}).changes())\ .run(self.r_conn): @@ -406,12 +509,6 @@ def run(self): logs.changes.debug('^^^^^^^^^^^^^^^^^^^ DOMAIN CHANGES 
THREAD ^^^^^^^^^^^^^^^^^') ui = UiActions(self.manager) - # Test hypervisor disk operations - # Create Test disk in hypervisor disk operations - virtio_test_disk_relative_path = 'admin/admin/admin/virtio_testdisk.qcow2' - ui.creating_test_disk(test_disk_relative_route=virtio_test_disk_relative_path) - - self.r_conn = new_rethink_connection() cursor = r.table('domains').pluck('id', 'kind', 'status', 'detail').merge({'table': 'domains'}).changes().\ @@ -445,7 +542,6 @@ def run(self): new_domain = False new_status = False old_status = False - import pprint logs.changes.debug(pprint.pformat(c)) @@ -525,7 +621,8 @@ def run(self): if old_status == 'Stopped' and new_status == "CreatingTemplate": ui.create_template_disks_from_domain(domain_id) - if old_status == 'Stopped' and new_status == "Deleting": + if old_status == 'Stopped' and new_status == "Deleting" or \ + old_status == 'Downloaded' and new_status == "Deleting": ui.deleting_disks_from_domain(domain_id) if (old_status == 'Stopped' and new_status == "Updating") or \ diff --git a/src/engine/services/db/db.py b/src/engine/services/db/db.py index 445177580..6442560fe 100644 --- a/src/engine/services/db/db.py +++ b/src/engine/services/db/db.py @@ -381,4 +381,21 @@ def remove_media(id): result = rtable.get(id).delete().run(r_conn) close_rethink_connection(r_conn) - return result \ No newline at end of file + return result + +def get_media_with_status(status): + """ + get media with status + :param status + :return: list id_domains + """ + r_conn = new_rethink_connection() + rtable = r.table('media') + try: + results = rtable.get_all(status, index='status').pluck('id').run(r_conn) + close_rethink_connection(r_conn) + except: + # if results is None: + close_rethink_connection(r_conn) + return [] + return [d['id'] for d in results] \ No newline at end of file diff --git a/src/engine/services/db/domains.py b/src/engine/services/db/domains.py index 3069ae520..57e7b3451 100644 --- a/src/engine/services/db/domains.py +++ b/src/engine/services/db/domains.py @@ -41,6 +41,25 @@ def update_domain_force_hyp(id_domain, hyp_id=None): close_rethink_connection(r_conn) return results +def update_domain_parents(id_domain): + r_conn = new_rethink_connection() + rtable = r.table('domains') + d = rtable.get(id_domain).pluck({'create_dict': 'origin'}, 'parents').run(r_conn) + + if 'parents' not in d.keys(): + parents_with_new_origin = [] + elif type(d['parents']) is not list: + parents_with_new_origin = [] + else: + parents_with_new_origin = d['parents'].copy() + + if 'origin' in d['create_dict'].keys(): + parents_with_new_origin.append(d['create_dict']['origin']) + results = rtable.get_all(id_domain, index='id').update({'parents': parents_with_new_origin}).run(r_conn) + + close_rethink_connection(r_conn) + return results + def update_domain_status(status, id_domain, hyp_id=None, detail='', keep_hyp_id=False): r_conn = new_rethink_connection() @@ -56,7 +75,7 @@ def update_domain_status(status, id_domain, hyp_id=None, detail='', keep_hyp_id= if hyp_id is None: - # print('ojojojo') + # print('ojojojo')rtable.get(id_domain) results = rtable.get_all(id_domain, index='id').update({ 'status': status, 'hyp_started': '', @@ -178,22 +197,22 @@ def get_domain_hyp_started_and_status_and_detail(id_domain): # return results -# def get_domains_with_status(status): -# """ -# NOT USED -# :param status: -# :return: -# """ -# r_conn = new_rethink_connection() -# rtable = r.table('domains') -# try: -# results = rtable.get_all(status, index='status').pluck('id').run(r_conn) -# 
close_rethink_connection(r_conn) -# except: -# # if results is None: -# close_rethink_connection(r_conn) -# return [] -# return [d['id'] for d in results] +def get_domains_with_status(status): + """ + get domains with status + :param status + :return: list id_domains + """ + r_conn = new_rethink_connection() + rtable = r.table('domains') + try: + results = rtable.get_all(status, index='status').pluck('id').run(r_conn) + close_rethink_connection(r_conn) + except: + # if results is None: + close_rethink_connection(r_conn) + return [] + return [d['id'] for d in results] def get_domains_with_transitional_status(list_status=TRANSITIONAL_STATUS): @@ -316,6 +335,13 @@ def remove_disk_template_created_list_in_domain(id_domain): close_rethink_connection(r_conn) return results +def update_origin_and_parents_to_new_template(id_domain,template_id): + r_conn = new_rethink_connection() + rtable = r.table('domains') + new_create_dict_origin = {'create_dict':{'origin':template_id}} + results = rtable.get(id_domain).update(new_create_dict_origin).run(r_conn) + close_rethink_connection(r_conn) + return results def remove_dict_new_template_from_domain(id_domain): r_conn = new_rethink_connection() diff --git a/src/engine/services/db/downloads.py b/src/engine/services/db/downloads.py index 0a0bbf23a..ba9436cff 100644 --- a/src/engine/services/db/downloads.py +++ b/src/engine/services/db/downloads.py @@ -1,6 +1,7 @@ import rethinkdb as r from engine.services.db import new_rethink_connection, close_rethink_connection +from engine.services.log import * def get_media(id_media): @@ -25,11 +26,14 @@ def get_downloads_in_progress(): close_rethink_connection(r_conn) return d -def update_status_table(table,status,id_media,detail=""): +def update_status_table(table,status,id_table,detail=""): r_conn = new_rethink_connection() d={'status':status, 'detail':detail} - r.table(table).get(id_media).update(d).run(r_conn) + try: + r.table(table).get(id_table).update(d).run(r_conn) + except: + logs.main.error(f'Error when updating status in table: {table}, status: {status}, id: {id_table}, detail: {detail}') close_rethink_connection(r_conn) def update_status_media_from_path(path,status,detail=''): diff --git a/src/engine/services/lib/download.py b/src/engine/services/lib/download.py new file mode 100644 index 000000000..ccd431844 --- /dev/null +++ b/src/engine/services/lib/download.py @@ -0,0 +1,26 @@ +import requests + + +def test_url_for_download(url,url_download_insecure_ssl=True, + timeout_time_limit=5, dict_header={}): + """Test if the url is alive, prior to launching ssh curl in the hypervisor + to download media, domains...""" + try: + response = requests.head(url, + allow_redirects=True, + # verify must be the negation of the insecure flag: + # allowing insecure ssl means skipping certificate verification + verify=not url_download_insecure_ssl, + timeout=timeout_time_limit, + headers=dict_header) + except requests.exceptions.RequestException as e: + return False,e + + if response.status_code != 200: + error = 'status code {}'.format(response.status_code) + return False,error + + content_type = response.headers.get('Content-Type','') + + if content_type.find('application') < 0: + return False, 'Content-Type of HTTP Header is not application' + else: + return True, ''
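`test_url_for_download` lets the download thread fail fast before spawning curl over ssh on a hypervisor. A short usage sketch (the URL is a placeholder):

```python
# Sketch: pre-flight a download URL before handing it to the hypervisor.
from engine.services.lib.download import test_url_for_download

url = 'https://repository.example.com/media/some.iso'  # placeholder URL
ok, error = test_url_for_download(url, url_download_insecure_ssl=True)
if not ok:
    print('download would fail:', error)
else:
    print('URL looks downloadable')
```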
diff --git a/src/engine/services/lib/functions.py b/src/engine/services/lib/functions.py index 3ca9f59e7..db2421ab0 100644 --- a/src/engine/services/lib/functions.py +++ b/src/engine/services/lib/functions.py @@ -1066,7 +1066,8 @@ def engine_restart(): return True def clean_intermediate_status(): - status_to_delete = ['DownloadAborting'] + #status_to_delete = ['DownloadAborting'] + status_to_delete = [] status_to_failed = ['Updating'] all_domains = get_all_domains_with_id_and_status() @@ -1075,3 +1076,14 @@ [update_domain_status('Failed', d['id'], detail='change status from {} when isard engine restart'.format(d['status'])) for d in all_domains if d['status'] in status_to_failed] + + +def flatten_dict(d): + def items(): + for key, value in list(d.items()): + if isinstance(value, dict): + for subkey, subvalue in list(flatten_dict(value).items()): + yield key + "." + subkey, subvalue + else: + yield key, value + return dict(items()) \ No newline at end of file diff --git a/src/engine/services/lib/grafana.py b/src/engine/services/lib/grafana.py new file mode 100644 index 000000000..293ea17f1 --- /dev/null +++ b/src/engine/services/lib/grafana.py @@ -0,0 +1,73 @@ +import socket +import pickle +import struct +import time + +from engine.services.log import * +from engine.services.lib.functions import flatten_dict + +TIMEOUT_SOCKET_CONNECTION = 30 + + +def send_dict_to_grafana(d,host,port=2004,prefix='isard'): + sender = create_socket_grafana(host=host,port=port) + if sender is not False: + flatten_and_send_dict(d, sender, prefix=prefix) + sender.close() + return True + else: + return False + + +def create_socket_grafana(host,port=2004): + s = socket.socket() + s.settimeout(TIMEOUT_SOCKET_CONNECTION) + + try: + s.connect((host, port)) + return s + + except socket.error as e: + log.error(e) + log.error(f'Failed connection to grafana server: {host} on port {port}') + try: + ip = socket.gethostbyname(host) + except socket.error as e: + log.error(e) + log.error('could not resolve ip from hostname of grafana server: {}'.format(host)) + return False + return False + + +def flatten_and_send_dict(d,sender,prefix='isard'): + type_ok = (int,float) + try: + now = int(time.time()) + tuples = [] + lines = [] + # flatten the dict and build one carbon metric tuple per leaf value + d_flat = flatten_dict(d) + for k,v in d_flat.items(): + k = prefix + '.' + k + + # check if type is ok + if type(v) is bool: + v = 1 if v is True else 0 + + if type(v) in type_ok: + tuples.append((k, (now, v))) + lines.append(f'({now}) {k}: {v}') + + if type(v) is str: + tuples.append((k + '.' + v, (now, 1))) + lines.append(f'({now}) {k}.{v}: 1')
+ + message = '\n'.join(lines) + '\n' # all lines must end in a newline + logs.main.debug('sending to grafana:') + logs.main.debug(message) + package = pickle.dumps(tuples, 1) + size = struct.pack('!L', len(package)) + sender.sendall(size) + sender.sendall(package) + except Exception as e: + log.error(f'Exception when sending dictionary of values to grafana: {e}') \ No newline at end of file
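The helpers above flatten a nested dict into dotted metric names and ship them to carbon's pickle receiver (port 2004 by default). A usage sketch; the host name follows the `isard-grafana` container defined in `extras/grafana/docker-compose.yml`, but is an assumption for any other deployment:

```python
# Sketch: send a nested stats dict to the grafana/graphite container.
from engine.services.lib.grafana import send_dict_to_grafana

stats = {'hypervisors': {'hyper1': {'domains_started': 3, 'cpu_load': 0.42}}}
# becomes isard.hypervisors.hyper1.domains_started, isard.hypervisors.hyper1.cpu_load
send_dict_to_grafana(stats, host='isard-grafana')  # host name is an assumption
```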
diff --git a/src/engine/services/lib/qcow.py b/src/engine/services/lib/qcow.py index 78387ad40..6700b0384 100644 --- a/src/engine/services/lib/qcow.py +++ b/src/engine/services/lib/qcow.py @@ -6,12 +6,23 @@ # coding=utf-8 import json +import string +from random import choices + from os.path import dirname as extract_dir_path from engine.services.db.db import get_pool from engine.services.lib.functions import exec_remote_cmd, size_format, get_threads_names_running, weighted_choice, \ backing_chain_cmd from engine.services.log import * +from engine.services.db import get_hyp_hostname_user_port_from_id +from engine.services.lib.functions import execute_commands +from engine import config +from engine.services.db.db import get_pools_from_hyp + + + + VDESKTOP_DISK_OPERATINOS = CONFIG_DICT['REMOTEOPERATIONS']['host_remote_disk_operatinos'] @@ -340,7 +351,16 @@ def verify_output_cmds1_template_from_domain(cmds_done, path_domain_disk, path_t log.debug('cmd: {}, out: {}, err: {}'.format(d['cmd'], d['out'], d['err'])) error_severity = 'Hard' elif error_severity != 'Hard': - df_bytes = int(d['out'].splitlines()[-1].split()[3]) * 1024 + try: + df_bytes = int(d['out'].splitlines()[-1].split()[3]) * 1024 + except: + # if the mount point path is too long, df splits its output across two lines + try: + df_bytes = int(d['out'].splitlines()[-1].split()[2]) * 1024 + except: + log.info('When trying to get the disk free space prior to creating the template, df output is not standard') + log.debug('cmd: {}, out: {}, err: {}'.format(d['cmd'], d['out'], d['err'])) + df_bytes = 999999999 log.debug('disk free for create template from domain {}: {}'.format(id_domain, size_format(df_bytes))) d = [a for a in cmds_done if a['title'] == 'size_template_disk'][0] @@ -487,3 +507,45 @@ def get_host_and_path_diskoperations_to_write_in_path(type_path, relative_path, else: path_absolute = path_selected + '/' + relative_path return host_disk_operations_selected, path_absolute + +def test_hypers_disk_operations(hyps_disk_operations): + list_hyps_ok = list() + str_random = ''.join(choices(string.ascii_uppercase + string.digits, k=8)) + for hyp_id in hyps_disk_operations: + d_hyp = get_hyp_hostname_user_port_from_id(hyp_id) + cmds1 = list() + for pool_id in get_pools_from_hyp(hyp_id): + # test write permissions in root dir of all paths defined in pool + paths = {k: [l['path'] for l in d] for k, d in get_pool(pool_id)['paths'].items()} + for k, p in paths.items(): + for path in p: + cmds1.append({'title': f'try create dir if not exists - pool:{pool_id}, hypervisor: {hyp_id}, path_kind: {k}', + 'cmd': f'mkdir -p {path}'}) + cmds1.append({'title': f'touch random file - pool:{pool_id}, hypervisor: {hyp_id}, path_kind: {k}', + 'cmd': f'touch {path}/test_random_{str_random}'}) + cmds1.append({'title': f'delete random file - pool:{pool_id}, hypervisor: {hyp_id}, path_kind: {k}', + 'cmd': f'rm -f {path}/test_random_{str_random}'}) + try: + array_out_err = execute_commands(d_hyp['hostname'], + ssh_commands=cmds1, + dict_mode=True, + user=d_hyp['user'], + port=d_hyp['port']) + # if any path fails, the hypervisor is not valid for disk operations + if len([d['err'] for d in array_out_err if len(d['err']) > 0]) > 0: + logs.main.error(f'Hypervisor {hyp_id} can not do disk_operations, errors when testing if it can create files in all paths') + for d_cmd_err in [d for d in array_out_err if len(d['err']) > 0]: + cmd = d_cmd_err['cmd'] + err = d_cmd_err['err'] + logs.main.error(f'Command: {cmd} -- Error: {err}') + else: + list_hyps_ok.append(hyp_id) + + except Exception as e: + logs.main.error(f'Error when launching commands to test hypervisor {hyp_id} disk_operations: {e}') + + return list_hyps_ok + + + diff --git a/src/engine/services/log.py b/src/engine/services/log.py index dd8024fab..e7471e8c3 100644 --- a/src/engine/services/log.py +++ b/src/engine/services/log.py @@ -36,6 +36,7 @@ # logger = log.getLogger() # logger.setLevel(LOG_LEVEL_NUM) # log.Formatter(fmt=LOG_FORMAT,datefmt=LOG_DATE_FORMAT) +print(f'Engine log level: {LOG_LEVEL} ({LOG_LEVEL_NUM})') # log.basicConfig(format=LOG_FORMAT, datefmt=LOG_DATE_FORMAT,level=LOG_LEVEL_NUM) log.basicConfig(filename=LOG_DIR + '/' + LOG_FILE, diff --git a/src/engine/services/threads/download_thread.py b/src/engine/services/threads/download_thread.py index edd347ce6..c2705a791 100644 --- a/src/engine/services/threads/download_thread.py +++ b/src/engine/services/threads/download_thread.py @@ -11,36 +11,49 @@ import os import subprocess import rethinkdb as r +from time import sleep from engine.config import CONFIG_DICT from engine.services.db.db import new_rethink_connection, remove_media from engine.services.db.domains import update_domain_status from engine.services.log import logs from engine.services.db import get_config_branch, get_hyp_hostname_user_port_from_id, update_table_field, \ - update_domain_dict_create_dict, get_domain, delete_domain -from engine.services.db.downloads import get_downloads_in_progress, update_download_percent, update_status_table,\ - get_media + update_domain_dict_create_dict, get_domain, delete_domain +from engine.services.db.downloads import get_downloads_in_progress, update_download_percent, update_status_table, \ + get_media from engine.services.lib.qcow import get_host_disk_operations_from_path, get_path_to_disk, create_cmds_delete_disk from engine.services.lib.functions import get_tid +from engine.services.lib.download import test_url_for_download +from engine.services.db.domains import get_domains_with_status +from engine.services.db.db import get_media_with_status +from engine.services.db.hypervisors import get_hypers_in_pool + URL_DOWNLOAD_INSECURE_SSL = True +TIMEOUT_WAITING_HYPERVISOR_TO_DOWNLOAD = 10 + class DownloadThread(threading.Thread, object): - def __init__(self, hyp_hostname, url, path, table, id_down, dict_header, finalished_threads): + def __init__(self, url, path, path_selected, table, id_down, dict_header, finalished_threads, manager, pool_id, + type_path_selected): threading.Thread.__init__(self) self.name = '_'.join([table, id_down]) self.table = table self.path = path + self.path_selected = path_selected self.id = id_down self.url = url self.dict_header = dict_header self.stop = False - d = get_hyp_hostname_user_port_from_id(hyp_hostname) - self.hostname = d['hostname'] - self.user = d['user'] - self.port = d['port'] self.finalished_threads = finalished_threads + self.manager = manager + self.hostname = None + self.user = None + self.port = None + self.pool_id = pool_id + self.type_path_selected = type_path_selected + def run(self): # if self.table == 'domains': @@ -58,6 +71,37 @@ def run(self): # # 
diff --git a/src/engine/services/threads/download_thread.py b/src/engine/services/threads/download_thread.py
index edd347ce6..c2705a791 100644
--- a/src/engine/services/threads/download_thread.py
+++ b/src/engine/services/threads/download_thread.py
@@ -11,36 +11,49 @@
 import os
 import subprocess

 import rethinkdb as r
+from time import sleep

 from engine.config import CONFIG_DICT
 from engine.services.db.db import new_rethink_connection, remove_media
 from engine.services.db.domains import update_domain_status
 from engine.services.log import logs
 from engine.services.db import get_config_branch, get_hyp_hostname_user_port_from_id, update_table_field, \
-    update_domain_dict_create_dict, get_domain, delete_domain
-from engine.services.db.downloads import get_downloads_in_progress, update_download_percent, update_status_table,\
-    get_media
+    update_domain_dict_create_dict, get_domain, delete_domain
+from engine.services.db.downloads import get_downloads_in_progress, update_download_percent, update_status_table, \
+    get_media
 from engine.services.lib.qcow import get_host_disk_operations_from_path, get_path_to_disk, create_cmds_delete_disk
 from engine.services.lib.functions import get_tid
+from engine.services.lib.download import test_url_for_download
+from engine.services.db.domains import get_domains_with_status
+from engine.services.db.db import get_media_with_status
+from engine.services.db.hypervisors import get_hypers_in_pool
+

 URL_DOWNLOAD_INSECURE_SSL = True
+TIMEOUT_WAITING_HYPERVISOR_TO_DOWNLOAD = 10
+

 class DownloadThread(threading.Thread, object):
-    def __init__(self, hyp_hostname, url, path, table, id_down, dict_header, finalished_threads):
+    def __init__(self, url, path, path_selected, table, id_down, dict_header, finalished_threads, manager, pool_id,
+                 type_path_selected):
        threading.Thread.__init__(self)
        self.name = '_'.join([table, id_down])
        self.table = table
        self.path = path
+       self.path_selected = path_selected
        self.id = id_down
        self.url = url
        self.dict_header = dict_header
        self.stop = False
-       d = get_hyp_hostname_user_port_from_id(hyp_hostname)
-       self.hostname = d['hostname']
-       self.user = d['user']
-       self.port = d['port']
        self.finalished_threads = finalished_threads
+       self.manager = manager
+       self.hostname = None
+       self.user = None
+       self.port = None
+       self.pool_id = pool_id
+       self.type_path_selected = type_path_selected

    def run(self):
        # if self.table == 'domains':
@@ -58,6 +71,37 @@ def run(self):
        #
        #     hyp_to_disk_create = get_host_disk_operations_from_path(path_selected, pool=self.pool,
        #                                                             type_path=type_path_selected)
+
+       # hypervisor to launch the download command:
+       # wait until the disk_operations threads are alive
+       time_elapsed = 0
+       path_selected = self.path_selected
+       while True:
+           if len(self.manager.t_disk_operations) > 0:
+               hyp_to_disk_create = get_host_disk_operations_from_path(path_selected,
+                                                                       pool=self.pool_id,
+                                                                       type_path=self.type_path_selected)
+               logs.downloads.debug(f'Download thread starting in hypervisor: {hyp_to_disk_create}')
+               if self.manager.t_disk_operations.get(hyp_to_disk_create, False) is not False:
+                   if self.manager.t_disk_operations[hyp_to_disk_create].is_alive():
+                       d = get_hyp_hostname_user_port_from_id(hyp_to_disk_create)
+                       self.hostname = d['hostname']
+                       self.user = d['user']
+                       self.port = d['port']
+                       break
+           sleep(0.2)
+           time_elapsed += 0.2
+           if time_elapsed > TIMEOUT_WAITING_HYPERVISOR_TO_DOWNLOAD:
+               logs.downloads.info(
+                   f'Timeout ({TIMEOUT_WAITING_HYPERVISOR_TO_DOWNLOAD} sec) waiting for an online hypervisor to download {self.url}')
+               if self.table == 'domains':
+                   update_domain_status('DownloadFailed', self.id, detail="downloaded disk")
+               else:
+                   update_status_table(self.table, 'DownloadFailed', self.id)
+               self.finalished_threads.append(self.path)
+               return False

        header_template = "--header '{header_key}: {header_value}' "
        headers = ''
@@ -66,8 +110,23 @@ def run(self):
        else:
            insecure_option = ''

-       for k,v in self.dict_header.items():
+       dict_header = {}
+       for k, v in self.dict_header.items():
            headers += header_template.format(header_key=k, header_value=v)
+           dict_header[k] = v
+
+       # test if the URL actually returns a stream of data
+       ok, error_msg = test_url_for_download(self.url,
+                                             url_download_insecure_ssl=URL_DOWNLOAD_INSECURE_SSL,
+                                             timeout_time_limit=TIMEOUT_WAITING_HYPERVISOR_TO_DOWNLOAD,
+                                             dict_header=dict_header)
+
+       if ok is False:
+           logs.downloads.error(f'URL check failed for url: {self.url}')
+           logs.downloads.error(f'Failed url check reason: {error_msg}')
+           update_status_table(self.table, 'DownloadFailed', self.id, detail=error_msg)
+           return False

        curl_template = "curl {insecure_option} -L -o '{path}' {headers} '{url}'"

@@ -79,11 +138,11 @@ def run(self):
        ssh_command = ssh_template.format(port=self.port,
                                          user=self.user,
                                          hostname=self.hostname,
-                                         path= self.path,
-                                         path_dir= dirname(self.path),
+                                         path=self.path,
+                                         path_dir=dirname(self.path),
                                          headers=headers,
-                                         url= self.url,
-                                         insecure_option = insecure_option)
+                                         url=self.url,
+                                         insecure_option=insecure_option)

        logs.downloads.debug("SSH COMMAND: {}".format(ssh_command))

@@ -93,7 +152,7 @@ def run(self):
                             stderr=subprocess.PIPE,
                             preexec_fn=os.setsid)
        rc = p.poll()
-       update_status_table(self.table,'Downloading',self.id,"downloading in hypervisor: {}".format(self.hostname))
+       update_status_table(self.table, 'Downloading', self.id, "downloading in hypervisor: {}".format(self.hostname))
        while rc != 0:
            header = p.stderr.readline().decode('utf8')
            header2 = p.stderr.readline().decode('utf8')
@@ -117,27 +176,30 @@ def run(self):
            c = p.stderr.read(1).decode('utf8')

            if self.stop is True:
-               curl_cmd = curl_template.format(path= self.path,
+               curl_cmd = curl_template.format(path=self.path,
                                                headers=headers,
-                                               url= self.url,
-                                               insecure_option = insecure_option)
-               #for pkill curl order is cleaned
-               curl_cmd = curl_cmd.replace("'","")
-               curl_cmd = curl_cmd.replace(" "," ")
-
-               ssh_cmd_kill_curl = """ssh -p {port} {user}@{hostname} "pkill -f \\"^{curl_cmd}\\" " """.format(port=self.port,
-                                                                                                               user=self.user,
-                                                                                                               hostname=self.hostname,
-
curl_cmd=curl_cmd - ) - - logs.downloads.info('download {} aborted, ready to send ssh kill to curl in hypervisor {}'.format(self.path,self.hostname)) - - #destroy curl in hypervisor + url=self.url, + insecure_option=insecure_option) + # for pkill curl order is cleaned + curl_cmd = curl_cmd.replace("'", "") + curl_cmd = curl_cmd.replace(" ", " ") + + ssh_cmd_kill_curl = """ssh -p {port} {user}@{hostname} "pkill -f \\"^{curl_cmd}\\" " """.format( + port=self.port, + user=self.user, + hostname=self.hostname, + curl_cmd=curl_cmd + ) + + logs.downloads.info( + 'download {} aborted, ready to send ssh kill to curl in hypervisor {}'.format(self.path, + self.hostname)) + + # destroy curl in hypervisor p_kill_curl = subprocess.Popen(ssh_cmd_kill_curl, shell=True) p_kill_curl.wait(timeout=5) - #destroy ssh command + # destroy ssh command try: os.killpg(os.getpgid(p.pid), signal.SIGTERM) except Exception as e: @@ -147,19 +209,24 @@ def run(self): remove_media(self.id) if self.table == 'domains': delete_domain(self.id) - #update_status_table(self.table, 'FailedDownload', self.id, detail="download aborted") + # update_status_table(self.table, 'DownloadFailed', self.id, detail="download aborted") return False if not c: break if c == '\r': if len(line) > 60: - logs.downloads.debug(line) values = line.split() logs.downloads.debug(self.url) logs.downloads.debug(line) - d_progress = dict(zip(keys,values)) - d_progress['total_percent'] = int(float(d_progress['total_percent'])) - d_progress['received_percent'] = int(float(d_progress['received_percent'])) + d_progress = dict(zip(keys, values)) + try: + d_progress['total_percent'] = int(float(d_progress['total_percent'])) + d_progress['received_percent'] = int(float(d_progress['received_percent'])) + if d_progress['received_percent'] > 1: + pass + except: + d_progress['total_percent'] = 0 + d_progress['received_percent'] = 0 update_download_percent(d_progress, self.table, self.id) line = p.stderr.read(60).decode('utf8') @@ -175,9 +242,9 @@ def run(self): assert rc == 0 if self.table == 'domains': - #update_table_field(self.table, self.id, 'path_downloaded', self.path) + # update_table_field(self.table, self.id, 'path_downloaded', self.path) d_update_domain = get_domain(self.id)['create_dict'] - #d_update_domain = {'hardware': {'disks': [{}]}} + # d_update_domain = {'hardware': {'disks': [{}]}} d_update_domain['hardware']['disks'][0]['file'] = self.path update_domain_dict_create_dict(self.id, d_update_domain) @@ -186,11 +253,10 @@ def run(self): update_domain_status('Updating', self.id, detail="downloaded disk") else: self.finalished_threads.append(self.path) - update_table_field(self.table,self.id,'path_downloaded',self.path) + update_table_field(self.table, self.id, 'path_downloaded', self.path) update_status_table(self.table, 'Downloaded', self.id) - class DownloadChangesThread(threading.Thread): def __init__(self, manager, name='download_changes'): threading.Thread.__init__(self) @@ -213,7 +279,6 @@ def __init__(self, manager, name='download_changes'): self.download_threads = {} self.finalished_threads = [] - def get_file_path(self, dict_changes): table = dict_changes['table'] if table == 'domains': @@ -238,27 +303,52 @@ def get_file_path(self, dict_changes): type_path=type_path_selected) return new_file_path, path_selected, type_path_selected, pool_id - def abort_download(self, dict_changes): + def killall_curl(self,hyp_id): + action = dict() + action['type'] = 'killall_curl' + + pool_id = 'default' + self.manager.q.workers[hyp_id].put(action) + + def 
abort_download(self, dict_changes,final_status='Deleted'): logs.downloads.debug('aborting download function') new_file_path, path_selected, type_path_selected, pool_id = self.get_file_path(dict_changes) if new_file_path in self.download_threads.keys(): self.download_threads[new_file_path].stop = True else: - update_status_table(dict_changes['table'],'FailedDownload',dict_changes['id']) - - - def delete_media(self,dict_changes): + update_status_table(dict_changes['table'], 'DownloadFailed', dict_changes['id']) + # and delete partial download + cmds = create_cmds_delete_disk(new_file_path) + + # change for other pools when pools are implemented in all media + try: + pool_id = 'default' + next_hyp = self.manager.pools[pool_id].get_next() + logs.downloads.debug('hypervisor where delete media {}: {}'.format(new_file_path, next_hyp)) + + action = dict() + action['id_media'] = dict_changes['id'] + action['path'] = new_file_path + action['type'] = 'delete_media' + action['final_status'] = final_status + action['ssh_commands'] = cmds + + self.manager.q.workers[next_hyp].put(action) + return True + except Exception as e: + logs.downloads.error('next hypervisor fail: ' + str(e)) + + def delete_media(self, dict_changes): table = dict_changes['table'] id_down = dict_changes['id'] d_media = get_media(id_down) cmds = create_cmds_delete_disk(d_media['path_downloaded']) - #change for other pools when pools are implemented in all media + # change for other pools when pools are implemented in all media pool_id = 'default' next_hyp = self.manager.pools[pool_id].get_next() logs.downloads.debug('hypervisor where delete media {}: {}'.format(d_media['path_downloaded'], next_hyp)) - action = dict() action['id_media'] = id_down action['path'] = d_media['path_downloaded'] @@ -269,7 +359,7 @@ def delete_media(self,dict_changes): ## call disk_operations thread_to_delete - def remove_download_thread(self,dict_changes): + def remove_download_thread(self, dict_changes): new_file_path, path_selected, type_path_selected, pool_id = self.get_file_path(dict_changes) if new_file_path in self.download_threads.keys(): self.download_threads.pop(new_file_path) @@ -284,30 +374,23 @@ def start_download(self, dict_changes): # all disk downloads create a desktop - url_base = self.url_resources header_dict = {} if len(subdir_url) > 0: url_base = url_base + '/' + subdir_url - - # hypervisor to launch download command - hyp_to_disk_create = get_host_disk_operations_from_path(path_selected, - pool=pool_id, - type_path=type_path_selected) - - if dict_changes.get('url-web',False) is not False: + if dict_changes.get('url-web', False) is not False: url = dict_changes['url-web'] - elif dict_changes.get('url-isard',False) is not False: + elif dict_changes.get('url-isard', False) is not False: url_isard = dict_changes['url-isard'] url = url_base + '/' + table + '/' + url_isard if len(self.url_code) > 0: header_dict['Authorization'] = self.url_code else: - logs.downloads.error(('web-url or isard-url must be keys in dictionary for domain {}'+ - ' to download disk file from internet. ').format(id_down)) + logs.downloads.error(('web-url or isard-url must be keys in dictionary for domain {}' + + ' to download disk file from internet. 
').format(id_down))
            exit()

        if new_file_path in self.finalished_threads:
@@ -320,27 +403,71 @@ def start_download(self, dict_changes):
            d_update_domain['hardware']['disks'][0]['path_selected'] = path_selected
            update_domain_dict_create_dict(id_down, d_update_domain)

-       # launching download threads
        if new_file_path not in self.download_threads:
-           self.download_threads[new_file_path] = DownloadThread(hyp_to_disk_create,
-                                                                 url,
+           # launching download threads
+           logs.downloads.debug(f'ready to start DownloadThread --> url:{url} , path:{new_file_path}')
+           self.download_threads[new_file_path] = DownloadThread(url,
                                                                  new_file_path,
+                                                                 path_selected,
                                                                  table,
                                                                  id_down,
                                                                  header_dict,
-                                                                 self.finalished_threads)
+                                                                 self.finalished_threads,
+                                                                 self.manager,
+                                                                 pool_id,
+                                                                 type_path_selected)
            self.download_threads[new_file_path].daemon = True
            self.download_threads[new_file_path].start()
        else:
-           logs.downloads.info('download thread launched to this path: {}'.format(new_file_path))
+           logs.downloads.error('download thread launched previously to this path: {}'.format(new_file_path))

    def run(self):
        self.tid = get_tid()
        logs.downloads.debug('RUN-DOWNLOAD-THREAD-------------------------------------')
+       pool_id = 'default'
+       first_loop = True
        if self.stop is False:
+           if first_loop is True:
+               # if domains or media were left with status Downloading when the engine
+               # restarted, reset them: delete the partial file and mark them DownloadFailed
+               first_loop = False
+               # wait for a hypervisor able to handle downloads
+               next_hyp = False
+               while next_hyp is False:
+                   logs.downloads.info('waiting for an online hypervisor to launch downloading actions')
+                   if pool_id in self.manager.pools.keys():
+                       next_hyp = self.manager.pools[pool_id].get_next()
+                   sleep(1)
+
+               for hyp_id in get_hypers_in_pool():
+                   self.killall_curl(hyp_id)
+
+               domains_status_downloading = get_domains_with_status('Downloading')
+               medias_status_downloading = get_media_with_status('Downloading')
+
+               for id_domain in domains_status_downloading:
+                   create_dict = get_domain(id_domain)['create_dict']
+                   dict_changes = {'id': id_domain,
+                                   'table': 'domains',
+                                   'create_dict': create_dict}
+                   update_domain_status('ResetDownloading', id_domain)
+                   self.abort_download(dict_changes, final_status='DownloadFailed')
+
+               for id_media in medias_status_downloading:
+                   dict_media = get_media(id_media)
+                   dict_changes = {'id': id_media,
+                                   'table': 'media',
+                                   'path': dict_media['path'],
+                                   'hypervisors_pools': dict_media['hypervisors_pools']}
+                   update_status_table('media', 'ResetDownloading', id_media)
+                   self.abort_download(dict_changes, final_status='DownloadFailed')

            self.r_conn = new_rethink_connection()
-           for c in r.table('media').get_all(r.args(['Deleting','Deleted','Downloaded','DownloadStarting', 'Downloading','Download','DownloadAborting']), index='status').\
+           update_table_field('hypervisors_pools', pool_id, 'download_changes', 'Started')
+           for c in r.table('media').get_all(r.args(
+                   ['Deleting', 'Deleted', 'Downloaded', 'DownloadFailed', 'DownloadStarting', 'Downloading', 'Download',
+                    'DownloadAborting', 'ResetDownloading']), index='status'). \
                    pluck('id',
                          'path',
                          'url-isard',
                          'url-web',
                          'status'
                          ).merge(
                {'table': 'media'}).changes(include_initial=True).union(
-               r.table('domains').get_all(r.args(['Downloaded','DownloadStarting', 'Downloading','DownloadAborting']), index='status').\
+               r.table('domains').get_all(
+                   r.args(['Downloaded', 'DownloadFailed', 'DownloadStarting', 'Downloading', 'DownloadAborting', 'ResetDownloading']), index='status'). \
                    pluck('id',
                          'create_dict',
                          'url-isard',
                          'url-web',
                          'status').merge(
                    {"table": "domains"}).changes(include_initial=True)).union(
-               r.table('engine').pluck('threads', 'status_all_threads').merge({'table': 'engine'}).changes()).run(self.r_conn):
+               r.table('engine').pluck('threads', 'status_all_threads').merge({'table': 'engine'}).changes()).run(
+               self.r_conn):

                if self.stop:
                    break
@@ -369,15 +498,15 @@ def run(self):
                    logs.downloads.debug('DOWNLOAD CHANGES DETECTED:')
                    logs.downloads.debug(pprint.pformat(c))

-                   if c.get('old_val',None) is None:
+                   if c.get('old_val', None) is None:
                        if c['new_val']['status'] == 'DownloadStarting':
                            self.start_download(c['new_val'])

-                   elif c.get('new_val',None) is None:
+                   elif c.get('new_val', None) is None:
                        if c['old_val']['status'] in ['DownloadAborting']:
                            self.remove_download_thread(c['old_val'])

                    elif 'old_val' in c and 'new_val' in c:
-                       if c['old_val']['status'] == 'FailedDownload' and c['new_val']['status'] == 'DownloadStarting':
+                       if c['old_val']['status'] == 'DownloadFailed' and c['new_val']['status'] == 'DownloadStarting':
                            self.start_download(c['new_val'])

                        elif c['old_val']['status'] == 'Downloaded' and c['new_val']['status'] == 'Deleting':
@@ -388,7 +517,7 @@ def run(self):
                                if c['new_val']['table'] == 'media':
                                    remove_media(c['new_val']['id'])

-                       elif c['old_val']['status'] == 'Downloading' and c['new_val']['status'] == 'FailedDownload':
+                       elif c['old_val']['status'] == 'Downloading' and c['new_val']['status'] == 'DownloadFailed':
                            pass

                        elif c['old_val']['status'] == 'DownloadStarting' and c['new_val']['status'] == 'Downloading':
@@ -400,6 +529,10 @@ def run(self):
                        elif c['old_val']['status'] == 'Downloading' and c['new_val']['status'] == 'DownloadAborting':
                            self.abort_download(c['new_val'])

+                       elif c['old_val']['status'] == 'Downloading' and c['new_val']['status'] == 'ResetDownloading':
+                           self.abort_download(c['new_val'], final_status='DownloadFailed')
+

def launch_thread_download_changes(manager):
    t = DownloadChangesThread(manager)
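With the ResetDownloading handling in place, the changefeed loop above amounts to a small state machine over the media/domain status field. A simplified summary of the transitions it reacts to (illustrative, not code from the patch):

```python
# Simplified view of the status transitions DownloadChangesThread handles;
# handler names abbreviated, some intermediate updates omitted.
HANDLED_TRANSITIONS = {
    (None, 'DownloadStarting'):             'start_download',
    ('DownloadFailed', 'DownloadStarting'): 'start_download',            # user retry
    ('Downloading', 'DownloadAborting'):    'abort_download',
    ('Downloading', 'ResetDownloading'):    'abort_download -> DownloadFailed',
    ('Downloaded', 'Deleting'):             'delete_media',
    ('DownloadAborting', None):             'remove_download_thread',
}
```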
diff --git a/src/engine/services/threads/grafana_thread.py b/src/engine/services/threads/grafana_thread.py
new file mode 100644
index 000000000..82dd123ef
--- /dev/null
+++ b/src/engine/services/threads/grafana_thread.py
@@ -0,0 +1,126 @@
+# Copyright 2019 the Isard-vdi project authors:
+#      Alberto Larraz Dalmases
+#      Josep Maria Viñolas Auquer
+# License: AGPLv3
+# coding=utf-8
+import threading
+from time import sleep
+
+from engine.services.log import logs
+from engine.services.lib.functions import get_tid, flatten_dict
+from engine.services.db import get_hyp_hostnames_online
+from engine.services.lib.grafana import send_dict_to_grafana
+from engine.services.db.config import get_config
+
+SEND_TO_GRAFANA_INTERVAL = 5
+SEND_STATIC_VALUES_INTERVAL = 30
+
+def launch_grafana_thread(d_threads_status):
+    t = GrafanaThread(name='grafana',
+                      d_threads_status=d_threads_status)
+    t.daemon = True
+    t.start()
+    return t
+
+class GrafanaThread(threading.Thread):
+    def __init__(self, name, d_threads_status):
+        threading.Thread.__init__(self)
+        self.name = name
+        self.stop = False
+        self.t_status = d_threads_status
+        self.restart_send_config = False
+        self.active = False
+        self.send_to_grafana_interval = SEND_TO_GRAFANA_INTERVAL
+        self.send_static_values_interval = SEND_STATIC_VALUES_INTERVAL
+        self.host_grafana = False
+        self.port = False
+
+    def get_hostname_grafana(self):
+        try:
+            dict_grafana = get_config()['engine']['grafana']
+
+            if dict_grafana["active"] is not True:
+                self.active = False
+                return False
+            else:
+                self.host_grafana
= dict_grafana["hostname"] + self.port = int(dict_grafana["carbon_port"]) + self.send_static_values_interval = int(dict_grafana.get('send_static_values_interval', + SEND_STATIC_VALUES_INTERVAL)) + self.send_to_grafana_interval = int(dict_grafana.get('interval', + SEND_TO_GRAFANA_INTERVAL)) + self.active = True + return True + except Exception as e: + logs.main.error(f'grafana config error: {e}') + self.active = False + return False + + def send(self,d): + send_dict_to_grafana(d, self.host_grafana, self.port) + + def run(self): + self.tid = get_tid() + logs.main.info('starting thread: {} (TID {})'.format(self.name, self.tid)) + + #get hostname grafana config + self.get_hostname_grafana() + + hyps_online = [] + + elapsed = self.send_static_values_interval + while self.stop is False: + sleep(self.send_to_grafana_interval) + elapsed += self.send_to_grafana_interval + + if self.restart_send_config is True: + self.restart_send_config = False + self.get_hostname_grafana() + + if self.active is True: + for i,id_hyp in enumerate(self.t_status.keys()): + try: + if self.t_status[id_hyp].status_obj.hyp_obj.connected is True: + if id_hyp not in hyps_online: + hyps_online.append(id_hyp) + check_hyp = True + except: + logs.main.error(f'hypervisor {id_hyp} problem checking if is connected') + check_hyp = False + + if len(hyps_online) > 0 and check_hyp is True: + #send static values of hypervisors + if elapsed >= self.send_static_values_interval: + d_hyps_info = dict() + for i, id_hyp in enumerate(hyps_online): + d_hyps_info[f'hyp-info-{i}'] = self.t_status[id_hyp].status_obj.hyp_obj.info + # ~ self.send(d_hyps_info) + elapsed = 0 + + #send stats + dict_to_send = dict() + j=0 + for i, id_hyp in enumerate(hyps_online): + if id_hyp in self.t_status.keys(): + #stats_hyp = self.t_status[id_hyp].status_obj.hyp_obj.stats_hyp + stats_hyp_now = self.t_status[id_hyp].status_obj.hyp_obj.stats_hyp_now + #stats_domains = self.t_status[id_hyp].status_obj.hyp_obj.stats_domains + if len(stats_hyp_now) > 0: + dict_to_send[f'hypers.'+id_hyp] = {'stats':stats_hyp_now,'info':d_hyps_info['hyp-info-'+str(i)],'domains':{}} + stats_domains_now = self.t_status[id_hyp].status_obj.hyp_obj.stats_domains_now + # ~ for id_domain,d_stats in stats_domains_now.items(): + # ~ if len(stats_hyp_now) > 0: + # ~ for id_domain,d_stats in stats_domains_now.items(): + # ~ dict_to_send[f'domain-stats-{j}'] = {'domain-id':{id_domain:1},'last': d_stats,} + dict_to_send[f'hypers.'+id_hyp]['domains']=stats_domains_now #{x:0 for x in stats_domains_now} + # ~ print(stats_domains_now) + # ~ j+=1 + + if len(dict_to_send) > 0: + self.send(dict_to_send) + + + + diff --git a/src/engine/services/threads/hyp_worker_thread.py b/src/engine/services/threads/hyp_worker_thread.py index 4b627f691..9c359f835 100644 --- a/src/engine/services/threads/hyp_worker_thread.py +++ b/src/engine/services/threads/hyp_worker_thread.py @@ -18,7 +18,7 @@ from engine.services.lib.functions import get_tid, engine_restart from engine.services.log import logs from engine.services.threads.threads import TIMEOUT_QUEUES, launch_action_disk, RETRIES_HYP_IS_ALIVE, \ - TIMEOUT_BETWEEN_RETRIES_HYP_IS_ALIVE, launch_delete_media + TIMEOUT_BETWEEN_RETRIES_HYP_IS_ALIVE, launch_delete_media, launch_killall_curl from engine.models.domain_xml import XML_SNIPPET_CDROM, XML_SNIPPET_DISK_VIRTIO, XML_SNIPPET_DISK_CUSTOM class HypWorkerThread(threading.Thread): @@ -171,13 +171,19 @@ def run(self): elif action['type'] in ['add_media_hot']: pass - + elif action['type'] in ['killall_curl']: + 
launch_killall_curl(self.hostname,
+                                        user,
+                                        port)

                elif action['type'] in ['delete_media']:
+                    final_status = action.get('final_status', 'Deleted')
                    launch_delete_media (action,
                                         self.hostname,
                                         user,
-                                        port)
+                                        port,
+                                        final_status=final_status)

#                ## DESTROY THREAD
#                elif action['type'] == 'destroy_thread':
diff --git a/src/engine/services/threads/threads.py b/src/engine/services/threads/threads.py
index beddfa817..2b2c3e299 100644
--- a/src/engine/services/threads/threads.py
+++ b/src/engine/services/threads/threads.py
@@ -16,7 +16,7 @@
    get_domains_started_in_hyp, update_domains_started_in_hyp_to_unknown, remove_media
 from engine.services.db.downloads import update_status_media_from_path
 from engine.services.db.db import update_table_field
-from engine.services.db.domains import update_domain_status
+from engine.services.db.domains import update_domain_status, update_domain_parents
 from engine.services.db.hypervisors import update_hyp_status, get_hyp_hostname_from_id, \
    update_hypervisor_failed_connection, update_db_hyp_info
 from engine.services.lib.functions import dict_domain_libvirt_state_to_isard_state, state_and_cause_to_str, \
@@ -62,8 +62,8 @@ def launch_disk_operations_thread(hyp_id, hostname, user='root', port=22):
                                             hyp_id=hyp_id,
                                             hostname=hostname,
                                             queue_actions=queue_disk_operation,
-                                            user='root',
-                                            port=22)
+                                            user=user,
+                                            port=port)
    thread_disk_operation.daemon = True
    thread_disk_operation.start()
    return thread_disk_operation, queue_disk_operation
@@ -78,8 +78,8 @@ def launch_long_operations_thread(hyp_id, hostname, user='root', port=22):
                                             hyp_id=hyp_id,
                                             hostname=hostname,
                                             queue_actions=queue_long_operation,
-                                            user='root',
-                                            port=22)
+                                            user=user,
+                                            port=port)
    thread_long_operation.daemon = True
    thread_long_operation.start()
    return thread_long_operation, queue_long_operation
@@ -108,7 +108,21 @@ def launch_action_delete_disk(action, hostname, user, port):
    if len([k['err'] for k in array_out_err if len(k['err']) == 1]):
        log.debug('all operations deleting disk {} for domain {} runned ok'.format(disk_path, id_domain))

-def launch_delete_media(action,hostname,user,port):
+def launch_killall_curl(hostname, user, port):
+    ssh_commands = ['killall curl']
+    try:
+        array_out_err = execute_commands(hostname,
+                                         ssh_commands=ssh_commands,
+                                         user=user,
+                                         port=port)
+        out = array_out_err[0]['out']
+        err = array_out_err[0]['err']
+        logs.downloads.info(f'killed all curl processes in hypervisor {hostname}: {out} {err}')
+        return True
+    except Exception as e:
+        logs.downloads.error(f'killing all curl processes in hypervisor {hostname} failed: {e}')
+
+def launch_delete_media(action, hostname, user, port, final_status='Deleted'):
    array_out_err = execute_commands(hostname,
                                     ssh_commands=action['ssh_commands'],
                                     user=user,
@@ -121,7 +135,10 @@ def launch_delete_media(action, hostname, user, port, final_status='Deleted'):
        return False
    # ls of the file after deleted failed, has deleted ok
    elif len(array_out_err[2]['err']) > 0:
-        update_status_media_from_path(path, 'Deleted')
+        if final_status == 'DownloadFailed':
+            update_status_media_from_path(path, final_status)
+        else:
+            update_status_media_from_path(path, 'Deleted')
        return True
    else:
        log.error('failed deleting media {}'.format(id_media))
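The new killall_curl branch follows the engine's action-queue pattern: producer threads put a plain dict describing the work onto a per-hypervisor queue, and each hypervisor worker thread dispatches on `action['type']`. A condensed sketch of that contract (simplified stand-ins, not the real classes):

```python
import queue

# One work queue per hypervisor id (stand-in for manager.q.workers).
workers: dict[str, queue.Queue] = {'hyper1': queue.Queue()}

def enqueue_killall_curl(hyp_id: str) -> None:
    # Producers only enqueue a description of the work...
    workers[hyp_id].put({'type': 'killall_curl'})

def dispatch_one(hyp_id: str) -> None:
    # ...and the hypervisor's worker thread executes it over ssh.
    action = workers[hyp_id].get(timeout=10)  # raises queue.Empty on timeout
    if action['type'] == 'killall_curl':
        pass  # run `killall curl` on the hypervisor
    elif action['type'] == 'delete_media':
        pass  # run action['ssh_commands'], then apply action['final_status']
```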
@@ -147,10 +164,13 @@ def launch_action_disk(action, hostname, user, port, from_scratch=False):
            list_backing_chain = extract_list_backing_chain(out_cmd_backing_chain)

            if id_domain is not False:
+                update_domain_parents(id_domain)
                update_disk_backing_chain(id_domain, index_disk, disk_path, list_backing_chain)

        ##INFO TO DEVELOPER
        # now starting paused can be called
        if id_domain is not False:
+            # update parents if any
+            #update_domain_parents(id_domain)
            update_domain_status('CreatingDomain', id_domain, None,
                                 detail='new disk created, now go to creating desktop and testing if desktop start')
        else:
@@ -235,6 +255,8 @@ def launch_action_create_template_disk(action, hostname, user, port):
                                      new_template=True,
                                      list_backing_chain_template=backing_chain_template)

+        # disk created, update parents and status
+        #update_domain_parents(id_domain)
        update_domain_status(status='TemplateDiskCreated',
                             id_domain=id_domain,
                             hyp_id=False,
diff --git a/src/isard.conf.docker b/src/isard.conf.docker
index 96d041e0a..b6c409679 100644
--- a/src/isard.conf.docker
+++ b/src/isard.conf.docker
@@ -1,5 +1,5 @@
 [RETHINKDB]
-HOST: rethinkdb
+HOST: isard-database
 PORT: 28015
 DBNAME: isard
diff --git a/src/webapp/__init__.py b/src/webapp/__init__.py
index 50f238dc2..025e317ce 100644
--- a/src/webapp/__init__.py
+++ b/src/webapp/__init__.py
@@ -55,7 +55,6 @@
 else:
    log.info('Debug mode: {}'.format(app.debug))

-
 '''
 Scheduler
 '''
@@ -92,10 +91,6 @@ def send_bower(path):
 def send_font_linux(path):
    return send_from_directory(os.path.join(app.root_path, 'bower_components/font-linux/assets'), path)

-#~ @app.route('/socket.io')
-#~ def send_socketio(path):
-    #~ return send_from_directory(os.path.join(app.root_path, 'bower_components/socket.io-client/lib/socket.js'), path)
-
 @app.route('/isard_dist/<path:path>')
 def send_isardist(path):
    return send_from_directory(os.path.join(app.root_path, 'isard_dist'), path)
@@ -115,27 +110,22 @@ def internal_error(error):
 '''
 Import all views
 '''
-#~ if app.config['wizard']==1:
-    #~ from .views import WizardViews
-#~ else:
-if True:
-    from .views import LoginViews
-    from .views import DesktopViews
-    from .views import TemplateViews
-    from .views import MediaViews
-    from .views import AllowedsViews
-    #from .views import ClassroomViews
-    from .views import ProfileViews
-    from .views import AboutViews
-
-    from .admin.views import AdminViews
-    from .admin.views import AdminUsersViews
-    from .admin.views import AdminDomainsViews
-    from .admin.views import AdminMediaViews
-    from .admin.views import AdminHypersViews
-    #from .admin.views import ClassroomViews
-    from .admin.views import AdminGraphsViews
-    from .admin.views import UpdatesViews
+from .views import LoginViews
+from .views import DesktopViews
+from .views import TemplateViews
+from .views import MediaViews
+from .views import AllowedsViews
+from .views import ProfileViews
+from .views import AboutViews
+
+from .admin.views import AdminViews
+from .admin.views import AdminUsersViews
+from .admin.views import AdminDomainsViews
+from .admin.views import AdminMediaViews
+from .admin.views import AdminHypersViews
+from .admin.views import AdminGraphsViews
+from .admin.views import UpdatesViews
diff --git a/src/webapp/admin/views/AdminDomainsViews.py b/src/webapp/admin/views/AdminDomainsViews.py
index 1b7a31769..53fed7a3f 100644
--- a/src/webapp/admin/views/AdminDomainsViews.py
+++ b/src/webapp/admin/views/AdminDomainsViews.py
@@ -80,44 +80,25 @@ def admin_domains_xml(id):
 @login_required
 @isAdmin
 def admin_domains_events(id):
-    # ~ if request.method == 'POST':
-        # ~ res=app.adminapi.update_table_dict('domains',id,request.get_json(force=True))
-        # ~ if res:
-            # ~ return json.dumps(res), 200, {'ContentType': 'application/json'}
-        # ~ else:
-            # ~ return json.dumps(res), 500, {'ContentType': 'application/json'}
    return json.dumps(app.isardapi.get_domain_last_events(id)), 200, {'ContentType': 'application/json'}
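Both admin endpoints here return plain `json.dumps` payloads with an explicit ContentType header. A quick illustration of consuming one of them (host, domain id and session value are hypothetical):

```python
import requests

# Hypothetical values; the session cookie must belong to an admin user.
base = 'https://isard.example.com'
resp = requests.get(f'{base}/admin/domains/events/_admin_desktop1',
                    cookies={'session': '...'}, verify=False)
events = resp.json()  # the last hypervisor events recorded for the domain
```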
@app.route('/admin/domains/messages/', methods=['GET']) @login_required @isAdmin def admin_domains_messages(id): - # ~ if request.method == 'POST': - # ~ res=app.adminapi.update_table_dict('domains',id,request.get_json(force=True)) - # ~ if res: - # ~ return json.dumps(res), 200, {'ContentType': 'application/json'} - # ~ else: - # ~ return json.dumps(res), 500, {'ContentType': 'application/json'} return json.dumps(app.isardapi.get_domain_last_messages(id)), 200, {'ContentType': 'application/json'} -''' -VIRT BUILDER TESTS (IMPORT NEW BUILDERS?) -''' -@app.route('/admin/domains/virtrebuild') -@login_required -@isAdmin -def admin_domains_get_builders(): - #~ import subprocess - #~ command_output=subprocess.getoutput(['virt-builder --list']) - #~ blist=[] - #~ for l in command_output.split('\n'): - #~ blist.append({'dwn':False,'id':l[0:24].strip(),'arch':l[25:35].strip(),'name':l[36:].strip()}) - #~ app.adminapi.cmd_virtbuilder('cirros-0.3.1','/isard/cirros.qcow2','1') - app.adminapi.update_virtbuilder() - app.adminapi.update_virtinstall() - #~ images=app.adminapi.get_admin_table('domains_virt_builder') - return json.dumps(''), 200, {'ContentType': 'application/json'} +# ~ ''' +# ~ VIRT BUILDER TESTS (IMPORT NEW BUILDERS?) +# ~ ''' +# ~ @app.route('/admin/domains/virtrebuild') +# ~ @login_required +# ~ @isAdmin +# ~ def admin_domains_get_builders(): + # ~ app.adminapi.update_virtbuilder() + # ~ app.adminapi.update_virtinstall() + # ~ return json.dumps(''), 200, {'ContentType': 'application/json'} diff --git a/src/webapp/admin/views/AdminViews.py b/src/webapp/admin/views/AdminViews.py index 1fcaa1c3c..c44054f09 100644 --- a/src/webapp/admin/views/AdminViews.py +++ b/src/webapp/admin/views/AdminViews.py @@ -132,7 +132,8 @@ def admin_config_update(): dict['disposable_desktops'].pop('id',None) dict['disposable_desktops']['active']=False if 'active' not in dict['disposable_desktops'] else True if app.adminapi.update_table_dict('config',1,dict): - return json.dumps('Updated'), 200, {'ContentType':'application/json'} + # ~ return json.dumps('Updated'), 200, {'ContentType':'application/json'} + return render_template('admin/pages/config.html',nav="Config") return json.dumps('Could not update.'), 500, {'ContentType':'application/json'} @app.route('/admin/disposable/add', methods=['POST']) diff --git a/src/webapp/admin/views/UpdatesViews.py b/src/webapp/admin/views/UpdatesViews.py index 9a2732b9f..2f254921d 100644 --- a/src/webapp/admin/views/UpdatesViews.py +++ b/src/webapp/admin/views/UpdatesViews.py @@ -44,6 +44,14 @@ def admin_updates_register(): log.error('Error registering client: '+str(e)) #~ return False return redirect(url_for('admin_updates')) + +@app.route('/admin/updates_reload', methods=['POST']) +@login_required +@isAdmin +def admin_updates_reload(): + if request.method == 'POST': + u.reload_updates() + return redirect(url_for('admin_updates')) @app.route('/admin/updates/', methods=['GET']) @login_required diff --git a/src/webapp/config/populate.py b/src/webapp/config/populate.py index b0d360046..7290f3238 100644 --- a/src/webapp/config/populate.py +++ b/src/webapp/config/populate.py @@ -15,7 +15,9 @@ from ..lib.admin_api import Certificates class Populate(object): - def __init__(self): + def __init__(self,dreg): + self.register_code=dreg['resources']['code'] + self.register_url=dreg['resources']['url'] self.cfg=load_config() try: self.conn = r.connect( self.cfg['RETHINKDB_HOST'],self.cfg['RETHINKDB_PORT'],self.cfg['RETHINKDB_DB']).repl() @@ -117,10 +119,10 @@ def config(self): 
'timeout_between_retries_hyp_is_alive': 1, 'retries_hyp_is_alive': 3 }}, - 'grafana':{'active':False,'url':'http://isard-grafana','web_port':80,'carbon_port':2003,'graphite_port':3000}, + 'grafana':{'active':False,'url':'','hostname':'isard-grafana','carbon_port':2004,"interval": 5}, 'version':0, - 'resources': {'code':False, - 'url':'http://www.isardvdi.com:5050'} + 'resources': {'code':self.register_code, + 'url':self.register_url} }], conflict='update').run()) log.info("Table config populated with defaults.") return True diff --git a/src/webapp/config/upgrade.py b/src/webapp/config/upgrade.py index ac4beb3c7..607ac0f8b 100644 --- a/src/webapp/config/upgrade.py +++ b/src/webapp/config/upgrade.py @@ -17,7 +17,7 @@ ''' Update to new database release version when new code version release ''' -release_version = 5 +release_version = 6 tables=['config','hypervisors','hypervisors_pools','domains','media'] @@ -78,9 +78,6 @@ def config(self,version): table='config' d=r.table(table).get(1).run() log.info('UPGRADING '+table+' TABLE TO VERSION '+str(version)) - if version == 5: - d['engine']['log']['log_level'] = 'WARNING' - r.table(table).update(d).run() if version == 1: ''' CONVERSION FIELDS PRE CHECKS ''' @@ -126,10 +123,62 @@ def config(self,version): except Exception as e: log.error('Could not update table '+table+' remove fields for db version '+version+'!') log.error('Error detail: '+str(e)) - - return True + if version == 5: + d['engine']['log']['log_level'] = 'WARNING' + r.table(table).update(d).run() + if version == 6: + + ''' CONVERSION FIELDS PRE CHECKS ''' + try: + url=d['engine']['grafana']['url'] + except: + url="" + try: + if not self.check_done( d, + [], + ['engine']): + ##### CONVERSION FIELDS + d['engine']['grafana']={"active": False , + "carbon_port": 2004 , + "interval": 5, + "hostname": "isard-grafana", + "url": url} + r.table(table).update(d).run() + except Exception as e: + log.error('Could not update table '+table+' conversion fields for db version '+version+'!') + log.error('Error detail: '+str(e)) + + # ~ ''' NEW FIELDS PRE CHECKS ''' + # ~ try: + # ~ if not self.check_done( d, + # ~ ['resources','voucher_access',['engine','api','token']], + # ~ []): + # ~ ##### NEW FIELDS + # ~ self.add_keys(table, [ + # ~ {'resources': { 'code':False, + # ~ 'url':'http://www.isardvdi.com:5050'}}, + # ~ {'voucher_access':{'active':False}}, + # ~ {'engine':{'api':{ "token": "fosdem", + # ~ "url": 'http://isard-engine', + # ~ "web_port": 5555}}}]) + # ~ except Exception as e: + # ~ log.error('Could not update table '+table+' new fields for db version '+version+'!') + # ~ log.error('Error detail: '+str(e)) + + ''' REMOVE FIELDS PRE CHECKS ''' + try: + if not self.check_done( d, + [], + ['grafana']): + #### REMOVE FIELDS + self.del_keys(table,['grafana']) + except Exception as e: + log.error('Could not update table '+table+' remove fields for db version '+version+'!') + log.error('Error detail: '+str(e)) + return True + ''' HYPERVISORS TABLE UPGRADES ''' diff --git a/src/webapp/lib/admin_api.py b/src/webapp/lib/admin_api.py index f5da9bd11..30726b316 100644 --- a/src/webapp/lib/admin_api.py +++ b/src/webapp/lib/admin_api.py @@ -149,11 +149,8 @@ def insert_or_update_table_dict(self, table, dict): return r.table(table).insert(dict, conflict='update').run(db.conn) def update_table_dict(self, table, id, dict): - # ~ with app.app_context(): - # ~ print(table) - # ~ print(id) - # ~ print(dict) - return self.check(r.table(table).get(id).update(dict).run(db.conn), 'replaced') + with 
app.app_context(): + return self.check(r.table(table).get(id).update(dict).run(db.conn), 'replaced') ''' USERS @@ -221,7 +218,7 @@ def user_edit(self,user): # ~ d': 'prova', 'password': 'prova', 'name': 'prova', # ~ 'quota': {'hardware': {'vcpus': 1, 'memory': 1000}, # ~ 'domains': {'templates': 1, 'running': 1, 'isos': 1, 'desktops': 1}}} - p = Password() + # ~ p = Password() #### Removed kind. Kind cannot be modified, so the update will #### not interfere with this field usr = {'active': True, @@ -240,6 +237,11 @@ def user_edit(self,user): user['quota']['domains']={**qdomains, **user['quota']['domains']} return self.check(r.table('users').update(user).run(db.conn),'replaced') + def user_passwd(self,user): + p = Password() + usr = {'password': p.encrypt(user['password'])} + return self.check(r.table('users').update(usr).run(db.conn),'replaced') + def user_toggle_active(self,id): with app.app_context(): is_active = not r.table('users').get(id).pluck('active').run(db.conn)['active'] diff --git a/src/webapp/lib/api.py b/src/webapp/lib/api.py index c49af43ee..5ca20c712 100644 --- a/src/webapp/lib/api.py +++ b/src/webapp/lib/api.py @@ -37,69 +37,60 @@ def __init__(self): #~ GENERIC def check(self,dict,action): - #~ These are the actions: - #~ {u'skipped': 0, u'deleted': 1, u'unchanged': 0, u'errors': 0, u'replaced': 0, u'inserted': 0} + ''' + These are the actions: + {u'skipped': 0, u'deleted': 1, u'unchanged': 0, u'errors': 0, u'replaced': 0, u'inserted': 0} + ''' if dict[action]: return True if not dict['errors']: return True return False - #~ def update_desktop_status(self,user,data,remote_addr): - #~ try: - #~ if data['name']=='status': - #~ if data['value']=='Stopping': - #~ if app.isardapi.update_table_value('domains', data['pk'], data['name'], data['value']): - #~ return json.dumps({'title':'Desktop stopping success','text':'Desktop '+data['pk']+' will be stopped','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} - #~ else: - #~ return json.dumps({'title':'Desktop stopping error','text':'Desktop '+data['pk']+' can\'t be stopped now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} - #~ if data['value']=='Deleting': - #~ if app.isardapi.update_table_value('domains', data['pk'], data['name'], data['value']): - #~ return json.dumps({'title':'Desktop deleting success','text':'Desktop '+data['pk']+' will be deleted','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} - #~ else: - #~ return json.dumps({'title':'Desktop deleting error','text':'Desktop '+data['pk']+' can\'t be deleted now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} - #~ if data['value']=='Starting': - #~ if float(app.isardapi.get_user_quotas(current_user.username)['rqp']) >= 100: - #~ return json.dumps({'title':'Quota exceeded','text':'Desktop '+data['pk']+' can\'t be started because you have exceeded quota','icon':'warning','type':'warning'}), 500, {'ContentType':'application/json'} - #~ self.auto_interface_set(user,data['pk'],remote_addr) - #~ if app.isardapi.update_table_value('domains', data['pk'], data['name'], data['value']): - #~ return json.dumps({'title':'Desktop starting success','text':'Desktop '+data['pk']+' will be started','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} - #~ else: - #~ return json.dumps({'title':'Desktop starting error','text':'Desktop '+data['pk']+' can\'t be started now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} - #~ return 
json.dumps({'title':'Method not allowd','text':'Desktop '+data['pk']+' can\'t be started now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} - #~ except Exception as e: - #~ print('Error updating desktop status for domain '+data['pk']+': '+str(e)) - #~ return json.dumps({'title':'Desktop starting error','text':'Desktop '+data['pk']+' can\'t be started now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} - def update_table_status(self,user,table,data,remote_addr): item = table[:-1].capitalize() + with app.app_context(): + dom = r.table('domains').get(data['pk']).pluck('status','name').run(db.conn) try: if data['name']=='status': if data['value']=='DownloadAborting': - if app.isardapi.update_table_value(table, data['pk'], data['name'], data['value']): - return json.dumps({'title':item+' aborting success','text':item+' '+data['pk']+' will be aborted','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} + if dom['status'] in ['Downloading']: + if app.isardapi.update_table_value(table, data['pk'], data['name'], data['value']): + return json.dumps({'title':item+' aborting success','text':item+' '+dom['name']+' will be aborted','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} + else: + return json.dumps({'title':item+' aborting error','text':item+' '+dom['name']+' can\'t be aborted. Something went wrong!','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} + else: + return json.dumps({'title':item+' aborting error','text':item+' '+dom['name']+' can\'t be aborted while not Downloading','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} if data['value']=='Stopping': - if app.isardapi.update_table_value(table, data['pk'], data['name'], data['value']): - return json.dumps({'title':item+' stopping success','text':item+' '+data['pk']+' will be stopped','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} + if dom['status'] in ['Started']: + if app.isardapi.update_table_value(table, data['pk'], data['name'], data['value']): + return json.dumps({'title':False,'text':item+' '+dom['name']+' will be stopped','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} + else: + return json.dumps({'title':item+' stopping error','text':item+' '+dom['name']+' can\'t be stopped. 
Something went wrong!','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} else: - return json.dumps({'title':item+' stopping error','text':item+' '+data['pk']+' can\'t be stopped now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} + return json.dumps({'title':item+' stopping error','text':item+' '+dom['name']+' can\'t be stopped while not Started','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} if data['value']=='Deleting': - if app.isardapi.update_table_value(table, data['pk'], data['name'], data['value']): - return json.dumps({'title':item+' deleting success','text':item+' '+data['pk']+' will be deleted','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} + if dom['status'] in ['Stopped','Failed','DownloadFailed']: + if app.isardapi.update_table_value(table, data['pk'], data['name'], data['value']): + return json.dumps({'title':item+' deleting success','text':item+' '+dom['name']+' will be deleted','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} + else: + return json.dumps({'title':item+' deleting error','text':item+' '+dom['name']+' can\'t be deleted. Something went wrong!','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} else: - return json.dumps({'title':item+' deleting error','text':item+' '+data['pk']+' can\'t be deleted now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} + return json.dumps({'title':item+' deleting error','text':item+' '+dom['name']+' can\'t be deleted while not Stopped or Failed','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} if data['value']=='Starting': - if float(app.isardapi.get_user_quotas(current_user.username)['rqp']) >= 100: - return json.dumps({'title':'Quota exceeded','text':item+' '+data['pk']+' can\'t be started because you have exceeded quota','icon':'warning','type':'warning'}), 500, {'ContentType':'application/json'} - self.auto_interface_set(user,data['pk'],remote_addr) - if app.isardapi.update_table_value(table, data['pk'], data['name'], data['value']): - return json.dumps({'title':item+' starting success','text':item+' '+data['pk']+' will be started','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} + if dom['status'] in ['Stopped','Failed']: + if float(app.isardapi.get_user_quotas(current_user.username)['rqp']) >= 100: + return json.dumps({'title':'Quota exceeded','text':item+' '+dom['name']+' can\'t be started because you have exceeded quota','icon':'warning','type':'warning'}), 500, {'ContentType':'application/json'} + self.auto_interface_set(user,data['pk'],remote_addr) + if app.isardapi.update_table_value(table, data['pk'], data['name'], data['value']): + return json.dumps({'title':False,'text':item+' '+dom['name']+' will be started','icon':'success','type':'info'}), 200, {'ContentType':'application/json'} + else: + return json.dumps({'title':item+' starting error','text':item+' '+dom['name']+' can\'t be started. 
Something went wrong!','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} else: - return json.dumps({'title':item+' starting error','text':item+' '+data['pk']+' can\'t be started now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} - return json.dumps({'title':'Method not allowd','text':item+' '+data['pk']+' can\'t be started now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} + return json.dumps({'title':item+' starting error','text':item+' '+dom['name']+' can\'t be started while not Stopped or Failed','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} + return json.dumps({'title':'Method not allowed','text':'That action is not allowed!','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} except Exception as e: - log.error('Error updating status for '+data['pk']+': '+str(e)) - return json.dumps({'title':item+' starting error','text':item+' '+data['pk']+' can\'t be started now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} - + log.error('Error updating status for '+dom['name']+': '+str(e)) + return json.dumps({'title':item+' starting error','text':item+' '+dom['name']+' can\'t be started now','icon':'warning','type':'error'}), 500, {'ContentType':'application/json'} def auto_interface_set(self,user,id, remote_addr): with app.app_context(): @@ -170,7 +161,6 @@ def add_listOfDicts2table(self,list,table): return False def show_disposable(self,client_ip): - # ~ return False disposables_config=self.config['disposable_desktops'] if disposables_config['active']: with app.app_context(): @@ -183,8 +173,7 @@ def show_disposable(self,client_ip): ''' MEDIA ''' - def get_user_media(self, user): #, filterdict=False): - #~ if not filterdict: filterdict={'kind': 'desktop'} + def get_user_media(self, user): with app.app_context(): media=list(r.table('media').get_all(user, index='user').run(db.conn)) return media @@ -193,25 +182,9 @@ def get_media_installs(self): with app.app_context(): data=r.table('virt_install').run(db.conn) return self.f.table_values_bstrap(data) - #~ if pluck and not id: - #~ if order: - #~ data=r.table(table).order_by(order).pluck(pluck).run(db.conn) - #~ return self.f.table_values_bstrap(data) if flatten else list(data) - #~ else: - #~ data=r.table(table).pluck(pluck).run(db.conn) - #~ return self.f.table_values_bstrap(data) if flatten else list(data) - #~ if pluck and id: - #~ data=r.table(table).get(id).pluck(pluck).run(db.conn) - #~ return self.f.flatten_dict(data) if flatten else data - #~ if order: - #~ data=r.table(table).order_by(order).run(db.conn) - #~ return self.f.table_values_bstrap(data) if flatten else list(data) - #~ else: - #~ data=r.table(table).run(db.conn) - #~ return self.f.table_values_bstrap(data) if flatten else list(data) - - -#~ STATUS + ''' + STATUS + ''' def get_domain_last_messages(self, id): with app.app_context(): return r.table('domains_status').get_all(id, index='name').order_by(r.desc('when')).pluck('when',{'status':['state','state_reason']}).limit(10).run(db.conn) @@ -220,7 +193,9 @@ def get_domain_last_events(self, id): with app.app_context(): return r.table('hypervisors_events').get_all(id, index='domain').order_by(r.desc('when')).limit(10).run(db.conn) - + ''' + USER + ''' def get_user(self, user): with app.app_context(): user=self.f.flatten_dict(r.table('users').get(user).run(db.conn)) @@ -230,7 +205,6 @@ def get_user(self, user): def get_user_domains(self, user, filterdict=False): if not 
filterdict: filterdict={'kind': 'desktop'} with app.app_context(): - # ~ domains=self.f.table_values_bstrap(r.table('domains').get_all(user, index='user').filter(filterdict).without('xml').run(db.conn)) domains=list(r.table('domains').get_all(user, index='user').filter(filterdict).without('xml','history_domain','allowed').run(db.conn)) return domains @@ -246,8 +220,6 @@ def get_category_domains(self, user, filterdict=False): domains=self.f.table_values_bstrap(r.table('domains').get_all(category, index='category').filter(filterdict).without('xml').run(db.conn)) return domains - - def get_group_users(self, group,pluck=''): with app.app_context(): users=list(r.table('users').get_all(group, index='group').order_by('username').pluck(pluck).run(db.conn)) @@ -256,7 +228,6 @@ def get_group_users(self, group,pluck=''): def get_domain(self, id, human_size=False, flatten=True): #~ Should verify something??? with app.app_context(): - domain = r.table('domains').get(id).without('xml','history_domain','progress').run(db.conn) try: if flatten: @@ -265,17 +236,14 @@ def get_domain(self, id, human_size=False, flatten=True): domain['hardware-memory']=self.human_size(domain['hardware-memory'] * 1000) if 'disks_info' in domain: for i,dict in enumerate(domain['disks_info']): - #~ print(dict) for key in dict.keys(): if 'size' in key: domain['disks_info'][i][key]=self.human_size(domain['disks_info'][i][key]) else: - # This is not used and will do nothing as we should implement a recursive function to look for all the nested 'size' fields + ''' This is not used and will do nothing as we should implement a recursive function to look for all the nested 'size' fields ''' if human_size: domain['hardware']['memory']=self.human_size(domain['hardware']['memory'] * 1000) if 'disks_info' in domain: - #~ import pprint - #~ pprint.pprint(domain['disks_info']) for i,dict in enumerate(domain['disks_info']): for key in dict.keys(): if 'size' in key: @@ -284,8 +252,6 @@ def get_domain(self, id, human_size=False, flatten=True): exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] log.error(exc_type, fname, exc_tb.tb_lineno) - log.error('DomainsStatusThread error:'+str(e)) - log.error('get_domain: '+str(e)) return domain def get_domain_media(self,id): @@ -298,7 +264,7 @@ def get_domain_media(self,id): iso=r.table('media').get(m['id']).pluck('id','name').run(db.conn) media['isos'].append(iso) except: - # Media does not exist + ''' Media does not exist ''' None if 'floppies' in domain_cd and domain_cd['floppies'] is not []: for m in domain_cd['floppies']: @@ -306,12 +272,12 @@ def get_domain_media(self,id): fd=r.table('media').get(m['id']).pluck('id','name').run(db.conn) media['floppies'].append(fd) except: - # media does not exist + ''' Media does not exist ''' None return media def user_hardware_quota(self, user, human_size=False, flatten=True): - #~ Should verify something??? + ''' Should verify something??? 
''' with app.app_context(): domain = r.table('users').get(user).run(db.conn) try: @@ -324,7 +290,7 @@ def user_hardware_quota(self, user, human_size=False, flatten=True): if 'size' in key: domain['disks_info'][i][key]=self.human_size(domain['disks_info'][i][key]) else: - # This is not used and will do nothing as we should implement a recursive function to look for all the nested 'size' fields + ''' This is not used and will do nothing as we should implement a recursive function to look for all the nested 'size' fields ''' if human_size: domain['hardware']['memory']=self.human_size(domain['hardware']['memory'] * 1000) for i,dict in enumerate(domain['disks_info']): @@ -332,9 +298,9 @@ def user_hardware_quota(self, user, human_size=False, flatten=True): if 'size' in key: domain['disks_info'][i][key]=self.human_size(domain['disks_info'][i][key]) except Exception as e: - log.error('get_domain: '+str(e)) - #~ import pprint - #~ pprint.pprint(domain) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + log.error(exc_type, fname, exc_tb.tb_lineno) return domain def get_backing_ids(self,id): @@ -347,8 +313,9 @@ def get_backing_ids(self,id): try: idchain.append(list(r.table("domains").filter(lambda disks: disks['hardware']['disks'][0]['file']==f).pluck('id','name').run(db.conn))[0]) except Exception as e: - log.error('get_backing_ids:'+str(e)) - #~ print(e) + exc_type, exc_obj, exc_tb = sys.exc_info() + fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] + log.error(exc_type, fname, exc_tb.tb_lineno) break return idchain diff --git a/src/webapp/lib/isardScheduler.py b/src/webapp/lib/isardScheduler.py index d74624f59..6781097bc 100644 --- a/src/webapp/lib/isardScheduler.py +++ b/src/webapp/lib/isardScheduler.py @@ -8,7 +8,7 @@ from decimal import Decimal import random, queue from threading import Thread -import time +import time, pytz from webapp import app import rethinkdb as r #~ from flask import current_app @@ -43,7 +43,7 @@ def __init__(self): # ~ port=app.config['RETHINKDB_PORT'], # ~ auth_key=app.config['RETHINKDB_AUTH']) #>>>>>>> fe171dc30ddd8a2dabafa7b2085cbb60e6432c35 - self.scheduler = BackgroundScheduler() + self.scheduler = BackgroundScheduler(timezone=pytz.timezone('UTC')) self.scheduler.add_jobstore('rethinkdb',self.rStore, database='isard', table='scheduler_jobs',host=app.config['RETHINKDB_HOST'], port=app.config['RETHINKDB_PORT'], auth_key=app.config['RETHINKDB_AUTH']) diff --git a/src/webapp/lib/isardSocketio.py b/src/webapp/lib/isardSocketio.py index 9fa650685..7b0b508cc 100644 --- a/src/webapp/lib/isardSocketio.py +++ b/src/webapp/lib/isardSocketio.py @@ -186,8 +186,8 @@ def __init__(self): def run(self): with app.app_context(): - for c in r.table('domains').get_all(r.args(['Downloading','Downloaded']),index='status').pluck('id','name','description','icon','progress','status','user').merge({'table':'domains'}).changes(include_initial=False).union( - r.table('media').get_all(r.args(['DownloadStarting','Downloading','Downloaded']),index='status').merge({'table':'media'}).changes(include_initial=False)).run(db.conn): + for c in r.table('domains').get_all(r.args(['Downloaded', 'DownloadFailed','DownloadStarting', 'Downloading', 'DownloadAborting','ResetDownloading']),index='status').pluck('id','name','description','icon','progress','status','user').merge({'table':'domains'}).changes(include_initial=False).union( + r.table('media').get_all(r.args(['Deleting', 'Deleted', 'Downloaded', 'DownloadFailed', 'DownloadStarting', 
'Downloading', 'Download', 'DownloadAborting','ResetDownloading']),index='status').merge({'table':'media'}).changes(include_initial=False)).run(db.conn): if self.stop==True: break try: if c['new_val'] is None: @@ -365,7 +365,7 @@ def __init__(self): def run(self): with app.app_context(): for c in r.table('backups').merge({'table':'backups'}).changes(include_initial=False).union( - r.table('scheduler_jobs').without('job_state').merge({'table':'scheduler_jobs'}).changes(include_initial=False)).union( + r.table('scheduler_jobs').has_fields('name').without('job_state').merge({'table':'scheduler_jobs'}).changes(include_initial=False)).union( r.table('disposables').merge({'table':'disposables'}).changes(include_initial=False)).run(db.conn): if self.stop==True: break try: @@ -544,6 +544,27 @@ def socketio_hyper_domains_stop(data): info, namespace='/sio_admins', room='hyper') + +@socketio.on('hyperpool_edit', namespace='/sio_admins') +def socketio_hyperpool_edit(form_data): + if current_user.role == 'admin': + data=app.isardapi.f.unflatten_dict(form_data) + res=app.adminapi.update_table_dict('hypervisors_pools','default',{'viewer':{'domain':data['viewer']['domain']}}) + + if res is True: + info=json.dumps({'result':True,'title':'Edit hypervisor pool','text':'Hypervisor pool '+'default'+' has been edited.','icon':'success','type':'success'}) + else: + info=json.dumps({'result':False,'title':'Edit hypervisor pool','text':'Hypervisor pool'+'default'+' can\'t be edited now.','icon':'warning','type':'error'}) + socketio.emit('add_form_result', + info, + namespace='/sio_admins', + room='hyper') + else: + info=json.dumps({'result':False,'title':'Hypervisor pool edit error','text':'Hypervisor pool should have at least one capability!','icon':'warning','type':'error'}) + socketio.emit('result', + info, + namespace='/sio_admins', + room='hyper') ''' USERS @@ -566,18 +587,29 @@ def socketio_user_add(form_data): @socketio.on('user_edit', namespace='/sio_admins') def socketio_user_edit(form_data): if current_user.role == 'admin': - # ~ create_dict=app.isardapi.f.unflatten_dict(form_data) - # ~ print(create_dict) res=app.adminapi.user_edit(form_data) if res is True: - data=json.dumps({'result':True,'title':'New user','text':'User '+form_data['name']+' has been created...','icon':'success','type':'success'}) + data=json.dumps({'result':True,'title':'User edit','text':'User '+form_data['name']+' has been updated...','icon':'success','type':'success'}) else: - data=json.dumps({'result':False,'title':'New user','text':'User '+form_data['name']+' can\'t be created. 
Maybe it already exists!','icon':'warning','type':'error'}) + data=json.dumps({'result':False,'title':'User edit','text':'User '+form_data['name']+' can\'t be updated!','icon':'warning','type':'error'}) socketio.emit('add_form_result', data, namespace='/sio_admins', room='users') +@socketio.on('user_passwd', namespace='/sio_admins') +def socketio_user_passwd(form_data): + if current_user.role == 'admin': + res=app.adminapi.user_passwd(form_data) + if res is True: + data=json.dumps({'result':True,'title':'User edit','text':'User '+form_data['name']+' has been updated...','icon':'success','type':'success'}) + else: + data=json.dumps({'result':False,'title':'User edit','text':'User '+form_data['name']+' can\'t be updated!','icon':'warning','type':'error'}) + socketio.emit('add_form_result', + data, + namespace='/sio_admins', + room='users') + @socketio.on('user_delete', namespace='/sio_admins') def socketio_user_delete(form_data): if current_user.role == 'admin': @@ -745,6 +777,14 @@ def socketio_domains_update(data): namespace='/sio_users', room='user_'+current_user.username) +@socketio.on('domain_update', namespace='/sio_admins') +def socketio_admin_domains_update(data): + remote_addr=request.headers['X-Forwarded-For'].split(',')[0] if 'X-Forwarded-For' in request.headers else request.remote_addr.split(',')[0] + socketio.emit('result', + app.isardapi.update_table_status(current_user.username, 'domains', data,remote_addr), + namespace='/sio_admins', + room='domains') + @socketio.on('domain_edit', namespace='/sio_admins') def socketio_admins_domain_edit(form_data): #~ Check if user has quota and rights to do it diff --git a/src/webapp/lib/isardUpdates.py b/src/webapp/lib/isardUpdates.py index 31ddebbdf..2db1fa997 100644 --- a/src/webapp/lib/isardUpdates.py +++ b/src/webapp/lib/isardUpdates.py @@ -9,7 +9,10 @@ class Updates(object): def __init__(self): - self.working=True + # ~ self.working=True + self.reload_updates() + + def reload_updates(self): self.updateFromConfig() self.updateFromWeb() @@ -17,10 +20,17 @@ def updateFromWeb(self): self.web={} self.kinds=['media','domains','builders','virt_install','virt_builder','videos','viewers'] + failed=0 for k in self.kinds: self.web[k]=self.getKind(kind=k) - if self.web[k] is False: - self.working=False + if self.web[k]==500: + # The id is no longer in updates server. + # We better reset it + with app.app_context(): + r.table('config').get(1).update({'resources':{'code':False}}).run(db.conn) + self.code=False + # ~ if len(self.kinds)==failed: + # ~ self.working=False def updateFromConfig(self): with app.app_context(): @@ -39,12 +49,13 @@ def is_conected(self): def is_registered(self): if self.is_conected(): - if self.working: - return self.code - else: + return self.code + # ~ if self.working: + # ~ return self.code + # ~ else: # we have an invalid code. (changes in web database?) 
- with app.app_context(): - r.table('config').get(1).update({'resources':{'code':False}}).run(db.conn) + # ~ with app.app_context(): + # ~ r.table('config').get(1).update({'resources':{'code':False}}).run(db.conn) return False def register(self): @@ -53,6 +64,7 @@ def register(self): if req.status_code==200: with app.app_context(): r.table('config').get(1).update({'resources':{'code':req.json()}}).run(db.conn) + self.code=req.json() self.updateFromConfig() self.updateFromWeb() return True @@ -66,7 +78,8 @@ def getNewKind(self,kind,username): if kind == 'viewers': return self.web[kind] web=self.web[kind] - dbb=list(r.table(kind).run(db.conn)) + with app.app_context(): + dbb=list(r.table(kind).run(db.conn)) result=[] for w in web: dict={} @@ -109,12 +122,14 @@ def getNewKindId(self,kind,username,id): w=web[0].copy() if kind == 'domains' or kind == 'media': - dbb=r.table(kind).get('_'+username+'_'+w['id']).run(db.conn) + with app.app_context(): + dbb=r.table(kind).get('_'+username+'_'+w['id']).run(db.conn) if dbb is None: w['id']='_'+username+'_'+w['id'] return w else: - dbb=r.table(kind).get(w['id']).run(db.conn) + with app.app_context(): + dbb=r.table(kind).get(w['id']).run(db.conn) if dbb is None: return w return False @@ -122,10 +137,12 @@ def getNewKindId(self,kind,username,id): def getKind(self,kind='builders'): try: - req= requests.post(self.url+'/get/'+kind+'/list', headers={'Authorization':str(self.code)},allow_redirects=False, verify=False, timeout=3) + req = requests.post(self.url+'/get/'+kind+'/list', headers={'Authorization':str(self.code)},allow_redirects=False, verify=False, timeout=3) if req.status_code==200: return req.json() #~ return True + elif req.status_code==500: + return 500 else: print('Error response code: '+str(req.status_code)+'\nDetail: '+req.json()) except Exception as e: @@ -191,7 +208,8 @@ def get_missing_resources(self,domain,username): missing_resources={'videos':[]} dom_videos=domain['create_dict']['hardware']['videos'] - sys_videos=list(r.table('videos').pluck('id').run(db.conn)) + with app.app_context(): + sys_videos=list(r.table('videos').pluck('id').run(db.conn)) sys_videos=[sv['id'] for sv in sys_videos] for v in dom_videos: if v not in sys_videos: diff --git a/src/webapp/static/admin/js/config.js b/src/webapp/static/admin/js/config.js index 14d0c6e17..c8e2da1fa 100644 --- a/src/webapp/static/admin/js/config.js +++ b/src/webapp/static/admin/js/config.js @@ -7,7 +7,6 @@ $(document).ready(function() { - api.ajax('/admin/config/','POST',{}).done(function(data) { $.each( data, function( key, value ) { if(typeof(value) === "boolean"){ @@ -19,7 +18,9 @@ $(document).ready(function() { }); }); - show_disposables() + + //~ Not using it now + //~ show_disposables() $('.btn-edit').on( 'click', function () { basekey=$(this).attr('data-panel') @@ -36,9 +37,10 @@ $(document).ready(function() { }); $('.footer-'+basekey).css('display','block'); $('[id^="btn-'+basekey+'-"]').show(); - if(basekey=='disposable_desktops'){ - activateDisposables(); - } + //~ Not using it now + //~ if(basekey=='disposable_desktops'){ + //~ activateDisposables(); + //~ } }); @@ -58,19 +60,6 @@ $(document).ready(function() { $('.footer-'+basekey).css('display','none'); $('[id^="btn-'+basekey+'-"]').hide(); }); - - //~ $('#btn-checkport').on( 'click', function (event) { - //~ event.preventDefault() - //~ api.ajax('/admin/config/checkport','POST',{'pk':data['id'],'server':$('#engine-grafana-server').value,'port':$('#engine-grafana-web_port').value}).done(function(data) { - //~ console.log(data); 
- //~ }); - //~ }); - - //~ function checkPort(){ - //~ api.ajax('/admin/config/checkport','POST',{'pk':data['id'],'server':$('#engine-grafana-server').value,'port':$('#engine-grafana-web_port').value}).done(function(data) { - //~ console.log(data); - //~ }); - //~ } $('.btn-scheduler').on( 'click', function () { $('#modalScheduler').modal({ @@ -85,30 +74,16 @@ $(document).ready(function() { socket.emit('scheduler_add',data) $("#modalAddScheduler")[0].reset(); $("#modalAdd").modal('hide'); - //~ form.parsley().validate(); - - //~ if (form.parsley().isValid()){ - //~ template=$('#modalAddDesktop #template').val(); - //~ console.log('TEMPLATE:'+template) - //~ if (template !=''){ - //~ data=$('#modalAdd').serializeObject(); - //~ console.log(data) - //~ socket.emit('domain_add',data) - //~ }else{ - //~ $('#modal_add_desktops').closest('.x_panel').addClass('datatables-error'); - //~ $('#modalAddDesktop #datatables-error-status').html('No template selected').addClass('my-error'); - //~ } - //~ } }); - - $('.btn-add-disposables').on( 'click', function () { - $('#modalDisposable').modal({ - backdrop: 'static', - keyboard: false - }).modal('show'); - setTemplates() - }); + //~ Not using it now + //~ $('.btn-add-disposables').on( 'click', function () { + //~ $('#modalDisposable').modal({ + //~ backdrop: 'static', + //~ keyboard: false + //~ }).modal('show'); + //~ setTemplates() + //~ }); $('.btn-backup').on( 'click', function () { new PNotify({ @@ -119,7 +94,7 @@ $(document).ready(function() { confirm: {confirm: true}, buttons: {closer: false,sticker: false}, history: {history: false}, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { api.ajax('/admin/backup','POST',{}).done(function(data) { }); @@ -185,7 +160,7 @@ $(document).ready(function() { confirm: {confirm: true}, buttons: {closer: false,sticker: false}, history: {history: false}, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { api.ajax('/admin/backup_remove','POST',{'pk':data['id'],}).done(function(data) { }); @@ -201,7 +176,7 @@ $(document).ready(function() { confirm: {confirm: true}, buttons: {closer: false,sticker: false}, history: {history: false}, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { api.ajax('/admin/restore','POST',{'pk':data['id'],}).done(function(data) { }); @@ -223,221 +198,130 @@ $(document).ready(function() { $("#backup-tables").append(''); $.each(data.data,function(key, value) { - $("#backup-tables").append(''); + if(value>0){ + $("#backup-tables").append(''); + } }); $('#backup-id').val(data['id']) $('#modalBackupInfo').modal({ backdrop: 'static', keyboard: false }).modal('show'); - - //~ new PNotify({ - //~ title: 'Delete backup', - //~ text: objToString(data.db), - //~ hide: false, - //~ opacity: 1, - //~ confirm: {confirm: true}, - //~ buttons: {closer: false,sticker: false}, - //~ history: {history: false}, - //~ stack: stack_center - //~ }).get().on('pnotify.confirm', function() { - //~ }).on('pnotify.cancel', function() { - //~ }); - }); + }); } }); - //~ backup_table_detail='' $('#backup-tables').on('change', function (e) { - //~ var optionSelected = $("option:selected", this); - //~ console.log(optionSelected) - var valueSelected = this.value; - //~ console.log(valueSelected+' '+$('#backup-id').val()) - api.ajax('/admin/backup_detailinfo','POST',{'pk':$('#backup-id').val(),'table':valueSelected}).done(function(data) { - //~ console.log($('#backup-id').val()) - //~ 
console.log(data) - //~ columns=[]; - //~ $.each(data[0],function(key, value) - //~ { - //~ if(key == 'id'){ - //~ columns.push({'data':key}) - //~ } - //~ }); - //~ backup_table_detail.destroy() - if ( $.fn.dataTable.isDataTable( '#backup-table-detail' ) ) { - backup_table_detail.clear().rows.add(data).draw() - }else{ - - backup_table_detail=$('#backup-table-detail').DataTable( { - data: data, - rowId: 'id', - //~ language: { - //~ "loadingRecords": 'Loading...' - //~ }, - columns: [ - { "data": "id", "width": "88px"}, - { "data": "description", "width": "88px"}, - { - "className": 'actions-control', - "orderable": false, - "data": null, - "width": "88px", - "defaultContent": '' - }, - ], - "order": [[0, 'asc']], - "columnDefs": [ { - "targets": 2, - "render": function ( data, type, full, meta ) { - if(full.new_backup_data){ - return ''; - }else{ - return '' - } - }}] - } ); - } - $('.btn-individual-restore').on('click', function (e){ - data=backup_table_detail.row( $(this).parents('tr') ).data(); - table=$('#backup-tables').val() - //~ table=$('#backup-id').val() - new PNotify({ - title: 'Restore data', - text: "Do you really want to restore row "+data.id+" to table "+table+"?", - hide: false, - opacity: 0.9, - confirm: {confirm: true}, - buttons: {closer: false,sticker: false}, - history: {history: false}, - stack: stack_center - }).get().on('pnotify.confirm', function() { - api.ajax('/admin/restore/'+table,'POST',{'data':data,}).done(function(data1) { - api.ajax('/admin/backup_detailinfo','POST',{'pk':$('#backup-id').val(),'table':table}).done(function(data2) { - data['new_backup_data']=false - dtUpdateInsert(backup_table_detail,data,false); - //~ setDomainDetailButtonsStatus(data.id, data.status); - //~ backup_table_detail.clear().rows.add(data2).draw() - }); - }); - }).on('pnotify.cancel', function() { - }); - }); - - $('.btn-bulk-restore').on('click', function(e) { - names='' - ids=[] - if(backup_table_detail.rows('.active').data().length){ - $.each(backup_table_detail.rows('.active').data(),function(key, value){ - names+=value['name']+'\n'; - ids.push(value['id']); - }); - var text = "You are about to restore these desktops:\n\n "+names - }else{ - $.each(backup_table_detail.rows({filter: 'applied'}).data(),function(key, value){ - ids.push(value['id']); - }); - var text = "You are about to restore "+backup_table_detail.rows({filter: 'applied'}).data().length+". All the desktops in list!" - } - new PNotify({ - title: 'Warning!', - text: text, - hide: false, - opacity: 0.9, - confirm: { - confirm: true - }, - buttons: { - closer: false, - sticker: false - }, - history: { - history: false - }, - stack: stack_center - }).get().on('pnotify.confirm', function() { - //~ api.ajax('/admin/mdomains','POST',{'ids':ids,'action':action}).done(function(data) { - //~ $('#mactions option[value="none"]').prop("selected",true); - //~ }); - }).on('pnotify.cancel', function() { - //~ $('#mactions option[value="none"]').prop("selected",true); - }); - - }); - - - }); - }); - - - - //~ // Stream backups_source - //~ if (!!window.EventSource) { - //~ var backups_source = new EventSource('/admin/stream/backups'); - //~ console.log('Listening backups...'); - //~ } else { - // Result to xhr polling :( - //~ } - - //~ window.onbeforeunload = function(){ - //~ backups_source.close(); - //~ }; - - //~ backups_source.addEventListener('open', function(e) { - //~ // Connection was opened. 
- //~ }, false); - - //~ backups_source.addEventListener('error', function(e) { - //~ if (e.readyState == EventSource.CLOSED) { - //~ // Connection was closed. - //~ } - - //~ }, false); - - //~ backups_source.addEventListener('New', function(e) { - //~ var data = JSON.parse(e.data); - //~ if($("#" + data.id).length == 0) { - //~ //it doesn't exist - //~ backups_table.row.add(data).draw(); - //~ new PNotify({ - //~ title: "Backup added", - //~ text: "Backups "+data.filename+" has been created", - //~ hide: true, - //~ delay: 2000, - //~ icon: 'fa fa-success', - //~ opacity: 1, - //~ type: 'success' - //~ }); - //~ }else{ - //~ //if already exists do an update (ie. connection lost and reconnect) - //~ var row = table.row('#'+data.id); - //~ backups_table.row(row).data(data); - //~ } - //~ }, false); - - //~ backups_source.addEventListener('Status', function(e) { - //~ var data = JSON.parse(e.data); - //~ var row = backups_table.row('#'+data.id); - //~ backups_table.row(row).data(data); - //~ }, false); - - //~ backups_source.addEventListener('Deleted', function(e) { - //~ var data = JSON.parse(e.data); - //~ var row = backups_table.row('#'+data.id).remove().draw(); - //~ new PNotify({ - //~ title: "Backup deleted", - //~ text: "Backup "+data.name+" has been deleted", - //~ hide: true, - //~ delay: 2000, - //~ icon: 'fa fa-success', - //~ opacity: 1, - //~ type: 'info' - //~ }); - //~ }, false); - - + var valueSelected = this.value; + api.ajax('/admin/backup_detailinfo','POST',{'pk':$('#backup-id').val(),'table':valueSelected}).done(function(data) { + if ( $.fn.dataTable.isDataTable( '#backup-table-detail' ) ) { + backup_table_detail.clear().rows.add(data).draw() + }else{ + + backup_table_detail=$('#backup-table-detail').DataTable( { + data: data, + rowId: 'id', + //~ language: { + //~ "loadingRecords": 'Loading...' + //~ }, + columns: [ + { "data": "id", "width": "88px"}, + { "data": "description", "width": "88px"}, + { + "className": 'actions-control', + "orderable": false, + "data": null, + "width": "88px", + "defaultContent": '' + }, + ], + "order": [[0, 'asc']], + "columnDefs": [ { + "targets": 2, + "render": function ( data, type, full, meta ) { + if(full.new_backup_data){ + return ''; + }else{ + return '' + } + }}] + } ); + } + $('.btn-individual-restore').on('click', function (e){ + data=backup_table_detail.row( $(this).parents('tr') ).data(); + table=$('#backup-tables').val() + new PNotify({ + title: 'Restore data', + text: "Do you really want to restore row "+data.id+" to table "+table+"?", + hide: false, + opacity: 0.9, + confirm: {confirm: true}, + buttons: {closer: false,sticker: false}, + history: {history: false}, + addclass: 'pnotify-center' + }).get().on('pnotify.confirm', function() { + api.ajax('/admin/restore/'+table,'POST',{'data':data,}).done(function(data1) { + api.ajax('/admin/backup_detailinfo','POST',{'pk':$('#backup-id').val(),'table':table}).done(function(data2) { + data['new_backup_data']=false + dtUpdateInsert(backup_table_detail,data,false); + }); + }); + }).on('pnotify.cancel', function() { + }); + }); + + // Api call to /admin/restore does not send correct data. 
+ // New function should be done at AdminViews.py + //~ $('.btn-bulk-restore').on('click', function(e) { + //~ names='' + //~ ids=[] + //~ if(backup_table_detail.rows('.active').data().length){ + //~ $.each(backup_table_detail.rows('.active').data(),function(key, value){ + //~ names+=value['name']+'\n'; + //~ ids.push(value['id']); + //~ }); + //~ var text = "You are about to restore these desktops:\n\n "+names + //~ }else{ + //~ $.each(backup_table_detail.rows({filter: 'applied'}).data(),function(key, value){ + //~ ids.push(value['id']); + //~ }); + //~ var text = "You are about to restore "+backup_table_detail.rows({filter: 'applied'}).data().length+". All the desktops in list!" + //~ } + //~ table=$('#backup-tables').val() + //~ new PNotify({ + //~ title: 'Warning!', + //~ text: text, + //~ hide: false, + //~ opacity: 0.9, + //~ confirm: { + //~ confirm: true + //~ }, + //~ buttons: { + //~ closer: false, + //~ sticker: false + //~ }, + //~ history: { + //~ history: false + //~ }, + //~ addclass: 'pnotify-center' + //~ }).get().on('pnotify.confirm', function() { + //~ api.ajax('/admin/restore/'+table,'POST',{'data':data,}).done(function(data1) { + //~ api.ajax('/admin/backup_detailinfo','POST',{'pk':$('#backup-id').val(),'table':table}).done(function(data2) { + //~ data['new_backup_data']=false + //~ dtUpdateInsert(backup_table_detail,data,false); + //~ }); + //~ }); + //~ }).on('pnotify.cancel', function() { + //~ }); + + //~ }); + + + }); + }); scheduler_table=$('#table-scheduler').DataTable({ "ajax": { @@ -482,7 +366,7 @@ $(document).ready(function() { confirm: {confirm: true}, buttons: {closer: false,sticker: false}, history: {history: false}, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { api.ajax('/admin/delete','POST',{'pk':data['id'],'table':'scheduler_jobs'}).done(function(data) { }); @@ -517,15 +401,6 @@ $(document).ready(function() { console.log('backup data received') var data = JSON.parse(data); dtUpdateInsert(backups_table,data,false); - //~ if($("#" + data.id).length == 0) { - //~ //it doesn't exist - //~ backups_table.row.add(data).draw(); - //~ }else{ - //~ //if already exists do an update (ie. connection lost and reconnect) - //~ var row = backups_table.row('#'+data.id); - //~ backups_table.row(row).data(data).invalidate(); - //~ } - //~ backups_table.draw(false); }); socket.on('backups_deleted', function(data){ @@ -618,230 +493,124 @@ $(document).ready(function() { }); }); - //~ // Stream scheduler_source - //~ if (!!window.EventSource) { - //~ var scheduler_source = new EventSource('/admin/stream/scheduler_jobs'); - //~ console.log('Listening scheduler...'); - //~ } else { - // Result to xhr polling :( - //~ } - - //~ window.onbeforeunload = function(){ - //~ scheduler_source.close(); - //~ }; - - //~ scheduler_source.addEventListener('open', function(e) { - //~ // Connection was opened. - //~ }, false); - - //~ scheduler_source.addEventListener('error', function(e) { - //~ if (e.readyState == EventSource.CLOSED) { - //~ // Connection was closed. 
- //~ } - - //~ }, false); - - //~ scheduler_source.addEventListener('New', function(e) { - //~ var data = JSON.parse(e.data); - //~ if($("#" + data.id).length == 0) { - //~ //it doesn't exist - //~ scheduler_table.row.add(data).draw(); - //~ new PNotify({ - //~ title: "Scheduler added", - //~ text: "Scheduler "+data.name+" has been created", - //~ hide: true, - //~ delay: 2000, - //~ icon: 'fa fa-success', - //~ opacity: 1, - //~ type: 'success' - //~ }); - //~ }else{ - //~ //if already exists do an update (ie. connection lost and reconnect) - //~ var row = table.row('#'+data.id); - //~ scheduler_table.row(row).data(data); - //~ } - //~ }, false); - - //~ scheduler_source.addEventListener('Deleted', function(e) { - //~ var data = JSON.parse(e.data); - //~ var row = scheduler_table.row('#'+data.id).remove().draw(); - //~ new PNotify({ - //~ title: "Scheduler deleted", - //~ text: "Scheduler "+data.name+" has been deleted", - //~ hide: true, - //~ delay: 2000, - //~ icon: 'fa fa-success', - //~ opacity: 1, - //~ type: 'info' - //~ }); - //~ }, false); - }); -function show_disposables(){ - //~ api.ajax('/admin/table/disposables/get','GET',{}).done(function(data) { - //~ $.each( data, function( key, value ) { - //~ disposables=''; - //~ nets=''; - //~ $.each( value['disposables'], function( k, v ) { - //~ disposables=disposables+', '+v['name']; - //~ }); - //~ $.each( value['nets'], function( k, v ) { - //~ nets=nets+', '+v; - //~ }); - //~ $("#table-disposables").append(''+value['name']+''+nets+''+disposables+''); - //~ }); - //~ }); - - disposables_table=$('#table-disposables').DataTable({ - "ajax": { - "url": "/admin/table/disposables/get", - "dataSrc": "" - }, - "language": { - "loadingRecords": 'Loading...' - }, - "bLengthChange": false, - "bFilter": false, - "rowId": "id", - "deferRender": true, - "columns": [ - { "data": "name"}, - { "data": "nets[, ]"}, - { "data": "disposables"}, - { - "className": 'actions-control', - "orderable": false, - "data": null, - "width": "58px", - "defaultContent": '' - //~ ' - }, - ], - "order": [[1, 'asc']], - "columnDefs": [ { - "targets": 2, - "render": function ( data, type, full, meta ) { - return renderDisposables(full); - }}] - } ); - - $('#table-disposables').find(' tbody').on( 'click', 'button', function () { - var data = disposables_table.row( $(this).parents('tr') ).data(); - if($(this).attr('id')=='btn-disposable_desktops-delete'){ - new PNotify({ - title: 'Delete disposable', - text: "Do you really want to delete disposable "+ data.name+"?", - hide: false, - opacity: 0.9, - confirm: {confirm: true}, - buttons: {closer: false,sticker: false}, - history: {history: false}, - stack: stack_center - }).get().on('pnotify.confirm', function() { - api.ajax('/admin/delete','POST',{'pk':data['id'],'table':'disposables'}).done(function(data) { - }); - }).on('pnotify.cancel', function() { - }); - } - if($(this).attr('id')=='btn-disposable_desktops-edit'){ - $('#modalDisposable').modal({ - backdrop: 'static', - keyboard: false - }).modal('show'); - //~ $("#select2-disposables").select2Sortable(); - } - }); - -} - -function renderDisposables(data){ - var return_data = new Array(); - for(var i=0;i< data['disposables'].length; i++){ - return_data.push(data['disposables'][i].name) - } - return return_data; -} - -function activateDisposables(){ - -} +//~ function show_disposables(){ + //~ // api.ajax('/admin/table/disposables/get','GET',{}).done(function(data) { + //~ // $.each( data, function( key, value ) { + //~ // disposables=''; + //~ // nets=''; + //~ // 
$.each( value['disposables'], function( k, v ) { + //~ // disposables=disposables+', '+v['name']; + //~ // }); + //~ // $.each( value['nets'], function( k, v ) { + //~ // nets=nets+', '+v; + //~ // }); + //~ // $("#table-disposables").append(''+value['name']+''+nets+''+disposables+''); + //~ // }); + //~ // }); -function setTemplates(){ - - $('#disposables').select2({ - minimumInputLength: 2, - multiple: true, - ajax: { - type: "POST", - url: '/admin/getAllTemplates', - dataType: 'json', - contentType: "application/json", - delay: 250, - data: function (params) { - return JSON.stringify({ - term: params.term, - pluck: ['id','name'] - }); - }, - processResults: function (data) { - return { - results: $.map(data, function (item, i) { - return { - text: item.name, - id: item.id - } - }) - }; - } - }, - }); -}; - - //~ modal_add_desktops = $('#modal_add_desktops').DataTable({ + //~ disposables_table=$('#table-disposables').DataTable({ //~ "ajax": { - //~ "url": "/desktops/getAllTemplates", + //~ "url": "/admin/table/disposables/get", //~ "dataSrc": "" //~ }, - - //~ "scrollY": "125px", - //~ "scrollCollapse": true, - //~ "paging": false, - - //"searching": false, //~ "language": { - //~ "loadingRecords": 'Loading...', - //~ "zeroRecords": "No matching templates found", - //~ "info": "Showing _START_ to _END_ of _TOTAL_ templates", - //~ "infoEmpty": "Showing 0 to 0 of 0 templates", - //~ "infoFiltered": "(filtered from _MAX_ total templates)" + //~ "loadingRecords": 'Loading...' //~ }, + //~ "bLengthChange": false, + //~ "bFilter": false, //~ "rowId": "id", //~ "deferRender": true, //~ "columns": [ - //~ { "data": "kind", "width": "10px", "orderable": false}, //~ { "data": "name"}, - //~ { "data": "group", "width": "10px"}, - //~ { "data": "username"} - //~ ], - //~ "order": [[0, 'asc']], - //~ "pageLength": 5, - //~ "columnDefs": [ - //~ { - //~ "targets": 0, + //~ { "data": "nets[, ]"}, + //~ { "data": "disposables"}, + //~ { + //~ "className": 'actions-control', + //~ "orderable": false, + //~ "data": null, + //~ "width": "58px", + //~ "defaultContent": '' + //~ // ' + //~ }, + //~ ], + //~ "order": [[1, 'asc']], + //~ "columnDefs": [ { + //~ "targets": 2, //~ "render": function ( data, type, full, meta ) { - //~ return renderTemplateKind(full); - //~ }}, - //~ { - //~ "targets": 1, - //~ "render": function ( data, type, full, meta ) { - //~ return renderIcon1x(full)+" "+full.name; - //~ }}, - //~ ] - - + //~ return renderDisposables(full); + //~ }}] + //~ } ); + + //~ $('#table-disposables').find(' tbody').on( 'click', 'button', function () { + //~ var data = disposables_table.row( $(this).parents('tr') ).data(); + //~ if($(this).attr('id')=='btn-disposable_desktops-delete'){ + //~ new PNotify({ + //~ title: 'Delete disposable', + //~ text: "Do you really want to delete disposable "+ data.name+"?", + //~ hide: false, + //~ opacity: 0.9, + //~ confirm: {confirm: true}, + //~ buttons: {closer: false,sticker: false}, + //~ history: {history: false}, + //~ addclass: 'pnotify-center' + //~ }).get().on('pnotify.confirm', function() { + //~ api.ajax('/admin/delete','POST',{'pk':data['id'],'table':'disposables'}).done(function(data) { + //~ }); + //~ }).on('pnotify.cancel', function() { + //~ }); + //~ } + //~ if($(this).attr('id')=='btn-disposable_desktops-edit'){ + //~ $('#modalDisposable').modal({ + //~ backdrop: 'static', + //~ keyboard: false + //~ }).modal('show'); + //~ // $("#select2-disposables").select2Sortable(); + //~ } + //~ }); + +//~ } + +//~ function renderDisposables(data){ + //~ 
var return_data = new Array(); + //~ for(var i=0;i< data['disposables'].length; i++){ + //~ return_data.push(data['disposables'][i].name) + //~ } + //~ return return_data; +//~ } + + +//~ function setTemplates(){ + + //~ $('#disposables').select2({ + //~ minimumInputLength: 2, + //~ multiple: true, + //~ ajax: { + //~ type: "POST", + //~ url: '/admin/getAllTemplates', + //~ dataType: 'json', + //~ contentType: "application/json", + //~ delay: 250, + //~ data: function (params) { + //~ return JSON.stringify({ + //~ term: params.term, + //~ pluck: ['id','name'] + //~ }); + //~ }, + //~ processResults: function (data) { + //~ return { + //~ results: $.map(data, function (item, i) { + //~ return { + //~ text: item.name, + //~ id: item.id + //~ } + //~ }) + //~ }; + //~ } + //~ }, + //~ }); +//~ }; - //~ } ); - diff --git a/src/webapp/static/admin/js/domains.js b/src/webapp/static/admin/js/domains.js index 5de6cf462..5cf97c70b 100644 --- a/src/webapp/static/admin/js/domains.js +++ b/src/webapp/static/admin/js/domains.js @@ -142,7 +142,7 @@ $(document).ready(function() { history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { api.ajax('/admin/mdomains','POST',{'ids':ids,'action':action}).done(function(data) { $('#mactions option[value="none"]').prop("selected",true); @@ -215,34 +215,36 @@ $(document).ready(function() { type: 'error' }); }else{ - api.ajax('/domains/update','POST',{'pk':data['id'],'name':'status','value':'Starting'}).done(function(data) { - }); + socket.emit('domain_update',{'pk':data['id'],'name':'status','value':'Starting'}) + //~ api.ajax('/domains/update','POST',{'pk':data['id'],'name':'status','value':'Starting'}).done(function(data) { + //~ }); } break; case 'btn-stop': - new PNotify({ - title: 'Unplug desktop warning!', - text: "It is NOT RECOMMENDED to continue and turn off desktop "+ name+".\n \ - Please, properly shut down desktop from inside viewer \n\n \ - Turn off desktop? "+ name+"?", - hide: false, - opacity: 0.9, - confirm: { - confirm: true - }, - buttons: { - closer: false, - sticker: false - }, - history: { - history: false - }, - stack: stack_center - }).get().on('pnotify.confirm', function() { - api.ajax('/domains/update','POST',{'pk':data['id'],'name':'status','value':'Stopping'}).done(function(data) { - }); - }).on('pnotify.cancel', function() { - }); + socket.emit('domain_update',{'pk':data['id'],'name':'status','value':'Stopping'}) + //~ new PNotify({ + //~ title: 'Unplug desktop warning!', + //~ text: "It is NOT RECOMMENDED to continue and turn off desktop "+ name+".\n \ + //~ Please, properly shut down desktop from inside viewer \n\n \ + //~ Turn off desktop? 
"+ name+"?", + //~ hide: false, + //~ opacity: 0.9, + //~ confirm: { + //~ confirm: true + //~ }, + //~ buttons: { + //~ closer: false, + //~ sticker: false + //~ }, + //~ history: { + //~ history: false + //~ }, + //~ addclass: 'pnotify-center' + //~ }).get().on('pnotify.confirm', function() { + //~ api.ajax('/domains/update','POST',{'pk':data['id'],'name':'status','value':'Stopping'}).done(function(data) { + //~ }); + //~ }).on('pnotify.cancel', function() { + //~ }); break; case 'btn-display': getClientViewer(data,socket); @@ -352,7 +354,7 @@ $(document).ready(function() { function actionsDomainDetail(){ $('.btn-edit').on('click', function () { - var pk=$(this).closest("div").attr("data-pk"); + var pk=$(this).closest("[data-pk]").attr("data-pk"); //~ console.log(pk) setHardwareOptions('#modalEditDesktop'); setHardwareDomainDefaults('#modalEditDesktop',pk); @@ -367,7 +369,7 @@ function actionsDomainDetail(){ }); $('.btn-xml').on('click', function () { - var pk=$(this).closest("div").attr("data-pk"); + var pk=$(this).closest("[data-pk]").attr("data-pk"); $("#modalEditXmlForm")[0].reset(); $('#modalEditXml').modal({ backdrop: 'static', @@ -380,12 +382,50 @@ function actionsDomainDetail(){ success: function(data) { var data = JSON.parse(data); - $('#xml').val(data); + $('#modalEditXmlForm #xml').val(data); } }); //~ $('#modalEdit').parsley(); //~ modal_edit_desktop_datatables(pk); }); + + $('.btn-events').on('click', function () { + var pk=$(this).closest("[data-pk]").attr("data-pk"); + $("#modalShowInfoForm")[0].reset(); + $('#modalShowInfo').modal({ + backdrop: 'static', + keyboard: false + }).modal('show'); + $('#modalShowInfoForm #id').val(pk); + $.ajax({ + type: "GET", + url:"/admin/domains/events/" + pk, + success: function(data) + { + var data = JSON.parse(data); + $('#modalShowInfoForm #xml').val(JSON.stringify(data, undefined, 4)); + } + }); + }); + + $('.btn-messages').on('click', function () { + var pk=$(this).closest("[data-pk]").attr("data-pk"); + $("#modalShowInfoForm")[0].reset(); + $('#modalShowInfo').modal({ + backdrop: 'static', + keyboard: false + }).modal('show'); + $('#modalShowInfoForm #id').val(pk); + $.ajax({ + type: "GET", + url:"/admin/domains/messages/" + pk, + success: function(data) + { + //~ var data = JSON.parse(data); + $('#modalShowInfoForm #xml').val(JSON.stringify(data, undefined, 4)); + } + }); + }); if(url=="Desktops"){ @@ -401,7 +441,7 @@ function actionsDomainDetail(){ type: 'error' }); }else{ - var pk=$(this).closest("div").attr("data-pk"); + var pk=$(this).closest("[data-pk]").attr("data-pk"); setDefaultsTemplate(pk); setHardwareOptions('#modalTemplateDesktop'); setHardwareDomainDefaults('#modalTemplateDesktop',pk); @@ -413,8 +453,8 @@ function actionsDomainDetail(){ }); $('.btn-delete').on('click', function () { - var pk=$(this).closest("div").attr("data-pk"); - var name=$(this).closest("div").attr("data-name"); + var pk=$(this).closest("[data-pk]").attr("data-pk"); + var name=$(this).closest("[data-pk]").attr("data-name"); new PNotify({ title: 'Confirmation Needed', text: "Are you sure you want to delete virtual machine: "+name+"?", @@ -430,7 +470,7 @@ function actionsDomainDetail(){ history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { api.ajax('/domains/update','POST',{'pk':pk,'name':'status','value':'Deleting'}).done(function(data) { //Should return something about the result... 
diff --git a/src/webapp/static/admin/js/hypervisors.js b/src/webapp/static/admin/js/hypervisors.js index f58c70476..f268fef0a 100644 --- a/src/webapp/static/admin/js/hypervisors.js +++ b/src/webapp/static/admin/js/hypervisors.js @@ -364,7 +364,7 @@ function actionsHyperDetail(){ history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('hyper_toggle',{'pk':pk,'name':name}) }).on('pnotify.cancel', function() { @@ -389,7 +389,7 @@ function actionsHyperDetail(){ history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('hyper_delete',{'pk':pk,'name':name}) }).on('pnotify.cancel', function() { @@ -414,7 +414,7 @@ function actionsHyperDetail(){ history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('hyper_domains_stop',{'pk':pk,'name':name,'without_viewer':false}) }).on('pnotify.cancel', function() { @@ -439,7 +439,7 @@ function actionsHyperDetail(){ history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('hyper_domains_stop',{'pk':pk,'name':name,'without_viewer':true}) }).on('pnotify.cancel', function() { diff --git a/src/webapp/static/admin/js/hypervisors_pools.js b/src/webapp/static/admin/js/hypervisors_pools.js index 7dec9d238..53d3a6f8f 100644 --- a/src/webapp/static/admin/js/hypervisors_pools.js +++ b/src/webapp/static/admin/js/hypervisors_pools.js @@ -221,7 +221,7 @@ $(document).ready(function() { $('.btn-viewer-pool-edit').on('click', function(){ pk=$(this).attr("data-pk"); //~ setHardwareDomainDefaults('#modalEditDesktop',pk); - $("#modalEditViewer #modalEdit")[0].reset(); + $("#modalEditViewer #modalEditViewerForm")[0].reset(); $('#modalEditViewer').modal({ backdrop: 'static', keyboard: false @@ -230,6 +230,17 @@ $(document).ready(function() { //~ $('#modalEdit').parsley(); //~ modal_edit_desktop_datatables(pk); }); + + $("#modalEditViewer #send").on('click', function(e){ + var form = $('#modalEditViewer #modalEditViewerForm'); + form.parsley().validate(); + if (form.parsley().isValid()){ + data=$('#modalEditViewer #modalEditViewerForm').serializeObject(); + console.log(data) + socket.emit('hyperpool_edit',data) + } + }); + } } ); diff --git a/src/webapp/static/admin/js/media.js b/src/webapp/static/admin/js/media.js index 85a6bbb2b..37656b9f4 100644 --- a/src/webapp/static/admin/js/media.js +++ b/src/webapp/static/admin/js/media.js @@ -98,7 +98,7 @@ $(document).ready(function() { { "targets": 4, "render": function ( data, type, full, meta ) { - if(full.status == 'Available' || full.status == "FailedDownload"){ + if(full.status == 'Available' || full.status == "DownloadFailed"){ return '' } if(full.status == 'Downloading'){ @@ -136,7 +136,7 @@ $(document).ready(function() { history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('media_update',{'pk':data.id,'name':'status','value':'Deleting'}) }).on('pnotify.cancel', function() { @@ -161,7 +161,7 @@ $(document).ready(function() { history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('media_update',{'pk':data.id,'name':'status','value':'DownloadAborting'}) }).on('pnotify.cancel', function() { diff --git a/src/webapp/static/admin/js/updates.js 
b/src/webapp/static/admin/js/updates.js index 5aba41633..7802f2abf 100644 --- a/src/webapp/static/admin/js/updates.js +++ b/src/webapp/static/admin/js/updates.js @@ -93,7 +93,7 @@ $(document).ready(function() { "targets": 4, "render": function ( data, type, full, meta ) { //~ console.log(full.status+' '+full.id) - if(full.status == 'Available' || full.status == "FailedDownload"){ + if(full.status == 'Available' || full.status == "DownloadFailed"){ return '' } if(full.status == 'Downloading'){ @@ -216,7 +216,7 @@ $(document).ready(function() { "targets": 4, "render": function ( data, type, full, meta ) { //~ console.log(full.status+' '+full.id) - if(full.status == 'Available' || full.status == "FailedDownload"){ + if(full.status == 'Available' || full.status == "DownloadFailed"){ return '' } if(full.status == 'Downloading'){ @@ -316,7 +316,7 @@ $(document).ready(function() { "targets": 3, "render": function ( data, type, full, meta ) { //~ console.log(full.status+' '+full.id) - if(full.status == 'Available' || full.status == "FailedDownload"){ + if(full.status == 'Available' || full.status == "DownloadFailed"){ return '' } if(full.status == 'Downloading'){ @@ -389,7 +389,7 @@ $(document).ready(function() { "targets": 3, "render": function ( data, type, full, meta ) { //~ console.log(full.status+' '+full.id) - if(full.status == 'Available' || full.status == "FailedDownload"){ + if(full.status == 'Available' || full.status == "DownloadFailed"){ return '' } if(full.status == 'Downloading'){ diff --git a/src/webapp/static/admin/js/users.js b/src/webapp/static/admin/js/users.js index b77d11589..86a67dbe1 100644 --- a/src/webapp/static/admin/js/users.js +++ b/src/webapp/static/admin/js/users.js @@ -59,21 +59,18 @@ $(document).ready(function() { } }); - //~ $("#modalEditUser #send").on('click', function(e){ - //~ var form = $('#modalEditUserForm'); - //~ data=quota2dict($('#modalEditUserForm').serializeObject()); - //~ console.log(data) - //~ form.parsley().validate(); - //~ if (form.parsley().isValid()){ - - //~ data=quota2dict($('#modalEditUserForm').serializeObject()); - //~ delete data['password2'] - //~ data['id']=data['username']=$('#modalEditUserForm #id').val(); - //~ console.log(data) - //~ socket.emit('user_edit',data) - //~ } - //~ }); - + $("#modalPasswdUser #send").on('click', function(e){ + var form = $('#modalPasswdUserForm'); + form.parsley().validate(); + if (form.parsley().isValid()){ + data={} + data['id']=data['username']=$('#modalPasswdUserForm #id').val(); + data['name']=$('#modalPasswdUserForm #name').val(); + data['password']=$('#modalPasswdUserForm #password').val(); + socket.emit('user_passwd',data) + } + }); + $("#modalDeleteUser #send").on('click', function(e){ var form = $('#modalDeleteUserForm'); data=$('#modalDeleteUserForm').serializeObject(); @@ -345,6 +342,24 @@ function actionsUserDetail(){ //~ modal_edit_desktop_datatables(pk); }); + $('.btn-passwd').on('click', function () { + //~ setQuotaOptions('#edit-users-quota'); + var closest=$(this).closest("div"); + var pk=closest.attr("data-pk"); + var name=closest.attr("data-name"); + //~ var user=closest.attr("data-user"); + $("#modalPasswdUserForm")[0].reset(); + $('#modalPasswdUser').modal({ + backdrop: 'static', + keyboard: false + }).modal('show'); + $('#modalPasswdUserForm #name').val(name); + $('#modalPasswdUserForm #id').val(pk); + //~ $('#hardware-block').hide(); + //~ $('#modalEdit').parsley(); + //~ modal_edit_desktop_datatables(pk); + }); + $('.btn-delete').on('click', function () { //~ 
setQuotaOptions('#edit-users-quota'); var pk=$(this).closest("div").attr("data-pk"); @@ -396,7 +411,7 @@ function actionsUserDetail(){ history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('user_toggle',{'pk':pk,'name':name}) }).on('pnotify.cancel', function() { @@ -484,7 +499,7 @@ function populate_users_table(){ confirm: {confirm: true}, buttons: {closer: false,sticker: false}, history: {history: false}, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { api.ajax('/admin/users/nonexists','POST',{'commit':true}).done(function(data) { $("#modalOldUsers").modal('hide'); diff --git a/src/webapp/static/isard.css b/src/webapp/static/isard.css index edd892a60..1aa190fdf 100644 --- a/src/webapp/static/isard.css +++ b/src/webapp/static/isard.css @@ -17,3 +17,7 @@ visibility: hidden; } } + +.pnotify-center { + right: calc(50% - 150px) !important; +} diff --git a/src/webapp/static/js/desktops.js b/src/webapp/static/js/desktops.js index e60eb40e8..fe491d292 100644 --- a/src/webapp/static/js/desktops.js +++ b/src/webapp/static/js/desktops.js @@ -199,7 +199,7 @@ $(document).ready(function() { history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('domain_update',{'pk':data['id'],'name':'status','value':'Stopping'}) }).on('pnotify.cancel', function() { @@ -283,15 +283,17 @@ $(document).ready(function() { socket.on('result', function (data) { var data = JSON.parse(data); - new PNotify({ - title: data.title, - text: data.text, - hide: true, - delay: 4000, - icon: 'fa fa-'+data.icon, - opacity: 1, - type: data.type - }); + if(data.title){ + new PNotify({ + title: data.title, + text: data.text, + hide: true, + delay: 4000, + icon: 'fa fa-'+data.icon, + opacity: 1, + type: data.type + }); + }; }); socket.on('add_form_result', function (data) { @@ -335,7 +337,7 @@ $(document).ready(function() { function actionsDesktopDetail(){ $('.btn-edit').on('click', function () { - var pk=$(this).closest("div").attr("data-pk"); + var pk=$(this).closest("[data-pk]").attr("data-pk"); setHardwareOptions('#modalEditDesktop'); setHardwareDomainDefaults('#modalEditDesktop',pk); $("#modalEdit")[0].reset(); @@ -363,7 +365,7 @@ function actionsDesktopDetail(){ type: 'error' }); }else{ - var pk=$(this).closest("div").attr("data-pk"); + var pk=$(this).closest("[data-pk]").attr("data-pk"); setDefaultsTemplate(pk); setHardwareOptions('#modalTemplateDesktop'); @@ -382,8 +384,8 @@ function actionsDesktopDetail(){ }); $('.btn-delete').on('click', function () { - var pk=$(this).closest("div").attr("data-pk"); - var name=$(this).closest("div").attr("data-name"); + var pk=$(this).closest("[data-pk]").attr("data-pk"); + var name=$(this).closest("[data-pk]").attr("data-name"); new PNotify({ title: 'Confirmation Needed', text: "Are you sure you want to delete virtual machine: "+name+"?", @@ -399,7 +401,7 @@ function actionsDesktopDetail(){ history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('domain_update',{'pk':pk,'name':'status','value':'Deleting'}) }).on('pnotify.cancel', function() { @@ -407,7 +409,7 @@ function actionsDesktopDetail(){ }); $('.btn-xml').on('click', function () { - var pk=$(this).closest("div").attr("data-pk"); + var pk=$(this).closest("[data-pk]").attr("data-pk"); $("#modalShowInfoForm")[0].reset(); $('#modalEditXml').modal({ backdrop: 
'static', @@ -428,7 +430,7 @@ function actionsDesktopDetail(){ }); $('.btn-events').on('click', function () { - var pk=$(this).closest("div").attr("data-pk"); + var pk=$(this).closest("[data-pk]").attr("data-pk"); $("#modalShowInfoForm")[0].reset(); $('#modalShowInfo').modal({ backdrop: 'static', @@ -449,7 +451,7 @@ function actionsDesktopDetail(){ }); $('.btn-messages').on('click', function () { - var pk=$(this).closest("div").attr("data-pk"); + var pk=$(this).closest("[data-pk]").attr("data-pk"); $("#modalShowInfoForm")[0].reset(); $('#modalShowInfo').modal({ backdrop: 'static', @@ -483,16 +485,16 @@ function addDesktopDetailPannel ( d ) { } function setDesktopDetailButtonsStatus(id,status){ - if(status=='Stopped'){ - $('#actions-'+id+' *[class^="btn"]').prop('disabled', false); - }else{ - $('#actions-'+id+' *[class^="btn"]').prop('disabled', true); - } - if(status!='Started'){ - $('#actions-'+id+' .btn-edit').prop('disabled', false); - $('#actions-'+id+' .btn-delete').prop('disabled', false); - } - + + if(status=='Stopped'){ + $('#actions-'+id+' *[class^="btn"]').prop('disabled', false); + }else{ + $('#actions-'+id+' *[class^="btn"]').prop('disabled', true); + } + if(status=='Failed'){ + $('#actions-'+id+' .btn-edit').prop('disabled', false); + } + $('#actions-'+id+' .btn-delete').prop('disabled', false); } function icon(name){ diff --git a/src/webapp/static/js/media.js b/src/webapp/static/js/media.js index cddae4091..fd66b6650 100644 --- a/src/webapp/static/js/media.js +++ b/src/webapp/static/js/media.js @@ -101,7 +101,7 @@ $(document).ready(function() { return '' } //~ }else{ - if(full.status == 'Available' || full.status == "FailedDownload"){ + if(full.status == 'Available' || full.status == "DownloadFailed"){ return '' } if(full.status == 'Downloading'){ @@ -140,7 +140,7 @@ $(document).ready(function() { history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('media_update',{'pk':data.id,'name':'status','value':'Deleting'}) }).on('pnotify.cancel', function() { @@ -165,7 +165,7 @@ $(document).ready(function() { history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { socket.emit('media_update',{'pk':data.id,'name':'status','value':'DownloadAborting'}) }).on('pnotify.cancel', function() { diff --git a/src/webapp/static/js/templates.js b/src/webapp/static/js/templates.js index 898df9190..6632f1398 100644 --- a/src/webapp/static/js/templates.js +++ b/src/webapp/static/js/templates.js @@ -301,7 +301,7 @@ function actionsTmplDetail(){ history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { api.ajax('/template/togglekind','POST',{'pk':pk}).done(function(data) { //~ console.log('data received:'+data); @@ -330,7 +330,7 @@ function actionsTmplDetail(){ history: { history: false }, - stack: stack_center + addclass: 'pnotify-center' }).get().on('pnotify.confirm', function() { api.ajax('/domains/removable','POST',{'id':pk}).done(function(data) { console.log('data received:'+data); diff --git a/src/webapp/static/js/viewer.js b/src/webapp/static/js/viewer.js index 2f03f96a6..1aaa0886f 100644 --- a/src/webapp/static/js/viewer.js +++ b/src/webapp/static/js/viewer.js @@ -5,97 +5,6 @@ * License: AGPLv3 */ -//~ function chooseViewer(data,socket){ - //~ os=getOS() - //~ new PNotify({ - //~ title: 'Choose display connection', - //~ text: 'Open in browser (html5) or download remote-viewer 
file.', - //~ icon: 'glyphicon glyphicon-question-sign', - //~ hide: false, - //~ delay: 3000, - //~ confirm: { - //~ confirm: true, - //~ buttons: [ - //~ { - //~ text: 'SPICE BROWSER', - //~ addClass: 'btn-primary', - //~ click: function(notice){ - //~ notice.update({ - //~ title: 'You choosed spice browser viewer', text: 'Viewer will be opened in new window.\n Please allow popups!', icon: true, type: 'info', hide: true, - //~ confirm: { - //~ confirm: false - //~ }, - //~ buttons: { - //~ closer: true, - //~ sticker: false - //~ } - //~ }); - //~ socket.emit('domain_viewer',{'pk':data['id'],'kind':'spice-html5','os':os}); - //~ } - //~ }, - //~ { - //~ text: 'SPICE CLIENT', - //~ addClass: 'btn-primary', - //~ click: function(notice){ - //~ notice.update({ - //~ title: 'You choosed spice client viewer', text: 'File will be downloaded. Open it with spice remote-viewer.', icon: true, type: 'info', hide: true, - //~ confirm: { - //~ confirm: false - //~ }, - //~ buttons: { - //~ closer: true, - //~ sticker: false - //~ } - //~ }); - //~ socket.emit('domain_viewer',{'pk':data['id'],'kind':'spice-client','os':os}); - //~ } - //~ }, - //~ { - //~ text: 'VNC BROWSER', - //~ addClass: 'btn-primary', - //~ click: function(notice){ - //~ notice.update({ - //~ title: 'You choosed VNC browser viewer', text: 'Viewer will be opened in new window.\n Please allow popups!', icon: true, type: 'info', hide: true, - //~ confirm: { - //~ confirm: false - //~ }, - //~ buttons: { - //~ closer: true, - //~ sticker: false - //~ } - //~ }); - //~ socket.emit('domain_viewer',{'pk':data['id'],'kind':'vnc-html5','os':os}); - //~ } - //~ }, - //~ { - //~ text: 'VNC CLIENT', - //~ addClass: 'btn-primary', - //~ click: function(notice){ - //~ notice.update({ - //~ title: 'You choosed VNC client viewer', text: 'File will be downloaded. 
Open it with VNC client app.', icon: true, type: 'info', hide: true, - //~ confirm: { - //~ confirm: false - //~ }, - //~ buttons: { - //~ closer: true, - //~ sticker: false - //~ } - //~ }); - //~ socket.emit('domain_viewer',{'pk':data['id'],'kind':'vnc-client','os':os}); - //~ } - //~ }, - //~ ] - //~ }, - //~ buttons: { - //~ closer: false, - //~ sticker: false - //~ }, - //~ history: { - //~ history: false - //~ } - //~ }); -//~ } - function setViewerButtons(id,socket,offer){ offer=[ { @@ -158,7 +67,7 @@ function setViewerButtons(id,socket,offer){ function startClientViewerSocket(socket){ socket.on('domain_viewer', function (data) { var data = JSON.parse(data); - $("#hiddenpass-"+data["id"]).val('proves'); + if(data['kind']=='url'){ viewer=data['viewer'] window.open(viewer.replace('',document.domain)); @@ -203,17 +112,6 @@ function getOS() { return os; } - - -//~ function copyToClipboard(el) { - //~ var $temp = $(""); - //~ $("body").append($temp); - //~ id=$(el).data('id') - //~ console.log($("#hiddenpass-"+id).text()) - //~ $temp.val($("#hiddenpass-"+id).text()).select(); - //~ document.execCommand("copy"); - //~ $temp.remove(); -//~ } //~ function getClientViewer(data,socket){ //~ if(detectXpiPlugin()){ @@ -371,4 +269,93 @@ function getOS() { //~ embed.connect(); //~ } - +//~ function chooseViewer(data,socket){ + //~ os=getOS() + //~ new PNotify({ + //~ title: 'Choose display connection', + //~ text: 'Open in browser (html5) or download remote-viewer file.', + //~ icon: 'glyphicon glyphicon-question-sign', + //~ hide: false, + //~ delay: 3000, + //~ confirm: { + //~ confirm: true, + //~ buttons: [ + //~ { + //~ text: 'SPICE BROWSER', + //~ addClass: 'btn-primary', + //~ click: function(notice){ + //~ notice.update({ + //~ title: 'You choosed spice browser viewer', text: 'Viewer will be opened in new window.\n Please allow popups!', icon: true, type: 'info', hide: true, + //~ confirm: { + //~ confirm: false + //~ }, + //~ buttons: { + //~ closer: true, + //~ sticker: false + //~ } + //~ }); + //~ socket.emit('domain_viewer',{'pk':data['id'],'kind':'spice-html5','os':os}); + //~ } + //~ }, + //~ { + //~ text: 'SPICE CLIENT', + //~ addClass: 'btn-primary', + //~ click: function(notice){ + //~ notice.update({ + //~ title: 'You choosed spice client viewer', text: 'File will be downloaded. Open it with spice remote-viewer.', icon: true, type: 'info', hide: true, + //~ confirm: { + //~ confirm: false + //~ }, + //~ buttons: { + //~ closer: true, + //~ sticker: false + //~ } + //~ }); + //~ socket.emit('domain_viewer',{'pk':data['id'],'kind':'spice-client','os':os}); + //~ } + //~ }, + //~ { + //~ text: 'VNC BROWSER', + //~ addClass: 'btn-primary', + //~ click: function(notice){ + //~ notice.update({ + //~ title: 'You choosed VNC browser viewer', text: 'Viewer will be opened in new window.\n Please allow popups!', icon: true, type: 'info', hide: true, + //~ confirm: { + //~ confirm: false + //~ }, + //~ buttons: { + //~ closer: true, + //~ sticker: false + //~ } + //~ }); + //~ socket.emit('domain_viewer',{'pk':data['id'],'kind':'vnc-html5','os':os}); + //~ } + //~ }, + //~ { + //~ text: 'VNC CLIENT', + //~ addClass: 'btn-primary', + //~ click: function(notice){ + //~ notice.update({ + //~ title: 'You choosed VNC client viewer', text: 'File will be downloaded. 
Open it with VNC client app.', icon: true, type: 'info', hide: true, + //~ confirm: { + //~ confirm: false + //~ }, + //~ buttons: { + //~ closer: true, + //~ sticker: false + //~ } + //~ }); + //~ socket.emit('domain_viewer',{'pk':data['id'],'kind':'vnc-client','os':os}); + //~ } + //~ }, + //~ ] + //~ }, + //~ buttons: { + //~ closer: false, + //~ sticker: false + //~ }, + //~ history: { + //~ history: false + //~ } + //~ }); +//~ } diff --git a/src/webapp/templates/admin/pages/base_graphs.html b/src/webapp/templates/admin/pages/base_graphs.html index 387419ca8..f0379d3ba 100644 --- a/src/webapp/templates/admin/pages/base_graphs.html +++ b/src/webapp/templates/admin/pages/base_graphs.html @@ -131,7 +131,7 @@ //~ icon: 'fa fa-alert-sign', opacity: 1, type: "{{ category }}", - stack: stack_center + addclass: 'pnotify-center' }); {% endfor %} diff --git a/src/webapp/templates/admin/pages/config.html b/src/webapp/templates/admin/pages/config.html index 1a1568ff4..c62ddaba2 100644 --- a/src/webapp/templates/admin/pages/config.html +++ b/src/webapp/templates/admin/pages/config.html @@ -87,6 +87,79 @@

[The diff body for config.html is unrecoverable here: the HTML markup was stripped in extraction, leaving bare "+"/"-" markers and a few panel titles. What survives indicates that the change adds new "Grafana" settings panels alongside the existing "LDAP authentication" panel and removes an older commented-out "STATISTICS"/"Grafana" block; a later hunk (@@ -430,7 +413,7 @@) adjusts the closing of the HTML comment (`--->`) around another "Grafana" element.]

diff --git a/src/webapp/templates/admin/pages/config_modals.html b/src/webapp/templates/admin/pages/config_modals.html index 408c78e1c..5c6313db5 100644 --- a/src/webapp/templates/admin/pages/config_modals.html +++ b/src/webapp/templates/admin/pages/config_modals.html @@ -282,8 +282,10 @@
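One change that recurs through almost every JavaScript file in this diff is the replacement of `stack: stack_center` with `addclass: 'pnotify-center'` in the confirmation PNotify dialogs, backed by the new `.pnotify-center` rule added to isard.css. Centering now comes from a single CSS class instead of a per-page `stack_center` object. A minimal sketch of the resulting pattern, assuming PNotify 3.x as used throughout the codebase (the `confirmCentered` helper is illustrative, not from the repository):

```javascript
// Sketch only: PNotify 3.x; the .pnotify-center class comes from the
// isard.css hunk above (right: calc(50% - 150px) !important).
function confirmCentered(title, text, onConfirm) {
    new PNotify({
        title: title,
        text: text,
        hide: false,
        opacity: 0.9,
        confirm: { confirm: true },
        buttons: { closer: false, sticker: false },
        history: { history: false },
        addclass: 'pnotify-center'   // CSS does the centering now
    }).get().on('pnotify.confirm', onConfirm);
}

// Usage, e.g. for the scheduler delete action:
// confirmCentered('Delete job', 'Really delete this job?', function () {
//     api.ajax('/admin/delete', 'POST', {'pk': id, 'table': 'scheduler_jobs'});
// });
```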