#!/usr/bin/env ruby
# OnApp background daemon bootstrap: defaults to the 'production' Rails env,
# claims a PID file, then loads the Rails environment and the processor library.
# Run for 'production' env by default
ENV["RAILS_ENV"] ||= 'production'
# Skip dashboard client
SKIP_DASHBOARD_CLIENT = true
$daemon_online = true

## Refuse to start if another daemon instance already holds the PID file.
$pid_file = File.expand_path('../../tmp/pids/onapp_daemon.pid', __FILE__)
# File.exist? — the File.exists? alias is deprecated and removed in Ruby 3.2.
if File.exist?($pid_file)
  puts "Daemon is already running. [#{$pid_file}]"
  exit 1
end
File.open($pid_file, 'w') { |file| file.write Process.pid }

require File.expand_path('../../config/environment.rb', __FILE__)
require 'eventmachine'
require 'lib/processor'

# Memoized daemon logger writing to log/onapp_daemon.log with rotation
# (keeps up to 10 files of 500 MiB each). In production the level is
# raised to INFO; otherwise the Logger default (DEBUG) applies.
def logger
  @logger ||= begin
    log = Logger.new("#{Rails.root}/log/onapp_daemon.log", 10, (500 * 1024 * 1024))
    log.level = Logger::INFO if ENV["RAILS_ENV"].eql?('production')
    # Compact line format: "[SEVERITY] timestamp message"
    log.formatter = proc do |severity, datetime, progname, msg|
      "[#{severity}] #{datetime} #{msg}\n"
    end
    log
  end
end

# Remove a stale status socket left by a previous run so the Unix-domain
# server below can bind again. File.exist? — exists? was removed in Ruby 3.2.
File.unlink(Processor.status_socket_file) if File.exist?(Processor.status_socket_file)

# EventMachine file-watch handler for the daemon's PID file.
# Mixed into the watcher created by EM.watch_file; when the watched
# PID file is deleted, treat it as an external shutdown request.
module PidMonitor
  # EM callback fired when the watched file disappears.
  def file_deleted
    graceful_stop
  end
end

# Begin an orderly shutdown: stop accepting status-socket connections,
# cancel every periodic timer, then wait for in-flight transactions
# owned by this process to finish before stopping the reactor.
def graceful_stop
  $daemon_online = false
  EventMachine.stop_server($server) # stop the statistics socket service
  $timers.each_value(&:cancel)      # stop the periodic callers
  logger.info("Waiting for connection(s) to finish ...")
  # If work is still running, re-check every 5 seconds until it drains.
  unless wait_for_connections_and_stop
    EventMachine.add_periodic_timer(5) do
      wait_for_connections_and_stop
    end
  end
end

# Stop the reactor once all running transactions owned by this PID are done.
# Returns false while work is still in flight, true after EM.stop is issued.
def wait_for_connections_and_stop
  logger.info("[PID #{Process.pid}] COUNTERS : #{$counters.inspect}")
  logger.debug("[PID #{Process.pid}] TIMERS : #{$timers.inspect}")

  return false if Transaction.running.where(:pid => Process.pid).count > 0
  logger.info("[PID #{Process.pid}]:  All connection(s) closed. ")
  EventMachine.stop
  # Explicit true: EM.stop's return value is not a reliable truthy flag, and
  # graceful_stop's `unless` check would otherwise install a redundant timer.
  true
end

# Remove the PID file, but only when it still belongs to this process.
# Stops the EventMachine file watcher first so the deletion does not
# trigger another shutdown via PidMonitor#file_deleted.
def delete_pid_file(pid_file)
  logger.info('Try Remove PID file, and stop watch.')
  # File.read avoids the descriptor leak of an unclosed File.open(...).read.
  if File.exist?(pid_file) && File.read(pid_file).to_i == Process.pid
    $pid_monitor.stop_watching if $pid_monitor
    File.unlink(pid_file)
  else
    logger.info('!!! PID file not found or foreign.')
  end
end

# Per-task in-flight work counters, keyed by timer name. Each periodic
# timer below increments its counter when a job is deferred and decrements
# it in an ensure block, bounding concurrency per task type.
$counters = %w[
  cdn_sync_runners
  backup_runners
  transaction_runners
  hypervisor_monitor
  billing_updater
  zombie_disk_space_updater
  schedule_runners
  cluster_monitors
  snmp_stats_level1
  snmp_stats_level2
  snmp_stats_level3
  vmware_stats_level1
  vmware_stats_level2
  cp_space_updater
].each_with_object({}) { |name, counts| counts[name] = 0 }
# Periodic timer handles, filled in inside EM.run; cancelled on shutdown.
$timers = {}

# Main reactor loop: boots the status socket, installs signal handlers and
# the PID-file watcher, then schedules every periodic background task.
# Runs until EM.stop is issued (directly by INT/EXIT, or via graceful_stop).
EM.run {
  # Add Unix socket monitor
  $server = EM::start_unix_domain_server(Processor.status_socket_file, ServerProcessorMonitor)

  logger.info("#{Rails.env} => DAEMON START")

  # INT and EXIT stop the reactor immediately; TERM drains in-flight work first.
  Signal.trap("INT") { logger.info("SCHEDULER INT signal."); delete_pid_file($pid_file); EM.stop }
  Signal.trap("TERM") { logger.info("SCHEDULER TERM signal."); delete_pid_file($pid_file); graceful_stop }
  Signal.trap("EXIT") { logger.info("SCHEDULER EXIT!"); delete_pid_file($pid_file); EM.stop }
  #Signal.trap("HUP") {  }

  # Stop event machine if removed pid
  $pid_monitor = EM.watch_file($pid_file, PidMonitor)

  # Log (but do not crash on) exceptions raised inside the event loop.
  EM.error_handler { |e|
    logger.error("Error raised during event loop: #{e.message}")
    logger.error("Error raised during event loop: #{e.backtrace}")
  }

  # Processor instances; each exposes .max (concurrency cap) and .run.
  # many processes
  transaction_runner = TransactionRunnerProcessor.new
  backup_runner = BackupRunnerProcessor.new
  # 1 process runners
  billing_updater = BillingUpdaterMonitor.new #TODO Oleg review
  schedule_runner = ScheduleRunnerProcessor.new
  monitis_monitor = MonitisMonitorProcessor.new
  cdn_sync_runner = CDNSyncRunnerProcessor.new
  zombie_disk_space_updater = ZombieDiskSpaceUpdaterProcessor.new
  snmp_stats_runner1 = SNMPStatsRunnerProcessor.new(:level => 1)
  snmp_stats_runner2 = SNMPStatsRunnerProcessor.new(:level => 2)
  snmp_stats_runner3 = SNMPStatsRunnerProcessor.new(:level => 3)
  vmware_stats_runner1 = VMWareStatsRunnerProcessor.new(:level => 1)
  vmware_stats_runner2 = VMWareStatsRunnerProcessor.new(:level => 2)

  #CDN sync runner: defers a sync run unless the concurrency cap is reached.
  $timers['cdn_sync_runners'] = EM.add_periodic_timer(OnApp.configuration.cdn_sync_delay.minutes) do
    if $counters['cdn_sync_runners'] < cdn_sync_runner.max
      EM.defer(
          proc {
            $counters['cdn_sync_runners'] +=1
            begin
              cdn_sync_runner.logger.info("Starting CDN Sync Runner at #{Time.now}")
              cdn_sync_runner.run
            ensure
              # ensure keeps the counter balanced even if .run raises
              $counters['cdn_sync_runners'] -= 1
            end
          })
    end
  end

  # Backup runners: only defer work when there are waiting transactions
  # and the per-type concurrency cap has not been hit.
  $timers['backup_runners'] = EM.add_periodic_timer(OnApp.configuration.backup_taker_delay) do
    backup_runner.logger.info("Status #{$counters.to_json}")
    if (backup_runner.waited_transactions > 0) && ($counters['backup_runners'] < backup_runner.max)
      EM.defer(
          proc {
            $counters['backup_runners'] +=1
            begin
              backup_runner.logger.info("Start Backup Runner #{$counters['backup_runners']}")
              backup_runner.run
            ensure
              backup_runner.logger.info('Stop Backup Runner')
              $counters['backup_runners'] -= 1
            end
          })
    else
      backup_runner.logger.info('No Scheduled Backups!')
    end
  end


  # Transaction runners: same waiting-work + cap gating as backups.
  $timers['transaction_runners'] = EM.add_periodic_timer(OnApp.configuration.transaction_runner_delay) do
    if (transaction_runner.waited_transactions > 0) && ($counters['transaction_runners'] < transaction_runner.max)
      EM.defer(
          proc {
            $counters['transaction_runners'] +=1
            begin
              transaction_runner.logger.info("Start Transaction Runner #{$counters['transaction_runners']}")
              transaction_runner.run
            ensure
              $counters['transaction_runners'] -= 1
            end
          })
    else
      transaction_runner.logger.info('No Scheduled Transactions!')
    end
  end

  # Hypervisor monitor
  # RECEIVER: commented as long as receiver is not working
  # EM.add_timer(60) do
  #   $timers['hypervisor_monitor'] = EM.add_periodic_timer(OnApp.configuration.hypervisor_monitor_delay) do
  #     if $counters['hypervisor_monitor'] < schedule_runner.max
  #       EM.defer(
  #           proc {
  #             $counters['hypervisor_monitor'] +=1
  #             begin
  #               hypervisor_monitor.logger.info("Start Hypervisor Monitor #{$counters['hypervisor_monitor']}")
  #               hypervisor_monitor.run
  #             ensure
  #               $counters['hypervisor_monitor'] -= 1
  #             end
  #           })
  #     end
  #   end if $daemon_online # start monitor if daemon still online
  # end

  ## Billing Updater
  #$timers['billing_updater'] = EM.add_periodic_timer(OnApp.configuration.billing_stat_updater_delay) do
  #  if $counters['billing_updater'] < schedule_runner.max
  #    EM.defer(
  #        proc {
  #          $counters['billing_updater'] +=1
  #          begin
  #            billing_updater.logger.info("Start Billing Updater #{$counters['billing_updater']}")
  #            billing_updater.run
  #          ensure
  #            $counters['billing_updater'] -= 1
  #          end
  #        })
  #  end
  #end

  #CP disk usage monitoring
  # every 15 minutes we will check if space on any partition exceeds the threshold
  $timers['cp_space_updater'] = EM.add_periodic_timer(15.minutes) do
    if $counters['cp_space_updater'] < 1
      EM.defer(
          proc {
            $counters['cp_space_updater'] += 1
            begin
              logger.info("Start CP free space check #{$counters['cp_space_updater']}")
              hostname = `hostname`
              message = "CP #{hostname}\nDisk free space less than 95%\n"
              usage = `df -h`
              # Parses `df -h` output; appends every partition whose use% field
              # exceeds 95 to the alert message (select is used for its side
              # effect here). NOTE(review): the alert text says "less than 95%"
              # while the check is `> 95` — the wording looks inverted; confirm
              # the intended meaning ("free space below 5%"?).
              unless usage.scan(/[0-9]{1,}\%.*/).collect { |row| row.split(' ') }.select { |k, v| message << "#{v}\t#{k}\n" if k.to_i > 95 }.empty?
                Alert.log!(Alert.new, :free_space, :warn, message)
                SystemAlert.system_resources(message).deliver if OnApp.configuration.system_notification
              end
            ensure
              $counters['cp_space_updater'] -= 1
            end
          })
    end
  end

  # Zombie disk space updater
  $timers['zombie_disk_space_updater'] = EM.add_periodic_timer(OnApp.configuration.zombie_disk_space_updater_delay) do
    if $counters['zombie_disk_space_updater'] < zombie_disk_space_updater.max
      EM.defer(
          proc {
            $counters['zombie_disk_space_updater'] += 1
            begin
              zombie_disk_space_updater.logger.info("Start Zoombie Diskspace updater #{$counters['zombie_disk_space_updater']}")
              zombie_disk_space_updater.run
            ensure
              $counters['zombie_disk_space_updater'] -= 1
            end
          })
    end
  end

  # Schedule runner
  $timers['schedule_runners'] = EM.add_periodic_timer(OnApp.configuration.schedule_runner_delay) do
    if $counters['schedule_runners'] < schedule_runner.max
      EM.defer(
          proc {
            $counters['schedule_runners'] +=1
            begin
              schedule_runner.logger.info("Start Shedule runner #{$counters['schedule_runners']}")
              schedule_runner.run
            ensure
              $counters['schedule_runners'] -= 1
            end
          })
    end
  end

  # Cluster monitor
  $timers['cluster_monitors'] = EM.add_periodic_timer(OnApp.configuration.cluster_monitor_delay) do
    if $counters['cluster_monitors'] < monitis_monitor.max
      EM.defer(
          # run task
          proc {
            $counters['cluster_monitors'] += 1
            begin
              monitis_monitor.logger.info("Start Cluster monitor #{$counters['cluster_monitors'] }")
              monitis_monitor.run
            ensure
              $counters['cluster_monitors'] -= 1
            end
          })
    end
  end

  # SNMP Stats
  # The per-level counter works as a completion latch: it is preset to the
  # number of hypervisors, each defer's completion callback decrements it,
  # and the timer skips a cycle while the previous sweep (counter > 0) is
  # still in flight.
  #level 1
  $timers['snmp_stats_level1'] = EM.add_periodic_timer(OnApp.configuration.snmp_stats_level1_period) do
    snmp_stats_runner1.logger.info("Start SNMP Stat Level 1: (HV defers: #{$counters['snmp_stats_level1']})")
    if $counters['snmp_stats_level1'] <= 0
      $counters['snmp_stats_level1'] = Hypervisor.not_vmware.count
      snmp_stats_runner1.run {
        Hypervisor.not_vmware.find_each(:batch_size => 50) do |hv|
          EM.defer(
              proc {
                begin
                  snmp_stats_runner1.logger.info("Get Level 1 for HV: #{hv.id}  #{hv.ip_address}")
                  hv.get_level1
                rescue => e
                  snmp_stats_runner1.logger.info(e)
                end
              }, proc {
                # completion callback: one hypervisor finished
                $counters['snmp_stats_level1'] -= 1
              })
        end
      }
    end
  end

  #level 2 — same latch pattern as level 1
  $timers['snmp_stats_level2'] = EM.add_periodic_timer(OnApp.configuration.snmp_stats_level2_period) do
    snmp_stats_runner2.logger.info("Start SNMP Stat Level 2: (HV defers: #{$counters['snmp_stats_level2']})")
    if $counters['snmp_stats_level2'] <= 0
      $counters['snmp_stats_level2'] = Hypervisor.not_vmware.count
      snmp_stats_runner2.run {
        Hypervisor.not_vmware.all.each do |hv|
          EM.defer(
              proc {
                begin
                  snmp_stats_runner2.logger.info("Get Level 2 for HV: #{hv.id}  #{hv.ip_address}")
                  hv.get_level2
                rescue => e
                  snmp_stats_runner2.logger.info(e)
                end
              }, proc {
                $counters['snmp_stats_level2'] -= 1
              })
        end
      }
    end
  end

  #level 3 — same latch pattern as level 1
  $timers['snmp_stats_level3'] = EM.add_periodic_timer(OnApp.configuration.snmp_stats_level3_period) do
    snmp_stats_runner3.logger.info("Start SNMP Stat Level 3: (HV defers: #{$counters['snmp_stats_level3']})")
    if $counters['snmp_stats_level3'] <= 0
      $counters['snmp_stats_level3'] = Hypervisor.not_vmware.count
      snmp_stats_runner3.run {
        Hypervisor.not_vmware.all.each do |hv|
          EM.defer(
              proc {
                begin
                  snmp_stats_runner3.logger.info("Get Level 3 for HV: #{hv.id}  #{hv.ip_address}")
                  hv.get_level3
                rescue => e
                  snmp_stats_runner3.logger.info(e)
                end
              }, proc {
                $counters['snmp_stats_level3'] -= 1
              })
        end
      }
    end
  end

  # VMWare Stats — same latch pattern as SNMP, over the vmware hypervisors
  #level 1
  $timers['vmware_stats_level1'] = EM.add_periodic_timer(OnApp.configuration.vmware_stats_level1_period) do
    vmware_stats_runner1.logger.info("Start VMWare Stat Level 1: (HV defers: #{$counters['vmware_stats_level1']})")
    if $counters['vmware_stats_level1'] <= 0
      $counters['vmware_stats_level1'] = Hypervisor.vmware.count
      vmware_stats_runner1.run {
        Hypervisor.vmware.all.each do |hv|
          EM.defer(
              proc {
                begin
                  vmware_stats_runner1.logger.info("Get Level 1 for HV: #{hv.id}  #{hv.ip_address}")
                  hv.get_level1
                rescue => e
                  vmware_stats_runner1.logger.info(e)
                end
              }, proc {
                $counters['vmware_stats_level1'] -= 1
              })
        end
      }
    end
  end

  #level 2
  $timers['vmware_stats_level2'] = EM.add_periodic_timer(OnApp.configuration.vmware_stats_level2_period) do
    vmware_stats_runner2.logger.info("Start VMWare Stat Level 2: (HV defers: #{$counters['vmware_stats_level2']})")
    if $counters['vmware_stats_level2'] <= 0
      $counters['vmware_stats_level2'] = Hypervisor.vmware.count
      vmware_stats_runner2.run {
        Hypervisor.vmware.all.each do |hv|
          EM.defer(
              proc {
                begin
                  vmware_stats_runner2.logger.info("Get Level 2 for HV: #{hv.id}  #{hv.ip_address}")
                  hv.get_level2
                rescue => e
                  vmware_stats_runner2.logger.info(e)
                end
              }, proc {
                $counters['vmware_stats_level2'] -= 1
              })
        end
      }
    end
  end


}