#!python
#
# (c) 2017-2021 Fetal-Neonatal Neuroimaging & Developmental Science Center
#                   Boston Children's Hospital
#
#              http://childrenshospital.org/FNNDSC/
#                        dev@babyMRI.org
#

import sys
import os
from argparse import ArgumentParser

sys.path.insert(1, os.path.join(os.path.dirname(__file__), '..'))

from pfconclient import client


# Build the top-level parser with the arguments common to every subcommand.
parser = ArgumentParser(description='Manage pfcon service resources')
parser.add_argument('url', help="url of pfcon service")
parser.add_argument('jid', help="job id")
parser.add_argument('-u', '--username', help="username for pfcon service")
parser.add_argument('-p', '--password', help="password for pfcon service")
# type=int so the value is usable directly as a requests timeout; without it
# argparse would deliver a string.
parser.add_argument('--timeout', type=int, help="requests' timeout")
subparsers = parser.add_subparsers(dest='subparser_name', title='subcommands',
                                   description='valid subcommands',
                                   help='sub-command help')

# create the parser for the "run" command (submit + poll + download in one shot)
parser_run = subparsers.add_parser('run', help='run a new job until finished')
parser_run.add_argument('inputdir', help="job input directory")
parser_run.add_argument('outputdir', help="job output directory")
run_req_group = parser_run.add_argument_group('required job API parameters')
run_req_group.add_argument('--cmd_args', help='cmd arguments string', required=True)
run_req_group.add_argument('--auid', help='user id', required=True)
run_req_group.add_argument('--number_of_workers', help='number of workers',
                           required=True)
run_req_group.add_argument('--cpu_limit', help='cpu limit', required=True)
run_req_group.add_argument('--memory_limit', help='memory limit', required=True)
run_req_group.add_argument('--gpu_limit', help='gpu limit', required=True)
run_req_group.add_argument('--image', help='docker image', required=True)
run_req_group.add_argument('--selfexec',
                           help='executable file name within the docker image',
                           required=True)
run_req_group.add_argument('--selfpath',
                           help='path to executable file within the docker image',
                           required=True)
run_req_group.add_argument('--execshell',
                           help='execution shell within the docker image',
                           required=True)
run_req_group.add_argument('--type', help='plugin type', choices=['fs', 'ds'],
                           required=True)
run_opt_group = parser_run.add_argument_group('optional job API parameters')
run_opt_group.add_argument('--cmd_path_flags',
                           help='comma separated list of cmd flags with path argument')
# type=int: these override the client's polling wait attributes, which are
# numeric seconds.
parser_run.add_argument('--poll_initial_wait', type=int,
                        help='initial wait time in seconds to poll for job status')
parser_run.add_argument('--poll_max_wait', type=int,
                        help='maximum wait time in seconds to poll for job status')

# create the parser for the "submit" command (fire-and-forget job submission)
parser_submit = subparsers.add_parser('submit', help='submit a new job')
parser_submit.add_argument('inputdir', help="job input directory")
submit_req_group = parser_submit.add_argument_group('required job API parameters')
# All required string-valued job descriptors share the same shape, so register
# them table-driven instead of repeating ten near-identical calls.
for _flag, _help in (
        ('--cmd_args', 'cmd arguments string'),
        ('--auid', 'user id'),
        ('--number_of_workers', 'number of workers'),
        ('--cpu_limit', 'cpu limit'),
        ('--memory_limit', 'memory limit'),
        ('--gpu_limit', 'gpu limit'),
        ('--image', 'docker image'),
        ('--selfexec', 'executable file name within the docker image'),
        ('--selfpath', 'path to executable file within the docker image'),
        ('--execshell', 'execution shell within the docker image')):
    submit_req_group.add_argument(_flag, help=_help, required=True)
# --type is the only required descriptor with a constrained value set.
submit_req_group.add_argument('--type', help='plugin type', choices=['fs', 'ds'],
                              required=True)
submit_opt_group = parser_submit.add_argument_group('optional job API parameters')
submit_opt_group.add_argument('--cmd_path_flags',
                              help='comma separated list of cmd flags with path argument')
# create the parser for the "status" command; a one-shot status query needs no
# extra arguments beyond the common url/jid.
parser_status = subparsers.add_parser('status', help='get the exec status of a job')

# create the parser for the "poll" command
parser_poll = subparsers.add_parser('poll',
                                    help='poll the exec status of a job until finished')
# type=int: these override the client's polling wait attributes, which are
# numeric seconds.
parser_poll.add_argument('--poll_initial_wait', type=int,
                         help='initial wait time in seconds to poll for job status')
parser_poll.add_argument('--poll_max_wait', type=int,
                         help='maximum wait time in seconds to poll for job status')

# create the parser for the "download" command
parser_download = subparsers.add_parser('download',
                                        help="download job's output files")
parser_download.add_argument('outputdir', help="directory to download job's output files")
parser_download.add_argument('--zip', action='store_true',
                             help='save output files as a single zip file')

# parse the arguments and perform the appropriate action with the client
args = parser.parse_args()
# Coerce to int before use: argparse may deliver the timeout as a string, and a
# string timeout is rejected by the requests library. Default is a generous
# 1000 seconds when --timeout is omitted.
timeout = int(args.timeout) if args.timeout else 1000
# Use a distinct name for the instance so it does not shadow the imported
# 'client' module.
pfcon_client = client.Client(args.url, args.username, args.password)

if args.subparser_name == 'run' or args.subparser_name == 'submit':
    # Job descriptors required by the pfcon job API; optional cmd_path_flags
    # defaults to an empty string when not provided.
    d_job_descriptors = {
        'cmd_args': args.cmd_args,
        'cmd_path_flags': args.cmd_path_flags if args.cmd_path_flags is not None else '',
        'auid': args.auid,
        'number_of_workers': args.number_of_workers,
        'cpu_limit': args.cpu_limit,
        'memory_limit': args.memory_limit,
        'gpu_limit': args.gpu_limit,
        'image': args.image,
        'selfexec': args.selfexec,
        'selfpath': args.selfpath,
        'execshell': args.execshell,
        'type': args.type
    }
    if args.subparser_name == 'run':
        # Optional overrides for the client's polling schedule (seconds);
        # coerce to int in case the parser delivered strings.
        if args.poll_initial_wait:
            pfcon_client.initial_wait = int(args.poll_initial_wait)
        if args.poll_max_wait:
            pfcon_client.max_wait = int(args.poll_max_wait)
        # run = submit + poll until finished + download outputs
        pfcon_client.run_job(args.jid, d_job_descriptors, args.inputdir,
                             args.outputdir, timeout)
    else:
        # create job zip file content from local input_dir
        job_zip_file = pfcon_client.create_zip_file(args.inputdir)
        zip_content = job_zip_file.getvalue()
        pfcon_client.submit_job(args.jid, d_job_descriptors, zip_content, timeout)
elif args.subparser_name == 'status':
    d_resp = pfcon_client.get_job_status(args.jid, timeout)
    l_status = d_resp['compute']['d_ret']['l_status']
    print('\nJob %s status: %s' % (args.jid, l_status))
elif args.subparser_name == 'poll':
    # Optional overrides for the client's polling schedule (seconds).
    if args.poll_initial_wait:
        pfcon_client.initial_wait = int(args.poll_initial_wait)
    if args.poll_max_wait:
        pfcon_client.max_wait = int(args.poll_max_wait)
    pfcon_client.poll_job_status(args.jid, timeout)
elif args.subparser_name == 'download':
    if args.zip:
        pfcon_client.get_job_zip_file(args.jid, args.outputdir, timeout)
    else:
        pfcon_client.get_job_files(args.jid, args.outputdir, timeout)
