#!/usr/bin/env python
#coding=utf-8
from oss.oss_api import *
from oss.oss_util import *
from oss.oss_xml_handler import *
import signal
import ConfigParser
from optparse import OptionParser
import os
import re
import time
import Queue
import sys
reload(sys)
sys.setdefaultencoding("utf-8")

CMD_LIST = {}
ACL_LIST = ['private', 'public-read', 'public-read-write']
OSS_PREFIX = 'oss://'
CONFIGFILE = os.path.expanduser('~') + '/.osscredentials'
CONFIGSECTION = 'OSSCredentials'
DEFAULT_HOST = "oss.aliyuncs.com"
OSS_HOST = DEFAULT_HOST
ID = ""
KEY = ""
PUT_OK = 0
GET_OK = 0
DELETE_OK = 0
SEND_BUF_SIZE = 8192
RECV_BUF_SIZE = 1024*1024*10
MAX_OBJECT_SIZE = 5*1024*1024*1024
IS_DEBUG = False

HELP = \
'''osscmd:
    getallbucket(gs)
    createbucket(cb,mb,pb) oss://bucket --acl=[acl] --location=[location]
    deletebucket(db)       oss://bucket
    deletewholebucket      oss://bucket
    getbucketlocation(gl)  oss://bucket

    getacl oss://bucket
    setacl oss://bucket --acl=[acl] 
        allow private, public-read, public-read-write

    ls(list)            oss://bucket/[prefix] [marker] [delimiter] [maxkeys]
    ls(list)            oss://bucket/[prefix] --marker=xxx --delimiter=xxx --maxkeys=xxx
    mkdir               oss://bucket/dirname
    listallobject       oss://bucket/[prefix]
    deleteallobject     oss://bucket/[prefix]
    downloadallobject   oss://bucket/[prefix] localdir --replace=false
    downloadtodir       oss://bucket/[prefix] localdir --replace=false
    uploadfromdir       localdir oss://bucket/[prefix] --check_point=check_point_file
    put localfile       oss://bucket/object --content-type=[content_type] --headers=\"key1:value1, key2:value2\" 
    upload localfile    oss://bucket/object --content-type=[content_type]
    get                 oss://bucket/object localfile
    multiget(multi_get) oss://bucket/object localfile 
    cat                 oss://bucket/object
    meta                oss://bucket/object
    info                oss://bucket/object
    copy                oss://source_bucket/source_object oss://target_bucket/target_object --headers=\"key1:value1, key2:value2\"
    rm(delete,del)      oss://bucket/object
    signurl(sign)       oss://bucket/object --timeout=[timeout_seconds]

    init                oss://bucket/object
    listpart            oss://bucket/object --upload_id=xxx
    listparts           oss://bucket
    getallpartsize      oss://bucket
    cancel              oss://bucket/object --upload_id=xxx
    multiupload(multi_upload,mp) localfile oss://bucket/object 
    multiupload(multi_upload,mp) localfile oss://bucket/object --upload_id=xxx --thread_num=10 --max_part_num=1000
    uploadpartfromfile  (upff)   localfile oss://bucket/object --upload_id=xxx --part_number=xxx 
    uploadpartfromstring(upfs)   oss://bucket/object --upload_id=xxx --part_number=xxx --data=xxx
    config --host=[oss.aliyuncs.com] --id=[accessid] --key=[accesskey]
    '''
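
# A minimal usage sketch (assuming the script is invoked as osscmd and that
# credentials are configured; the bucket/object names below are placeholders):
#   osscmd config --id=your_access_id --key=your_access_key
#   osscmd put ./local.txt oss://mybucket/dir/local.txt
#   osscmd get oss://mybucket/dir/local.txt ./copy.txt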

def print_result(cmd, res):
    '''
    Print the HTTP response details if the request failed.
    '''
    try:
        if res.status / 100 == 2:
            pass
        else:
            print "Error Headers:\n"
            print res.getheaders()
            print "Error Body:\n"
            print res.read(1024)
            print "Error Status:\n"
            print res.status
            print cmd, "Failed!"
    except AttributeError:
        pass

def format_datetime(osstimestamp):
    date = re.compile("(\.\d*)?Z").sub(".000Z", osstimestamp)
    ts = time.strptime(date, "%Y-%m-%dT%H:%M:%S.000Z")
    return time.strftime("%Y-%m-%d %H:%M", ts)
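
# For reference (the timestamp is reformatted as-is, without timezone
# conversion):
#   format_datetime("2013-07-01T12:00:00.000Z") -> "2013-07-01 12:00"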

def format_unixtime(osstimestamp):
    date = re.compile("(\.\d*)?Z").sub(".000Z", osstimestamp)
    ts = time.strptime(date, "%Y-%m-%dT%H:%M:%S.000Z")
    return (int)(time.mktime(ts)) 

def format_size(size):
    size = int(size)
    coeffs = ['K', 'M', 'G', 'T']
    coeff = ""
    while size > 2048:
        size /= 1024
        coeff = coeffs.pop(0)
    return str(size) + coeff + "B"
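
# Expected behaviour, for reference (integer division, so values are
# truncated rather than rounded):
#   format_size(512)       -> "512B"
#   format_size(4096)      -> "4KB"
#   format_size(5*1024**3) -> "5GB"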

def format_utf8(string):
    string = smart_code(string)
    if isinstance(string, unicode):
        string = string.encode('utf-8')                
    return string

def split_path(path):
    if not path.lower().startswith(OSS_PREFIX):
        print "%s parameter %s invalid, " \
              "must be start with %s" % \
              (args[0], args[1], OSS_PREFIX)
        sys.exit(1)
    pather = path[len(OSS_PREFIX):].split('/')
    return pather

# split an object path like oss://bucket/dir/object into its components
def split_object_path(command, path):
    pather = split_path(path)
    if len(pather) <= 1:
        print "%s parameters invalid, must be object format" % command
        sys.exit(1)
    return pather

def check_bucket(bucket):
    if len(bucket) == 0:
        print "Bucket should not be empty!"
        print "Please input oss://bucket"
        sys.exit(1)

def check_object(object):
    if len(object) == 0:
        print "Object should not be empty!"
        print "Please input oss://bucket/object"
        sys.exit(1)

    if object.startswith("/"):
        print "object name should not begin with /"
        sys.exit(1)

def check_localfile(localfile):
    if not os.path.isfile(localfile):
        print "%s is not existed!" % localfile
        sys.exit(1)

def check_args(min_count, args=None):
    if not args:
        args = []
    if len(args) < min_count:
        print "%s: missing parameters" % args[0]
        sys.exit(1)

def check_bucket_object(bucket, object):
    check_bucket(bucket)
    check_object(object)

def parse_bucket_object(path):
    pather = split_path(path)
    bucket = ""
    object = ""
    if len(pather) > 0:
        bucket = pather[0]
    if len(pather) > 1:
        object += '/'.join(pather[1:])
    object = smart_code(object)
    if object.startswith("/"):
        print "object name SHOULD NOT begin with /"
        sys.exit(1)
    return (bucket, object)
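
# For reference, the intended split (a sketch, not exhaustive):
#   parse_bucket_object("oss://mybucket/dir/file.txt") -> ("mybucket", "dir/file.txt")
#   parse_bucket_object("oss://mybucket")              -> ("mybucket", "")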

def parse_bucket(path):
    bucket = path
    if bucket.startswith(OSS_PREFIX):
        bucket = bucket[len(OSS_PREFIX):]
    tmp_list = bucket.split("/")
    if len(tmp_list) > 0:
        bucket = tmp_list[0]
    return bucket

def cmd_listing(args, options):
    if len(args) == 1:
        return cmd_getallbucket(args, options)
    (bucket, object) = parse_bucket_object(args[1])
    if len(bucket) == 0:
        return cmd_getallbucket(args, options)
    prefix = object
    marker = ''
    delimiter = ''
    maxkeys = 1000
    if options.marker:
        marker = options.marker
    if options.delimiter:
        delimiter = options.delimiter
    if options.maxkeys:
        maxkeys = options.maxkeys
    if len(args) == 3: 
        marker = args[2]
    elif len(args) == 4:
        marker = args[2]
        delimiter = args[3]
    elif len(args) >= 5:
        marker = args[2]
        delimiter = args[3]
        maxkeys = args[4]

    prefix = smart_code(prefix)
    marker = smart_code(marker)
    delimiter = smart_code(delimiter)
    maxkeys = smart_code(maxkeys)
    res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
    if (res.status / 100) == 2:
        body = res.read()
        hh = GetBucketXml(body)
        (fl, pl) = hh.list()
        print "prefix list is: "
        for i in pl:
            print i
        print "object list is: "
        for i in fl:
            if len(i) == 7:
                try:
                    print "%16s %6s %s/%s " % (format_datetime(i[1]), format_size((int)(i[3])), OSS_PREFIX + bucket, i[0])
                except:
                    print "Exception when print :", i
        print "\nprefix list number is: %s " % len(pl)
        print "object list number is: %s " % len(fl)
    return res

def cmd_listparts(args, options):
    if len(args) == 1:
        return cmd_getallbucket(args, options)
    (bucket, object) = parse_bucket_object(args[1])
    if len(bucket) == 0:
        return cmd_getallbucket(args, options)
    print "%20s %20s" % ("UploadId", "Path")
    for i in get_all_upload_id_list(get_oss(), bucket):
        print "%20s oss://%s/%s" % (i[1], bucket, i[0])

def cmd_getallpartsize(args, options):
    if len(args) == 1:
        return cmd_getallbucket(args, options)
    (bucket, object) = parse_bucket_object(args[1])
    if len(bucket) == 0:
        return cmd_getallbucket(args, options)
    total_part_size = 0
    print "%5s %20s %20s %s" % ("Number", "UploadId", "Size", "Path")
    for i in get_all_upload_id_list(get_oss(), bucket):
        upload_id = i[1]
        object = i[0]
        for i in get_part_list(get_oss(), bucket, object, upload_id):
            part_size = (int)(i[2])
            total_part_size += part_size 
            print "%5s %20s %10s oss://%s/%s" % (i[0], upload_id, format_size(part_size), bucket, object) 
    print "totalsize is: real:%s, format:%s " % (total_part_size, format_size(total_part_size))

def cmd_init_upload(args, options):
    check_args(2, args)  
    path = args[1]
    (bucket, object) = parse_bucket_object(path)
    check_bucket_object(bucket, object)
    upload_id = get_upload_id(get_oss(), bucket, object)
    print upload_id

def cmd_listpart(args, options):
    if len(args) == 1:
        return cmd_getallbucket(args, options)
    path = args[1]
    (bucket, object) = parse_bucket_object(path)
    if len(bucket) == 0:
        return cmd_getallbucket(args, options)
    if options.upload_id is None:
        print "upload_id invalid, please set with --upload_id=xxx"
        sys.exit(1)
    print "%5s %32s %20s %20s" % ("PartNumber".ljust(10), "ETag".ljust(34), "Size".ljust(20), "LastModifyTime".ljust(32))
    for i in get_part_list(get_oss(), bucket, object, options.upload_id):
        if len(i) >= 4:
            print "%s %s %s %s" % (str(i[0]).ljust(10), str(i[1]).ljust(34), str(i[2]).ljust(20), str(i[3]).ljust(32))

def cmd_upload_part_from_file(args, options):
    check_args(3, args)
    localfile = args[1]
    check_localfile(localfile)
    path = args[2]
    (bucket, object) = parse_bucket_object(path)
    check_bucket_object(bucket, object)
    if options.upload_id is None:
        print "upload_id invalid, please set with --upload_id=xxx"
        sys.exit(1)
    if options.part_number is None:
        print "part_number invalid, please set with --part_number=xxx"
        sys.exit(1)
    res = get_oss().upload_part(bucket, object, localfile, options.upload_id, options.part_number)
    return res

def cmd_upload_part_from_string(args, options):
    check_args(2, args)
    path = args[1]
    (bucket, object) = parse_bucket_object(path)
    check_bucket_object(bucket, object)
    if options.upload_id is None:
        print "upload_id invalid, please set with --upload_id=xxx"
        sys.exit(1)
    if options.part_number is None:
        print "part_number invalid, please set with --part_number=xxx"
        sys.exit(1)
    if options.data is None:
        print "data invalid, please set with --data=xxx"
        sys.exit(1)
    res = get_oss().upload_part_from_string(bucket, object, options.data, options.upload_id, options.part_number)
    return res

def cmd_listallobject(args, options):
    if len(args) == 1:
        return cmd_getallbucket(args, options)
    path = args[1]
    (bucket, object) = parse_bucket_object(path)
    if len(bucket) == 0:
        return cmd_getallbucket(args, options)
    prefix = object 
    marker = ""
    total_object_num = 0
    totalsize = 0
    totaltimes = 0
    delimiter = ''
    maxkeys = '1000'
    if options.out:
        f = open(options.out, "w")
    while 1:
        res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
        if res.status != 200:
            return res
        body = res.read()
        (tmp_object_list, marker) = get_object_list_marker_from_xml(body)
        for i in tmp_object_list:
            object = i[0]
            length = i[1]
            last_modify_time = i[2]
            total_object_num += 1
            totalsize += (int)(length)
            msg = "%s%s/%s" % (OSS_PREFIX, bucket, object)
            print "%16s %6s %s/%s " % (format_datetime(last_modify_time), format_size(length), OSS_PREFIX + bucket, object)
            if options.out:
                f.write(msg)
                f.write("\n")
        totaltimes += 1
        if len(marker) == 0:
            break
    if options.out:
        f.close()
        print "the object list result is saved into %s" % options.out
    print "object list number is: %s " % total_object_num
    print "totalsize is: real:%s, format:%s " % (totalsize, format_size(totalsize))
    print "request times is: %s" % totaltimes
    return res

def get_object(bucket, object, object_prefix, local_path, length, last_modify_time, replace, retry_times = 5):
    show_bar = False
    object = smart_code(object)
    tmp_object = object
    if object_prefix == object[:len(object_prefix)]:
        tmp_object = object[len(object_prefix):]
    while 1:
        if not tmp_object.startswith("/"):
            break
        tmp_object = tmp_object[1:]
    localfile = os.path.join(local_path, tmp_object) 
    localfile = smart_code(localfile)
    global GET_OK
    for i in xrange(retry_times):
        try:
            if os.path.isfile(localfile):
                t1 = format_unixtime(last_modify_time)
                t2 = (int)(os.path.getmtime(localfile))
                if not replace and (int)(length) == os.path.getsize(localfile) and t1 < t2:
                    # skip downloading this object when these conditions all hold
                    print "no need to get %s/%s to %s" % (bucket, object, localfile)
                    GET_OK += 1
                    return True 
            else:
                try:
                    dirname = os.path.dirname(localfile)
                    if not os.path.isdir(dirname):
                        os.makedirs(dirname)
                except:
                    pass
            ret = False
            oss = get_oss(show_bar)
            res = oss.get_object_to_file(bucket, object, localfile)
            if 200 == res.status:
                ret = True
            if ret and (int)(length) == os.path.getsize(localfile):
                GET_OK += 1
                print "get %s/%s to %s OK" % (bucket, object, localfile)
                return ret
            else:
                print "get %s/%s to %s FAIL" % (bucket, object, localfile)
        except:
            print "get %s/%s to %s exception" % (bucket, object, localfile)
    return False

class DownloadObjectWorker(threading.Thread):
    def __init__(self, retry_times, queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.retry_times = retry_times

    def run(self):
        while 1:
            try:
                (get_object, bucket, object, object_prefix, local_path, length, last_modify_time, replace) = self.queue.get(block=False)
                get_object(bucket, object, object_prefix, local_path, length, last_modify_time, replace, self.retry_times)
                self.queue.task_done()
            except Queue.Empty:
                break
            except:
                self.queue.task_done()

def cmd_downloadallobject(args, options):
    check_args(3, args)  
    path = args[1]
    (bucket, object) = parse_bucket_object(path)
    check_bucket(bucket)
    local_path = args[2]
    if os.path.isfile(local_path):
        print "%s is not dir, please input localdir" % local_path
        return
    replace = False
    if options.replace is not None and options.replace.lower() == "true":
        replace = True
    prefix = object
    thread_num = 5
    if options.thread_num:
        thread_num = (int)(options.thread_num)
    retry_times = 5
    if options.retry_times:
        retry_times = (int)(options.retry_times)

    marker = ""
    delimiter = ''
    maxkeys = '1000'
    handled_obj_num = 0
    while 1:
        queue = Queue.Queue(0)
        res = get_oss().get_bucket(bucket, prefix, marker, delimiter, maxkeys)
        if res.status != 200:
            return res
        body = res.read()
        (tmp_object_list, marker) = get_object_list_marker_from_xml(body)
        for i in tmp_object_list:
            object = i[0]
            length = i[1]
            last_modify_time = i[2]
            if str(length) == "0" and object.endswith("/"):
                continue
            handled_obj_num += 1 
            queue.put((get_object, bucket, object, prefix, local_path, length, last_modify_time, replace))
        thread_pool = []
        for i in xrange(thread_num):
            current = DownloadObjectWorker(retry_times, queue)
            thread_pool.append(current)
            current.start()
        queue.join()
        for item in thread_pool:
            item.join()
        if len(marker) == 0:
            break
    print "Total being downloaded objects num: %s, they are downloaded into %s" % (handled_obj_num, local_path)
    global GET_OK
    print "OK num:%s" % GET_OK
    print "FAIL num:%s" % (handled_obj_num - GET_OK)

def put_object(bucket, object, local_file, retry_times=5):
    show_bar = False
    for i in xrange(retry_times):
        try:
            oss = get_oss(show_bar)
            object = smart_code(object)
            local_file = smart_code(local_file)
            local_file_size = os.path.getsize(local_file)
            if local_file_size > 10*1024*1024:
                upload_id = ""
                thread_num = 5
                max_part_num = 10000
                headers = {}
                res = oss.multi_upload_file(bucket, object, local_file, upload_id, thread_num, max_part_num, headers)
            else:
                res = oss.put_object_from_file(bucket, object, local_file)
            if 200 == res.status:
                global PUT_OK
                PUT_OK += 1
                print "upload %s OK" % (local_file)
                return True 
            else:
                print "upload %s FAIL, status:%s, request-id:%s" % (local_file, res.status, res.getheader("x-oss-request-id"))
        except:
            print "put %s/%s from %s exception" % (bucket, object, local_file)
    return False

class UploadObjectWorker(threading.Thread):
    def __init__(self, check_point_file, retry_times, queue):
        threading.Thread.__init__(self)
        self.check_point_file = check_point_file
        self.queue = queue
        self.file_time_map = {}
        self.retry_times = retry_times
    def run(self):
        while 1:
            try:
                (put_object, bucket, object, local_file) = self.queue.get(block=False)
                ret = put_object(bucket, object, local_file, self.retry_times)
                if ret:
                    local_file_full_path = os.path.abspath(local_file)
                    local_file_full_path = format_utf8(local_file_full_path)
                    self.file_time_map[local_file_full_path] = (int)(os.path.getmtime(local_file))
                self.queue.task_done()
            except Queue.Empty:
                break
            except:
                self.queue.task_done()
        if len(self.file_time_map) != 0:
            dump_check_point(self.check_point_file, self.file_time_map)
            
def load_check_point(check_point_file):
    file_time_map = {}
    if os.path.isfile(check_point_file):
        f = open(check_point_file)
        for line in f:
            line = line.strip()
            tmp_list = line.split('#')
            if len(tmp_list) > 1:
                time_stamp = (float)(tmp_list[0])
                time_stamp = (int)(time_stamp)
                file_name = "".join(tmp_list[1:])
                file_name = format_utf8(file_name)
                if file_time_map.has_key(file_name) and file_time_map[file_name] > time_stamp:
                    continue
                file_time_map[file_name] = time_stamp
        f.close()
    return file_time_map
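
# The check-point file stores one "<mtime>#<absolute file path>" record per
# line, e.g. "1372650000#/data/photos/a.jpg" (illustrative values); when a
# file appears more than once, the newest timestamp wins on reload.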

lock = threading.Lock()
def dump_check_point(check_point_file, result_map=None):
    if not check_point_file or not result_map:
        return
    lock.acquire()
    old_file_time_map = {}
    if os.path.isfile(check_point_file):
        old_file_time_map = load_check_point(check_point_file)
    try:
        f = open(check_point_file,"w")
        for k, v in result_map.items():
            if old_file_time_map.has_key(k) and old_file_time_map[k] < v:
                del old_file_time_map[k]
            line = "%s#%s\n" % (v, k)
            line = format_utf8(line)
            f.write(line)
        for k, v in old_file_time_map.items():
            line = "%s#%s\n" % (v, k)
            line = format_utf8(line)
            f.write(line)
    except:
        pass
    try:
        f.close()
    except:
        pass
    lock.release()

def format_object(object):
    tmp_list = object.split(os.sep)
    object = "/".join(x for x in tmp_list if x.strip() and x != "/")
    while 1:
        if object.find('//') == -1:
            break
        object = object.replace('//', '/')
    return  object
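
# For reference, on a POSIX system (os.sep == "/"):
#   format_object("a//b/c/") -> "a/b/c"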
    
def cmd_upload_object_from_localdir(args, options):
    check_args(3, args)
    path = args[2]
    (bucket, object) = parse_bucket_object(path)
    check_bucket(bucket)
    local_path = args[1]
    if not os.path.isdir(local_path):
        print "%s is not dir, please input localdir" % local_path
        return
        
    is_check_point = False
    check_point_file = ""
    file_time_map = {}
    if options.check_point:
        is_check_point = True
        check_point_file = options.check_point
        file_time_map = load_check_point(check_point_file)

    prefix = object
    thread_num = 5
    if options.thread_num:
        thread_num = (int)(options.thread_num)
    retry_times = 5
    if options.retry_times:
        retry_times = (int)(options.retry_times)

    topdown = True
    queue = Queue.Queue(0)
    local_path = smart_code(local_path)
    def process_localfile(items):
        for item in items:
            local_file = os.path.join(root, item) 
            local_file = smart_code(local_file)
            if os.path.isfile(local_file):
                local_file_full_path = os.path.abspath(local_file)
                local_file_full_path = format_utf8(local_file_full_path)
                if is_check_point and file_time_map.has_key(local_file_full_path):
                    t1 = (int)(os.path.getmtime(local_file))
                    t2 = file_time_map[local_file_full_path]
                    if t1 <= t2:
                        continue
                if prefix != "":
                    object = prefix + "/" + local_file[len(local_path) + 1:]
                else:
                    object = local_file[len(local_path) + 1:]
                object = format_object(object)
                queue.put((put_object, bucket, object, local_file))

    for root, dirs, files in os.walk(local_path, topdown):
        process_localfile(files)
        process_localfile(dirs)
                 
    qsize = queue.qsize()
    thread_pool = []
    for i in xrange(thread_num):
        current = UploadObjectWorker(check_point_file, retry_times, queue)
        thread_pool.append(current)
        current.start()
    queue.join()
    for item in thread_pool:
        item.join()

    print "Total being uploaded localfiles num: %s" % qsize
    global PUT_OK
    print "OK num:%s" % PUT_OK
    print "FAIL num:%s" % (qsize - PUT_OK)

def get_object_list_marker_from_xml(body):
    #return ([(object, object_length, last_modify_time)...], marker)
    object_meta_list = []
    next_marker = ""
    hh = GetBucketXml(body)
    (fl, pl) = hh.list()
    if len(fl) != 0:
        for i in fl:
            if isinstance(i[0], unicode):
                object = i[0].encode('utf-8')
            else:
                object = i[0]
            last_modify_time = i[1]
            length = i[3]
            object_meta_list.append((object, length, last_modify_time))
    if hh.is_truncated:
        next_marker = hh.nextmarker
    return (object_meta_list, next_marker)

def cmd_deleteallobject(args, options):
    if len(args) == 1:
        return cmd_getallbucket(args, options)
    path = args[1]
    (bucket, object) = parse_bucket_object(path)
    if len(bucket) == 0:
        return cmd_getallbucket(args, options)
    prefix = object
    marker = ''
    delimiter = ''
    maxkeys = '1000'
    if options.marker:
        marker = options.marker
    if options.delimiter:
        delimiter = options.delimiter
    if options.maxkeys:
        maxkeys = options.maxkeys
    debug = True
    delete_all_objects(get_oss(), bucket, prefix, delimiter, marker, maxkeys, debug)

def cmd_getallbucket(args, options):
    res = get_oss().get_service()
    width = 20
    if (res.status / 100) == 2:
        body = res.read()
        h = GetServiceXml(body)
        is_init = False
        for i in h.list():
            if len(i) >= 3 and i[2].strip():
                if not is_init:
                    print "%s %s %s" % ("CreateTime".ljust(width), "BucketLocation".ljust(width), "BucketName".ljust(width))
                    is_init = True
                print "%s %s %s" % (str(format_datetime(i[1])).ljust(width), i[2].ljust(width), i[0])
            elif len(i) >= 2:
                if not is_init:
                    print "%s %s" % ("CreateTime".ljust(width), "BucketName".ljust(width))
                    is_init = True
                print "%s %s" % (str(format_datetime(i[1])).ljust(width), i[0])
        print "\nBucket Number is: ", len(h.list())
    return res

def cmd_createbucket(args, options):
    check_args(2, args)
    if options.acl is not None and options.acl not in ACL_LIST:
        print "acl invalid, SHOULD be one of %s" % (ACL_LIST)
        sys.exit(1)
    acl = ''
    if options.acl:
        acl = options.acl
    bucket = parse_bucket(args[1])
    if options.location is not None:
        location = options.location
        return get_oss().put_bucket_with_location(bucket, acl, location)
    return get_oss().put_bucket(bucket, acl)

def cmd_getbucketlocation(args, options):
    check_args(2, args)
    bucket = parse_bucket(args[1])
    res = get_oss().get_bucket_location(bucket)
    if res.status / 100 == 2:
        body = res.read()
        h = GetBucketLocationXml(body)
        print h.location
    return res

def cmd_deletebucket(args, options):
    check_args(2, args)
    bucket = parse_bucket(args[1])
    return get_oss().delete_bucket(bucket)

def cmd_deletewholebucket(args, options):
    check_args(2, args)
    bucket = parse_bucket(args[1])
    debug = True
    delete_marker = ""
    delete_upload_id_marker = ""
    if options.marker:
        delete_marker = options.marker
    if options.upload_id:
        delete_upload_id_marker = options.upload_id
    return clear_all_objects_in_bucket(get_oss(), bucket, delete_marker, delete_upload_id_marker, debug)

def delete_object(bucket, object, retry_times=5):
    object = smart_code(object)
    global DELETE_OK
    ret = False
    for i in xrange(retry_times):
        try:
            oss = get_oss()
            res = oss.delete_object(bucket, object)
            if 2 == res.status / 100:
                ret = True
            if ret:
                DELETE_OK += 1
                print "delete %s/%s OK" % (bucket, object)
                return ret
            else:
                print "delete %s/%s FAIL, status:%s, request-id:%s" % (bucket, object, res.status, res.getheader("x-oss-request-id"))
        except:
            print "delete %s/%s exception" % (bucket, object)
    return False

class DeleteObjectWorker(threading.Thread):
    def __init__(self, retry_times, queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.retry_times = retry_times

    def run(self):
        while 1:
            try:
                (delete_object, bucket, object) = self.queue.get(block=False)
                delete_object(bucket, object, self.retry_times)
                self.queue.task_done()
            except Queue.Empty:
                break
            except:
                self.queue.task_done()

def cmd_deletebyfile(args, options):
    check_args(2, args)
    localfile = args[1]
    check_localfile(localfile)
    queue = Queue.Queue(0)
    f = open(localfile)
    for line in f:
        line = line.strip()
        (bucket, object) = parse_bucket_object(line)
        if len(bucket) != 0 and len(object) != 0:
            queue.put((delete_object, bucket, object))
    f.close()
    thread_num = 5
    if options.thread_num:
        thread_num = (int)(options.thread_num)
    retry_times = 5
    if options.retry_times:
        retry_times = (int)(options.retry_times)
    thread_pool = []
    for i in xrange(thread_num):
        current = DeleteObjectWorker(retry_times, queue)
        thread_pool.append(current)
        current.start()
    queue.join()
    for item in thread_pool:
        item.join()

def cmd_setacl(args, options):
    check_args(2, args)
    if options.acl is None or options.acl not in ACL_LIST:
        print "acl invalid, SHOULD be one of %s" % (ACL_LIST)
        sys.exit(1)
    bucket = parse_bucket(args[1])
    return get_oss().put_bucket(bucket, options.acl)

def cmd_getacl(args, options):
    check_args(2, args)
    bucket = parse_bucket(args[1])
    res = get_oss().get_bucket_acl(bucket)
    if (res.status / 100) == 2:
        body = res.read()
        h = GetBucketAclXml(body)
        print h.grant
    return res

def to_http_headers(string):
    headers_map = {}
    for i in string.split(','):
        key_value_list = i.strip().split(':')
        if len(key_value_list) == 2:
            headers_map[key_value_list[0]] = key_value_list[1] 
    return headers_map
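
# For reference (a sketch; entries that do not split into exactly one
# key:value pair are silently dropped):
#   to_http_headers("x-oss-meta-a:1, Content-Encoding:gzip")
#     -> {"x-oss-meta-a": "1", "Content-Encoding": "gzip"}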
        
def cmd_mkdir(args, options):
    check_args(2, args)
    if not args[1].endswith('/'):
        args[1] += '/'
    (bucket, object) = parse_bucket_object(args[1])
    res = get_oss().put_object_from_string(bucket, object, "")
    return res

def handler(signum, frame):
    print 'Signal handler called with signal', signum
    raise Exception("timeout")
try:
    signal.signal(signal.SIGALRM, handler)
except:
    pass

def cmd_put(args, options):
    check_args(3, args)
    localfile = args[1]
    check_localfile(localfile)
    if os.path.getsize(localfile) > MAX_OBJECT_SIZE:
        print "locafile:%s is bigger than %s, it is not support by put, please use multiupload instead." % (localfile, MAX_OBJECT_SIZE) 
        return
    #user specified objectname oss://bucket/[path]/object
    (bucket, object) = parse_bucket_object(args[2])
    if len(object) == 0:
        # e.g. upload to oss://bucket/
        object = os.path.basename(localfile)
    elif object.endswith("/"):
        #e.g. upload to oss://bucket/a/b/
        object += os.path.basename(localfile)
    content_type = ""
    headers = {}
    if options.content_type:
        content_type = options.content_type
    if options.headers:
        headers = to_http_headers(options.headers)
    timeout = 0
    if options.timeout:
        timeout = (int)(options.timeout)
        print "timeout", timeout

    try:
        signal.alarm(timeout)
    except:
        pass
    res = get_oss().put_object_from_file(bucket, object, localfile, content_type, headers)
    try:
        signal.alarm(0) # Disable the signal
    except:
        pass

    if res.status == 200:
        second_level_domain = get_second_level_domain(OSS_HOST)
        if check_bucket_valid(bucket) and not is_ip(second_level_domain):
            print "Object URL is: http://%s.%s/%s" % (bucket, second_level_domain, object)
        else:
            print "Object URL is: http://%s/%s/%s" % (second_level_domain, bucket, object)
        print "Object abstract path is: oss://%s/%s" % (bucket, object)
        header_map = convert_header2map(res.getheaders())
        print "ETag is %s " % safe_get_element("etag", header_map) 
    return res

def cmd_upload(args, options):
    check_args(3, args)
    localfile = args[1]
    check_localfile(localfile)
    #user specified objectname oss://bucket/[path]/object
    (bucket, object) = parse_bucket_object(args[2])
    if len(object) == 0:
        # e.g. upload to oss://bucket/
        object = os.path.basename(localfile)
    elif object.endswith("/"):
        #e.g. upload to oss://bucket/a/b/
        object += os.path.basename(localfile)
    headers = {}
    content_type = ''
    if options.headers:
        headers = to_http_headers(options.headers)
    if options.content_type:
        content_type = options.content_type
        headers['Content-Type'] = content_type
    thread_num = 10
    if options.thread_num:
        thread_num = (int)(options.thread_num)
    max_part_num = 1000
    if options.max_part_num:
        max_part_num = (int)(options.max_part_num)
    retry_times = 5
    if options.retry_times:
        retry_times = (int)(options.retry_times)
    oss = get_oss()
    oss.set_retry_times(retry_times)
    res = oss.upload_large_file(bucket, object, localfile, thread_num, max_part_num, headers)

    if res.status == 200:
        print "Object URL is: http://%s/%s/%s" % (OSS_HOST, bucket, object)
        print "Object abstract path is: oss://%s/%s" % (bucket, object)
        header_map = convert_header2map(res.getheaders())
        print "ETag is %s " % safe_get_element("etag", header_map) 

    return res

def cmd_multi_upload(args, options):
    check_args(3, args)
    localfile = args[1]
    check_localfile(localfile)
    #user specified objectname oss://bucket/[path]/object
    (bucket, object) = parse_bucket_object(args[2])
    if len(object) == 0:
        # e.g. upload to oss://bucket/
        object = os.path.basename(localfile)
    elif object.endswith("/"):
        #e.g. upload to oss://bucket/a/b/
        object += os.path.basename(localfile)
    headers = {}
    if options.headers:
        headers = to_http_headers(options.headers)
    thread_num = 10 
    if options.thread_num:
        thread_num = (int)(options.thread_num)
    max_part_num = 1000
    if options.max_part_num:
        max_part_num = options.max_part_num
    upload_id = ""
    if options.upload_id:
        upload_id = options.upload_id
    retry_times = 5
    if options.retry_times:
        retry_times = (int)(options.retry_times)
    oss = get_oss()
    oss.set_retry_times(retry_times)

    res = oss.multi_upload_file(bucket, object, localfile, upload_id, thread_num, max_part_num, headers)
    if res.status == 200:
        print "Object URL is: http://%s/%s/%s" % (OSS_HOST, bucket, object)
        print "Object abstract path is: oss://%s/%s" % (bucket, object)
        header_map = convert_header2map(res.getheaders())
        print "ETag is %s " % safe_get_element("etag", header_map) 
    return res

def cmd_copy(args, options):
    check_args(3, args)
    (bucket_source, object_source) = parse_bucket_object(args[1])
    check_bucket_object(bucket_source, object_source)
    (bucket, object) = parse_bucket_object(args[2])
    check_bucket_object(bucket, object)

    content_type = ""
    headers = {}
    if options.headers:
        headers = to_http_headers(options.headers)
    if options.content_type:
        content_type = options.content_type
        headers['Content-Type'] = content_type
    res = get_oss().copy_object(bucket_source, object_source, bucket, object, headers)
    if res.status == 200:
        print "Object URL is: http://%s/%s/%s" % (OSS_HOST, bucket, object)
        print "Object abstract path is: oss://%s/%s" % (bucket, object)
        header_map = convert_header2map(res.getheaders())
        print "ETag is %s " % safe_get_element("etag", header_map) 
    return res

def cmd_get(args, options):
    check_args(3, args)
    (bucket, object) = parse_bucket_object(args[1]) 
    check_bucket_object(bucket, object)
    localfile = args[2]
    localfile = smart_code(localfile)
    res = get_oss().get_object_to_file(bucket, object, localfile)
    if res.status == 200:
        print "The object %s is downloaded to %s, please check." % (object, localfile)
    return res

def multi_get(bucket, object, localfile, thread_num, retry_times):
    length = 0
    try:
        res = get_oss().head_object(bucket, object)
        if 200 == res.status:
            length = (int)(res.getheader('content-length'))
        else:
            print "can not get the length of object:", object
            return False
        ranges = []
        ranges.append(0)
        size = length // thread_num 
        for i in xrange(thread_num - 1):
            ranges.append((i + 1) * size)
        ranges.append(length)

        threadpool = []
        show_bar = False
        # create (or truncate) the target file once up front; each worker then
        # reopens it 'rb+' so a later open cannot truncate bytes an earlier
        # worker has already written (the old per-thread 'wb+' open could)
        open(localfile, 'wb').close()
        for i in xrange(len(ranges) - 1):
            part_file = open(localfile, 'rb+')
            oss = get_oss(show_bar)
            current = MultiGetWorker(oss, bucket, object, part_file, ranges[i], ranges[i + 1] - 1, retry_times)
            threadpool.append(current)
            current.start()

        for item in threadpool:
            item.join()
    except Exception:
        return False
    if not os.path.isfile(localfile) or length != os.path.getsize(localfile):
        print "localfile:%s size:%s is not equal with object:%s size:%s " % (localfile, os.path.getsize(localfile), object, length)
        return False
    return True
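
# Range layout sketch: a 10MB object fetched with thread_num=4 is split at
# byte offsets [0, 2621440, 5242880, 7864320, 10485760]; worker i downloads
# bytes ranges[i]..ranges[i+1]-1 and writes them at that offset.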

def cmd_multi_get(args, options):
    check_args(3, args)
    (bucket, object) = parse_bucket_object(args[1]) 
    check_bucket_object(bucket, object)
    localfile = args[2]
    localfile = smart_code(localfile)
    thread_num = 5
    if options.thread_num:
        thread_num = (int)(options.thread_num)
    retry_times = 5
    if options.retry_times:
        retry_times = (int)(options.retry_times)

    ret = multi_get(bucket, object, localfile, thread_num, retry_times)
    if ret:
        print "The object %s is downloaded to %s, please check." % (object, localfile)
    else:
        print "Download object:%s failed!" % (object)

def cmd_cat(args, options):
    check_args(2, args)
    (bucket, object) = parse_bucket_object(args[1]) 
    check_bucket_object(bucket, object)
    res = get_oss().get_object(bucket, object)
    if res.status == 200:
        data = ""
        while 1:
            data = res.read(10240)
            if len(data) != 0:
                print data
            else:
                break
    return res

def cmd_meta(args, options):
    check_args(2, args)
    (bucket, object) = parse_bucket_object(args[1]) 
    check_bucket_object(bucket, object)
    res = get_oss().head_object(bucket, object)
    if res.status == 200:
        header_map = convert_header2map(res.getheaders())
        width = 16
        print "%s: %s" % ("objectname".ljust(width), object)
        for key, value in header_map.items():
            print "%s: %s" % (key.ljust(width), value)
    return res

def cmd_info(args, options):
    check_args(2, args)
    (bucket, object) = parse_bucket_object(args[1]) 
    check_bucket_object(bucket, object)
    res = get_oss().get_object_info(bucket, object)
    if res.status == 200:
        print res.read()
    return res

def cmd_delete(args, options):
    check_args(2, args)
    (bucket, object) = parse_bucket_object(args[1]) 
    check_bucket_object(bucket, object)
    return get_oss().delete_object(bucket, object)

def cmd_cancel(args, options):
    check_args(2, args)
    (bucket, object) = parse_bucket_object(args[1]) 
    if options.upload_id is None:
        print "upload_id invalid, please set with --upload_id=xxx"
        sys.exit(1)
    check_bucket_object(bucket, object)
    return get_oss().cancel_upload(bucket, object, options.upload_id)

def cmd_sign_url(args, options):
    check_args(2, args)
    (bucket, object) = parse_bucket_object(args[1]) 
    if options.timeout:
        timeout = options.timeout
    else:
        timeout = "600"
    print "timeout is %s seconds." % timeout
    check_bucket_object(bucket, object)
    method = 'GET'
    print get_oss().sign_url(method, bucket, object, int(timeout))

def cmd_configure(args, options):
    if options.accessid is None or options.accesskey is None:
        print "%s miss parameters, use --id=[accessid] --key=[accesskey] to specify id/key pair" % args[0]
        sys.exit(1) 
    config = ConfigParser.RawConfigParser()
    config.add_section(CONFIGSECTION)
    if options.host is not None:
        config.set(CONFIGSECTION, 'host', options.host)
    config.set(CONFIGSECTION, 'accessid', options.accessid)
    config.set(CONFIGSECTION, 'accesskey', options.accesskey)
    cfgfile = open(CONFIGFILE, 'w+')
    config.write(cfgfile)
    print "Your configuration is saved into %s ." % CONFIGFILE
    cfgfile.close()

def cmd_help(args, options):
    print HELP 

def get_oss(show_bar = True):
    oss = OssAPI(OSS_HOST, ID, KEY)
    oss.show_bar = show_bar 
    oss.set_send_buf_size(SEND_BUF_SIZE)
    oss.set_recv_buf_size(RECV_BUF_SIZE)
    oss.set_debug(IS_DEBUG)
    return oss
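
# Note: every call builds a fresh OssAPI client from the module-level
# globals (OSS_HOST/ID/KEY and the buffer sizes), so each worker thread
# above obtains its own client instead of sharing one.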

def setup_credentials():
    config = ConfigParser.ConfigParser()
    try:
        config.read(CONFIGFILE)
        global OSS_HOST
        global ID
        global KEY
        try:
            OSS_HOST = config.get(CONFIGSECTION, 'host')
        except Exception:
            OSS_HOST = DEFAULT_HOST
        ID = config.get(CONFIGSECTION, 'accessid')
        KEY = config.get(CONFIGSECTION, 'accesskey')
        if options.accessid is not None:
            ID = options.accessid
        if options.accesskey is not None:
            KEY = options.accesskey
        if options.host is not None:
            OSS_HOST = options.host
    except Exception:
        if options.accessid is not None:
            ID = options.accessid
        if options.accesskey is not None:
            KEY = options.accesskey
        if options.host is not None:
            OSS_HOST = options.host

        if len(ID) == 0 or len(KEY) == 0:
            print "can't get accessid/accesskey, setup use : config --id=accessid --key=accesskey"
            sys.exit(1)

def setup_cmdlist():
    CMD_LIST['getallbucket'] = cmd_getallbucket
    CMD_LIST['gs'] = cmd_getallbucket
    CMD_LIST['createbucket'] = cmd_createbucket
    CMD_LIST['cb'] = cmd_createbucket
    CMD_LIST['mb'] = cmd_createbucket
    CMD_LIST['pb'] = cmd_createbucket
    CMD_LIST['deletebucket'] = cmd_deletebucket
    CMD_LIST['deletewholebucket'] = cmd_deletewholebucket
    CMD_LIST['deletebyfile'] = cmd_deletebyfile
    CMD_LIST['db'] = cmd_deletebucket
    CMD_LIST['getbucketlocation'] = cmd_getbucketlocation
    CMD_LIST['gl'] = cmd_getbucketlocation

    CMD_LIST['getacl'] = cmd_getacl
    CMD_LIST['setacl'] = cmd_setacl

    CMD_LIST['ls'] = cmd_listing
    CMD_LIST['list'] = cmd_listing
    CMD_LIST['mkdir'] = cmd_mkdir
    CMD_LIST['init'] = cmd_init_upload
    CMD_LIST['uploadpartfromstring'] = cmd_upload_part_from_string
    CMD_LIST['upfs'] = cmd_upload_part_from_string
    CMD_LIST['upff'] = cmd_upload_part_from_file
    CMD_LIST['uploadpartfromfile'] = cmd_upload_part_from_file
    CMD_LIST['listpart'] = cmd_listpart
    CMD_LIST['listparts'] = cmd_listparts
    CMD_LIST['getallpartsize'] = cmd_getallpartsize
    CMD_LIST['listallobject'] = cmd_listallobject
    CMD_LIST['downloadallobject'] = cmd_downloadallobject
    CMD_LIST['downloadtodir'] = cmd_downloadallobject
    CMD_LIST['uploadfromdir'] = cmd_upload_object_from_localdir
    CMD_LIST['deleteallobject'] = cmd_deleteallobject
    CMD_LIST['put'] = cmd_put
    CMD_LIST['copy'] = cmd_copy
    CMD_LIST['upload'] = cmd_upload
    CMD_LIST['multiupload'] = cmd_multi_upload
    CMD_LIST['multi_upload'] = cmd_multi_upload
    CMD_LIST['mp'] = cmd_multi_upload
    CMD_LIST['get'] = cmd_get
    CMD_LIST['multiget'] = cmd_multi_get
    CMD_LIST['multi_get'] = cmd_multi_get
    CMD_LIST['cat'] = cmd_cat
    CMD_LIST['meta'] = cmd_meta
    CMD_LIST['info'] = cmd_info
    CMD_LIST['rm'] = cmd_delete
    CMD_LIST['delete'] = cmd_delete
    CMD_LIST['del'] = cmd_delete
    CMD_LIST['cancel'] = cmd_cancel

    CMD_LIST['signurl'] = cmd_sign_url
    CMD_LIST['config'] = cmd_configure
    CMD_LIST['help'] = cmd_help
    CMD_LIST['sign'] = cmd_sign_url

if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-H", "--host", dest="host", help="specify ")
    parser.add_option("-i", "--id", dest="accessid", help="specify access id")
    parser.add_option("-k", "--key", dest="accesskey", help="specify access key")
    parser.add_option("-t", "--timeout", dest="timeout", help="timeout for sign url")
    parser.add_option("-a", "--acl", dest="acl", help="when createbucket/setacl use this option to specify acl")
    parser.add_option("", "--upload_id", dest="upload_id", help="get parts to specify upload_id")
    parser.add_option("", "--part_number", dest="part_number", help="get parts to specify upload_id")
    parser.add_option("", "--data", dest="data", help="get parts to specify upload_id")
    parser.add_option("--headers", dest="headers", help="HTTP headers for put object, input format SHOULE like --headers=\"key1:value1,key2:value2\"")
    parser.add_option("-c", "--content-type", dest="content_type",
                  help="content type for the file, will return in GET")
    parser.add_option("", "--thread_num", dest="thread_num", help="object group upload thread num")
    parser.add_option("", "--max_part_num", dest="max_part_num", help="object group max part num")
    parser.add_option("", "--retry_times", dest="retry_times", help="max retry times when fail")
    parser.add_option("", "--out", dest="out", help="output the result when listallobject")
    parser.add_option("", "--config_file", dest="config_file", help="the file which stores id-key pair")
    parser.add_option("", "--replace", dest="replace", help="replace the localfile if it is true")
    parser.add_option("", "--check_point", dest="check_point", help="the file which records the localfile with name and time")
    parser.add_option("", "--marker", dest="marker", help="get bucket(list objects) parameter")
    parser.add_option("", "--delimiter", dest="delimiter", help="get bucket(list objects) parameter")
    parser.add_option("", "--maxkeys", dest="maxkeys", help="get bucket(list objects) parameter")
    parser.add_option("", "--location", dest="location", help="bucket location")
    parser.add_option("", "--send_buf_size", dest="send_buf_size", help="buffer size that client sends each time")
    parser.add_option("", "--recv_buf_size", dest="recv_buf_size", help="buffer size that client receives each time")
    parser.add_option("", "--debug", dest="debug", help="set log")

    setup_cmdlist()
    # when an option is not specified, its value is None
    (options, args) = parser.parse_args()
    if options.config_file is not None:
        CONFIGFILE = options.config_file
    if options.debug is not None:
        debug = options.debug 
        if debug.lower() == "true":
            IS_DEBUG = True
        else:
            IS_DEBUG = False
    if options.send_buf_size is not None:
        try:
            SEND_BUF_SIZE = (int)(options.send_buf_size)
        except ValueError:
            pass
    if options.recv_buf_size is not None:
        try:
            RECV_BUF_SIZE = (int)(options.recv_buf_size)
        except ValueError:
            pass
        
    if len(args) < 1:
        print HELP 
        sys.exit(1) 

    if args[0] != 'config':
        setup_credentials()
    else:
        CMD_LIST['config'](args, options)
        sys.exit(1)

    if args[0] not in CMD_LIST.keys():
        print "unsupported command : %s " % args[0]
        print "use --help for more information"
        sys.exit(1) 

    cmd = args[0]
    begin = time.time()
    res = CMD_LIST[cmd](args, options)
    print_result(cmd, res)
    end = time.time()
    sys.stderr.write("%.3f(s) elapsed\n" % (end - begin))