From e5b599bbcd5afb39b0bafad6422d3e46029153a7 Mon Sep 17 00:00:00 2001 From: riki Date: Thu, 3 Oct 2019 11:35:37 +0700 Subject: [PATCH 1/4] Support automatic UpSampling op --- README.md | 1 - json2prototxt.py | 12 ++++++++++++ prototxt_basic.py | 6 +++--- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index ed81f8e..8bf49f5 100644 --- a/README.md +++ b/README.md @@ -33,7 +33,6 @@ The convertor Is not fully automatically, The convertor not -+ if you wanna convert upsampling operator , the convertor will convert Upsampling operator to Deconvolution in Caffe , The Deconvolution channels need to be set (in prototxt_basic.py names_output). + If you use Flatten Layer ,You need to manually to connect them becasuse the converted compute graph will be divided into two parts. + If convert a detection model. You need to remove the anchor process and put it into post process. + Usually,If you find that conversion errors, please set the prefix name of you backbone network in mxnet2caffe.py. 
\ No newline at end of file diff --git a/json2prototxt.py b/json2prototxt.py index c10ecbd..f7e2fb0 100644 --- a/json2prototxt.py +++ b/json2prototxt.py @@ -35,6 +35,18 @@ if not str(input_i['name']).startswith(str(node_i['name'])): print(' use shared weight -> %s'% str(input_i['name'])) info['share'] = True + + if str(node_i['op']) == 'UpSampling': + found = False + while not found: + inputs = node_i['inputs'] + for j in range(len(inputs)): + if 'attrs' in jdata['nodes'][inputs[j][0]]: + if 'num_filter' in jdata['nodes'][inputs[j][0]]['attrs']: + info["num_output"] = int(jdata['nodes'][inputs[j][0]]['attrs']['num_filter']) + info["group"] = info["num_output"] + found = True + node_i = jdata['nodes'][inputs[0][0]] write_node(prototxt_file, info) diff --git a/prototxt_basic.py b/prototxt_basic.py index 4ed4fdc..4c349fb 100644 --- a/prototxt_basic.py +++ b/prototxt_basic.py @@ -5,7 +5,7 @@ attrstr = "attrs" #attrstr = "param" -names_output = {"rf_c2_upsampling":256 ,"rf_c3_upsampling":256} +# names_output = {"rf_c2_upsampling":256 ,"rf_c3_upsampling":256} #names_output = {"ssh_m2_red_up":32,"ssh_c3_up":32 } def data(txt_file, info): @@ -154,13 +154,13 @@ def Upsampling(txt_file, info): print(info[attrstr]) print(info) txt_file.write(' convolution_param {\n') - txt_file.write(' num_output: %s\n' % names_output[info["name"]]) + txt_file.write(' num_output: %s\n' % info["num_output"]) #txt_file.write(' num_output: %s\n' % info[attrstr]['num_filter']) txt_file.write(' kernel_size: %d\n' % (2 * scale - scale % 2)) # TODO txt_file.write(' stride: %d\n' % scale) txt_file.write(' pad: %d\n' % math.ceil((scale - 1)/2.0)) # TODO #txt_file.write(' group: %s\n' % info[attrstr]['num_filter']) - txt_file.write(' group: %s\n' % names_output[info["name"]]) + txt_file.write(' group: %s\n' % info["group"]) txt_file.write(' bias_term: false\n') txt_file.write(' weight_filler: {\n') From 32735b986d98e903e8b8be3616f9a86c03679985 Mon Sep 17 00:00:00 2001 From: riki Date: Thu, 3 Oct 2019 
11:55:36 +0700 Subject: [PATCH 2/4] Support conversion with one command --- json2prototxt.py | 67 ++++++++++++++++++++++-------------------------- mxnet2caffe.py | 9 ++++--- 2 files changed, 37 insertions(+), 39 deletions(-) diff --git a/json2prototxt.py b/json2prototxt.py index c10ecbd..2021c63 100644 --- a/json2prototxt.py +++ b/json2prototxt.py @@ -1,42 +1,37 @@ -import sys -import argparse import json from prototxt_basic import * -parser = argparse.ArgumentParser(description='Convert MXNet jason to Caffe prototxt') -parser.add_argument('--mx-json', type=str, default='R50v2/R50v2-symbol.json') -parser.add_argument('--cf-prototxt', type=str, default='R50v2/R50v2.prototxt') -args = parser.parse_args() -with open(args.mx_json) as json_file: - jdata = json.load(json_file) - print(jdata) - -with open(args.cf_prototxt, "w") as prototxt_file: - for i_node in range(0,len(jdata['nodes'])): - node_i = jdata['nodes'][i_node] - if str(node_i['op']) == 'null' and str(node_i['name']) != 'data': - continue - - print('{}, \top:{}, name:{} -> {}'.format(i_node,node_i['op'].ljust(20), - node_i['name'].ljust(30), - node_i['name']).ljust(20)) - info = node_i - - info['top'] = info['name'] - info['bottom'] = [] - info['params'] = [] - for input_idx_i in node_i['inputs']: - input_i = jdata['nodes'][input_idx_i[0]] - if str(input_i['op']) != 'null' or (str(input_i['name']) == 'data'): - info['bottom'].append(str(input_i['name'])) - if str(input_i['op']) == 'null': - info['params'].append(str(input_i['name'])) - if not str(input_i['name']).startswith(str(node_i['name'])): - print(' use shared weight -> %s'% str(input_i['name'])) - info['share'] = True +def write_prototxt(json_path, prototx_path): + with open(json_path) as json_file: + jdata = json.load(json_file) + print(jdata) + + with open(prototx_path, "w") as prototxt_file: + for i_node in range(0,len(jdata['nodes'])): + node_i = jdata['nodes'][i_node] + if str(node_i['op']) == 'null' and str(node_i['name']) != 'data': + continue - 
write_node(prototxt_file, info) - -print("*** JSON to PROTOTXT FINISH ***") + print('{}, \top:{}, name:{} -> {}'.format(i_node,node_i['op'].ljust(20), + node_i['name'].ljust(30), + node_i['name']).ljust(20)) + info = node_i + + info['top'] = info['name'] + info['bottom'] = [] + info['params'] = [] + for input_idx_i in node_i['inputs']: + input_i = jdata['nodes'][input_idx_i[0]] + if str(input_i['op']) != 'null' or (str(input_i['name']) == 'data'): + info['bottom'].append(str(input_i['name'])) + if str(input_i['op']) == 'null': + info['params'].append(str(input_i['name'])) + if not str(input_i['name']).startswith(str(node_i['name'])): + print(' use shared weight -> %s'% str(input_i['name'])) + info['share'] = True + + write_node(prototxt_file, info) + + print("*** JSON to PROTOTXT FINISH ***") diff --git a/mxnet2caffe.py b/mxnet2caffe.py index 1a66c1e..18cb43c 100644 --- a/mxnet2caffe.py +++ b/mxnet2caffe.py @@ -1,4 +1,4 @@ -import sys, argparse +import argparse import mxnet as mx import sys import os @@ -11,10 +11,9 @@ sys.path.append(os.path.join(curr_path, "/Users/yujinke/me/caffe/python")) import caffe - +from json2prototxt import write_prototxt import time -import os os.environ["CUDA_VISIBLE_DEVICES"] = '4' parser = argparse.ArgumentParser(description='Convert MXNet model to Caffe model') parser.add_argument('--mx-model', type=str, default='model_mxnet/face/facega2') @@ -23,6 +22,10 @@ parser.add_argument('--cf-model', type=str, default='model_caffe/face/facega2.caffemodel') args = parser.parse_args() +# ------------------------------------------ +# Create prototxt +write_prototxt(args.mx_model + '-symbol.json', args.cf_prototxt) + # ------------------------------------------ # Load _, arg_params, aux_params = mx.model.load_checkpoint(args.mx_model, args.mx_epoch) From 9a63cfbb981ca35b3d6b1787ed9ef77af5a68a10 Mon Sep 17 00:00:00 2001 From: riki Date: Thu, 3 Oct 2019 12:42:04 +0700 Subject: [PATCH 3/4] Support automatic backbone detection --- find.py | 23 
+++++++++++++++++++++++ mxnet2caffe.py | 5 +++-- 2 files changed, 26 insertions(+), 2 deletions(-) create mode 100644 find.py diff --git a/find.py b/find.py new file mode 100644 index 0000000..1294563 --- /dev/null +++ b/find.py @@ -0,0 +1,23 @@ +from difflib import SequenceMatcher +import json +import collections + + +def find_backbone(json_path): + with open(json_path) as json_file: + jdata = json.load(json_file) + + matches = [] + for i_node in range(0, len(jdata['nodes']) - 1): + node_i1 = jdata['nodes'][i_node] + node_i2 = jdata['nodes'][i_node+1] + name1 = (node_i1['name']) + name2 = (node_i2['name']) + + match = SequenceMatcher(None, name1, name2).find_longest_match(0, name1.find('_'), 0, name2.find('_')) + matches.append(name1[match.a: match.a + match.size]) + + counter = collections.Counter(matches) + final_match = counter.most_common()[0][0] + + return final_match diff --git a/mxnet2caffe.py b/mxnet2caffe.py index 1a66c1e..1710b3f 100644 --- a/mxnet2caffe.py +++ b/mxnet2caffe.py @@ -11,7 +11,7 @@ sys.path.append(os.path.join(curr_path, "/Users/yujinke/me/caffe/python")) import caffe - +from find import * import time import os @@ -42,7 +42,8 @@ print('----------------------------------\n') print('VALID KEYS:') -backbone = "hstage1" +# backbone = "hstage1" +backbone = find_backbone(args.mx_model + '-symbol.json') for i_key,key_i in enumerate(all_keys): From a85532fa303fd0e64eaee85bee66cf817936414d Mon Sep 17 00:00:00 2001 From: riki Date: Thu, 3 Oct 2019 13:34:45 +0700 Subject: [PATCH 4/4] Support custom input shape --- json2prototxt.py | 7 +++++++ prototxt_basic.py | 5 ++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/json2prototxt.py b/json2prototxt.py index c10ecbd..cb4a14c 100644 --- a/json2prototxt.py +++ b/json2prototxt.py @@ -6,6 +6,7 @@ parser = argparse.ArgumentParser(description='Convert MXNet jason to Caffe prototxt') parser.add_argument('--mx-json', type=str, default='R50v2/R50v2-symbol.json') 
parser.add_argument('--cf-prototxt', type=str, default='R50v2/R50v2.prototxt') +parser.add_argument('--input_shape', type=str, default='1,3,640,640') args = parser.parse_args() with open(args.mx_json) as json_file: @@ -35,6 +36,12 @@ if not str(input_i['name']).startswith(str(node_i['name'])): print(' use shared weight -> %s'% str(input_i['name'])) info['share'] = True + + if str(node_i['name']) == 'data': + input_shape = args.input_shape + for char in ['[', ']', '(', ')']: + input_shape = input_shape.replace(char, '') + info["shape"] = [int(item) for item in input_shape.split(',')] write_node(prototxt_file, info) diff --git a/prototxt_basic.py b/prototxt_basic.py index 4ed4fdc..40a76cd 100644 --- a/prototxt_basic.py +++ b/prototxt_basic.py @@ -15,7 +15,10 @@ def data(txt_file, info): txt_file.write(' type: "Input"\n') txt_file.write(' top: "data"\n') txt_file.write(' input_param {\n') - txt_file.write(' shape: { dim: 1 dim: 3 dim: 512 dim: 1224 }\n') # TODO + txt_file.write(' shape: {{ dim: {} dim: {} dim: {} dim: {} }}\n'.format(info['shape'][0], + info['shape'][1], + info['shape'][2], + info['shape'][3])) txt_file.write(' }\n') txt_file.write('}\n') txt_file.write('\n')