|
18 | 18 | along with this program. If not, see <http://www.gnu.org/licenses/>. |
19 | 19 |
|
20 | 20 | Authors: |
21 | | - lindaxiang |
| 21 | + Linda Xiang (linda.xiang@oicr.on.ca) |
22 | 22 | """ |
23 | 23 |
|
24 | 24 | import os |
25 | | -import sys |
26 | 25 | import argparse |
27 | | -import subprocess |
| 26 | +import json |
| 27 | +import uuid |
| 28 | +import hashlib |
| 29 | +import copy |
28 | 30 |
|
# Maps a variant type (field 8 of the dot-separated output file name, see
# get_files_info) to SONG metadata:
#   [0] data_category
#   [1] dataType assigned to the VCF itself
#   [2] analysis_tools when the upstream workflow is sanger-wgs / sanger-wxs
#   [3] analysis_tools when the upstream workflow is gatk-mutect2
variant_type_to_data_type_etc = {
    'snv': ['Simple Nucleotide Variation', 'Raw SNV Calls', ['CaVEMan', 'bcftools'], ['GATK-Mutect2', 'bcftools']],
    'indel': ['Simple Nucleotide Variation', 'Raw InDel Calls', ['Pindel', 'bcftools'], ['GATK-Mutect2', 'bcftools']]
}
| 35 | + |
def calculate_size(file_path):
    """Return the size of the file at *file_path* in bytes."""
    return os.path.getsize(file_path)
| 39 | + |
def calculate_md5(file_path):
    """Return the hex MD5 digest of the file at *file_path*.

    The file is streamed in 1 MiB chunks so arbitrarily large files can
    be hashed without loading them fully into memory.
    """
    digest = hashlib.md5()
    with open(file_path, 'rb') as fh:
        while True:
            chunk = fh.read(1024 * 1024)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
| 46 | + |
| 47 | + |
def get_files_info(file_to_upload):
    """Build the SONG 'files' entry (dict) for one output file.

    The file name is expected to follow the pipeline's dot-separated
    naming convention, with the upstream workflow short name at field 5
    and the variant type ('snv'/'indel') at field 8 -- TODO confirm
    against the naming convention of the calling workflow.

    Raises:
        KeyError: if the variant-type field is not a key of
            variant_type_to_data_type_etc.
        IndexError: if the file name has fewer than 9 dot-separated fields.
    """
    basename = os.path.basename(file_to_upload)
    # Split once instead of re-splitting for every field lookup.
    name_fields = basename.split(".")
    input_wf = name_fields[5]
    variant_type = name_fields[8]

    file_info = {
        'fileName': basename,
        'fileType': 'VCF' if basename.endswith('.vcf.gz') else name_fields[-1].upper(),
        'fileSize': calculate_size(file_to_upload),
        'fileMd5sum': calculate_md5(file_to_upload),
        'fileAccess': 'open',
        'info': {
            'data_category': variant_type_to_data_type_etc[variant_type][0]
        }
    }

    # Only VCFs and their tabix indexes get a dataType; any other
    # extension is deliberately left without one (original behaviour).
    if file_to_upload.endswith('.vcf.gz'):
        file_info['dataType'] = variant_type_to_data_type_etc[variant_type][1]
    elif file_to_upload.endswith('.vcf.gz.tbi'):
        file_info['dataType'] = 'VCF Index'

    # Record which caller toolchain produced the calls, keyed on the
    # upstream workflow short name embedded in the file name.
    if input_wf in ('sanger-wgs', 'sanger-wxs'):
        file_info['info']['analysis_tools'] = variant_type_to_data_type_etc[variant_type][2]
    elif input_wf == 'gatk-mutect2':
        file_info['info']['analysis_tools'] = variant_type_to_data_type_etc[variant_type][3]

    return file_info
| 76 | + |
def get_sample_info(sample_list):
    """Return a deep copy of *sample_list* with server-managed identifier
    fields stripped from each sample and from its nested 'specimen' and
    'donor' records.  The input list is left untouched.
    """
    server_managed_keys = ('info', 'sampleId', 'specimenId', 'donorId', 'studyId')
    cleaned = copy.deepcopy(sample_list)
    for entry in cleaned:
        for key in server_managed_keys:
            entry.pop(key, None)
            entry['specimen'].pop(key, None)
            entry['donor'].pop(key, None)
    return cleaned
29 | 86 |
|
def main():
    """
    Python implementation of tool: payload-gen-variant-filtering

    Builds a SONG payload for variant-filtering outputs: reads the input
    SONG analysis JSON, copies study/experiment/sample metadata, records
    workflow provenance, and collects name/size/md5 details for every
    file to be uploaded.  The payload is written to
    '<uuid4>.<analysis_type>.payload.json' in the current directory.
    """

    parser = argparse.ArgumentParser(description='Tool: payload-gen-variant-filtering')
    parser.add_argument("-a", dest="analysis", required=True,
                        help="json file containing sequencing_alignment SONG analysis for tumour sample")
    parser.add_argument("-f", dest="files_to_upload", type=str, nargs="+",
                        help="Files to be uploaded", required=True)
    parser.add_argument("-w", dest="wf_name", type=str, help="workflow full name", required=True)
    parser.add_argument("-s", dest="wf_short_name", type=str, help="workflow short name", required=True)
    parser.add_argument("-v", dest="wf_version", type=str, required=True, help="workflow version")
    parser.add_argument("-r", dest="wf_run", type=str, required=True, help="workflow run ID")
    parser.add_argument("-j", dest="wf_session", type=str, required=True, help="workflow session ID")
    args = parser.parse_args()

    with open(args.analysis, 'r') as f:
        analysis = json.load(f)

    analysis_type = analysis.get('analysisType').get('name')
    payload = {
        'analysisType': {
            'name': analysis_type
        },
        'studyId': analysis.get('studyId'),
        'experiment': analysis.get('experiment'),
        'samples': get_sample_info(analysis.get('samples')),
        'files': [],
        'workflow': {
            # NOTE(review): hard-coded even though -w/args.wf_name is a
            # required argument that is otherwise unused -- confirm whether
            # args.wf_name should be used here instead.
            'workflow_name': 'Open Access Variant Filtering',
            'workflow_short_name': args.wf_short_name,
            'workflow_version': args.wf_version,
            'run_id': args.wf_run,
            'session_id': args.wf_session,
            'inputs': [
                {
                    'input_analysis_id': analysis.get('analysisId'),
                    'analysis_type': analysis_type
                }
            ],
            # presumably all upstream analyses are aligned to this reference
            # build -- TODO confirm
            'genome_build': 'GRCh38_hla_decoy_ebv'
        },
        'variant_class': analysis.get('variant_class')
    }

    payload['files'] = [get_files_info(f) for f in args.files_to_upload]

    # Random UUID in the name keeps repeated runs from clobbering each other.
    with open("%s.%s.payload.json" % (str(uuid.uuid4()), analysis_type), 'w') as out:
        json.dump(payload, out, indent=2)
51 | 139 |
|
52 | 140 |
|
53 | 141 | if __name__ == "__main__": |
|
0 commit comments