#!/usr/bin/env python
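# Download the raw JSON metadata for every ENCODE experiment into a local
# output directory, skipping files that already exist when resuming.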
import argparse
import json
import sys
from chipathlon.utils import progress
import requests
import urlparse
import os.path
import os
import datetime
parser = argparse.ArgumentParser(description="Download raw JSON for all experiments.")
parser.add_argument("-o", "--output-dir", dest="outputdir", default=os.getcwd(), help="Output directory. (default: %(default)s)")
parser.add_argument("-q", "--quiet", action='store_true', help="Quiet mode. Do not print progress information. (default: false)")
parser.add_argument("-r", "--resume", action='store_false', \
help="Skip re-fetching existing experiment JSON files to speed up overall download. (default: true)")
args = parser.parse_args()
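
# Base URL for ENCODE experiment records; every request asks the API for JSON
# explicitly via the format=json query parameter.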
encode_baseurl = "https://www.encodeproject.org/experiments/"
json_arg = {'format': 'json'}
current_date = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")
meta_file = os.path.join(args.outputdir, "encode_meta_%s.json" % (current_date,))
# Fetch the listing of all experiments in one request and save it as a metadata file.
r = requests.get(encode_baseurl, params={"format": "json", "limit": "all"})
with open(meta_file, "w") as wh:
    wh.write(r.text)

# Collect each experiment accession from the "@graph" list in the response.
data = json.loads(r.text)
exp_ids = []
for exp in data["@graph"]:
    exp_ids.append(exp["accession"])
if not os.path.isdir(os.path.join(args.outputdir, "data")):
    os.makedirs(os.path.join(args.outputdir, "data"))
# Loop through the IDs and use the exp ID to download the full JSON file.
total = len(exp_ids)
for i, exp_id in enumerate(exp_ids):
    exp_url = urlparse.urljoin(encode_baseurl, exp_id)
    json_file = os.path.join(args.outputdir, "data", "%s.json" % (exp_id,))
    if args.resume:
        # In resume mode, only fetch experiments that are missing or empty on disk.
        if not os.path.isfile(json_file) or os.path.getsize(json_file) == 0:
            r = requests.get(exp_url, params=json_arg)
            with open(json_file, "w") as wh:
                wh.write(r.text)
    else:
        r = requests.get(exp_url, params=json_arg)
        with open(json_file, "w") as wh:
            wh.write(r.text)
    if not args.quiet:
        progress(i, total)
if not args.quiet:
    progress(total, total)
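
# Example invocation (the script name here is illustrative, not from the source):
#   python download_encode_experiments.py -o /data/encode -q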