from pymongo import MongoClient
import pymongo.errors
import gridfs
import sys
import traceback
import os
import itertools
import time
import chipathlon.conf
from pprint import pprint
import hashlib


class MongoDB(object):

    def __init__(self, host, username, password):
        self.client = MongoClient(host)
        self.db = self.client.chipseq
        try:
            self.db.authenticate(username, password, mechanism="SCRAM-SHA-1")
        except pymongo.errors.PyMongoError:
            print("Could not authenticate to db %s!" % (host,))
            print(traceback.format_exc())
            sys.exit(1)
        self.gfs = gridfs.GridFS(self.db)
        return

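    # A minimal usage sketch (the host name and credentials below are
    # hypothetical, shown for illustration only):
    #
    #   db = MongoDB("localhost", "chipathlon_user", "secret")
    #   has_samples, total = db.check_valid_samples()
    #   print("%s of %s experiments have samples." % (has_samples, total))
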
    def delete_result(self, result_id):
        # Make sure result exists
        cursor = self.db.results.find({
            "_id": result_id
        })
        if cursor.count() == 1:
            result = cursor.next()
            self.gfs.delete(result["gridfs_id"])
            self.db[result["result_type"]].delete_many({"result_id": result["_id"]})
            self.db.results.delete_one({"_id": result["_id"]})
        else:
            print "result_id %s doesn't exist." % (result_id,)
        return

    def check_result(self, file_name, control_sample_ids, experiment_sample_ids, ref_genome, result_type, params, debug=False):
        try:
            query = {
                "result_type": result_type,
                "ref_genome": ref_genome,
                "timestamp": {"$exists": True},
                "file_name": file_name
            }
            # In the case that there are 0 samples we just want to check for existence.
            query["control_sample_ids"] = {"$all": control_sample_ids} if (len(control_sample_ids) > 0) else {"$exists": True}
            query["experiment_sample_ids"] = {"$all": experiment_sample_ids} if (len(experiment_sample_ids) > 0) else {"$exists": True}
            # Flatten job params into dotted query keys, e.g.
            # {"some_job": {"some_param": 1}} -> {"some_job.some_param": 1}.
            # A job with no params only needs to exist in the result.
            for job_name in params:
                param_keys = params[job_name].keys()
                if len(param_keys) == 0:
                    query[job_name] = {"$exists": True}
                else:
                    for param_name in param_keys:
                        query[job_name + "." + param_name] = params[job_name][param_name]
            cursor = self.db.results.find(query)
            if debug:
                print "[DB.check_result] Query = %s" % (query,)
                print "[DB.check_result] count = %s" % (cursor.count(),)
            if cursor.count() > 0:
                # Use the latest if available
                return cursor.sort("timestamp", pymongo.DESCENDING).next()
        except pymongo.errors.OperationFailure as e:
            print("Error querying results for file %s: %s" % (file_name, e))
        return None

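    # A usage sketch for check_result.  The accessions, genome, job name,
    # and parameter values below are hypothetical, for illustration only:
    #
    #   result = db.check_result(
    #       "experiment.bed",
    #       control_sample_ids=["ENCSR000EUA"],
    #       experiment_sample_ids=["ENCSR000DZQ"],
    #       ref_genome="hg38",
    #       result_type="bed",
    #       params={"align": {"-q": "30"}},
    #   )
    #   if result is not None:
    #       print("Reusing result %s" % (result["_id"],))
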
    def create_result(self, output_file, control_sample_ids, experiment_sample_ids, result_type, additional_data = {}, gfs_attributes = {}):
        # Make sure output_file exists
        if os.path.isfile(output_file):
            # Make sure that all control_sample_ids & experiment_sample_ids
            # are valid.  REMEMBER, these are ids for control & experiment
            # SAMPLES.
            valid_controls = [self.is_valid_sample(cid) for cid in control_sample_ids]
            valid_experiments = [self.is_valid_sample(eid) for eid in experiment_sample_ids]
            if all(valid_controls) and all(valid_experiments):
                # First, we load the output file into gfs
                with open(output_file, "r") as rh:
                    # Calling put returns the gfs id
                    gridfs_id = self.gfs.put(rh, filename=os.path.basename(output_file), **gfs_attributes)
                # Now, we create the actual result entry by combining all necessary info
                result_entry = {
                    "gridfs_id": gridfs_id,
                    "control_sample_ids": control_sample_ids,
                    "experiment_sample_ids": experiment_sample_ids,
                    "result_type": result_type,
                    "file_name": output_file,
                    "timestamp": time.time()
                }
                # Add additional attributes into the result_entry
                result_entry.update(additional_data)
                # Insert the entry into the database, and return the id
                result = self.db.results.insert_one(result_entry)
                return (True, "Result created successfully.", result.inserted_id)
            else:
                # Collect every invalid id from both input lists
                invalid_ids = []
                for id_list, valid_list in zip([control_sample_ids, experiment_sample_ids], [valid_controls, valid_experiments]):
                    invalid_ids += [id_list[i] for i, valid in enumerate(valid_list) if not valid]
                msg = "Not all input ids are valid.  The following are invalid: " + ", ".join(invalid_ids)
        else:
            msg = "Specified output_file %s does not exist." % (output_file,)
        return (False, msg, None)

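    # A usage sketch for create_result (the path, accessions, and
    # additional_data keys are hypothetical):
    #
    #   valid, msg, result_id = db.create_result(
    #       "/tmp/experiment.bed",
    #       ["ENCSR000EUA"],
    #       ["ENCSR000DZQ"],
    #       "bed",
    #       additional_data={"ref_genome": "hg38"},
    #       gfs_attributes={"file_type": "bed"},
    #   )
    #   if not valid:
    #       print(msg)
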
    def save_bam(self, bam_file, control_sample_ids, experiment_sample_ids, additional_data = {}):
        # Create result entry for bam files.  Since bam is a binary format, the file will only
        # be stored in GridFS
        valid, msg, result_id = self.create_result(bam_file, control_sample_ids, experiment_sample_ids, "bam", additional_data, gfs_attributes = {"file_type": "bam"})
        return (valid, msg, result_id)


    def save_bed(self, bed_file, control_sample_ids, experiment_sample_ids, additional_data = {}):
        # Create result_entry for bed_file
        valid, msg, result_id = self.create_result(bed_file, control_sample_ids, experiment_sample_ids, "bed", additional_data, gfs_attributes = {"file_type": "bed"})
        if valid:
            # Now we load the actual bed data into the bed collection.
            # Data is in a six column format:
            # chr, start, end, name, score, strand
            # Lines are read lazily in chunks, then inserted with
            # insert_one().  Each document contains "n_lines" lines from
            # the result BED file.
            print("loading bed_data...")
            with open(bed_file, "r") as rh:
                msg = "Bed file successfully inserted."
                # Lazy load files in specified line chunk size
                n_lines = chipathlon.conf.result_lines_per_document
                line_set = list(itertools.islice(rh, n_lines))
                while line_set:
                    try:
                        result_lines = []
                        for line in line_set:
                            line_info = line.split()
                            line_record = {
                                "chr": line_info[0],
                                "start": line_info[1],
                                "end": line_info[2],
                                "name": line_info[3],
                                "score": line_info[4],
                                "strand": line_info[5],
                            }
                            result_lines.append(line_record)

                        self.db.bed.insert_one({"result_id": result_id, "result_lines": result_lines})

                    except pymongo.errors.OperationFailure as e:
                        valid = False
                        msg = "Error inserting bed_file %s: %s" % (bed_file, e)
                    line_set = list(itertools.islice(rh, n_lines))
        return (valid, msg, result_id)

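    # For reference, each document inserted into the bed collection has
    # this shape (values are illustrative; fields are stored as strings,
    # exactly as parsed from the file):
    #
    #   {
    #       "result_id": ObjectId("..."),
    #       "result_lines": [
    #           {"chr": "chr1", "start": "100", "end": "250",
    #            "name": "peak_1", "score": "0", "strand": "+"},
    #           ...
    #       ]
    #   }
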
    def save_peak(self, peak_file, control_sample_ids, experiment_sample_ids, additional_data = {}):
        # Create result_entry for peak_file
        valid, msg, result_id = self.create_result(peak_file, control_sample_ids, experiment_sample_ids, "peak", additional_data, gfs_attributes = {"file_type": os.path.splitext(peak_file)[1][1:]})
        if valid:
            # Now we load the actual peak data into the collection.
            # Data is in a 10 column format:
            # chr, start, end, name, score, strand, signal_value, p_value, q_value, summit
            # Each document contains "n_lines" lines from the result peak
            # file.
            with open(peak_file, "r") as rh:
                msg = "Peak file successfully inserted."
                # Lazy load files in specified line chunk size
                n_lines = chipathlon.conf.result_lines_per_document
                line_set = list(itertools.islice(rh, n_lines))
                while line_set:
                    try:
                        result_lines = []
                        for line in line_set:
                            line_info = line.split()
                            line_record = {
                                "chr": line_info[0],
                                "start": line_info[1],
                                "end": line_info[2],
                                "name": line_info[3],
                                "score": line_info[4],
                                "strand": line_info[5],
                                "signal_value": line_info[6],
                                "p_value": line_info[7],
                                "q_value": line_info[8],
                                "summit": line_info[9]
                            }
                            result_lines.append(line_record)

                        self.db.peak.insert_one({"result_id": result_id, "result_lines": result_lines})

                    except pymongo.errors.OperationFailure as e:
                        valid = False
                        msg = "Error inserting peak_file %s: %s" % (peak_file, e)
                    line_set = list(itertools.islice(rh, n_lines))
        return (valid, msg, result_id)

    def is_valid_sample(self, sample_accession):
        try:
            cursor = self.db.samples.find({
                "accession": sample_accession
            })
            if cursor.count() == 1:
                return True
        except pymongo.errors.OperationFailure as e:
            print "Error with sample_accession %s: %s" % (sample_accession, e)
        return False

    def is_valid_experiment(self, experiment_id):
        try:
            cursor = self.db.experiments.find({
                "target": {"$exists": True},
                "revoked_files.0": {"$exists": False},
                "@id": "/experiments/%s/" % (experiment_id,)
            })
            if cursor.count() == 1:
                return True
        except pymongo.errors.OperationFailure as e:
            print "Error with experiment_id %s: %s" % (experiment_id, e)
        return False

    def check_valid_samples(self):
        cursor = self.db.experiments.aggregate([
            {
                "$match": {
                    "target": {"$exists": True},
                    "revoked_files.0": {"$exists": False},
                    "assembly.0": {"$exists": True},
                    "assembly.1": {"$exists": False}
                }
            },
            {
                "$lookup": {
                    "from": "samples",
                    "localField": "uuid",
                    "foreignField": "experiment_id",
                    "as": "samples"
                }
            }
        ])
        total = 0
        has_samples = 0
        for document in cursor:
            total += 1
            if len(document["samples"]) > 0:
                has_samples += 1
        return (has_samples, total)

    def get_assembly(self, experiment_id):
        valid = True
        msg = ""
        data = ""
        cursor = self.db.experiments.find({
            "target": {"$exists": True},
            "revoked_files.0": {"$exists": False},
            "assembly.0": {"$exists": True},
            "assembly.1": {"$exists": False},
            "@id": "/experiments/%s/" % (experiment_id,)
        })
        if cursor.count() == 1:
            document = cursor.next()
            data = document["assembly"][0]
            msg = "Succesfully retrieved assembly for experiment with id '%s'.\n" % (experiment_id,)
        else:
            valid = False
            msg = "Experiment with id '%s' does not exist.\n" % (experiment_id,)
        return (valid, msg, data)

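    # A usage sketch for get_assembly (the experiment id is hypothetical):
    #
    #   valid, msg, assembly = db.get_assembly("ENCSR000DZQ")
    #   if valid:
    #       print("Assembly: %s" % (assembly,))  # e.g. "hg19"
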
    def fetch_from_gridfs(self, gridfs_id, filename, checkmd5=True):
        """
        :param gridfs_id: GridFS _id of file to get.
        :type gridfs_id: bson.objectid.ObjectId
        :param filename: Filename to save file to.
        :type filename: str

        Fetch the file with _id 'gridfs_id' from GridFS and save to the file 'filename'.
        """

        try:
            gridfs_file = self.gfs.get(gridfs_id)
            gridfs_md5 = gridfs_file.md5
        except gridfs.errors.NoFile as e:
            print "Error fetching file from GridFS!\nNo file with ID '%s'" % (gridfs_id)
            print e
            sys.exit(1)

        try:
            output_fh = open(filename, "wb")
        except IOError as e:
            print("Error creating GridFS output file '%s':" % (filename,))
            print("%s: %s" % (e.errno, e.strerror))
            sys.exit(1)

        hash_md5 = hashlib.md5()
        for chunk in gridfs_file:
            output_fh.write(chunk)
            hash_md5.update(chunk)

        output_fh.close()
        gridfs_file.close()

        if checkmd5:
            if gridfs_md5 == hash_md5.hexdigest():
                return True
            else:
                print "MD5 mismatch saving file from GridFS to '%s'" % (filename)
                return False
        else:
            return True

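    # A usage sketch for fetch_from_gridfs (the ObjectId is hypothetical;
    # pass checkmd5=False to skip the integrity check):
    #
    #   from bson.objectid import ObjectId
    #   ok = db.fetch_from_gridfs(ObjectId("0123456789ab0123456789ab"), "out.bed")
    #   if not ok:
    #       print("md5 mismatch, consider re-downloading")
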
    def get_samples(self, experiment_id):
        valid = True
        msg = ""
        data = {}
        # First, check to make sure the target experiment is valid
        if self.is_valid_experiment(experiment_id):
            # Next, we check that there is at least 1 possible control
            control_check = self.db.experiments.find({
                "target": {"$exists": True},
                "revoked_files.0": {"$exists": False},
                "assembly.0": {"$exists": True},
                "assembly.1": {"$exists": False},
                "possible_controls.0": {"$exists": True},
                "@id": "/experiments/%s/" % (experiment_id,)
            })
            if control_check.count() == 1:
                # Complicated aggregation pipeline that does the following steps:
                # 1. Find the experiment that matches the given id
                # 2. Join samples into the collection by exp_id
                # 3. Iterate through possible_controls
                # 4. Join possible_control data into control_exps
                # 5. Iterate through control_exps
                # 6. Join samples into the control_exps by exp_id
                # 7. Re-aggregate all data into arrays
                pipeline = [
                    {
                        "$match": {
                            "target": {"$exists": True},
                            "revoked_files.0": {"$exists": False},
                            "assembly.0": {"$exists": True},
                            "assembly.1": {"$exists": False},
                            "possible_controls.0": {"$exists": True},
                            "@id": "/experiments/%s/" % (experiment_id,)
                        }
                    },
                    {
                        "$lookup": {
                            "from": "samples",
                            "localField": "uuid",
                            "foreignField": "experiment_id",
                            "as": "samples"
                        }
                    },
                    {
                        "$unwind": "$possible_controls"
                    },
                    {
                        "$lookup": {
                            "from": "samples",
                            "localField": "possible_controls.uuid",
                            "foreignField": "experiment_id",
                            "as": "possible_controls.samples"
                        }
                    },
                    {
                        "$group": {
                            "_id": "$_id",
                            "possible_controls": {"$push": "$possible_controls"},
                            "samples": {"$push": "$samples"}
                        }
                    }
                ]
                cursor = self.db.experiments.aggregate(pipeline)
                # We should have only 1 document
                document = cursor.next()
                control_inputs = [sample for control in document["possible_controls"] for sample in control["samples"] if ("file_type" in sample and sample["file_type"] == "fastq")]
                experiment_inputs = [sample for sample in document["samples"][0] if ("file_type" in sample and sample["file_type"] == "fastq")]
                if (len(control_inputs) > 0 and len(experiment_inputs) > 0):
                    msg = "Succesfully retrieved input files for experiment with id '%s'.\n" % (experiment_id,)
                    data = {
                        "control": control_inputs,
                        "experiment": experiment_inputs
                    }
                else:
                    valid = False
                    msg = "Experiment with id '%s' has %s possible control inputs, and %s possible experiment inputs.\n" % (experiment_id, len(control_inputs), len(experiment_inputs))
            else:
                valid = False
                msg = "Experiment with id '%s' does not have possible_controls.\n" % (experiment_id,)
        else:
            valid = False
            msg = "Experiment with id '%s' is not valid!  It may not exist, or it may be missing required metadata.\n" % (experiment_id,)
        return (valid, msg, data)
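
    # A usage sketch for get_samples (the experiment id is hypothetical):
    #
    #   valid, msg, data = db.get_samples("ENCSR000DZQ")
    #   if valid:
    #       for sample in data["control"] + data["experiment"]:
    #           print(sample["accession"])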