from pymongo import MongoClient
import pymongo.errors
import gridfs
import sys
import traceback
import os
import itertools
import chipathlon.conf
from pprint import pprint
import hashlib


class MongoDB(object):

    def __init__(self, host, username, password):
        self.client = MongoClient(host)
        self.db = self.client.chipseq
        try:
            self.db.authenticate(username, password, mechanism="SCRAM-SHA-1")
        except pymongo.errors.PyMongoError:
            print("Could not authenticate to db %s!" % (host,))
            print(traceback.format_exc())
            sys.exit(1)
        self.gfs = gridfs.GridFS(self.db)
        return
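
    # A minimal construction sketch (host and credentials here are
    # hypothetical placeholders); authentication happens at construction
    # time, and a failure exits the process:
    #   mdb = MongoDB("localhost", "chipuser", "secret")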

    def delete_result(self, result_id):
        # Make sure the result exists, then remove its GridFS file, its
        # per-line documents, and finally the result entry itself.
        cursor = self.db.results.find({
            "_id": result_id
        })
        if cursor.count() == 1:
            result = cursor.next()
            self.gfs.delete(result["gridfs_id"])
            self.db[result["result_type"]].delete_many({"result_id": result["_id"]})
            self.db.results.delete_one({"_id": result["_id"]})
        else:
            print("result_id %s doesn't exist." % (result_id,))
        return

    def create_result(self, output_file, control_sample_ids, experiment_sample_ids, result_type, additional_data=None, gfs_attributes=None):
        # Use None defaults to avoid shared mutable default arguments.
        additional_data = additional_data if additional_data is not None else {}
        gfs_attributes = gfs_attributes if gfs_attributes is not None else {}
        # Make sure output_file exists
        if os.path.isfile(output_file):
            # Make sure that all control_sample_ids & experiment_sample_ids are valid.
            # REMEMBER, these are ids for control & experiment SAMPLES.
            valid_controls = [self.is_valid_sample(cid) for cid in control_sample_ids]
            valid_experiments = [self.is_valid_sample(eid) for eid in experiment_sample_ids]
            if all(valid_controls) and all(valid_experiments):
                # First, we load the output file into gfs.  Open in binary mode,
                # since some result types (e.g. bam) are binary formats.
                with open(output_file, "rb") as rh:
                    # Calling put returns the gfs id
                    gridfs_id = self.gfs.put(rh, filename=os.path.basename(output_file), **gfs_attributes)
                # Now, we create the actual result entry by combining all necessary info
                result_entry = {
                    "gridfs_id": gridfs_id,
                    "control_sample_ids": control_sample_ids,
                    "experiment_sample_ids": experiment_sample_ids,
                    "result_type": result_type
                }
                # Add additional attributes into the result_entry
                result_entry.update(additional_data)
                # Insert the entry into the database, and return the id
                result = self.db.results.insert_one(result_entry)
                return (True, "Result created successfully.", result.inserted_id)
            else:
                # Collect every invalid id into one list so the message reads cleanly.
                invalid_ids = []
                for id_list, valid_list in zip([control_sample_ids, experiment_sample_ids], [valid_controls, valid_experiments]):
                    invalid_ids += [id_list[i] for i, valid in enumerate(valid_list) if not valid]
                msg = "Not all input ids are valid.  The following are invalid: %s" % (", ".join(invalid_ids),)
        else:
            msg = "Specified output_file %s does not exist." % (output_file,)
        return (False, msg, None)
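
    # A hypothetical call, showing the (valid, message, id) convention shared
    # by the save_* wrappers below (the accessions are placeholders):
    #   ok, msg, rid = mdb.create_result("sample.bed", ["ENCSR000AAA"], ["ENCSR000BBB"], "bed")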

    def save_bam(self, bam_file, control_sample_ids, experiment_sample_ids, additional_data=None):
        # Create result entry for bam files.  Since bam is a binary format, the
        # file is only stored in GridFS -- no per-line collection is created.
        valid, msg, result_id = self.create_result(bam_file, control_sample_ids, experiment_sample_ids, "bam", additional_data, gfs_attributes={"file_type": "bam"})
        return (valid, msg, result_id)

    def save_bed(self, bed_file, control_sample_ids, experiment_sample_ids, additional_data=None):
        # Create result_entry for bed_file
        valid, msg, result_id = self.create_result(bed_file, control_sample_ids, experiment_sample_ids, "bed", additional_data, gfs_attributes={"file_type": "bed"})
        if valid:
            # Now we load the actual bed data into the bed collection.
            # Data is in a six column format:
            # chr, start, end, name, score, strand
            # Each document inserted below holds "n_lines" lines from the
            # result BED file.
            print("loading bed_data...")
            with open(bed_file, "r") as rh:
                msg = "Bed file successfully inserted."
                # Lazily read the file in chunks of the configured size.
                n_lines = chipathlon.conf.result_lines_per_document
                line_set = list(itertools.islice(rh, n_lines))
                while line_set:
                    try:
                        result_lines = []
                        for line in line_set:
                            line_info = line.split()
                            line_record = {
                                "chr": line_info[0],
                                "start": line_info[1],
                                "end": line_info[2],
                                "name": line_info[3],
                                "score": line_info[4],
                                "strand": line_info[5]
                            }
                            result_lines.append(line_record)
                        self.db.bed.insert_one({"result_id": result_id, "result_lines": result_lines})
                    except pymongo.errors.OperationFailure as e:
                        valid = False
                        msg = "Error inserting bed_file %s: %s" % (bed_file, e)
                    line_set = list(itertools.islice(rh, n_lines))
        return (valid, msg, result_id)
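
    # A sketch of one resulting "bed" document (values illustrative, not
    # taken from a real run):
    #   {"result_id": ObjectId("..."),
    #    "result_lines": [{"chr": "chr1", "start": "1000", "end": "1500",
    #                      "name": "peak_1", "score": "0", "strand": "+"}, ...]}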

    def save_peak(self, peak_file, control_sample_ids, experiment_sample_ids, additional_data=None):
        # Create result_entry for peak_file
        valid, msg, result_id = self.create_result(peak_file, control_sample_ids, experiment_sample_ids, "peak", additional_data, gfs_attributes={"file_type": os.path.splitext(peak_file)[1][1:]})
        if valid:
            # Now we load the actual peak data into the collection.
            # Data is in a 10 column format:
            # chr, start, end, name, score, strand, signal_value, p_value, q_value, summit
            # Each document inserted below holds "n_lines" lines from the
            # result peak file.
            with open(peak_file, "r") as rh:
                msg = "Peak file successfully inserted."
                # Lazily read the file in chunks of the configured size.
                n_lines = chipathlon.conf.result_lines_per_document
                line_set = list(itertools.islice(rh, n_lines))
                while line_set:
                    try:
                        result_lines = []
                        for line in line_set:
                            line_info = line.split()
                            line_record = {
                                "chr": line_info[0],
                                "start": line_info[1],
                                "end": line_info[2],
                                "name": line_info[3],
                                "score": line_info[4],
                                "strand": line_info[5],
                                "signal_value": line_info[6],
                                "p_value": line_info[7],
                                "q_value": line_info[8],
                                "summit": line_info[9]
                            }
                            result_lines.append(line_record)
                        self.db.peak.insert_one({"result_id": result_id, "result_lines": result_lines})
                    except pymongo.errors.OperationFailure as e:
                        valid = False
                        msg = "Error inserting peak_file %s: %s" % (peak_file, e)
                    line_set = list(itertools.islice(rh, n_lines))
        return (valid, msg, result_id)
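
    # Note: this 10-column layout matches the ENCODE narrowPeak format, where
    # the final "summit" column is the called point-source offset from
    # "start" (-1 when no summit is called).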

    def is_valid_sample(self, sample_accession):
        try:
            cursor = self.db.samples.find({
                "accession": sample_accession
            })
            if cursor.count() == 1:
                return True
        except pymongo.errors.OperationFailure as e:
            print("Error with sample_accession %s: %s" % (sample_accession, e))
        return False

    def is_valid_experiment(self, experiment_id):
        try:
            cursor = self.db.experiments.find({
                "target": {"$exists": True},
                "revoked_files.0": {"$exists": False},
                "@id": "/experiments/%s/" % (experiment_id,)
            })
            if cursor.count() == 1:
                return True
        except pymongo.errors.OperationFailure as e:
            print("Error with experiment_id %s: %s" % (experiment_id, e))
        return False

    def check_valid_samples(self):
        # Count how many experiments that pass the validity filters have at
        # least one sample joined in from the samples collection.
        cursor = self.db.experiments.aggregate([
            {
                "$match": {
                    "target": {"$exists": True},
                    "revoked_files.0": {"$exists": False},
                    "assembly.0": {"$exists": True},
                    "assembly.1": {"$exists": False}
                }
            },
            {
                "$lookup": {
                    "from": "samples",
                    "localField": "uuid",
                    "foreignField": "experiment_id",
                    "as": "samples"
                }
            }
        ])
        total = 0
        has_samples = 0
        for document in cursor:
            total += 1
            if len(document["samples"]) > 0:
                has_samples += 1
        return (has_samples, total)

    def get_assembly(self, experiment_id):
        valid = True
        msg = ""
        data = ""
        cursor = self.db.experiments.find({
            "target": {"$exists": True},
            "revoked_files.0": {"$exists": False},
            "assembly.0": {"$exists": True},
            "assembly.1": {"$exists": False},
            "@id": "/experiments/%s/" % (experiment_id,)
        })
        if cursor.count() == 1:
            document = cursor.next()
            data = document["assembly"][0]
            msg = "Successfully retrieved assembly for experiment with id '%s'.\n" % (experiment_id,)
        else:
            valid = False
            msg = "Experiment with id '%s' does not exist.\n" % (experiment_id,)
        return (valid, msg, data)
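
    # On success the return value looks like
    # (True, "Successfully retrieved assembly ...", "hg19"); the assembly
    # string here is illustrative -- it comes from the experiment document.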

    def fetch_from_gridfs(self, gridfs_id, filename, checkmd5=True):
        """
        :param gridfs_id: GridFS _id of file to get.
        :type gridfs_id: bson.objectid.ObjectId
        :param filename: Filename to save file to.
        :type filename: str
        :param checkmd5: Whether to validate the md5 of the saved file
            against the md5 stored in GridFS.
        :type checkmd5: bool

        Fetch the file with _id 'gridfs_id' from GridFS and save it to the
        file 'filename'.  Returns True on success, False on an md5 mismatch.
        """
        try:
            gridfs_file = self.gfs.get(gridfs_id)
            gridfs_md5 = gridfs_file.md5
        except gridfs.errors.NoFile as e:
            print("Error fetching file from GridFS!\nNo file with ID '%s'" % (gridfs_id,))
            print(e)
            sys.exit(1)

        try:
            output_fh = open(filename, "wb")
        except IOError as e:
            print("Error creating GridFS output file '%s':" % (filename,))
            print("%s: %s" % (e.errno, e.strerror))
            sys.exit(1)

        # Stream the file to disk in chunks, hashing as we go.
        hash_md5 = hashlib.md5()
        for chunk in gridfs_file:
            output_fh.write(chunk)
            hash_md5.update(chunk)

        output_fh.close()
        gridfs_file.close()

        if checkmd5:
            if gridfs_md5 == hash_md5.hexdigest():
                return True
            else:
                print("MD5 mismatch saving file from GridFS to '%s'" % (filename,))
                return False
        else:
            return True
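
    # A minimal usage sketch (the id would come from a result entry's
    # "gridfs_id" field; the output path is a placeholder):
    #   ok = mdb.fetch_from_gridfs(result["gridfs_id"], "/tmp/result.bed")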

    def get_samples(self, experiment_id):
        valid = True
        msg = ""
        data = {}
        # First, check to make sure the target experiment is valid
        if self.is_valid_experiment(experiment_id):
            # Next, we check that there is at least 1 possible control
            control_check = self.db.experiments.find({
                "target": {"$exists": True},
                "revoked_files.0": {"$exists": False},
                "assembly.0": {"$exists": True},
                "assembly.1": {"$exists": False},
                "possible_controls.0": {"$exists": True},
                "@id": "/experiments/%s/" % (experiment_id,)
            })
            if control_check.count() == 1:
                # Complicated aggregation pipeline does the following steps:
                # 1. Find the experiment that matches the given id
                # 2. Join samples into the collection by exp_id
                # 3. Iterate through possible_controls
                # 4. Join possible_control data into control_exps
                # 5. Iterate through control_exps
                # 6. Join samples into the control_exps by exp_id
                # 7. Re-aggregate all data into arrays
                pipeline = [
                    {
                        "$match": {
                            "target": {"$exists": True},
                            "revoked_files.0": {"$exists": False},
                            "assembly.0": {"$exists": True},
                            "assembly.1": {"$exists": False},
                            "possible_controls.0": {"$exists": True},
                            "@id": "/experiments/%s/" % (experiment_id,)
                        }
                    },
                    {
                        "$lookup": {
                            "from": "samples",
                            "localField": "uuid",
                            "foreignField": "experiment_id",
                            "as": "samples"
                        }
                    },
                    {
                        "$unwind": "$possible_controls"
                    },
                    {
                        "$lookup": {
                            "from": "samples",
                            "localField": "possible_controls.uuid",
                            "foreignField": "experiment_id",
                            "as": "possible_controls.samples"
                        }
                    },
                    {
                        "$group": {
                            "_id": "$_id",
                            "possible_controls": {"$push": "$possible_controls"},
                            "samples": {"$push": "$samples"}
                        }
                    }
                ]
                cursor = self.db.experiments.aggregate(pipeline)
                # We should have only 1 document
                document = cursor.next()
                control_inputs = [sample for control in document["possible_controls"] for sample in control["samples"] if ("file_type" in sample and sample["file_type"] == "fastq")]
                experiment_inputs = [sample for sample in document["samples"][0] if ("file_type" in sample and sample["file_type"] == "fastq")]
                if (len(control_inputs) > 0 and len(experiment_inputs) > 0):
                    msg = "Successfully retrieved input files for experiment with id '%s'.\n" % (experiment_id,)
                    data = {
                        "control": control_inputs,
                        "experiment": experiment_inputs
                    }
                else:
                    valid = False
                    msg = "Experiment with id '%s' has %s possible control inputs, and %s possible experiment inputs.\n" % (experiment_id, len(control_inputs), len(experiment_inputs))
            else:
                valid = False
                msg = "Experiment with id '%s' does not have possible_controls.\n" % (experiment_id,)
        else:
            valid = False
            msg = "Experiment with id '%s' is not valid!  It may not exist, or it may be missing required metadata.\n" % (experiment_id,)
        return (valid, msg, data)
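

# A minimal end-to-end sketch, assuming a reachable MongoDB hosting the
# "chipseq" database; the host, credentials, and experiment accession are
# hypothetical placeholders.
if __name__ == "__main__":
    mdb = MongoDB("localhost", "chipuser", "secret")
    valid, msg, data = mdb.get_samples("ENCSR000ABC")
    print(msg)
    if valid:
        pprint(data)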