from pymongo import MongoClient
import pymongo.errors
import gridfs
import sys
import traceback
import os
import itertools
import time
import collections
import chipathlon.conf
from pprint import pprint
import hashlib
from chipathlon.utils import progress


class MongoDB(object):
    """
    :param host: The host address of the MongoDB database.
    :type host: str
    :param username: The username of the account for the MongoDB database.
    :type username: str
    :param password: The password for the user.
    :type password: str
    :param debug: A flag for printing additional messages.
    :type debug: bool

    This class manages all interactions with the ENCODE metadata.  The
    metadata can be very unruly and difficult to deal with, so this class
    provides several helper functions to make common database operations
    much easier.
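
    A typical session looks like the sketch below; the host, credentials,
    and accession are hypothetical::

        mdb = MongoDB("localhost", "encode_user", "encode_pass", debug=True)
        valid, msg, data = mdb.get_sample("ENCFF000XYZ", "fastq")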
    """
32

33
34
    def __init__(self, host, username, password, debug=False):
        self.debug = debug
        self.host = host
        self.username = username
        self.password = password
        self.client = MongoClient(host)
        self.db = self.client.chipseq
        self.cache = collections.defaultdict(dict)
        try:
            self.db.authenticate(username, password, mechanism="SCRAM-SHA-1")
        except Exception:
            print("Could not authenticate to db %s!" % (host,))
            print(traceback.format_exc())
            sys.exit(1)
        self.gfs = gridfs.GridFS(self.db)
        return

    def add_cache(self, accession, file_type, data):
        """
        :param accession: The accession of the file to store.
        :type accession: str
        :param file_type: The type of file to store.
        :type file_type: str
        :param data: The data to add to the cache.
        :type data: Object

        Adds a data result to the internal cache.  This is used to speed up
        identical requests.  We may have multiple runs that use identical
        control / signal files but change the alignment or peak calling
        tools; in these cases we don't want to request the same information
        from the database multiple times.
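
        A small round-trip sketch (the accession is hypothetical)::

            mdb.add_cache("ENCFF000XYZ", "fastq", {"some": "metadata"})
            mdb.get_cache("ENCFF000XYZ", "fastq")  # -> {"some": "metadata"}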
        """
        self.cache[accession][file_type] = data
        return

    def get_cache(self, accession, file_type):
        """
        :param accession: The accession of the file to retrieve.
        :type accession: str
        :param file_type: The type of file to retrieve.
        :type file_type: str

        Gets a data item from the internal cache, or None if nothing has
        been cached for the given accession and file_type.
        """
        if accession in self.cache:
            return self.cache[accession].get(file_type)
        return None

    def delete_result(self, result, genome):
        """
        :param result: The result to delete.
        :type result: :py:class:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:class:`~chipathlon.genome.Genome`

        Deletes a result and its corresponding gridfs entry.
        """
        result_id = self.get_result_id(result, genome)
        cursor = self.db.results.find({
            "_id": result_id
        })
        if cursor.count() == 1:
            result = cursor.next()
            self.gfs.delete(result["gridfs_id"])
            self.db.results.delete_one({"_id": result["_id"]})
        else:
            print "result_id %s doesn't exist." % (result_id,)
100
101
        return

    def _get_result_query(self, result, genome):
        query = {
            "result_type": result.file_type,
            "assembly": genome.assembly,
            "timestamp": {"$exists": True},
            "file_name": result.full_name
        }
        # In the case that there are 0 samples we just want to check for existence.
        control_sample_accessions = result.get_accessions("control")
        signal_sample_accessions = result.get_accessions("signal")
        query["control_sample_accessions"] = {"$all": control_sample_accessions} if (len(control_sample_accessions) > 0) else {"$exists": True}
        query["signal_sample_accessions"] = {"$all": signal_sample_accessions} if (len(signal_sample_accessions) > 0) else {"$exists": True}
        for job in result.all_jobs:
            job_args = job.get_db_arguments()
            arg_keys = job_args.keys()
            if len(arg_keys) == 0:
                query[job.job_name] = {"$exists": True}
            else:
                for arg_name in arg_keys:
                    query[job.job_name + "." + arg_name] = job_args[arg_name]
        if self.debug:
            print("Result query: %s" % (query,))
        return query

    def result_exists(self, result, genome):
        """
        :param result: The result to check.
        :type result: :py:class:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:class:`~chipathlon.genome.Genome`

        Check if a result exists in the database.  The genome parameter
        is required since some files have been aligned with, or use,
        individual chromosome fasta or size files for peak calling.
        """
        try:
            cursor = self.db.results.find(self._get_result_query(result, genome))
            return cursor.count() > 0
        except pymongo.errors.OperationFailure as e:
            print "Error checking result [%s]: %s" % (file_name, e)
        return False

    def get_result_id(self, result, genome):
        """
        :param result: The result to check.
        :type result: :py:class:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:class:`~chipathlon.genome.Genome`
        :returns: The id found or None.

        Get the id of a result in the database.
        """
        try:
            cursor = self.db.results.find(self._get_result_query(result, genome))
            if cursor.count() == 1:
                return cursor.next()["_id"]
        except pymongo.errors.OperationFailure as e:
            print("Error getting result id [%s]: %s" % (result.full_name, e))
        return None

    def get_result(self, result, genome):
        """
        :param result: The result to check.
        :type result: :py:class:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:class:`~chipathlon.genome.Genome`

        Get the metadata for the result from the database.  If multiple results
        exist, the most recently saved result is returned.
        """
        try:
            cursor = self.db.results.find(self._get_result_query(result, genome))
            if cursor.count() > 0:
                return cursor.sort("timestamp", pymongo.DESCENDING).next()
        except pymongo.errors.OperationFailure as e:
            print("Error getting result [%s]: %s" % (result.full_name, e))
        return None

    def save_result(self, output_file, control_sample_accessions, signal_sample_accessions, result_type, additional_data=None, gfs_attributes=None):
        """
        :param output_file: The path to the result to save.
        :type output_file: str
        :param control_sample_accessions: A list of control accessions.
        :type control_sample_accessions: list
        :param signal_sample_accessions: A list of signal accessions.
        :type signal_sample_accessions: list
        :param result_type: The file type of the result.
        :type result_type: str
        :param additional_data: Additional metadata to store in mongo.
        :type additional_data: dict
        :param gfs_attributes: Additional metadata to store in gridfs.
        :type gfs_attributes: dict

        Saves a result entry into MongoDB and uploads the file into gridfs.
        The only difference between additional_data and gfs_attributes is
        where the metadata is stored.  Both store key / value pairs of
        information; the additional_data information is stored in the result
        entry, while the gfs_attributes information is stored in gridfs.
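
        A call might look like this sketch (the path and accessions are
        hypothetical)::

            valid, msg, result_id = mdb.save_result(
                "/data/runs/peaks.bed",
                ["ENCSR000CTL"],
                ["ENCSR000SIG"],
                "bed",
                additional_data={"caller": "macs2"}
            )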
        """
        if additional_data is None:
            additional_data = {}
        # gfs_attributes is mutated below, so guard against a shared
        # mutable default argument.
        if gfs_attributes is None:
            gfs_attributes = {}
        # Make sure output_file exists
        if os.path.isfile(output_file):
            # Make sure that all control_sample_accessions & signal_sample_accessions are valid
            # REMEMBER, these are ids for control & experiment SAMPLES
            valid_controls = [self.is_valid_sample(cid) for cid in control_sample_accessions]
            valid_experiments = [self.is_valid_sample(eid) for eid in signal_sample_accessions]
            if all(valid_controls) and all(valid_experiments):
                gfs_attributes["file_type"] = result_type
                # First, we load the output file into gfs.  Open in binary
                # mode, since results are not necessarily plain text.
                with open(output_file, "rb") as rh:
                    # Calling put returns the gfs id
                    gridfs_id = self.gfs.put(rh, filename=os.path.basename(output_file), **gfs_attributes)
                # Now, we create the actual result entry by combining all necessary info
                result_entry = {
                    "gridfs_id": gridfs_id,
                    "control_sample_accessions": control_sample_accessions,
                    "signal_sample_accessions": signal_sample_accessions,
                    "result_type": result_type,
                    "file_name": output_file,
                    "timestamp": time.time()
                }
                # Add additional attributes into the result_entry
                result_entry.update(additional_data)
                # Insert the entry into the database, and return the id
                result = self.db.results.insert_one(result_entry)
                return (True, "Result created successfully.", result.inserted_id)
            else:
                msg = "Not all input ids are valid.  The following are invalid: "
229
                for id_list, valid_list in zip([control_sample_accessions, signal_sample_accessions], [valid_controls, valid_experiments]):
230
                    msg += ", ".join([id_list[i] for i, valid in enumerate(valid_list) if not valid])
231
232
233
234
        else:
            msg = "Specified output_file %s does not exist." % (output_file,)
        return (False, msg, None)

    def is_valid_sample(self, sample_accession):
        """
        :param sample_accession: The accession number to check.
        :type sample_accession: str
        :returns: Whether or not the sample is valid.

        Ensures that a sample with the accession specified actually exists.
        """
        try:
            cursor = self.db.samples.find({
                "accession": sample_accession
            })
            if cursor.count() == 1:
                return True
        except pymongo.errors.OperationFailure as e:
            print "Error with sample_accession %s: %s" % (sample_accession, e)
        return False

    def is_valid_experiment(self, experiment_accession):
        """
        :param experiment_accession: The accession number to check.
        :type experiment_accession: str
        :returns: Whether or not the experiment is valid.

        Ensures that an experiment with the accession specified actually exists.
        """
        try:
            cursor = self.db.experiments.find({
                "target": {"$exists": True},
                "@id": "/experiments/%s/" % (experiment_accession,)
            })
            if cursor.count() == 1:
                return True
        except pymongo.errors.OperationFailure as e:
            print("Error with experiment_accession %s: %s" % (experiment_accession, e))
        return False

    def fetch_from_gridfs(self, gridfs_id, filename, checkmd5=True):
        """
        :param gridfs_id: GridFS _id of file to get.
        :type gridfs_id: :py:class:`bson.objectid.ObjectId`
        :param filename: Filename to save file to.
        :type filename: str
        :param checkmd5: Whether or not to validate the md5 of the result.
        :type checkmd5: bool

        Fetch the file with the corresponding id and save it under the
        specified 'filename'.  If checkmd5 is specified, validate that the
        saved file has a correct md5 value.
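
        A minimal sketch (the ObjectId value is hypothetical)::

            from bson.objectid import ObjectId
            ok = mdb.fetch_from_gridfs(ObjectId("0123456789ab0123456789ab"), "out.bed")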
        """
        try:
            gridfs_file = self.gfs.get(gridfs_id)
            gridfs_md5 = gridfs_file.md5
        except gridfs.errors.NoFile as e:
            print "Error fetching file from GridFS!\nNo file with ID '%s'" % (gridfs_id)
            print e
            sys.exit(1)

        try:
            output_fh = open(filename, "wb")
        except IOError as e:
            print("Error creating GridFS output file '%s':" % (filename,))
            print("%s: %s" % (e.errno, e.strerror))
            sys.exit(1)

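        # Stream the file out in chunks, hashing as we go, so the whole
        # file never has to be held in memory.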
        hash_md5 = hashlib.md5()
        for chunk in gridfs_file:
            output_fh.write(chunk)
            hash_md5.update(chunk)

        output_fh.close()
        gridfs_file.close()

        if checkmd5:
            if gridfs_md5 == hash_md5.hexdigest():
                return True
            else:
                print "MD5 mismatch saving file from GridFS to '%s'" % (filename)
                return False
        else:
            return True

    def get_sample(self, accession, file_type):
        """
        :param accession: The accession number of the target sample.
        :type accession: str
        :param file_type: The file type of the target sample.
        :type file_type: str
        :returns: A tuple (valid, msg, data)

        Gets the associated sample based on accession number and file_type.
        For loading input files for workflows, the file_type should be fastq
        or bam.  Other file types can be specified for loading additional
        files saved in the experiment metadata.
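
        For example (the accession is hypothetical)::

            valid, msg, data = mdb.get_sample("ENCFF000XYZ", "fastq")
            # A second identical call is served from the internal cache.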
        """
        valid = True
        msg = ""
        data = {}
        check_cache = self.get_cache(accession, file_type)
        if check_cache is not None:
            msg = "Retrieved data from cache."
            data = check_cache
        else:
            cursor = self.db.samples.find({
                "accession": accession,
                "file_type": file_type
            })
            if cursor.count() == 1:
                data = cursor.next()
                self.add_cache(accession, file_type, data)
            else:
                valid = False
                msg = "Found %s files with accession: %s, file_type: %s. Should only be 1." % (
                    cursor.count(),
                    accession,
                    file_type
                )
        return (valid, msg, data)

    def clean_gfs(self):
        """
        This function finds all files stored in gridfs that are not currently
        referenced by any result file and removes them.
        A clean database is a happy database.
        """
        cursor = self.db.results.aggregate([
            {
                "$group": {
                    "_id": 1,
                    "valid_ids": {"$push": "$gridfs_id"}
                }
            }
        ])
        # Doc contains all our valid ids
        id_doc = cursor.next()
        # Find all fs.files documents
        gfs_cursor = self.db.fs.files.find({
            "_id": {
                "$nin": id_doc["valid_ids"]
            }
        })
        # Iterate through file, delete fs.chunks then fs.files
        total_files = gfs_cursor.count()
        print "Found %s unused gridfs files.  Preparing to delete...." % (total_files,)
        for i, fs_file in enumerate(gfs_cursor):
            progress(i, total_files)
            self.db.fs.chunks.remove({
                "files_id": fs_file["_id"]
            })
            self.db.fs.files.remove({
                "_id": fs_file["_id"]
            })
        progress(total_files, total_files)
        return

    def get_samples(self, experiment_accession, file_type):
        """
        :param experiment_accession: Accession number of the experiment to grab samples from.
        :type experiment_accession: str
        :param file_type: File type of samples to grab, usually fastq or bam.
        :type file_type: str
        :returns: A tuple (valid, msg, data)

        Validates and gets samples for the given experiment.  Experiments must
        have control and signal samples of the provided file_type to be
        considered valid.  Returns a tuple with three values (valid, msg, data)
        valid -- Whether or not the accession / file_type combo is a valid exp
        msg -- Why it is or is not valid
        data -- A dictionary containing a list of all control / sample documents.

        The data dictionary has two keys, "control" and "signal", each one containing
        a list of all metadata related to the experiment samples.  The sample metadata
        is taken directly from Mongo.
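
        The returned data dictionary is shaped like this sketch::

            {
                "control": [...],  # sample documents, straight from Mongo
                "signal": [...]    # sample documents, straight from Mongo
            }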
        """
        valid = True
        msg = ""
        data = {}
        # First, check to make sure the target experiment is valid
        if self.is_valid_experiment(experiment_accession):
            # Next, we check that there is at least 1 possible control
            control_check = self.db.experiments.find({
                "target": {"$exists": True},
                "possible_controls.0": {"$exists": True},
                "@id": "/experiments/%s/" % (experiment_accession,)
            })
            if control_check.count() == 1:
                # Complicated aggregation pipeline does the following steps:
                # 1. Find the experiment that matches the given id
                # 2. Join samples into the collection by exp_id
                # 3. Iterate through possible_controls
                # 4. Join possible_control data into control_exps
                # 5. Iterate through control_exps
                # 6. Join samples into the control_exps by exp_id
                # 7. Re-aggregate all data into arrays
                pipeline = [
                    {
                        "$match": {
                            "target": {"$exists": True},
                            "possible_controls.0": {"$exists": True},
                            "@id": "/experiments/%s/" % (experiment_accession,)
                        }
                    },
                    {
                        "$lookup": {
                            "from": "samples",
                            "localField": "uuid",
                            "foreignField": "experiment_id",
                            "as": "samples"
                        }
                    },
                    {
                        "$unwind": "$possible_controls"
                    },
                    {
                        "$lookup": {
                            "from": "samples",
                            "localField": "possible_controls.uuid",
                            "foreignField": "experiment_id",
                            "as": "possible_controls.samples"
                        }
                    },
                    {
                        "$group": {
                            "_id": "$_id",
                            "possible_controls": {"$push": "$possible_controls"},
                            "samples": {"$push": "$samples"}
                        }
                    }
                ]
                cursor = self.db.experiments.aggregate(pipeline)
                # We should have only 1 document
                document = cursor.next()
                control_inputs = [sample for control in document["possible_controls"] for sample in control["samples"] if ("file_type" in sample and sample["file_type"] == file_type)]
                signal_inputs = [sample for sample in document["samples"][0] if ("file_type" in sample and sample["file_type"] == file_type)]
                if (len(control_inputs) > 0 and len(signal_inputs) > 0):
                    msg = "Successfully retrieved input files for experiment with id '%s'.\n" % (experiment_accession,)
                    data = {
                        "control": control_inputs,
                        "signal": signal_inputs
                    }
                else:
                    valid = False
                    msg = "Experiment with id '%s' has %s possible control inputs, and %s possible signal inputs.\n" % (experiment_accession, len(control_inputs), len(signal_inputs))
            else:
                valid = False
                msg = "Experiment with id '%s' does not have possible_controls.\n" % (experiment_accession,)
        else:
            valid = False
            msg = "Experiment with id '%s' is not valid!  It may not exist, or it may be missing required metadata.\n" % (experiment_accession,)
        return (valid, msg, data)