db.py 20.1 KB
Newer Older
aknecht2's avatar
aknecht2 committed
1
from pymongo import MongoClient
2
import pymongo.errors
3
4
5
import gridfs
import sys
import traceback
6
import os
7
import itertools
8
import time
9
import collections
10
import chipathlon.conf
11
from pprint import pprint
12
import hashlib
13
from chipathlon.utils import progress
aknecht2's avatar
aknecht2 committed
14

15

16
class MongoDB(object):
    """
    :param host: The host address of the MongoDB database.
    :type host: str
    :param username: The username of the account for the MongoDB database.
    :type username: str
    :param password: The password for the user.
    :type password: str
    :param debug: A flag for printing additional messages.
    :type debug: bool

    This class is used to manage all interactions with the encode metadata.
    The metadata can be very unruly and difficult to deal with.  There
    are several helper functions within this class to make some database
    operations much easier.
    """

    def __init__(self, host="localhost", username=None, password=None, debug=False):
        self.debug = debug
        self.host = host
        self.username = username
        self.password = password
        self.client = MongoClient(host)
        self.db = self.client.chipseq
        # Per-accession cache of fetched documents, keyed [accession][file_type].
        self.cache = collections.defaultdict(dict)
        if username and password:
            try:
                self.db.authenticate(username, password, mechanism="SCRAM-SHA-1")
            # Narrowed from a bare except: only trap database errors so
            # programming mistakes (NameError etc.) are not silently hidden.
            except pymongo.errors.PyMongoError:
                print("Could not authenticate to db %s!" % (host,))
                print(traceback.format_exc())
                sys.exit(1)
        self.gfs = gridfs.GridFS(self.db)
        return
    def add_cache(self, accession, file_type, data):
52
        """
53
54
55
56
        :param accession: The accession of the file to store.
        :type accession: str
        :param file_type: The type of file to store.
        :type file_type: str
57
58
        :param data: The data to add to the cache.
        :type data: Object
aknecht2's avatar
aknecht2 committed
59
60
61
62
63
64

        Adds a data result to the internal cache.  This is used to speed up
        requests that are identical.  We may have multiple runs that use
        identical control / signal files but change around the alignment or
        peak calling tools.  In these cases we don't want to request info
        from the database multiple times for the same data.
65
        """
66
        self.cache[accession][file_type] = data
67
68
        return

69
    def get_cache(self, accession, file_type):
70
        """
71
72
73
74
        :param accession: The accession of the file to retrieve.
        :type accession: str
        :param file_type: The type of file to retrieve.
        :type file_type: str
aknecht2's avatar
aknecht2 committed
75
76

        Gets a data item from the internal cache.
77
        """
78
79
        if accession in self.cache:
            return self.cache[accession].get(file_type)
80
81
        return None

82
    def delete_result(self, result, genome):
        """
        :param result: The result to delete
        :type result: :py:class:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:class:`~chipathlon.genome.Genome`

        Deletes a result and its corresponding gridfs entry.
        """
        # Bug fix: was `self.get_reuslt_id(...)` -- a typo that raised
        # AttributeError since no such method exists.
        result_id = self.get_result_id(result, genome)
        cursor = self.db.results.find({
            "_id": result_id
        })
        if cursor.count() == 1:
            result = cursor.next()
            # Remove the gridfs payload first, then the metadata document.
            self.gfs.delete(result["gridfs_id"])
            self.db.results.delete_one({"_id": result["_id"]})
        else:
            print("result_id %s doesn't exist." % (result_id,))
        return
    def _get_result_query(self, result, genome):
        query = {
            "result_type": result.file_type,
            "assembly": genome.assembly,
            "timestamp": {"$exists": True},
            "file_name": result.full_name
        }
        # In the case that there are 0 samples we just want to check for existence.
        control_sample_accessions = result.get_accessions("control")
        signal_sample_accessions = result.get_accessions("signal")
        query["control_sample_accessions"] = {"$all": control_sample_accessions} if (len(control_sample_accessions) > 0) else {"$exists": True}
        query["signal_sample_accessions"] = {"$all": signal_sample_accessions} if (len(signal_sample_accessions) > 0) else {"$exists": True}
        for job in result.all_jobs:
            job_args = job.get_db_arguments()
            arg_keys = job_args.keys()
            if len(arg_keys) == 0:
                query[job.job_name] = {"$exists": True}
            else:
                for arg_name in arg_keys:
                    query[job.job_name + "." + arg_name] = job_args[arg_name]
123
124
        if self.debug:
            print "Result query: %s" % (query,)
125
126
127
        return query

    def result_exists(self, result, genome):
        """
        :param result: The result to check.
        :type result: :py:meth:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:meth:`~chipathlon.genome.Genome`
        :returns: Whether or not the result exists.

        Check if a result exists in the database.  The genome parameter
        is required since some files have been aligned or use individual
        chromsome fasta or size files for peak calling.
        """
        try:
            cursor = self.db.results.find(self._get_result_query(result, genome))
            return cursor.count() > 0
        except pymongo.errors.OperationFailure as e:
            # Bug fix: previously referenced an undefined `file_name`,
            # which raised NameError inside this handler.
            print("Error checking result [%s]: %s" % (result.full_name, e))
        return False
    def get_result_id(self, result, genome):
        """
        :param result: The result to check.
        :type result: :py:meth:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:meth:`~chipathlon.genome.Genome`
        :returns: The id found or None

        Get the id of a result in the database.
        """
        try:
            cursor = self.db.results.find(self._get_result_query(result, genome))
            if cursor.count() == 1:
                # Bug fix: was `cursor._id` -- a Cursor has no _id attribute;
                # the id lives on the returned document.
                return cursor.next()["_id"]
        except pymongo.errors.OperationFailure as e:
            # Bug fix: `file_name` was undefined here; use the result's name.
            print("Error getting result id [%s]: %s" % (result.full_name, e))
        return None
    def get_result(self, result, genome):
        """
        :param result: The result to check.
        :type result: :py:meth:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:meth:`~chipathlon.genome.Genome`
        :returns: The result document or None.

        Get the metadata for the result from the database.  If multiple results
        exist, the most recently saved result is returned.
        """
        try:
            cursor = self.db.results.find(self._get_result_query(result, genome))
            if cursor.count() > 0:
                # Sort newest-first so .next() yields the latest result.
                return cursor.sort("timestamp", pymongo.DESCENDING).next()
        except pymongo.errors.OperationFailure as e:
            # Bug fix: `file_name` was undefined here; use the result's name.
            print("Error checking result [%s]: %s" % (result.full_name, e))
        return None
    def save_result(self, output_file, control_sample_accessions, signal_sample_accessions, result_type, additional_data = {}, gfs_attributes = {}):
        """
        :param output_file: The path to the result to save.
        :type output_file: str
        :param control_sample_accessions: A list of control accessions.
        :type control_sample_accessions: list
        :param signal_sample_accessions: A list of signal accessions.
        :type signal_sample_accessions: list
        :param result_type: Useful file type info
        :type result_type: str
        :param additional_data: Additional metadata to store in mongo.
        :type additional_data: dict
        :param gfs_attributes: Additional metadata to store in gridfs.
        :type gfs_attributes: dict

aknecht2's avatar
aknecht2 committed
196
197
198
199
200
        Saves a result entry into MongodDB and uploads the file into gridfs.
        The only difference between additional_data and gfs_attributes is the
        location the metadata is stored.  Both just store key value pairs of
        information, the additional_data information is stored in the result
        entry, the gfs_attributes information is stored in gridfs.
201
        """
202
203
        # Make sure output_file exists
        if os.path.isfile(output_file):
204
            # Make sure that all control_sample_accessions & signal_sample_accessions are valid
205
            # REMEMBER, these are ids for control & experiment SAMPLES
206
207
            valid_controls = [self.is_valid_sample(cid) for cid in control_sample_accessions]
            valid_experiments = [self.is_valid_sample(eid) for eid in signal_sample_accessions]
208
            if all(valid_controls) and all(valid_experiments):
209
                gfs_attributes["file_type"] = result_type
210
211
212
213
214
215
216
                # First, we load the output file into gfs
                with open(output_file, "r") as rh:
                    # Calling put returns the gfs id
                    gridfs_id = self.gfs.put(rh, filename=os.path.basename(output_file), **gfs_attributes)
                # Now, we create the actual result entry by combining all necessary info
                result_entry = {
                    "gridfs_id": gridfs_id,
217
218
                    "control_sample_accessions": control_sample_accessions,
                    "signal_sample_accessions": signal_sample_accessions,
219
220
221
                    "result_type": result_type,
                    "file_name": output_file,
                    "timestamp": time.time()
222
223
224
225
226
227
228
                }
                # Add additional attributes into the result_entry
                result_entry.update(additional_data)
                # Insert the entry into the database, and return the id
                result = self.db.results.insert_one(result_entry)
                return (True, "Result created successfully.", result.inserted_id)
            else:
229
                msg = "Not all input ids are valid.  The following are invalid: "
230
                for id_list, valid_list in zip([control_sample_accessions, signal_sample_accessions], [valid_controls, valid_experiments]):
231
                    msg += ", ".join([id_list[i] for i, valid in enumerate(valid_list) if not valid])
232
233
234
235
        else:
            msg = "Specified output_file %s does not exist." % (output_file,)
        return (False, msg, None)

236
    def is_valid_sample(self, sample_accession):
        """
        :param sample_accession: The accession number to check.
        :type sample_accession: str
        :returns: Whether or not the sample is valid.

        Ensures that a sample with the accession specified actually exists.
        """
        try:
            cursor = self.db.samples.find({
                "accession": sample_accession
            })
            # Exactly one matching sample document means the accession is valid.
            if cursor.count() == 1:
                return True
        except pymongo.errors.OperationFailure as e:
            print("Error with sample_accession %s: %s" % (sample_accession, e))
        return False
    def is_valid_experiment(self, experiment_accession):
        """
        :param experiment_accession: The accession number to check.
        :type experiment_accession: str
        :returns: Whether or not the experiment is valid

        Ensures that an experiment with the accession specified actually exists.
        """
        try:
            cursor = self.db.experiments.find({
                # A usable experiment must have target metadata present.
                "target": {"$exists": True},
                "@id": "/experiments/%s/" % (experiment_accession,)
            })
            if cursor.count() == 1:
                return True
        except pymongo.errors.OperationFailure as e:
            print("Error with experiment_accession %s: %s" % (experiment_accession, e))
        return False
    def fetch_from_gridfs(self, gridfs_id, filename, checkmd5=True):
        """
        :param gridfs_id: GridFS _id of file to get.
        :type gridfs_id: :py:class:`bson.objectid.ObjectId`
        :param filename: Filename to save file to.
        :type filename: str
        :param checkmd5: Whether or not to validate the md5 of the result
        :type checkmd5: bool

        Fetch the file with the corresponding id and save it under the
        specified 'filename'.  If checkmd5 is specified, validate that the
        saved file has a correct md5 value.
        """
        try:
            gridfs_file = self.gfs.get(gridfs_id)
            gridfs_md5 = gridfs_file.md5
        except gridfs.errors.NoFile as e:
            print("Error fetching file from GridFS!\nNo file with ID '%s'" % (gridfs_id))
            print(e)
            sys.exit(1)

        hash_md5 = hashlib.md5()
        try:
            # Bug fix: use a context manager so the output handle is closed
            # even if a write fails part-way through (was only closed on the
            # success path before).
            with open(filename, 'wb') as output_fh:
                for chunk in gridfs_file:
                    output_fh.write(chunk)
                    hash_md5.update(chunk)
        except IOError as e:
            print("Error creating GridFS output file '%s':" % (filename))
            print((e.errno, e.strerror))
            sys.exit(1)
        finally:
            gridfs_file.close()

        if checkmd5:
            # Compare the md5 recorded in GridFS against what we just wrote.
            if gridfs_md5 == hash_md5.hexdigest():
                return True
            else:
                print("MD5 mismatch saving file from GridFS to '%s'" % (filename))
                return False
        else:
            return True
    def get_sample(self, accession, file_type):
        """
        :param accession: The accession number of the target sample
        :type accession: string
aknecht2's avatar
aknecht2 committed
322
        :param file_type: The file type of the target sample.
323
        :type file_type: string
324
        :returns: A tuple (valid, msg, data)
325

aknecht2's avatar
aknecht2 committed
326
327
328
329
        Gets the associated sample based on accession number and file_type.
        For loading input files for workflows the file_type should be fastq
        or bam.  Other file types can be specified for loading additional files
        saved in the experiment metadata.
330
331
332
333
        """
        valid = True
        msg = ""
        data = {}
334
        check_cache = self.get_cache(accession, file_type)
335
336
337
        if check_cache is not None:
            msg = "Retrieved data from cache."
            data = check_cache
338
        else:
339
340
341
342
343
344
            cursor = self.db.samples.find({
                "accession": accession,
                "file_type": file_type
            })
            if cursor.count() == 1:
                data = cursor.next()
345
                self.add_cache(accession, file_type, data)
346
347
348
349
350
351
352
            else:
                valid = False
                msg = "Found %s files with accession: %s, file_type: %s. Should only be 1." % (
                    cursor.count(),
                    accession,
                    file_type
                )
353
354
        return (valid, msg, data)

355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
    def clean_gfs(self):
        """
        This function finds all files stored in gridfs that are not currently
        referenced by any result file and removes them.
        A clean database is a happy database.
        """
        # Collect every gridfs_id referenced by any result into one document.
        cursor = self.db.results.aggregate([
            {
                "$group": {
                    "_id": 1,
                    "valid_ids": {"$push": "$gridfs_id"}
                }
            }
        ])
        # Doc contains all our valid ids
        id_doc = cursor.next()
        # Find all fs.files documents that no result references
        gfs_cursor = self.db.fs.files.find({
            "_id": {
                "$nin": id_doc["valid_ids"]
            }
        })
        # Iterate through file, delete fs.chunks then fs.files
        total_files = gfs_cursor.count()
        print("Found %s unused gridfs files.  Preparing to delete...." % (total_files,))
        for i, fs_file in enumerate(gfs_cursor):
            progress(i, total_files)
            self.db.fs.chunks.remove({
                "files_id": fs_file["_id"]
            })
            self.db.fs.files.remove({
                "_id": fs_file["_id"]
            })
        progress(total_files, total_files)
        return
    def get_samples(self, experiment_accession, file_type):
        """
        :param experiment_accession: Accession number of the experiment to grab samples from.
        :type experiment_accession: str
        :param file_type: File type of samples to grab usually fastq or bam
        :type file_type: str
        :returns: A tuple (valid, msg, data)

        Validates and gets samples for the given experiment.  Experiments must
        have control and signal samples of the provided file_type to be
        considered valid.  Returns a tuple with three values (valid, msg, data)
        valid -- Whether or not the accession / file_type combo is a valid exp
        msg -- Why it is or is not valid
        data -- A dictionary containing a list of all control / sample documents.

        The data dictionary has two keys, "control" and "signal", each one containing
        a list of all metadata related to the experiment samples.  The sample metadata
        is taken directly from Mongo.
        """
        # First, check to make sure the target experiment is valid
        if not self.is_valid_experiment(experiment_accession):
            msg = "Experiment with id '%s' is not valid!  It may not exist, or it may be missing required metadata.\n" % (experiment_accession,)
            return (False, msg, {})
        # Next, we check that there is a least 1 possible control.  The same
        # criteria are reused as the $match stage of the pipeline below.
        experiment_match = {
            "target": {"$exists": True},
            "possible_controls.0": {"$exists": True},
            "@id": "/experiments/%s/" % (experiment_accession,)
        }
        control_check = self.db.experiments.find(experiment_match)
        if control_check.count() != 1:
            msg = "Experiment with id '%s' does not have possible_controls.\n" % (experiment_accession,)
            return (False, msg, {})
        # Complicated aggregtaion pipeline does the following steps:
        # 1. Find the experiment that matches the given id
        # 2. Join samples into the collection by exp_id
        # 3. Iterate through possible_controls
        # 4. Join possible_control data into control_exps
        # 5. Iterate through control_exps
        # 6. Join samples into the control_exps by exp_id
        # 7. Re-aggregate all data into arrays
        pipeline = [
            {"$match": experiment_match},
            {
                "$lookup": {
                    "from": "samples",
                    "localField": "uuid",
                    "foreignField": "experiment_id",
                    "as": "samples"
                }
            },
            {"$unwind": "$possible_controls"},
            {
                "$lookup": {
                    "from": "samples",
                    "localField": "possible_controls.uuid",
                    "foreignField": "experiment_id",
                    "as": "possible_controls.samples"
                }
            },
            {
                "$group": {
                    "_id": "$_id",
                    "possible_controls": {"$push": "$possible_controls"},
                    "samples": {"$push": "$samples"}
                }
            }
        ]
        # We should have only 1 document
        document = self.db.experiments.aggregate(pipeline).next()
        control_inputs = [
            sample
            for control in document["possible_controls"]
            for sample in control["samples"]
            if ("file_type" in sample and sample["file_type"] == file_type)
        ]
        signal_inputs = [
            sample
            for sample in document["samples"][0]
            if ("file_type" in sample and sample["file_type"] == file_type)
        ]
        if (len(control_inputs) > 0 and len(signal_inputs) > 0):
            msg = "Succesfully retrieved input files for experiment with id '%s'.\n" % (experiment_accession,)
            return (True, msg, {"control": control_inputs, "signal": signal_inputs})
        msg = "Experiment with id '%s' has %s possible control inputs, and %s possible signal inputs.\n" % (experiment_accession, len(control_inputs), len(signal_inputs))
        return (False, msg, {})