db.py 20.2 KB
Newer Older
aknecht2's avatar
aknecht2 committed
1
from pymongo import MongoClient
2
import pymongo.errors
3
4
5
import gridfs
import sys
import traceback
6
import os
7
import itertools
8
import time
9
import collections
10
import chipathlon.conf
11
from pprint import pprint
12
import hashlib
13
from chipathlon.utils import progress
aknecht2's avatar
aknecht2 committed
14

15

16
class MongoDB(object):
aknecht2's avatar
aknecht2 committed
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
    """
    :param host: The host address of the MongoDB database.
    :type host: str
    :param username: The username of the account for the MongoDB database.
    :type username: str
    :param password: The password for the user.
    :type password: str
    :param debug: A flag for printing additional messages.
    :type debug: bool

    This class is used to manage all interactions with the encode metadata.
    The metadata can be very unruly and difficult to deal with.  There
    are several helper functions within this class to make some database
    operations much easier.
    """
32

33
    def __init__(self, host="localhost", username=None, password=None, debug=False):
        """
        :param host: The host address of the MongoDB database.
        :type host: str
        :param username: The username of the account for the MongoDB database.
        :type username: str
        :param password: The password for the user.
        :type password: str
        :param debug: A flag for printing additional messages.
        :type debug: bool

        Connects to the chipseq database on the given host, optionally
        authenticating, and sets up the GridFS handle and the sample cache.
        """
        self.debug = debug
        self.host = host
        self.username = username
        self.password = password
        self.client = MongoClient(host)
        self.db = self.client.chipseq
        # Per-accession / per-file-type cache of query results.  A
        # defaultdict(dict) lets add_cache assign two levels deep without
        # pre-creating the inner dict.  (The original re-assigned
        # self.cache = {} a few lines later, which silently discarded the
        # defaultdict and broke add_cache with a KeyError.)
        self.cache = collections.defaultdict(dict)
        if username and password:
            try:
                self.db.authenticate(username, password, mechanism="SCRAM-SHA-1")
            except Exception:
                # Authentication failure is fatal: no later call can work.
                # Exception (not bare except) so Ctrl-C still interrupts.
                print("Could not authenticate to db %s!" % (host,))
                print(traceback.format_exc())
                sys.exit(1)
        self.gfs = gridfs.GridFS(self.db)
        return

52
    def add_cache(self, accession, file_type, data):
53
        """
54
55
56
57
        :param accession: The accession of the file to store.
        :type accession: str
        :param file_type: The type of file to store.
        :type file_type: str
58
59
        :param data: The data to add to the cache.
        :type data: Object
aknecht2's avatar
aknecht2 committed
60
61
62
63
64
65

        Adds a data result to the internal cache.  This is used to speed up
        requests that are identical.  We may have multiple runs that use
        identical control / signal files but change around the alignment or
        peak calling tools.  In these cases we don't want to request info
        from the database multiple times for the same data.
66
        """
67
        self.cache[accession][file_type] = data
68
69
        return

70
    def get_cache(self, accession, file_type):
71
        """
72
73
74
75
        :param accession: The accession of the file to retrieve.
        :type accession: str
        :param file_type: The type of file to retrieve.
        :type file_type: str
aknecht2's avatar
aknecht2 committed
76
77

        Gets a data item from the internal cache.
78
        """
79
80
        if accession in self.cache:
            return self.cache[accession].get(file_type)
81
82
        return None

83
    def delete_result(self, result, genome):
        """
        :param result: The result to delete
        :type result: :py:class:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:class:`~chipathlon.genome.Genome`

        Deletes a result and its corresponding gridfs entry.
        """
        # Fixed typo: was self.get_reuslt_id, which raised AttributeError
        # on every call.
        result_id = self.get_result_id(result, genome)
        cursor = self.db.results.find({
            "_id": result_id
        })
        if cursor.count() == 1:
            result = cursor.next()
            # Remove the gridfs payload first, then the metadata entry.
            self.gfs.delete(result["gridfs_id"])
            self.db.results.delete_one({"_id": result["_id"]})
        else:
            print("result_id %s doesn't exist." % (result_id,))
        return

104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
    def _get_result_query(self, result, genome):
        query = {
            "result_type": result.file_type,
            "assembly": genome.assembly,
            "timestamp": {"$exists": True},
            "file_name": result.full_name
        }
        # In the case that there are 0 samples we just want to check for existence.
        control_sample_accessions = result.get_accessions("control")
        signal_sample_accessions = result.get_accessions("signal")
        query["control_sample_accessions"] = {"$all": control_sample_accessions} if (len(control_sample_accessions) > 0) else {"$exists": True}
        query["signal_sample_accessions"] = {"$all": signal_sample_accessions} if (len(signal_sample_accessions) > 0) else {"$exists": True}
        for job in result.all_jobs:
            job_args = job.get_db_arguments()
            arg_keys = job_args.keys()
            if len(arg_keys) == 0:
                query[job.job_name] = {"$exists": True}
            else:
                for arg_name in arg_keys:
                    query[job.job_name + "." + arg_name] = job_args[arg_name]
124
125
        if self.debug:
            print "Result query: %s" % (query,)
126
127
128
        return query

    def result_exists(self, result, genome):
        """
        :param result: The result to check.
        :type result: :py:meth:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:meth:`~chipathlon.genome.Genome`
        :returns: True if at least one matching result exists.

        Check if a result exists in the database.  The genome parameter
        is required since some files have been aligned or use individual
        chromsome fasta or size files for peak calling.
        """
        try:
            cursor = self.db.results.find(self._get_result_query(result, genome))
            return cursor.count() > 0
        except pymongo.errors.OperationFailure as e:
            # Was "file_name" -- an undefined name that raised NameError
            # whenever this error path was actually hit.
            print("Error checking result [%s]: %s" % (result.full_name, e))
        return False

146
147
148
    def get_result_id(self, result, genome):
        """
        :param result: The result to check.
        :type result: :py:meth:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:meth:`~chipathlon.genome.Genome`
        :returns: The id found or None

        Get the id of a result in the database.  Only returns an id when
        exactly one result matches the query.
        """
        try:
            cursor = self.db.results.find(self._get_result_query(result, genome))
            if cursor.count() == 1:
                # Was "cursor._id" -- cursors have no _id attribute; the id
                # lives on the fetched document.
                return cursor.next()["_id"]
        except pymongo.errors.OperationFailure as e:
            # Was "file_name", an undefined name (NameError on error path).
            print("Error getting result id [%s]: %s" % (result.full_name, e))
        return None
163
164

    def get_result(self, result, genome):
        """
        :param result: The result to check.
        :type result: :py:meth:`~chipathlon.result.Result`
        :param genome: The genome to find information from.
        :type genome: :py:meth:`~chipathlon.genome.Genome`
        :returns: The most recently saved matching result document, or None.

        Get the metadata for the result from the database.  If multiple results
        exist, the most recently saved result is returned.
        """
        try:
            cursor = self.db.results.find(self._get_result_query(result, genome))
            if cursor.count() > 0:
                # Newest first by the timestamp written in save_result.
                return cursor.sort("timestamp", pymongo.DESCENDING).next()
        except pymongo.errors.OperationFailure as e:
            # Was "file_name" -- an undefined name that raised NameError
            # whenever this error path was actually hit.
            print("Error checking result [%s]: %s" % (result.full_name, e))
        return None
181

182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
    def save_result(self, output_file, control_sample_accessions, signal_sample_accessions, result_type, additional_data = {}, gfs_attributes = {}):
        """
        :param output_file: The path to the result to save.
        :type output_file: str
        :param control_sample_accessions: A list of control accessions.
        :type control_sample_accessions: list
        :param signal_sample_accessions: A list of signal accessions.
        :type signal_sample_accessions: list
        :param result_type: Useful file type info
        :type result_type: str
        :param additional_data: Additional metadata to store in mongo.
        :type additional_data: dict
        :param gfs_attributes: Additional metadata to store in gridfs.
        :type gfs_attributes: dict

aknecht2's avatar
aknecht2 committed
197
198
199
200
201
        Saves a result entry into MongodDB and uploads the file into gridfs.
        The only difference between additional_data and gfs_attributes is the
        location the metadata is stored.  Both just store key value pairs of
        information, the additional_data information is stored in the result
        entry, the gfs_attributes information is stored in gridfs.
202
        """
203
204
        # Make sure output_file exists
        if os.path.isfile(output_file):
205
            # Make sure that all control_sample_accessions & signal_sample_accessions are valid
206
            # REMEMBER, these are ids for control & experiment SAMPLES
207
208
            valid_controls = [self.is_valid_sample(cid) for cid in control_sample_accessions]
            valid_experiments = [self.is_valid_sample(eid) for eid in signal_sample_accessions]
209
            if all(valid_controls) and all(valid_experiments):
210
                gfs_attributes["file_type"] = result_type
211
212
213
214
215
216
217
                # First, we load the output file into gfs
                with open(output_file, "r") as rh:
                    # Calling put returns the gfs id
                    gridfs_id = self.gfs.put(rh, filename=os.path.basename(output_file), **gfs_attributes)
                # Now, we create the actual result entry by combining all necessary info
                result_entry = {
                    "gridfs_id": gridfs_id,
218
219
                    "control_sample_accessions": control_sample_accessions,
                    "signal_sample_accessions": signal_sample_accessions,
220
221
222
                    "result_type": result_type,
                    "file_name": output_file,
                    "timestamp": time.time()
223
224
225
226
227
228
229
                }
                # Add additional attributes into the result_entry
                result_entry.update(additional_data)
                # Insert the entry into the database, and return the id
                result = self.db.results.insert_one(result_entry)
                return (True, "Result created successfully.", result.inserted_id)
            else:
230
                msg = "Not all input ids are valid.  The following are invalid: "
231
                for id_list, valid_list in zip([control_sample_accessions, signal_sample_accessions], [valid_controls, valid_experiments]):
232
                    msg += ", ".join([id_list[i] for i, valid in enumerate(valid_list) if not valid])
233
234
235
236
        else:
            msg = "Specified output_file %s does not exist." % (output_file,)
        return (False, msg, None)

237
    def is_valid_sample(self, sample_accession):
        """
        :param sample_accession: The accession number to check.
        :type sample_accession: str
        :returns: Whether or not the sample is valid.

        Ensures that a sample with the accession specified actually exists.
        """
        try:
            matches = self.db.samples.find({"accession": sample_accession})
            # Valid only when exactly one sample document carries the
            # accession; zero or duplicates are both treated as invalid.
            return matches.count() == 1
        except pymongo.errors.OperationFailure as e:
            print("Error with sample_accession %s: %s" % (sample_accession, e))
            return False

255
256
257
258
    def is_valid_experiment(self, experiment_accession):
        """
        :param experiment_accession: The accession number to check.
        :type experiment_accession: str
        :returns: Whether or not the experiment is valid

        Ensures that an experiment with the accession specified actually exists.
        """
        experiment_query = {
            "target": {"$exists": True},
            "@id": "/experiments/%s/" % (experiment_accession,)
        }
        try:
            # Exactly one matching experiment (with a target field) is valid.
            return self.db.experiments.find(experiment_query).count() == 1
        except pymongo.errors.OperationFailure as e:
            print("Error with experiment_accession %s: %s" % (experiment_accession, e))
            return False

Adam Caprez's avatar
Adam Caprez committed
274
    def fetch_from_gridfs(self, gridfs_id, filename, checkmd5=True):
        """
        :param gridfs_id: GridFS _id of file to get.
        :type gridfs_id: :py:class:`bson.objectid.ObjectId`
        :param filename: Filename to save file to.
        :type filename: str
        :param checkmd5: Whether or not to validate the md5 of the result
        :type checkmd5: bool
        :returns: True on success, False on an md5 mismatch.

        Fetch the file with the corresponding id and save it under the
        specified 'filename'.  If checkmd5 is specified, validate that the
        saved file has a correct md5 value.
        """
        try:
            gridfs_file = self.gfs.get(gridfs_id)
            gridfs_md5 = gridfs_file.md5
        except gridfs.errors.NoFile as e:
            print("Error fetching file from GridFS!\nNo file with ID '%s'" % (gridfs_id))
            print(e)
            sys.exit(1)

        hash_md5 = hashlib.md5()
        try:
            # "with" guarantees the output handle is closed even if a write
            # fails part-way (the original leaked the handle on error paths).
            with open(filename, 'wb') as output_fh:
                for chunk in gridfs_file:
                    output_fh.write(chunk)
                    hash_md5.update(chunk)
        except IOError as e:
            print("Error creating GridFS output file '%s':" % (filename))
            print((e.errno, e.strerror))
            sys.exit(1)
        finally:
            # Always release the gridfs handle, success or failure.
            gridfs_file.close()

        if checkmd5:
            if gridfs_md5 == hash_md5.hexdigest():
                return True
            print("MD5 mismatch saving file from GridFS to '%s'" % (filename))
            return False
        return True
318

319
320
321
322
    def get_sample(self, accession, file_type):
        """
        :param accession: The accession number of the target sample
        :type accession: string
aknecht2's avatar
aknecht2 committed
323
        :param file_type: The file type of the target sample.
324
        :type file_type: string
325
        :returns: A tuple (valid, msg, data)
326

aknecht2's avatar
aknecht2 committed
327
328
329
330
        Gets the associated sample based on accession number and file_type.
        For loading input files for workflows the file_type should be fastq
        or bam.  Other file types can be specified for loading additional files
        saved in the experiment metadata.
331
332
333
334
        """
        valid = True
        msg = ""
        data = {}
335
        check_cache = self.get_cache(accession, file_type)
336
337
338
        if check_cache is not None:
            msg = "Retrieved data from cache."
            data = check_cache
339
        else:
340
341
342
343
344
345
            cursor = self.db.samples.find({
                "accession": accession,
                "file_type": file_type
            })
            if cursor.count() == 1:
                data = cursor.next()
346
                self.add_cache(accession, file_type, data)
347
348
349
350
351
352
353
            else:
                valid = False
                msg = "Found %s files with accession: %s, file_type: %s. Should only be 1." % (
                    cursor.count(),
                    accession,
                    file_type
                )
354
355
        return (valid, msg, data)

356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
    def clean_gfs(self):
        """
        This function finds all files stored in gridfs that are not currently
        referenced by any result file and removes them.
        A clean database is a happy database.
        """
        cursor = self.db.results.aggregate([
            {
                "$group": {
                    "_id": 1,
                    "valid_ids": {"$push": "$gridfs_id"}
                }
            }
        ])
        # $group emits no document at all when the results collection is
        # empty; the unguarded cursor.next() used to raise StopIteration.
        # Treat that case as "no ids are referenced" so every gridfs file
        # is eligible for cleanup.
        try:
            valid_ids = cursor.next()["valid_ids"]
        except StopIteration:
            valid_ids = []
        # Find all fs.files documents not referenced by any result.
        gfs_cursor = self.db.fs.files.find({
            "_id": {
                "$nin": valid_ids
            }
        })
        # Iterate through files, delete fs.chunks then fs.files
        total_files = gfs_cursor.count()
        print("Found %s unused gridfs files.  Preparing to delete...." % (total_files,))
        for i, fs_file in enumerate(gfs_cursor):
            progress(i, total_files)
            self.db.fs.chunks.remove({
                "files_id": fs_file["_id"]
            })
            self.db.fs.files.remove({
                "_id": fs_file["_id"]
            })
        progress(total_files, total_files)
        return

392
393
394
395
396
397
    def get_samples(self, experiment_accession, file_type):
        """
        :param experiment_accession: Accession number of the experiment to grab samples from.
        :type experiment_accession: str
        :param file_type: File type of samples to grab usually fastq or bam
        :type file_type: str
        :returns: A tuple (valid, msg, data)

        Validates and gets samples for the given experiment.  Experiments must
        have control and signal samples of the provided file_type to be
        considered valid.  Returns a tuple with three values (valid, msg, data)
        valid -- Whether or not the accession / file_type combo is a valid exp
        msg -- Why it is or is not valid
        data -- A dictionary containing a list of all control / sample documents.

        The data dictionary has two keys, "control" and "signal", each one containing
        a list of all metadata related to the experiment samples.  The sample metadata
        is taken directly from Mongo.
        """
        valid = True
        msg = ""
        data = {}
        # First, check to make sure the target experiment is valid
        if self.is_valid_experiment(experiment_accession):
            # Next, we check that there is at least 1 possible control
            # ("possible_controls.0" requires a non-empty array).
            check3 = self.db.experiments.find({
                "target": {"$exists": True},
                "possible_controls.0": {"$exists": True},
                "@id": "/experiments/%s/" % (experiment_accession,)
            })
            if check3.count() == 1:
                # Complicated aggregation pipeline does the following steps:
                # 1. Find the experiment that matches the given id
                # 2. Join samples into the collection by exp_id
                # 3. Iterate through possible_controls
                # 4. Join possible_control data into control_exps
                # 5. Iterate through control_exps
                # 6. Join samples into the control_exps by exp_id
                # 7. Re-aggregate all data into arrays
                pipeline = [
                    {
                        "$match": {
                            "target": {"$exists": True},
                            "possible_controls.0": {"$exists": True},
                            "@id": "/experiments/%s/" % (experiment_accession,)
                        }
                    },
                    {
                        "$lookup": {
                            "from": "samples",
                            "localField": "uuid",
                            "foreignField": "experiment_id",
                            "as": "samples"
                        }
                    },
                    {
                        "$unwind": "$possible_controls"
                    },
                    {
                        "$lookup": {
                            "from": "samples",
                            "localField": "possible_controls.uuid",
                            "foreignField": "experiment_id",
                            "as": "possible_controls.samples"
                        }
                    },
                    {
                        "$group": {
                            "_id": "$_id",
                            "possible_controls": {"$push": "$possible_controls"},
                            "samples": {"$push": "$samples"}
                        }
                    }
                ]
                cursor = self.db.experiments.aggregate(pipeline)
                # We should have only 1 document
                document = cursor.next()
                # Flatten every control experiment's samples, keeping only
                # those with the requested file_type.
                control_inputs = [sample for control in document["possible_controls"] for sample in control["samples"] if ("file_type" in sample and sample["file_type"] == file_type)]
                # The $group stage pushed one copy of the experiment's
                # samples array per unwound control, so every entry of
                # document["samples"] is the same list -- take the first.
                signal_inputs = [sample for sample in document["samples"][0] if ("file_type" in sample and sample["file_type"] == file_type)]
                if (len(control_inputs) > 0 and len(signal_inputs) > 0):
                    msg = "Succesfully retrieved input files for experiment with id '%s'.\n" % (experiment_accession,)
                    data = {
                        "control": control_inputs,
                        "signal": signal_inputs
                    }
                else:
                    valid = False
                    msg = "Experiment with id '%s' has %s possible control inputs, and %s possible signal inputs.\n" % (experiment_accession, len(control_inputs), len(signal_inputs))
            else:
                valid = False
                msg = "Experiment with id '%s' does not have possible_controls.\n" % (experiment_accession,)
        else:
            valid = False
            msg = "Experiment with id '%s' is not valid!  It may not exist, or it may be missing required metadata.\n" % (experiment_accession,)
        return (valid, msg, data)